diff --git a/.all-contributorsrc b/.all-contributorsrc index a3b85cae68..b30f3b2499 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -309,7 +309,18 @@ "contributions": [ "code" ] + }, + { + "login": "Tilix4", + "name": "Félix David", + "avatar_url": "https://avatars.githubusercontent.com/u/22875539?v=4", + "profile": "http://felixdavid.com/", + "contributions": [ + "code", + "doc" + ] } ], - "contributorsPerLine": 7 -} \ No newline at end of file + "contributorsPerLine": 7, + "skipCi": true +} diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 6ed6ae428c..96e768e420 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -6,6 +6,8 @@ labels: bug assignees: '' --- +**Running version** +[ex. 3.14.1-nightly.2] **Describe the bug** A clear and concise description of what the bug is. diff --git a/.github/workflows/milestone_assign.yml b/.github/workflows/milestone_assign.yml new file mode 100644 index 0000000000..4b52dfc30d --- /dev/null +++ b/.github/workflows/milestone_assign.yml @@ -0,0 +1,28 @@ +name: Milestone - assign to PRs + +on: + pull_request_target: + types: [closed] + +jobs: + run_if_release: + if: startsWith(github.base_ref, 'release/') + runs-on: ubuntu-latest + steps: + - name: 'Assign Milestone [next-minor]' + if: github.event.pull_request.milestone == null + uses: zoispag/action-assign-milestone@v1 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + milestone: 'next-minor' + + run_if_develop: + if: ${{ github.base_ref == 'develop' }} + runs-on: ubuntu-latest + steps: + - name: 'Assign Milestone [next-patch]' + if: github.event.pull_request.milestone == null + uses: zoispag/action-assign-milestone@v1 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + milestone: 'next-patch' \ No newline at end of file diff --git a/.github/workflows/milestone_create.yml b/.github/workflows/milestone_create.yml new file mode 100644 index 0000000000..b56ca81dc1 --- /dev/null +++ b/.github/workflows/milestone_create.yml @@ -0,0 +1,62 @@ +name: Milestone - create default + +on: + milestone: + types: [closed, edited] + +jobs: + generate-next-patch: + runs-on: ubuntu-latest + steps: + - name: 'Get Milestones' + uses: "WyriHaximus/github-action-get-milestones@master" + id: milestones + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + + - run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number') + id: querymilestone + env: + MILESTONES: ${{ steps.milestones.outputs.milestones }} + MILESTONE: "next-patch" + + - name: Read output + run: | + echo "${{ steps.querymilestone.outputs.number }}" + + - name: 'Create `next-patch` milestone' + if: steps.querymilestone.outputs.number == '' + id: createmilestone + uses: "WyriHaximus/github-action-create-milestone@v1" + with: + title: 'next-patch' + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + + generate-next-minor: + runs-on: ubuntu-latest + steps: + - name: 'Get Milestones' + uses: "WyriHaximus/github-action-get-milestones@master" + id: milestones + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + + - run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number') + id: querymilestone + env: + MILESTONES: ${{ steps.milestones.outputs.milestones }} + MILESTONE: "next-minor" + + - name: Read output + run: | + echo "${{ steps.querymilestone.outputs.number }}" + + - name: 'Create `next-minor` 
milestone' + if: steps.querymilestone.outputs.number == '' + id: createmilestone + uses: "WyriHaximus/github-action-create-milestone@v1" + with: + title: 'next-minor' + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" \ No newline at end of file diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index 5acd20007c..078f6c85bb 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -37,27 +37,27 @@ jobs: echo ::set-output name=next_tag::$RESULT - - name: "✏️ Generate full changelog" - if: steps.version_type.outputs.type != 'skip' - id: generate-full-changelog - uses: heinrichreimer/github-changelog-generator-action@v2.2 - with: - token: ${{ secrets.ADMIN_TOKEN }} - addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' - issues: false - issuesWoLabels: false - sinceTag: "3.0.0" - maxIssues: 100 - pullRequests: true - prWoLabels: false - author: false - unreleased: true - compareLink: true - stripGeneratorNotice: true - verbose: true - unreleasedLabel: ${{ steps.version.outputs.next_tag }} - excludeTagsRegex: "CI/.+" - releaseBranch: "main" + # - name: "✏️ Generate full changelog" + # if: steps.version_type.outputs.type != 'skip' + # id: generate-full-changelog + # uses: heinrichreimer/github-changelog-generator-action@v2.3 + # with: + # token: ${{ secrets.ADMIN_TOKEN }} + # addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' + # issues: false + # issuesWoLabels: false + # sinceTag: "3.12.0" + # maxIssues: 100 + # pullRequests: true + # prWoLabels: false + # author: false + # unreleased: true + # compareLink: true + # stripGeneratorNotice: true + # verbose: true + # unreleasedLabel: ${{ steps.version.outputs.next_tag }} + # excludeTagsRegex: "CI/.+" + # releaseBranch: "main" - name: "🖨️ Print changelog to console" if: steps.version_type.outputs.type != 'skip' @@ -69,16 +69,14 @@ jobs: run: | git config user.email ${{ secrets.CI_EMAIL }} git config user.name ${{ secrets.CI_USER }} - cd repos/avalon-core git checkout main git pull - cd ../.. git add . 
git commit -m "[Automated] Bump version" tag_name="CI/${{ steps.version.outputs.next_tag }}" echo $tag_name git tag -a $tag_name -m "nightly build" - + - name: Push to protected main branch uses: CasperWA/push-protected@v2.10.0 with: @@ -87,11 +85,11 @@ jobs: tags: true unprotect_reviews: true - - name: 🔨 Merge main back to develop + - name: 🔨 Merge main back to develop uses: everlytic/branch-merge@1.1.0 if: steps.version_type.outputs.type != 'skip' with: github_token: ${{ secrets.ADMIN_TOKEN }} source_ref: 'main' target_branch: 'develop' - commit_message_template: '[Automated] Merged {source_ref} into {target_branch}' \ No newline at end of file + commit_message_template: '[Automated] Merged {source_ref} into {target_branch}' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 85864b4442..754f3d32d6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,7 +2,7 @@ name: Stable Release on: release: - types: + types: - prereleased jobs: @@ -13,7 +13,7 @@ jobs: steps: - name: 🚛 Checkout Code uses: actions/checkout@v2 - with: + with: fetch-depth: 0 - name: Set up Python @@ -33,27 +33,27 @@ jobs: echo ::set-output name=last_release::$LASTRELEASE echo ::set-output name=release_tag::$RESULT - - name: "✏️ Generate full changelog" - if: steps.version.outputs.release_tag != 'skip' - id: generate-full-changelog - uses: heinrichreimer/github-changelog-generator-action@v2.2 - with: - token: ${{ secrets.ADMIN_TOKEN }} - addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' - issues: false - issuesWoLabels: false - sinceTag: "3.0.0" - maxIssues: 100 - pullRequests: true - prWoLabels: false - author: false - unreleased: true - compareLink: true - stripGeneratorNotice: true - verbose: true - futureRelease: ${{ steps.version.outputs.release_tag }} - excludeTagsRegex: "CI/.+" - releaseBranch: "main" + # - name: "✏️ Generate full changelog" + # if: steps.version.outputs.release_tag != 'skip' + # id: generate-full-changelog + # uses: heinrichreimer/github-changelog-generator-action@v2.3 + # with: + # token: ${{ secrets.ADMIN_TOKEN }} + # addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' + # issues: false + # issuesWoLabels: false + # sinceTag: "3.12.0" + # maxIssues: 100 + # pullRequests: true + # prWoLabels: false + # author: false + # unreleased: true + # compareLink: true + # stripGeneratorNotice: true + # verbose: true + # futureRelease: ${{ steps.version.outputs.release_tag }} + # excludeTagsRegex: "CI/.+" + # releaseBranch: "main" - name: 
💾 Commit and Tag id: git_commit @@ -73,8 +73,8 @@ jobs: token: ${{ secrets.ADMIN_TOKEN }} branch: main tags: true - unprotect_reviews: true - + unprotect_reviews: true + - name: "✏️ Generate last changelog" if: steps.version.outputs.release_tag != 'skip' id: generate-last-changelog @@ -114,11 +114,11 @@ jobs: with: tag: "${{ steps.version.outputs.current_version }}" - - name: 🔁 Merge main back to develop + - name: 🔁 Merge main back to develop if: steps.version.outputs.release_tag != 'skip' uses: everlytic/branch-merge@1.1.0 with: github_token: ${{ secrets.ADMIN_TOKEN }} source_ref: 'main' target_branch: 'develop' - commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}' \ No newline at end of file + commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}' diff --git a/.gitignore b/.gitignore index 28cfb4b1e9..18e7cd7bf2 100644 --- a/.gitignore +++ b/.gitignore @@ -102,3 +102,13 @@ website/.docusaurus .poetry/ .python-version +.editorconfig +.pre-commit-config.yaml +mypy.ini + +tools/run_eventserver.* + +# Developer tools +tools/dev_* + +.github_changelog_generator diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000..fe93791c4e --- /dev/null +++ b/.gitmodules @@ -0,0 +1,7 @@ +[submodule "tools/modules/powershell/BurntToast"] + path = tools/modules/powershell/BurntToast + url = https://github.com/Windos/BurntToast.git + +[submodule "tools/modules/powershell/PSWriteColor"] + path = tools/modules/powershell/PSWriteColor + url = https://github.com/EvotecIT/PSWriteColor.git \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index e2ff9f919c..707b61676f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,42 +1,838 @@ # Changelog -## [3.10.0-nightly.2](https://github.com/pypeclub/OpenPype/tree/HEAD) +## [3.14.6](https://github.com/pypeclub/OpenPype/tree/HEAD) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...HEAD) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.5...HEAD) + +### 📖 Documentation + +- Documentation: Minor updates to dev\_requirements.md [\#4025](https://github.com/pypeclub/OpenPype/pull/4025) + +**🆕 New features** + +- Nuke: add 13.2 variant [\#4041](https://github.com/pypeclub/OpenPype/pull/4041) + +**🚀 Enhancements** + +- Publish Report Viewer: Store reports locally on machine [\#4040](https://github.com/pypeclub/OpenPype/pull/4040) +- General: More specific error in burnins script [\#4026](https://github.com/pypeclub/OpenPype/pull/4026) +- General: Extract review does not crash with old settings overrides [\#4023](https://github.com/pypeclub/OpenPype/pull/4023) +- Publisher: Convertors for legacy instances [\#4020](https://github.com/pypeclub/OpenPype/pull/4020) +- workflows: adding milestone creator and assigner [\#4018](https://github.com/pypeclub/OpenPype/pull/4018) +- Publisher: Catch creator errors [\#4015](https://github.com/pypeclub/OpenPype/pull/4015) + +**🐛 Bug fixes** + +- Hiero - effect collection fixes [\#4038](https://github.com/pypeclub/OpenPype/pull/4038) +- Nuke - loader clip correct hash conversion in path [\#4037](https://github.com/pypeclub/OpenPype/pull/4037) +- Maya: Soft fail when applying capture preset [\#4034](https://github.com/pypeclub/OpenPype/pull/4034) +- Igniter: handle missing directory [\#4032](https://github.com/pypeclub/OpenPype/pull/4032) +- StandalonePublisher: Fix thumbnail publishing [\#4029](https://github.com/pypeclub/OpenPype/pull/4029) +- Experimental Tools: Fix publisher import 
[\#4027](https://github.com/pypeclub/OpenPype/pull/4027) +- Houdini: fix wrong path in ASS loader [\#4016](https://github.com/pypeclub/OpenPype/pull/4016) + +**🔀 Refactored code** + +- General: Import lib functions from lib [\#4017](https://github.com/pypeclub/OpenPype/pull/4017) + +## [3.14.5](https://github.com/pypeclub/OpenPype/tree/3.14.5) (2022-10-24) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.4...3.14.5) + +**🚀 Enhancements** + +- Maya: add OBJ extractor to model family [\#4021](https://github.com/pypeclub/OpenPype/pull/4021) +- Publish report viewer tool [\#4010](https://github.com/pypeclub/OpenPype/pull/4010) +- Nuke | Global: adding custom tags representation filtering [\#4009](https://github.com/pypeclub/OpenPype/pull/4009) +- Publisher: Create context has shared data for collection phase [\#3995](https://github.com/pypeclub/OpenPype/pull/3995) +- Resolve: updating to v18 compatibility [\#3986](https://github.com/pypeclub/OpenPype/pull/3986) + +**🐛 Bug fixes** + +- TrayPublisher: Fix missing argument [\#4019](https://github.com/pypeclub/OpenPype/pull/4019) +- General: Fix python 2 compatibility of ffmpeg and oiio tools discovery [\#4011](https://github.com/pypeclub/OpenPype/pull/4011) + +**🔀 Refactored code** + +- Maya: Removed unused imports [\#4008](https://github.com/pypeclub/OpenPype/pull/4008) +- Unreal: Fix import of moved function [\#4007](https://github.com/pypeclub/OpenPype/pull/4007) +- Houdini: Change import of RepairAction [\#4005](https://github.com/pypeclub/OpenPype/pull/4005) +- Nuke/Hiero: Refactor openpype.api imports [\#4000](https://github.com/pypeclub/OpenPype/pull/4000) +- TVPaint: Defined with HostBase [\#3994](https://github.com/pypeclub/OpenPype/pull/3994) + +**Merged pull requests:** + +- Unreal: Remove redundant Creator stub [\#4012](https://github.com/pypeclub/OpenPype/pull/4012) +- Unreal: add `uproject` extension to Unreal project template [\#4004](https://github.com/pypeclub/OpenPype/pull/4004) +- Unreal: fix order of includes [\#4002](https://github.com/pypeclub/OpenPype/pull/4002) +- Fusion: Implement backwards compatibility \(+/- Fusion 17.2\) [\#3958](https://github.com/pypeclub/OpenPype/pull/3958) + +## [3.14.4](https://github.com/pypeclub/OpenPype/tree/3.14.4) (2022-10-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.3...3.14.4) + +**🆕 New features** + +- Webpublisher: use max next published version number for all items in batch [\#3961](https://github.com/pypeclub/OpenPype/pull/3961) +- General: Control Thumbnail integration via explicit configuration profiles [\#3951](https://github.com/pypeclub/OpenPype/pull/3951) + +**🚀 Enhancements** + +- Publisher: Multiselection in card view [\#3993](https://github.com/pypeclub/OpenPype/pull/3993) +- TrayPublisher: Original Basename cause crash too early [\#3990](https://github.com/pypeclub/OpenPype/pull/3990) +- Tray Publisher: add `originalBasename` data to simple creators [\#3988](https://github.com/pypeclub/OpenPype/pull/3988) +- General: Custom paths to ffmpeg and OpenImageIO tools [\#3982](https://github.com/pypeclub/OpenPype/pull/3982) +- Integrate: Preserve existing subset group if instance does not set it for new version [\#3976](https://github.com/pypeclub/OpenPype/pull/3976) +- Publisher: Prepare publisher controller for remote publishing [\#3972](https://github.com/pypeclub/OpenPype/pull/3972) +- Maya: new style dataclasses in maya deadline submitter plugin [\#3968](https://github.com/pypeclub/OpenPype/pull/3968) +- Maya: Define preffered Qt 
bindings for Qt.py and qtpy [\#3963](https://github.com/pypeclub/OpenPype/pull/3963) +- Settings: Move imageio from project anatomy to project settings \[pypeclub\] [\#3959](https://github.com/pypeclub/OpenPype/pull/3959) +- TrayPublisher: Extract thumbnail for other families [\#3952](https://github.com/pypeclub/OpenPype/pull/3952) +- Publisher: Pass instance to subset name method on update [\#3949](https://github.com/pypeclub/OpenPype/pull/3949) +- General: Set root environments before DCC launch [\#3947](https://github.com/pypeclub/OpenPype/pull/3947) +- Refactor: changed legacy way to update database for Hero version integrate [\#3941](https://github.com/pypeclub/OpenPype/pull/3941) +- Maya: Moved plugin from global to maya [\#3939](https://github.com/pypeclub/OpenPype/pull/3939) +- Publisher: Create dialog is part of main window [\#3936](https://github.com/pypeclub/OpenPype/pull/3936) +- Fusion: Implement Alembic and FBX mesh loader [\#3927](https://github.com/pypeclub/OpenPype/pull/3927) + +**🐛 Bug fixes** + +- TrayPublisher: Disable sequences in batch mov creator [\#3996](https://github.com/pypeclub/OpenPype/pull/3996) +- Fix - tags might be missing on representation [\#3985](https://github.com/pypeclub/OpenPype/pull/3985) +- Resolve: Fix usage of functions from lib [\#3983](https://github.com/pypeclub/OpenPype/pull/3983) +- Maya: remove invalid prefix token for non-multipart outputs [\#3981](https://github.com/pypeclub/OpenPype/pull/3981) +- Ftrack: Fix schema cache for Python 2 [\#3980](https://github.com/pypeclub/OpenPype/pull/3980) +- Maya: add object to attr.s declaration [\#3973](https://github.com/pypeclub/OpenPype/pull/3973) +- Maya: Deadline OutputFilePath hack regression for Renderman [\#3950](https://github.com/pypeclub/OpenPype/pull/3950) +- Houdini: Fix validate workfile paths for non-parm file references [\#3948](https://github.com/pypeclub/OpenPype/pull/3948) +- Photoshop: missed sync published version of workfile with workfile [\#3946](https://github.com/pypeclub/OpenPype/pull/3946) +- Maya: Set default value for RenderSetupIncludeLights option [\#3944](https://github.com/pypeclub/OpenPype/pull/3944) +- Maya: fix regression of Renderman Deadline hack [\#3943](https://github.com/pypeclub/OpenPype/pull/3943) +- Kitsu: 2 fixes, nb\_frames and Shot type error [\#3940](https://github.com/pypeclub/OpenPype/pull/3940) +- Tray: Change order of attribute changes [\#3938](https://github.com/pypeclub/OpenPype/pull/3938) +- AttributeDefs: Fix crashing multivalue of files widget [\#3937](https://github.com/pypeclub/OpenPype/pull/3937) +- General: Fix links query on hero version [\#3900](https://github.com/pypeclub/OpenPype/pull/3900) +- Publisher: Files Drag n Drop cleanup [\#3888](https://github.com/pypeclub/OpenPype/pull/3888) + +**🔀 Refactored code** + +- Flame: Import lib functions from lib [\#3992](https://github.com/pypeclub/OpenPype/pull/3992) +- General: Fix deprecated warning in legacy creator [\#3978](https://github.com/pypeclub/OpenPype/pull/3978) +- Blender: Remove openpype api imports [\#3977](https://github.com/pypeclub/OpenPype/pull/3977) +- General: Use direct import of resources [\#3964](https://github.com/pypeclub/OpenPype/pull/3964) +- General: Direct settings imports [\#3934](https://github.com/pypeclub/OpenPype/pull/3934) +- General: import 'Logger' from 'openpype.lib' [\#3926](https://github.com/pypeclub/OpenPype/pull/3926) +- General: Remove deprecated functions from lib [\#3907](https://github.com/pypeclub/OpenPype/pull/3907) + +**Merged pull requests:** + 
+- Maya + Yeti: Load Yeti Cache fix frame number recognition [\#3942](https://github.com/pypeclub/OpenPype/pull/3942) +- Fusion: Implement callbacks to Fusion's event system thread [\#3928](https://github.com/pypeclub/OpenPype/pull/3928) +- Photoshop: create single frame image in Ftrack as review [\#3908](https://github.com/pypeclub/OpenPype/pull/3908) + +## [3.14.3](https://github.com/pypeclub/OpenPype/tree/3.14.3) (2022-10-03) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.2...3.14.3) + +**🚀 Enhancements** + +- Publisher: Enhancement proposals [\#3897](https://github.com/pypeclub/OpenPype/pull/3897) + +**🐛 Bug fixes** + +- Maya: Fix Render single camera validator [\#3929](https://github.com/pypeclub/OpenPype/pull/3929) +- Flame: loading multilayer exr to batch/reel is working [\#3901](https://github.com/pypeclub/OpenPype/pull/3901) +- Hiero: Fix inventory check on launch [\#3895](https://github.com/pypeclub/OpenPype/pull/3895) +- WebPublisher: Fix import after refactor [\#3891](https://github.com/pypeclub/OpenPype/pull/3891) + +**🔀 Refactored code** + +- Maya: Remove unused 'openpype.api' imports in plugins [\#3925](https://github.com/pypeclub/OpenPype/pull/3925) +- Resolve: Use new Extractor location [\#3918](https://github.com/pypeclub/OpenPype/pull/3918) +- Unreal: Use new Extractor location [\#3917](https://github.com/pypeclub/OpenPype/pull/3917) +- Flame: Use new Extractor location [\#3916](https://github.com/pypeclub/OpenPype/pull/3916) +- Houdini: Use new Extractor location [\#3894](https://github.com/pypeclub/OpenPype/pull/3894) +- Harmony: Use new Extractor location [\#3893](https://github.com/pypeclub/OpenPype/pull/3893) + +**Merged pull requests:** + +- Maya: Fix Scene Inventory possibly starting off-screen due to maya preferences [\#3923](https://github.com/pypeclub/OpenPype/pull/3923) + +## [3.14.2](https://github.com/pypeclub/OpenPype/tree/3.14.2) (2022-09-12) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.1...3.14.2) + +### 📖 Documentation + +- Documentation: Anatomy templates [\#3618](https://github.com/pypeclub/OpenPype/pull/3618) + +**🆕 New features** + +- Nuke: Build workfile by template [\#3763](https://github.com/pypeclub/OpenPype/pull/3763) +- Houdini: Publishing workfiles [\#3697](https://github.com/pypeclub/OpenPype/pull/3697) +- Global: making collect audio plugin global [\#3679](https://github.com/pypeclub/OpenPype/pull/3679) + +**🚀 Enhancements** + +- Flame: Adding Creator's retimed shot and handles switch [\#3826](https://github.com/pypeclub/OpenPype/pull/3826) +- Flame: OpenPype submenu to batch and media manager [\#3825](https://github.com/pypeclub/OpenPype/pull/3825) +- General: Better pixmap scaling [\#3809](https://github.com/pypeclub/OpenPype/pull/3809) +- Photoshop: attempt to speed up ExtractImage [\#3793](https://github.com/pypeclub/OpenPype/pull/3793) +- SyncServer: Added cli commands for sync server [\#3765](https://github.com/pypeclub/OpenPype/pull/3765) +- Kitsu: Drop 'entities root' setting. 
[\#3739](https://github.com/pypeclub/OpenPype/pull/3739) +- git: update gitignore [\#3722](https://github.com/pypeclub/OpenPype/pull/3722) +- Blender: Publisher collect workfile representation [\#3670](https://github.com/pypeclub/OpenPype/pull/3670) +- Maya: move set render settings menu entry [\#3669](https://github.com/pypeclub/OpenPype/pull/3669) +- Scene Inventory: Maya add actions to select from or to scene [\#3659](https://github.com/pypeclub/OpenPype/pull/3659) +- Scene Inventory: Add subsetGroup column [\#3658](https://github.com/pypeclub/OpenPype/pull/3658) + +**🐛 Bug fixes** + +- General: Fix Pattern access in client code [\#3828](https://github.com/pypeclub/OpenPype/pull/3828) +- Launcher: Skip opening last work file works for groups [\#3822](https://github.com/pypeclub/OpenPype/pull/3822) +- Maya: Publishing data key change [\#3811](https://github.com/pypeclub/OpenPype/pull/3811) +- Igniter: Fix status handling when version is already installed [\#3804](https://github.com/pypeclub/OpenPype/pull/3804) +- Resolve: Addon import is Python 2 compatible [\#3798](https://github.com/pypeclub/OpenPype/pull/3798) +- Hiero: retimed clip publishing is working [\#3792](https://github.com/pypeclub/OpenPype/pull/3792) +- nuke: validate write node is not failing due wrong type [\#3780](https://github.com/pypeclub/OpenPype/pull/3780) +- Fix - changed format of version string in pyproject.toml [\#3777](https://github.com/pypeclub/OpenPype/pull/3777) +- Ftrack status fix typo prgoress -\> progress [\#3761](https://github.com/pypeclub/OpenPype/pull/3761) +- Fix version resolution [\#3757](https://github.com/pypeclub/OpenPype/pull/3757) +- Maya: `containerise` dont skip empty values [\#3674](https://github.com/pypeclub/OpenPype/pull/3674) + +**🔀 Refactored code** + +- Photoshop: Use new Extractor location [\#3789](https://github.com/pypeclub/OpenPype/pull/3789) +- Blender: Use new Extractor location [\#3787](https://github.com/pypeclub/OpenPype/pull/3787) +- AfterEffects: Use new Extractor location [\#3784](https://github.com/pypeclub/OpenPype/pull/3784) +- General: Remove unused teshost [\#3773](https://github.com/pypeclub/OpenPype/pull/3773) +- General: Copied 'Extractor' plugin to publish pipeline [\#3771](https://github.com/pypeclub/OpenPype/pull/3771) +- General: Move queries of asset and representation links [\#3770](https://github.com/pypeclub/OpenPype/pull/3770) +- General: Move create project folders to pipeline [\#3768](https://github.com/pypeclub/OpenPype/pull/3768) +- General: Create project function moved to client code [\#3766](https://github.com/pypeclub/OpenPype/pull/3766) +- Maya: Refactor submit deadline to use AbstractSubmitDeadline [\#3759](https://github.com/pypeclub/OpenPype/pull/3759) +- General: Change publish template settings location [\#3755](https://github.com/pypeclub/OpenPype/pull/3755) +- General: Move hostdirname functionality into host [\#3749](https://github.com/pypeclub/OpenPype/pull/3749) +- General: Move publish utils to pipeline [\#3745](https://github.com/pypeclub/OpenPype/pull/3745) +- Houdini: Define houdini as addon [\#3735](https://github.com/pypeclub/OpenPype/pull/3735) +- Fusion: Defined fusion as addon [\#3733](https://github.com/pypeclub/OpenPype/pull/3733) +- Flame: Defined flame as addon [\#3732](https://github.com/pypeclub/OpenPype/pull/3732) +- Resolve: Define resolve as addon [\#3727](https://github.com/pypeclub/OpenPype/pull/3727) + +**Merged pull requests:** + +- Standalone Publisher: Ignore empty labels, then still use name like other asset 
models [\#3779](https://github.com/pypeclub/OpenPype/pull/3779) +- Kitsu - sync\_all\_project - add list ignore\_projects [\#3776](https://github.com/pypeclub/OpenPype/pull/3776) + +## [3.14.1](https://github.com/pypeclub/OpenPype/tree/3.14.1) (2022-08-30) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.0...3.14.1) + +### 📖 Documentation + +- Documentation: Few updates [\#3698](https://github.com/pypeclub/OpenPype/pull/3698) +- Documentation: Settings development [\#3660](https://github.com/pypeclub/OpenPype/pull/3660) + +**🆕 New features** + +- Webpublisher:change create flatten image into tri state [\#3678](https://github.com/pypeclub/OpenPype/pull/3678) +- Blender: validators code correction with settings and defaults [\#3662](https://github.com/pypeclub/OpenPype/pull/3662) + +**🚀 Enhancements** + +- General: Thumbnail can use project roots [\#3750](https://github.com/pypeclub/OpenPype/pull/3750) +- Settings: Remove settings lock on tray exit [\#3720](https://github.com/pypeclub/OpenPype/pull/3720) +- General: Added helper getters to modules manager [\#3712](https://github.com/pypeclub/OpenPype/pull/3712) +- Unreal: Define unreal as module and use host class [\#3701](https://github.com/pypeclub/OpenPype/pull/3701) +- Settings: Lock settings UI session [\#3700](https://github.com/pypeclub/OpenPype/pull/3700) +- General: Benevolent context label collector [\#3686](https://github.com/pypeclub/OpenPype/pull/3686) +- Ftrack: Store ftrack entities on hierarchy integration to instances [\#3677](https://github.com/pypeclub/OpenPype/pull/3677) +- Ftrack: More logs related to auto sync value change [\#3671](https://github.com/pypeclub/OpenPype/pull/3671) +- Blender: ops refresh manager after process events [\#3663](https://github.com/pypeclub/OpenPype/pull/3663) + +**🐛 Bug fixes** + +- Maya: Fix typo in getPanel argument `with_focus` -\> `withFocus` [\#3753](https://github.com/pypeclub/OpenPype/pull/3753) +- General: Smaller fixes of imports [\#3748](https://github.com/pypeclub/OpenPype/pull/3748) +- General: Logger tweaks [\#3741](https://github.com/pypeclub/OpenPype/pull/3741) +- Nuke: missing job dependency if multiple bake streams [\#3737](https://github.com/pypeclub/OpenPype/pull/3737) +- Nuke: color-space settings from anatomy is working [\#3721](https://github.com/pypeclub/OpenPype/pull/3721) +- Settings: Fix studio default anatomy save [\#3716](https://github.com/pypeclub/OpenPype/pull/3716) +- Maya: Use project name instead of project code [\#3709](https://github.com/pypeclub/OpenPype/pull/3709) +- Settings: Fix project overrides save [\#3708](https://github.com/pypeclub/OpenPype/pull/3708) +- Workfiles tool: Fix published workfile filtering [\#3704](https://github.com/pypeclub/OpenPype/pull/3704) +- PS, AE: Provide default variant value for workfile subset [\#3703](https://github.com/pypeclub/OpenPype/pull/3703) +- RoyalRender: handle host name that is not set [\#3695](https://github.com/pypeclub/OpenPype/pull/3695) +- Flame: retime is working on clip publishing [\#3684](https://github.com/pypeclub/OpenPype/pull/3684) +- Webpublisher: added check for empty context [\#3682](https://github.com/pypeclub/OpenPype/pull/3682) + +**🔀 Refactored code** + +- General: Move delivery logic to pipeline [\#3751](https://github.com/pypeclub/OpenPype/pull/3751) +- General: Host addons cleanup [\#3744](https://github.com/pypeclub/OpenPype/pull/3744) +- Webpublisher: Webpublisher is used as addon [\#3740](https://github.com/pypeclub/OpenPype/pull/3740) +- Photoshop: Defined 
photoshop as addon [\#3736](https://github.com/pypeclub/OpenPype/pull/3736) +- Harmony: Defined harmony as addon [\#3734](https://github.com/pypeclub/OpenPype/pull/3734) +- General: Module interfaces cleanup [\#3731](https://github.com/pypeclub/OpenPype/pull/3731) +- AfterEffects: Move AE functions from general lib [\#3730](https://github.com/pypeclub/OpenPype/pull/3730) +- Blender: Define blender as module [\#3729](https://github.com/pypeclub/OpenPype/pull/3729) +- AfterEffects: Define AfterEffects as module [\#3728](https://github.com/pypeclub/OpenPype/pull/3728) +- General: Replace PypeLogger with Logger [\#3725](https://github.com/pypeclub/OpenPype/pull/3725) +- Nuke: Define nuke as module [\#3724](https://github.com/pypeclub/OpenPype/pull/3724) +- General: Move subset name functionality [\#3723](https://github.com/pypeclub/OpenPype/pull/3723) +- General: Move creators plugin getter [\#3714](https://github.com/pypeclub/OpenPype/pull/3714) +- General: Move constants from lib to client [\#3713](https://github.com/pypeclub/OpenPype/pull/3713) +- Loader: Subset groups using client operations [\#3710](https://github.com/pypeclub/OpenPype/pull/3710) +- TVPaint: Defined as module [\#3707](https://github.com/pypeclub/OpenPype/pull/3707) +- StandalonePublisher: Define StandalonePublisher as module [\#3706](https://github.com/pypeclub/OpenPype/pull/3706) +- TrayPublisher: Define TrayPublisher as module [\#3705](https://github.com/pypeclub/OpenPype/pull/3705) +- General: Move context specific functions to context tools [\#3702](https://github.com/pypeclub/OpenPype/pull/3702) + +**Merged pull requests:** + +- Hiero: Define hiero as module [\#3717](https://github.com/pypeclub/OpenPype/pull/3717) +- Deadline: better logging for DL webservice failures [\#3694](https://github.com/pypeclub/OpenPype/pull/3694) +- Photoshop: resize saved images in ExtractReview for ffmpeg [\#3676](https://github.com/pypeclub/OpenPype/pull/3676) +- Nuke: Validation refactory to new publisher [\#3567](https://github.com/pypeclub/OpenPype/pull/3567) + +## [3.14.0](https://github.com/pypeclub/OpenPype/tree/3.14.0) (2022-08-18) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.13.0...3.14.0) + +**🆕 New features** + +- Maya: Build workfile by template [\#3578](https://github.com/pypeclub/OpenPype/pull/3578) +- Maya: Implementation of JSON layout for Unreal workflow [\#3353](https://github.com/pypeclub/OpenPype/pull/3353) +- Maya: Build workfile by template [\#3315](https://github.com/pypeclub/OpenPype/pull/3315) + +**🚀 Enhancements** + +- Ftrack: Addiotional component metadata [\#3685](https://github.com/pypeclub/OpenPype/pull/3685) +- Ftrack: Set task status on farm publishing [\#3680](https://github.com/pypeclub/OpenPype/pull/3680) +- Ftrack: Set task status on task creation in integrate hierarchy [\#3675](https://github.com/pypeclub/OpenPype/pull/3675) +- Maya: Disable rendering of all lights for render instances submitted through Deadline. 
[\#3661](https://github.com/pypeclub/OpenPype/pull/3661) +- General: Optimized OCIO configs [\#3650](https://github.com/pypeclub/OpenPype/pull/3650) + +**🐛 Bug fixes** + +- General: Switch from hero version to versioned works [\#3691](https://github.com/pypeclub/OpenPype/pull/3691) +- General: Fix finding of last version [\#3656](https://github.com/pypeclub/OpenPype/pull/3656) +- General: Extract Review can scale with pixel aspect ratio [\#3644](https://github.com/pypeclub/OpenPype/pull/3644) +- Maya: Refactor moved usage of CreateRender settings [\#3643](https://github.com/pypeclub/OpenPype/pull/3643) +- General: Hero version representations have full context [\#3638](https://github.com/pypeclub/OpenPype/pull/3638) +- Nuke: color settings for render write node is working now [\#3632](https://github.com/pypeclub/OpenPype/pull/3632) +- Maya: FBX support for update in reference loader [\#3631](https://github.com/pypeclub/OpenPype/pull/3631) + +**🔀 Refactored code** + +- General: Use client projects getter [\#3673](https://github.com/pypeclub/OpenPype/pull/3673) +- Resolve: Match folder structure to other hosts [\#3653](https://github.com/pypeclub/OpenPype/pull/3653) +- Maya: Hosts as modules [\#3647](https://github.com/pypeclub/OpenPype/pull/3647) +- TimersManager: Plugins are in timers manager module [\#3639](https://github.com/pypeclub/OpenPype/pull/3639) +- General: Move workfiles functions into pipeline [\#3637](https://github.com/pypeclub/OpenPype/pull/3637) +- General: Workfiles builder using query functions [\#3598](https://github.com/pypeclub/OpenPype/pull/3598) + +**Merged pull requests:** + +- Deadline: Global job pre load is not Pype 2 compatible [\#3666](https://github.com/pypeclub/OpenPype/pull/3666) +- Maya: Remove unused get current renderer logic [\#3645](https://github.com/pypeclub/OpenPype/pull/3645) +- Kitsu|Fix: Movie project type fails & first loop children names [\#3636](https://github.com/pypeclub/OpenPype/pull/3636) +- fix the bug of failing to extract look when UDIMs format used in AiImage [\#3628](https://github.com/pypeclub/OpenPype/pull/3628) + +## [3.13.0](https://github.com/pypeclub/OpenPype/tree/3.13.0) (2022-08-09) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.2...3.13.0) + +**🆕 New features** + +- Support for mutliple installed versions - 3.13 [\#3605](https://github.com/pypeclub/OpenPype/pull/3605) +- Traypublisher: simple editorial publishing [\#3492](https://github.com/pypeclub/OpenPype/pull/3492) + +**🚀 Enhancements** + +- Editorial: Mix audio use side file for ffmpeg filters [\#3630](https://github.com/pypeclub/OpenPype/pull/3630) +- Ftrack: Comment template can contain optional keys [\#3615](https://github.com/pypeclub/OpenPype/pull/3615) +- Ftrack: Add more metadata to ftrack components [\#3612](https://github.com/pypeclub/OpenPype/pull/3612) +- General: Add context to pyblish context [\#3594](https://github.com/pypeclub/OpenPype/pull/3594) +- Kitsu: Shot&Sequence name with prefix over appends [\#3593](https://github.com/pypeclub/OpenPype/pull/3593) +- Photoshop: implemented {layer} placeholder in subset template [\#3591](https://github.com/pypeclub/OpenPype/pull/3591) +- General: Python module appdirs from git [\#3589](https://github.com/pypeclub/OpenPype/pull/3589) +- Ftrack: Update ftrack api to 2.3.3 [\#3588](https://github.com/pypeclub/OpenPype/pull/3588) +- General: New Integrator small fixes [\#3583](https://github.com/pypeclub/OpenPype/pull/3583) +- Maya: Render Creator has configurable options. 
[\#3097](https://github.com/pypeclub/OpenPype/pull/3097) + +**🐛 Bug fixes** + +- Maya: fix aov separator in Redshift [\#3625](https://github.com/pypeclub/OpenPype/pull/3625) +- Fix for multi-version build on Mac [\#3622](https://github.com/pypeclub/OpenPype/pull/3622) +- Ftrack: Sync hierarchical attributes can handle new created entities [\#3621](https://github.com/pypeclub/OpenPype/pull/3621) +- General: Extract review aspect ratio scale is calculated by ffmpeg [\#3620](https://github.com/pypeclub/OpenPype/pull/3620) +- Maya: Fix types of default settings [\#3617](https://github.com/pypeclub/OpenPype/pull/3617) +- Integrator: Don't force to have dot before frame [\#3611](https://github.com/pypeclub/OpenPype/pull/3611) +- AfterEffects: refactored integrate doesnt work formulti frame publishes [\#3610](https://github.com/pypeclub/OpenPype/pull/3610) +- Maya look data contents fails with custom attribute on group [\#3607](https://github.com/pypeclub/OpenPype/pull/3607) +- TrayPublisher: Fix wrong conflict merge [\#3600](https://github.com/pypeclub/OpenPype/pull/3600) +- Bugfix: Add OCIO as submodule to prepare for handling `maketx` color space conversion. [\#3590](https://github.com/pypeclub/OpenPype/pull/3590) +- Fix general settings environment variables resolution [\#3587](https://github.com/pypeclub/OpenPype/pull/3587) +- Editorial publishing workflow improvements [\#3580](https://github.com/pypeclub/OpenPype/pull/3580) +- General: Update imports in start script [\#3579](https://github.com/pypeclub/OpenPype/pull/3579) +- Nuke: render family integration consistency [\#3576](https://github.com/pypeclub/OpenPype/pull/3576) +- Ftrack: Handle missing published path in integrator [\#3570](https://github.com/pypeclub/OpenPype/pull/3570) +- Nuke: publish existing frames with slate with correct range [\#3555](https://github.com/pypeclub/OpenPype/pull/3555) + +**🔀 Refactored code** + +- General: Plugin settings handled by plugins [\#3623](https://github.com/pypeclub/OpenPype/pull/3623) +- General: Naive implementation of document create, update, delete [\#3601](https://github.com/pypeclub/OpenPype/pull/3601) +- General: Use query functions in general code [\#3596](https://github.com/pypeclub/OpenPype/pull/3596) +- General: Separate extraction of template data into more functions [\#3574](https://github.com/pypeclub/OpenPype/pull/3574) +- General: Lib cleanup [\#3571](https://github.com/pypeclub/OpenPype/pull/3571) + +**Merged pull requests:** + +- Webpublisher: timeout for PS studio processing [\#3619](https://github.com/pypeclub/OpenPype/pull/3619) +- Core: translated validate\_containers.py into New publisher style [\#3614](https://github.com/pypeclub/OpenPype/pull/3614) +- Enable write color sets on animation publish automatically [\#3582](https://github.com/pypeclub/OpenPype/pull/3582) + +## [3.12.2](https://github.com/pypeclub/OpenPype/tree/3.12.2) (2022-07-27) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.1...3.12.2) + +### 📖 Documentation + +- Update website with more studios [\#3554](https://github.com/pypeclub/OpenPype/pull/3554) +- Documentation: Update publishing dev docs [\#3549](https://github.com/pypeclub/OpenPype/pull/3549) + +**🚀 Enhancements** + +- General: Global thumbnail extractor is ready for more cases [\#3561](https://github.com/pypeclub/OpenPype/pull/3561) +- Maya: add additional validators to Settings [\#3540](https://github.com/pypeclub/OpenPype/pull/3540) +- General: Interactive console in cli 
[\#3526](https://github.com/pypeclub/OpenPype/pull/3526) +- Ftrack: Automatic daily review session creation can define trigger hour [\#3516](https://github.com/pypeclub/OpenPype/pull/3516) +- Ftrack: add source into Note [\#3509](https://github.com/pypeclub/OpenPype/pull/3509) +- Ftrack: Trigger custom ftrack topic of project structure creation [\#3506](https://github.com/pypeclub/OpenPype/pull/3506) +- Settings UI: Add extract to file action on project view [\#3505](https://github.com/pypeclub/OpenPype/pull/3505) +- Add pack and unpack convenience scripts [\#3502](https://github.com/pypeclub/OpenPype/pull/3502) +- General: Event system [\#3499](https://github.com/pypeclub/OpenPype/pull/3499) +- NewPublisher: Keep plugins with mismatch target in report [\#3498](https://github.com/pypeclub/OpenPype/pull/3498) +- Nuke: load clip with options from settings [\#3497](https://github.com/pypeclub/OpenPype/pull/3497) +- TrayPublisher: implemented render\_mov\_batch [\#3486](https://github.com/pypeclub/OpenPype/pull/3486) +- Migrate basic families to the new Tray Publisher [\#3469](https://github.com/pypeclub/OpenPype/pull/3469) +- Enhance powershell build scripts [\#1827](https://github.com/pypeclub/OpenPype/pull/1827) + +**🐛 Bug fixes** + +- Maya: fix Review image plane attribute [\#3569](https://github.com/pypeclub/OpenPype/pull/3569) +- Maya: Fix animated attributes \(ie. overscan\) on loaded cameras breaking review publishing. [\#3562](https://github.com/pypeclub/OpenPype/pull/3562) +- NewPublisher: Python 2 compatible html escape [\#3559](https://github.com/pypeclub/OpenPype/pull/3559) +- Remove invalid submodules from `/vendor` [\#3557](https://github.com/pypeclub/OpenPype/pull/3557) +- General: Remove hosts filter on integrator plugins [\#3556](https://github.com/pypeclub/OpenPype/pull/3556) +- Settings: Clean default values of environments [\#3550](https://github.com/pypeclub/OpenPype/pull/3550) +- Module interfaces: Fix import error [\#3547](https://github.com/pypeclub/OpenPype/pull/3547) +- Workfiles tool: Show of tool and it's flags [\#3539](https://github.com/pypeclub/OpenPype/pull/3539) +- General: Create workfile documents works again [\#3538](https://github.com/pypeclub/OpenPype/pull/3538) +- Additional fixes for powershell scripts [\#3525](https://github.com/pypeclub/OpenPype/pull/3525) +- Maya: Added wrapper around cmds.setAttr [\#3523](https://github.com/pypeclub/OpenPype/pull/3523) +- Nuke: double slate [\#3521](https://github.com/pypeclub/OpenPype/pull/3521) +- General: Fix hash of centos oiio archive [\#3519](https://github.com/pypeclub/OpenPype/pull/3519) +- Maya: Renderman display output fix [\#3514](https://github.com/pypeclub/OpenPype/pull/3514) +- TrayPublisher: Simple creation enhancements and fixes [\#3513](https://github.com/pypeclub/OpenPype/pull/3513) +- NewPublisher: Publish attributes are properly collected [\#3510](https://github.com/pypeclub/OpenPype/pull/3510) +- TrayPublisher: Make sure host name is filled [\#3504](https://github.com/pypeclub/OpenPype/pull/3504) +- NewPublisher: Groups work and enum multivalue [\#3501](https://github.com/pypeclub/OpenPype/pull/3501) + +**🔀 Refactored code** + +- General: Use query functions in integrator [\#3563](https://github.com/pypeclub/OpenPype/pull/3563) +- General: Mongo core connection moved to client [\#3531](https://github.com/pypeclub/OpenPype/pull/3531) +- Refactor Integrate Asset [\#3530](https://github.com/pypeclub/OpenPype/pull/3530) +- General: Client docstrings cleanup 
[\#3529](https://github.com/pypeclub/OpenPype/pull/3529) +- General: Move load related functions into pipeline [\#3527](https://github.com/pypeclub/OpenPype/pull/3527) +- General: Get current context document functions [\#3522](https://github.com/pypeclub/OpenPype/pull/3522) +- Kitsu: Use query function from client [\#3496](https://github.com/pypeclub/OpenPype/pull/3496) +- TimersManager: Use query functions [\#3495](https://github.com/pypeclub/OpenPype/pull/3495) +- Deadline: Use query functions [\#3466](https://github.com/pypeclub/OpenPype/pull/3466) +- Refactor Integrate Asset [\#2898](https://github.com/pypeclub/OpenPype/pull/2898) + +**Merged pull requests:** + +- Maya: fix active pane loss [\#3566](https://github.com/pypeclub/OpenPype/pull/3566) + +## [3.12.1](https://github.com/pypeclub/OpenPype/tree/3.12.1) (2022-07-13) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.0...3.12.1) + +### 📖 Documentation + +- Docs: Added minimal permissions for MongoDB [\#3441](https://github.com/pypeclub/OpenPype/pull/3441) + +**🆕 New features** + +- Maya: Add VDB to Arnold loader [\#3433](https://github.com/pypeclub/OpenPype/pull/3433) + +**🚀 Enhancements** + +- TrayPublisher: Added more options for grouping of instances [\#3494](https://github.com/pypeclub/OpenPype/pull/3494) +- NewPublisher: Align creator attributes from top to bottom [\#3487](https://github.com/pypeclub/OpenPype/pull/3487) +- NewPublisher: Added ability to use label of instance [\#3484](https://github.com/pypeclub/OpenPype/pull/3484) +- General: Creator Plugins have access to project [\#3476](https://github.com/pypeclub/OpenPype/pull/3476) +- General: Better arguments order in creator init [\#3475](https://github.com/pypeclub/OpenPype/pull/3475) +- Ftrack: Trigger custom ftrack events on project creation and preparation [\#3465](https://github.com/pypeclub/OpenPype/pull/3465) +- Windows installer: Clean old files and add version subfolder [\#3445](https://github.com/pypeclub/OpenPype/pull/3445) +- Blender: Bugfix - Set fps properly on open [\#3426](https://github.com/pypeclub/OpenPype/pull/3426) +- Hiero: Add custom scripts menu [\#3425](https://github.com/pypeclub/OpenPype/pull/3425) +- Blender: pre pyside install for all platforms [\#3400](https://github.com/pypeclub/OpenPype/pull/3400) +- Maya: Add additional playblast options to review Extractor. [\#3384](https://github.com/pypeclub/OpenPype/pull/3384) +- Maya: Ability to set resolution for playblasts from asset, and override through review instance. 
[\#3360](https://github.com/pypeclub/OpenPype/pull/3360) +- Maya: Redshift Volume Loader Implement update, remove, switch + fix vdb sequence support [\#3197](https://github.com/pypeclub/OpenPype/pull/3197) +- Maya: Implement `iter_visible_nodes_in_range` for extracting Alembics [\#3100](https://github.com/pypeclub/OpenPype/pull/3100) + +**🐛 Bug fixes** + +- TrayPublisher: Keep use instance label in list view [\#3493](https://github.com/pypeclub/OpenPype/pull/3493) +- General: Extract review use first frame of input sequence [\#3491](https://github.com/pypeclub/OpenPype/pull/3491) +- General: Fix Plist loading for application launch [\#3485](https://github.com/pypeclub/OpenPype/pull/3485) +- Nuke: Workfile tools open on start [\#3479](https://github.com/pypeclub/OpenPype/pull/3479) +- New Publisher: Disabled context change allows creation [\#3478](https://github.com/pypeclub/OpenPype/pull/3478) +- General: thumbnail extractor fix [\#3474](https://github.com/pypeclub/OpenPype/pull/3474) +- Kitsu: bugfix with sync-service ans publish plugins [\#3473](https://github.com/pypeclub/OpenPype/pull/3473) +- Flame: solved problem with multi-selected loading [\#3470](https://github.com/pypeclub/OpenPype/pull/3470) +- General: Fix query function in update logic [\#3468](https://github.com/pypeclub/OpenPype/pull/3468) +- Resolve: removed few bugs [\#3464](https://github.com/pypeclub/OpenPype/pull/3464) +- General: Delete old versions is safer when ftrack is disabled [\#3462](https://github.com/pypeclub/OpenPype/pull/3462) +- Nuke: fixing metadata slate TC difference [\#3455](https://github.com/pypeclub/OpenPype/pull/3455) +- Nuke: prerender reviewable fails [\#3450](https://github.com/pypeclub/OpenPype/pull/3450) +- Maya: fix hashing in Python 3 for tile rendering [\#3447](https://github.com/pypeclub/OpenPype/pull/3447) +- LogViewer: Escape html characters in log message [\#3443](https://github.com/pypeclub/OpenPype/pull/3443) +- Nuke: Slate frame is integrated [\#3427](https://github.com/pypeclub/OpenPype/pull/3427) +- Maya: Camera extra data - additional fix for \#3304 [\#3386](https://github.com/pypeclub/OpenPype/pull/3386) +- Maya: Handle excluding `model` family from frame range validator. 
[\#3370](https://github.com/pypeclub/OpenPype/pull/3370) + +**🔀 Refactored code** + +- Maya: Merge animation + pointcache extractor logic [\#3461](https://github.com/pypeclub/OpenPype/pull/3461) +- Maya: Re-use `maintained_time` from lib [\#3460](https://github.com/pypeclub/OpenPype/pull/3460) +- General: Use query functions in global plugins [\#3459](https://github.com/pypeclub/OpenPype/pull/3459) +- Clockify: Use query functions in clockify actions [\#3458](https://github.com/pypeclub/OpenPype/pull/3458) +- General: Use query functions in rest api calls [\#3457](https://github.com/pypeclub/OpenPype/pull/3457) +- General: Use query functions in openpype lib functions [\#3454](https://github.com/pypeclub/OpenPype/pull/3454) +- General: Use query functions in load utils [\#3446](https://github.com/pypeclub/OpenPype/pull/3446) +- General: Move publish plugin and publish render abstractions [\#3442](https://github.com/pypeclub/OpenPype/pull/3442) +- General: Use Anatomy after move to pipeline [\#3436](https://github.com/pypeclub/OpenPype/pull/3436) +- General: Anatomy moved to pipeline [\#3435](https://github.com/pypeclub/OpenPype/pull/3435) +- Fusion: Use client query functions [\#3380](https://github.com/pypeclub/OpenPype/pull/3380) +- Resolve: Use client query functions [\#3379](https://github.com/pypeclub/OpenPype/pull/3379) +- General: Host implementation defined with class [\#3337](https://github.com/pypeclub/OpenPype/pull/3337) + +## [3.12.0](https://github.com/pypeclub/OpenPype/tree/3.12.0) (2022-06-28) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.1...3.12.0) + +### 📖 Documentation + +- Fix typo in documentation: pyenv on mac [\#3417](https://github.com/pypeclub/OpenPype/pull/3417) +- Linux: update OIIO package [\#3401](https://github.com/pypeclub/OpenPype/pull/3401) + +**🆕 New features** + +- Shotgrid: Add production beta of shotgrid integration [\#2921](https://github.com/pypeclub/OpenPype/pull/2921) + +**🚀 Enhancements** + +- Webserver: Added CORS middleware [\#3422](https://github.com/pypeclub/OpenPype/pull/3422) +- Attribute Defs UI: Files widget show what is allowed to drop in [\#3411](https://github.com/pypeclub/OpenPype/pull/3411) +- General: Add ability to change user value for templates [\#3366](https://github.com/pypeclub/OpenPype/pull/3366) +- Hosts: More options for in-host callbacks [\#3357](https://github.com/pypeclub/OpenPype/pull/3357) +- Multiverse: expose some settings to GUI [\#3350](https://github.com/pypeclub/OpenPype/pull/3350) +- Maya: Allow more data to be published along camera 🎥 [\#3304](https://github.com/pypeclub/OpenPype/pull/3304) +- Add root keys and project keys to create starting folder [\#2755](https://github.com/pypeclub/OpenPype/pull/2755) + +**🐛 Bug fixes** + +- NewPublisher: Fix subset name change on change of creator plugin [\#3420](https://github.com/pypeclub/OpenPype/pull/3420) +- Bug: fix invalid avalon import [\#3418](https://github.com/pypeclub/OpenPype/pull/3418) +- Nuke: Fix keyword argument in query function [\#3414](https://github.com/pypeclub/OpenPype/pull/3414) +- Houdini: fix loading and updating vbd/bgeo sequences [\#3408](https://github.com/pypeclub/OpenPype/pull/3408) +- Nuke: Collect representation files based on Write [\#3407](https://github.com/pypeclub/OpenPype/pull/3407) +- General: Filter representations before integration start [\#3398](https://github.com/pypeclub/OpenPype/pull/3398) +- Maya: look collector typo [\#3392](https://github.com/pypeclub/OpenPype/pull/3392) +- TVPaint: Make sure exit code 
is set to not None [\#3382](https://github.com/pypeclub/OpenPype/pull/3382) +- Maya: vray device aspect ratio fix [\#3381](https://github.com/pypeclub/OpenPype/pull/3381) +- Flame: bunch of publishing issues [\#3377](https://github.com/pypeclub/OpenPype/pull/3377) +- Harmony: added unc path to zifile command in Harmony [\#3372](https://github.com/pypeclub/OpenPype/pull/3372) +- Standalone: settings improvements [\#3355](https://github.com/pypeclub/OpenPype/pull/3355) +- Nuke: Load full model hierarchy by default [\#3328](https://github.com/pypeclub/OpenPype/pull/3328) +- Nuke: multiple baking streams with correct slate [\#3245](https://github.com/pypeclub/OpenPype/pull/3245) +- Maya: fix image prefix warning in validator [\#3128](https://github.com/pypeclub/OpenPype/pull/3128) + +**🔀 Refactored code** + +- Unreal: Use client query functions [\#3421](https://github.com/pypeclub/OpenPype/pull/3421) +- General: Move editorial lib to pipeline [\#3419](https://github.com/pypeclub/OpenPype/pull/3419) +- Kitsu: renaming to plural func sync\_all\_projects [\#3397](https://github.com/pypeclub/OpenPype/pull/3397) +- Houdini: Use client query functions [\#3395](https://github.com/pypeclub/OpenPype/pull/3395) +- Hiero: Use client query functions [\#3393](https://github.com/pypeclub/OpenPype/pull/3393) +- Nuke: Use client query functions [\#3391](https://github.com/pypeclub/OpenPype/pull/3391) +- Maya: Use client query functions [\#3385](https://github.com/pypeclub/OpenPype/pull/3385) +- Harmony: Use client query functions [\#3378](https://github.com/pypeclub/OpenPype/pull/3378) +- Celaction: Use client query functions [\#3376](https://github.com/pypeclub/OpenPype/pull/3376) +- Photoshop: Use client query functions [\#3375](https://github.com/pypeclub/OpenPype/pull/3375) +- AfterEffects: Use client query functions [\#3374](https://github.com/pypeclub/OpenPype/pull/3374) +- TVPaint: Use client query functions [\#3340](https://github.com/pypeclub/OpenPype/pull/3340) +- Ftrack: Use client query functions [\#3339](https://github.com/pypeclub/OpenPype/pull/3339) +- Standalone Publisher: Use client query functions [\#3330](https://github.com/pypeclub/OpenPype/pull/3330) + +**Merged pull requests:** + +- Sync Queue: Added far future value for null values for dates [\#3371](https://github.com/pypeclub/OpenPype/pull/3371) +- Maya - added support for single frame playblast review [\#3369](https://github.com/pypeclub/OpenPype/pull/3369) +- Houdini: Implement Redshift Proxy Export [\#3196](https://github.com/pypeclub/OpenPype/pull/3196) + +## [3.11.1](https://github.com/pypeclub/OpenPype/tree/3.11.1) (2022-06-20) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.0...3.11.1) + +**🆕 New features** + +- Flame: custom export temp folder [\#3346](https://github.com/pypeclub/OpenPype/pull/3346) +- Nuke: removing third-party plugins [\#3344](https://github.com/pypeclub/OpenPype/pull/3344) + +**🚀 Enhancements** + +- Pyblish Pype: Hiding/Close issues [\#3367](https://github.com/pypeclub/OpenPype/pull/3367) +- Ftrack: Removed requirement of pypeclub role from default settings [\#3354](https://github.com/pypeclub/OpenPype/pull/3354) +- Kitsu: Prevent crash on missing frames information [\#3352](https://github.com/pypeclub/OpenPype/pull/3352) +- Ftrack: Open browser from tray [\#3320](https://github.com/pypeclub/OpenPype/pull/3320) +- Enhancement: More control over thumbnail processing. 
[\#3259](https://github.com/pypeclub/OpenPype/pull/3259) + +**🐛 Bug fixes** + +- Nuke: bake streams with slate on farm [\#3368](https://github.com/pypeclub/OpenPype/pull/3368) +- Harmony: audio validator has wrong logic [\#3364](https://github.com/pypeclub/OpenPype/pull/3364) +- Nuke: Fix missing variable in extract thumbnail [\#3363](https://github.com/pypeclub/OpenPype/pull/3363) +- Nuke: Fix precollect writes [\#3361](https://github.com/pypeclub/OpenPype/pull/3361) +- AE- fix validate\_scene\_settings and renderLocal [\#3358](https://github.com/pypeclub/OpenPype/pull/3358) +- deadline: fixing misidentification of revieables [\#3356](https://github.com/pypeclub/OpenPype/pull/3356) +- General: Create only one thumbnail per instance [\#3351](https://github.com/pypeclub/OpenPype/pull/3351) +- nuke: adding extract thumbnail settings 3.10 [\#3347](https://github.com/pypeclub/OpenPype/pull/3347) +- General: Fix last version function [\#3345](https://github.com/pypeclub/OpenPype/pull/3345) +- Deadline: added OPENPYPE\_MONGO to filter [\#3336](https://github.com/pypeclub/OpenPype/pull/3336) +- Nuke: fixing farm publishing if review is disabled [\#3306](https://github.com/pypeclub/OpenPype/pull/3306) +- Maya: Fix Yeti errors on Create, Publish and Load [\#3198](https://github.com/pypeclub/OpenPype/pull/3198) + +**🔀 Refactored code** + +- Webpublisher: Use client query functions [\#3333](https://github.com/pypeclub/OpenPype/pull/3333) + +## [3.11.0](https://github.com/pypeclub/OpenPype/tree/3.11.0) (2022-06-17) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.10.0...3.11.0) + +### 📖 Documentation + +- Documentation: Add app key to template documentation [\#3299](https://github.com/pypeclub/OpenPype/pull/3299) +- doc: adding royal render and multiverse to the web site [\#3285](https://github.com/pypeclub/OpenPype/pull/3285) +- Module: Kitsu module [\#2650](https://github.com/pypeclub/OpenPype/pull/2650) + +**🆕 New features** + +- Multiverse: fixed composition write, full docs, cosmetics [\#3178](https://github.com/pypeclub/OpenPype/pull/3178) + +**🚀 Enhancements** + +- Settings: Settings can be extracted from UI [\#3323](https://github.com/pypeclub/OpenPype/pull/3323) +- updated poetry installation source [\#3316](https://github.com/pypeclub/OpenPype/pull/3316) +- Ftrack: Action to easily create daily review session [\#3310](https://github.com/pypeclub/OpenPype/pull/3310) +- TVPaint: Extractor use mark in/out range to render [\#3309](https://github.com/pypeclub/OpenPype/pull/3309) +- Ftrack: Delivery action can work on ReviewSessions [\#3307](https://github.com/pypeclub/OpenPype/pull/3307) +- Maya: Look assigner UI improvements [\#3298](https://github.com/pypeclub/OpenPype/pull/3298) +- Ftrack: Action to transfer values of hierarchical attributes [\#3284](https://github.com/pypeclub/OpenPype/pull/3284) +- Maya: better handling of legacy review subsets names [\#3269](https://github.com/pypeclub/OpenPype/pull/3269) +- General: Updated windows oiio tool [\#3268](https://github.com/pypeclub/OpenPype/pull/3268) +- Unreal: add support for skeletalMesh and staticMesh to loaders [\#3267](https://github.com/pypeclub/OpenPype/pull/3267) +- Maya: reference loaders could store placeholder in referenced url [\#3264](https://github.com/pypeclub/OpenPype/pull/3264) +- TVPaint: Init file for TVPaint worker also handle guideline images [\#3250](https://github.com/pypeclub/OpenPype/pull/3250) +- Nuke: Change default icon path in settings [\#3247](https://github.com/pypeclub/OpenPype/pull/3247) +- 
Maya: publishing of animation and pointcache on a farm [\#3225](https://github.com/pypeclub/OpenPype/pull/3225) +- Maya: Look assigner UI improvements [\#3208](https://github.com/pypeclub/OpenPype/pull/3208) +- Nuke: add pointcache and animation to loader [\#3186](https://github.com/pypeclub/OpenPype/pull/3186) +- Nuke: Add a gizmo menu [\#3172](https://github.com/pypeclub/OpenPype/pull/3172) +- Support for Unreal 5 [\#3122](https://github.com/pypeclub/OpenPype/pull/3122) + +**🐛 Bug fixes** + +- General: Handle empty source key on instance [\#3342](https://github.com/pypeclub/OpenPype/pull/3342) +- Houdini: Fix Houdini VDB manage update wrong file attribute name [\#3322](https://github.com/pypeclub/OpenPype/pull/3322) +- Nuke: anatomy compatibility issue hacks [\#3321](https://github.com/pypeclub/OpenPype/pull/3321) +- hiero: otio p3 compatibility issue - metadata on effect use update 3.11 [\#3314](https://github.com/pypeclub/OpenPype/pull/3314) +- General: Vendorized modules for Python 2 and update poetry lock [\#3305](https://github.com/pypeclub/OpenPype/pull/3305) +- Fix - added local targets to install host [\#3303](https://github.com/pypeclub/OpenPype/pull/3303) +- Settings: Add missing default settings for nuke gizmo [\#3301](https://github.com/pypeclub/OpenPype/pull/3301) +- Maya: Fix swapped width and height in reviews [\#3300](https://github.com/pypeclub/OpenPype/pull/3300) +- Maya: point cache publish handles Maya instances [\#3297](https://github.com/pypeclub/OpenPype/pull/3297) +- Global: extract review slate issues [\#3286](https://github.com/pypeclub/OpenPype/pull/3286) +- Webpublisher: return only active projects in ProjectsEndpoint [\#3281](https://github.com/pypeclub/OpenPype/pull/3281) +- Hiero: add support for task tags 3.10.x [\#3279](https://github.com/pypeclub/OpenPype/pull/3279) +- General: Fix Oiio tool path resolving [\#3278](https://github.com/pypeclub/OpenPype/pull/3278) +- Maya: Fix udim support for e.g. 
uppercase \ tag [\#3266](https://github.com/pypeclub/OpenPype/pull/3266) +- Nuke: bake reformat was failing on string type [\#3261](https://github.com/pypeclub/OpenPype/pull/3261) +- Maya: hotfix Pxr multitexture in looks [\#3260](https://github.com/pypeclub/OpenPype/pull/3260) +- Unreal: Fix Camera Loading if Layout is missing [\#3255](https://github.com/pypeclub/OpenPype/pull/3255) +- Unreal: Fixed Animation loading in UE5 [\#3240](https://github.com/pypeclub/OpenPype/pull/3240) +- Unreal: Fixed Render creation in UE5 [\#3239](https://github.com/pypeclub/OpenPype/pull/3239) +- Unreal: Fixed Camera loading in UE5 [\#3238](https://github.com/pypeclub/OpenPype/pull/3238) +- Flame: debugging [\#3224](https://github.com/pypeclub/OpenPype/pull/3224) +- add silent audio to slate [\#3162](https://github.com/pypeclub/OpenPype/pull/3162) +- Add timecode to slate [\#2929](https://github.com/pypeclub/OpenPype/pull/2929) + +**🔀 Refactored code** + +- Blender: Use client query functions [\#3331](https://github.com/pypeclub/OpenPype/pull/3331) +- General: Define query functions [\#3288](https://github.com/pypeclub/OpenPype/pull/3288) + +**Merged pull requests:** + +- Maya: add pointcache family to gpu cache loader [\#3318](https://github.com/pypeclub/OpenPype/pull/3318) +- Maya look: skip empty file attributes [\#3274](https://github.com/pypeclub/OpenPype/pull/3274) + +## [3.10.0](https://github.com/pypeclub/OpenPype/tree/3.10.0) (2022-05-26) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.8...3.10.0) ### 📖 Documentation - Docs: add all-contributors config and initial list [\#3094](https://github.com/pypeclub/OpenPype/pull/3094) - Nuke docs with videos [\#3052](https://github.com/pypeclub/OpenPype/pull/3052) +**🆕 New features** + +- General: OpenPype modules publish plugins are registered in host [\#3180](https://github.com/pypeclub/OpenPype/pull/3180) +- General: Creator plugins from addons can be registered [\#3179](https://github.com/pypeclub/OpenPype/pull/3179) +- Ftrack: Single image reviewable [\#3157](https://github.com/pypeclub/OpenPype/pull/3157) +- Nuke: Expose write attributes to settings [\#3123](https://github.com/pypeclub/OpenPype/pull/3123) +- Hiero: Initial frame publish support [\#3106](https://github.com/pypeclub/OpenPype/pull/3106) +- Unreal: Render Publishing [\#2917](https://github.com/pypeclub/OpenPype/pull/2917) +- AfterEffects: Implemented New Publisher [\#2838](https://github.com/pypeclub/OpenPype/pull/2838) +- Unreal: Rendering implementation [\#2410](https://github.com/pypeclub/OpenPype/pull/2410) + **🚀 Enhancements** +- Maya: FBX camera export [\#3253](https://github.com/pypeclub/OpenPype/pull/3253) +- General: updating common vendor `scriptmenu` to 1.5.2 [\#3246](https://github.com/pypeclub/OpenPype/pull/3246) +- Project Manager: Allow to paste Tasks into multiple assets at the same time [\#3226](https://github.com/pypeclub/OpenPype/pull/3226) +- Project manager: Sped up project load [\#3216](https://github.com/pypeclub/OpenPype/pull/3216) +- Loader UI: Speed issues of loader with sync server [\#3199](https://github.com/pypeclub/OpenPype/pull/3199) +- Looks: add basic support for Renderman [\#3190](https://github.com/pypeclub/OpenPype/pull/3190) +- Maya: added clean\_import option to Import loader [\#3181](https://github.com/pypeclub/OpenPype/pull/3181) +- Add the scripts menu definition to nuke [\#3168](https://github.com/pypeclub/OpenPype/pull/3168) +- Maya: add maya 2023 to default applications [\#3167](https://github.com/pypeclub/OpenPype/pull/3167) 
+- Compressed bgeo publishing in SAP and Houdini loader [\#3153](https://github.com/pypeclub/OpenPype/pull/3153) +- General: Add 'dataclasses' to required python modules [\#3149](https://github.com/pypeclub/OpenPype/pull/3149) +- Hooks: Tweak logging grammar [\#3147](https://github.com/pypeclub/OpenPype/pull/3147) +- Nuke: settings for reformat node in CreateWriteRender node [\#3143](https://github.com/pypeclub/OpenPype/pull/3143) +- Houdini: Add loader for alembic through Alembic Archive node [\#3140](https://github.com/pypeclub/OpenPype/pull/3140) +- Publisher: UI Modifications and fixes [\#3139](https://github.com/pypeclub/OpenPype/pull/3139) +- General: Simplified OP modules/addons import [\#3137](https://github.com/pypeclub/OpenPype/pull/3137) +- Terminal: Tweak coloring of TrayModuleManager logging enabled states [\#3133](https://github.com/pypeclub/OpenPype/pull/3133) +- General: Cleanup some Loader docstrings [\#3131](https://github.com/pypeclub/OpenPype/pull/3131) +- Nuke: render instance with subset name filtered overrides [\#3117](https://github.com/pypeclub/OpenPype/pull/3117) +- Unreal: Layout and Camera update and remove functions reimplemented and improvements [\#3116](https://github.com/pypeclub/OpenPype/pull/3116) +- Settings: Remove environment groups from settings [\#3115](https://github.com/pypeclub/OpenPype/pull/3115) +- TVPaint: Match renderlayer key with other hosts [\#3110](https://github.com/pypeclub/OpenPype/pull/3110) +- Ftrack: AssetVersion status on publish [\#3108](https://github.com/pypeclub/OpenPype/pull/3108) +- Tray publisher: Simple families from settings [\#3105](https://github.com/pypeclub/OpenPype/pull/3105) +- Local Settings UI: Overlay messages on save and reset [\#3104](https://github.com/pypeclub/OpenPype/pull/3104) +- General: Remove repos related logic [\#3087](https://github.com/pypeclub/OpenPype/pull/3087) - Standalone publisher: add support for bgeo and vdb [\#3080](https://github.com/pypeclub/OpenPype/pull/3080) +- Houdini: Fix FPS + outdated content pop-ups [\#3079](https://github.com/pypeclub/OpenPype/pull/3079) +- General: Add global log verbose arguments [\#3070](https://github.com/pypeclub/OpenPype/pull/3070) +- Flame: extract presets distribution [\#3063](https://github.com/pypeclub/OpenPype/pull/3063) - Update collect\_render.py [\#3055](https://github.com/pypeclub/OpenPype/pull/3055) - SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983) +- Maya: Implement Hardware Renderer 2.0 support for Render Products [\#2611](https://github.com/pypeclub/OpenPype/pull/2611) **🐛 Bug fixes** +- nuke: use framerange issue [\#3254](https://github.com/pypeclub/OpenPype/pull/3254) +- Ftrack: Chunk sizes for queries has minimal condition [\#3244](https://github.com/pypeclub/OpenPype/pull/3244) +- Maya: renderman displays needs to be filtered [\#3242](https://github.com/pypeclub/OpenPype/pull/3242) +- Ftrack: Validate that the user exists on ftrack [\#3237](https://github.com/pypeclub/OpenPype/pull/3237) +- Maya: Fix support for multiple resolutions [\#3236](https://github.com/pypeclub/OpenPype/pull/3236) +- TVPaint: Look for more groups than 12 [\#3228](https://github.com/pypeclub/OpenPype/pull/3228) +- Hiero: debugging frame range and other 3.10 [\#3222](https://github.com/pypeclub/OpenPype/pull/3222) +- Project Manager: Fix persistent editors on project change [\#3218](https://github.com/pypeclub/OpenPype/pull/3218) +- Deadline: instance data overwrite fix 
[\#3214](https://github.com/pypeclub/OpenPype/pull/3214) +- Ftrack: Push hierarchical attributes action works [\#3210](https://github.com/pypeclub/OpenPype/pull/3210) +- Standalone Publisher: Always create new representation for thumbnail [\#3203](https://github.com/pypeclub/OpenPype/pull/3203) +- Photoshop: skip collector when automatic testing [\#3202](https://github.com/pypeclub/OpenPype/pull/3202) +- Nuke: render/workfile version sync doesn't work on farm [\#3185](https://github.com/pypeclub/OpenPype/pull/3185) +- Ftrack: Review image only if there are no mp4 reviews [\#3183](https://github.com/pypeclub/OpenPype/pull/3183) +- Ftrack: Locations deepcopy issue [\#3177](https://github.com/pypeclub/OpenPype/pull/3177) +- General: Avoid creating multiple thumbnails [\#3176](https://github.com/pypeclub/OpenPype/pull/3176) +- General/Hiero: better clip duration calculation [\#3169](https://github.com/pypeclub/OpenPype/pull/3169) +- General: Oiio conversion for ffmpeg checks for invalid characters [\#3166](https://github.com/pypeclub/OpenPype/pull/3166) +- Fix for attaching render to subset [\#3164](https://github.com/pypeclub/OpenPype/pull/3164) +- Harmony: fixed missing task name in render instance [\#3163](https://github.com/pypeclub/OpenPype/pull/3163) +- Ftrack: Action delete old versions formatting works [\#3152](https://github.com/pypeclub/OpenPype/pull/3152) +- Deadline: fix the output directory [\#3144](https://github.com/pypeclub/OpenPype/pull/3144) +- General: New Session schema [\#3141](https://github.com/pypeclub/OpenPype/pull/3141) +- General: Missing version on headless mode crash properly [\#3136](https://github.com/pypeclub/OpenPype/pull/3136) +- TVPaint: Composite layers in reversed order [\#3135](https://github.com/pypeclub/OpenPype/pull/3135) +- Nuke: fixing default settings for workfile builder loaders [\#3120](https://github.com/pypeclub/OpenPype/pull/3120) +- Nuke: fix anatomy imageio regex default [\#3119](https://github.com/pypeclub/OpenPype/pull/3119) +- General: Python 3 compatibility in queries [\#3112](https://github.com/pypeclub/OpenPype/pull/3112) +- General: TemplateResult can be copied [\#3099](https://github.com/pypeclub/OpenPype/pull/3099) +- General: Collect loaded versions skips not existing representations [\#3095](https://github.com/pypeclub/OpenPype/pull/3095) - RoyalRender Control Submission - AVALON\_APP\_NAME default [\#3091](https://github.com/pypeclub/OpenPype/pull/3091) - Ftrack: Update Create Folders action [\#3089](https://github.com/pypeclub/OpenPype/pull/3089) +- Maya: Collect Render fix any render cameras check [\#3088](https://github.com/pypeclub/OpenPype/pull/3088) - Project Manager: Avoid unnecessary updates of asset documents [\#3083](https://github.com/pypeclub/OpenPype/pull/3083) - Standalone publisher: Fix plugins install [\#3077](https://github.com/pypeclub/OpenPype/pull/3077) - General: Extract review sequence is not converted with same names [\#3076](https://github.com/pypeclub/OpenPype/pull/3076) - Webpublisher: Use variant value [\#3068](https://github.com/pypeclub/OpenPype/pull/3068) - Nuke: Add aov matching even for remainder and prerender [\#3060](https://github.com/pypeclub/OpenPype/pull/3060) +- Fix support for Renderman in Maya [\#3006](https://github.com/pypeclub/OpenPype/pull/3006) **🔀 Refactored code** +- Avalon repo removed from Jobs workflow [\#3193](https://github.com/pypeclub/OpenPype/pull/3193) +- General: Remove remaining imports from avalon [\#3130](https://github.com/pypeclub/OpenPype/pull/3130) +- General: Move 
mongo db logic and remove avalon repository [\#3066](https://github.com/pypeclub/OpenPype/pull/3066) - General: Move host install [\#3009](https://github.com/pypeclub/OpenPype/pull/3009) **Merged pull requests:** +- Harmony: message length in 21.1 [\#3257](https://github.com/pypeclub/OpenPype/pull/3257) +- Harmony: 21.1 fix [\#3249](https://github.com/pypeclub/OpenPype/pull/3249) +- Maya: added jpg to filter for Image Plane Loader [\#3223](https://github.com/pypeclub/OpenPype/pull/3223) +- Webpublisher: replace space by underscore in subset names [\#3160](https://github.com/pypeclub/OpenPype/pull/3160) +- StandalonePublisher: removed Extract Background plugins [\#3093](https://github.com/pypeclub/OpenPype/pull/3093) - Nuke: added suspend\_publish knob [\#3078](https://github.com/pypeclub/OpenPype/pull/3078) - Bump async from 2.6.3 to 2.6.4 in /website [\#3065](https://github.com/pypeclub/OpenPype/pull/3065) +- SiteSync: Download all workfile inputs [\#2966](https://github.com/pypeclub/OpenPype/pull/2966) +- Photoshop: New Publisher [\#2933](https://github.com/pypeclub/OpenPype/pull/2933) +- Bump pillow from 9.0.0 to 9.0.1 [\#2880](https://github.com/pypeclub/OpenPype/pull/2880) +- AfterEffects: Allow configuration of default variant via Settings [\#2856](https://github.com/pypeclub/OpenPype/pull/2856) + +## [3.9.8](https://github.com/pypeclub/OpenPype/tree/3.9.8) (2022-05-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.7...3.9.8) + +## [3.9.7](https://github.com/pypeclub/OpenPype/tree/3.9.7) (2022-05-11) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.6...3.9.7) + +## [3.9.6](https://github.com/pypeclub/OpenPype/tree/3.9.6) (2022-05-03) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.5...3.9.6) + +## [3.9.5](https://github.com/pypeclub/OpenPype/tree/3.9.5) (2022-04-25) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...3.9.5) ## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.4-nightly.2...3.9.4) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.3...3.9.4) ### 📖 Documentation @@ -47,6 +843,7 @@ **🆕 New features** - General: Local overrides for environment variables [\#3045](https://github.com/pypeclub/OpenPype/pull/3045) +- Flame: Flare integration preparation [\#2928](https://github.com/pypeclub/OpenPype/pull/2928) **🚀 Enhancements** @@ -67,8 +864,8 @@ - LibraryLoader: Use current project for asset query in families filter [\#3042](https://github.com/pypeclub/OpenPype/pull/3042) - SiteSync: Providers ignore that site is disabled [\#3041](https://github.com/pypeclub/OpenPype/pull/3041) - Unreal: Creator import fixes [\#3040](https://github.com/pypeclub/OpenPype/pull/3040) -- Settings UI: Version column can be extended so version are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032) - SiteSync: fix transitive alternate sites, fix dropdown in Local Settings [\#3018](https://github.com/pypeclub/OpenPype/pull/3018) +- Maya: invalid review flag on rendered AOVs [\#2915](https://github.com/pypeclub/OpenPype/pull/2915) **Merged pull requests:** @@ -77,29 +874,55 @@ ## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.3-nightly.2...3.9.3) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.2...3.9.3) ### 📖 Documentation +- Documentation: Added mention of adding My Drive as a root 
[\#2999](https://github.com/pypeclub/OpenPype/pull/2999) - Website Docs: Manager Ftrack fix broken links [\#2979](https://github.com/pypeclub/OpenPype/pull/2979) +- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951) +- Documentation: New publisher develop docs [\#2896](https://github.com/pypeclub/OpenPype/pull/2896) **🆕 New features** - Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027) +- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992) - Publishing textures for Unreal [\#2988](https://github.com/pypeclub/OpenPype/pull/2988) +- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978) +- Multiverse: Initial Support [\#2908](https://github.com/pypeclub/OpenPype/pull/2908) **🚀 Enhancements** +- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011) - Ftrack: Add more options for note text of integrate ftrack note [\#3025](https://github.com/pypeclub/OpenPype/pull/3025) - Console Interpreter: Changed how console splitter size are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016) - Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015) -- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011) +- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005) +- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001) +- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000) - Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995) +- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985) +- General: `METADATA_KEYS` constant as `frozenset` for optimal immutable lookup [\#2980](https://github.com/pypeclub/OpenPype/pull/2980) +- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975) +- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967) +- Slack: Added configurable maximum file size of review upload to Slack [\#2945](https://github.com/pypeclub/OpenPype/pull/2945) +- NewPublisher: Prepared implementation of optional pyblish plugin [\#2943](https://github.com/pypeclub/OpenPype/pull/2943) +- TVPaint: Extractor to convert PNG into EXR [\#2942](https://github.com/pypeclub/OpenPype/pull/2942) +- Workfiles tool: Save as published workfiles [\#2937](https://github.com/pypeclub/OpenPype/pull/2937) +- Workfiles: Open published workfiles [\#2925](https://github.com/pypeclub/OpenPype/pull/2925) +- General: Default modules loaded dynamically [\#2923](https://github.com/pypeclub/OpenPype/pull/2923) +- CI: change the version bump logic [\#2919](https://github.com/pypeclub/OpenPype/pull/2919) +- Deadline: Add headless argument [\#2916](https://github.com/pypeclub/OpenPype/pull/2916) +- Nuke: Add no-audio Tag [\#2911](https://github.com/pypeclub/OpenPype/pull/2911) +- Ftrack: Fill workfile in custom attribute [\#2906](https://github.com/pypeclub/OpenPype/pull/2906) +- Nuke: improving readability [\#2903](https://github.com/pypeclub/OpenPype/pull/2903) +- Settings UI: Add simple tooltips for settings entities [\#2901](https://github.com/pypeclub/OpenPype/pull/2901) **🐛 Bug fixes** -- Deadline: Fixed default value 
of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033) - General: Fix validate asset docs plug-in filename and class name [\#3029](https://github.com/pypeclub/OpenPype/pull/3029) +- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033) +- Settings UI: Version column can be extended so version are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032) - General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028) - Harmony: Added creating subset name for workfile from template [\#3024](https://github.com/pypeclub/OpenPype/pull/3024) - AfterEffects: Added creating subset name for workfile from template [\#3023](https://github.com/pypeclub/OpenPype/pull/3023) @@ -107,42 +930,10 @@ - Maya: Remove missing import [\#3017](https://github.com/pypeclub/OpenPype/pull/3017) - Ftrack: multiple reviewable componets [\#3012](https://github.com/pypeclub/OpenPype/pull/3012) - Tray publisher: Fixes after code movement [\#3010](https://github.com/pypeclub/OpenPype/pull/3010) -- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002) -- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996) - -**Merged pull requests:** - -- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030) -- General: adding limitations for pyright [\#2994](https://github.com/pypeclub/OpenPype/pull/2994) - -## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.2-nightly.4...3.9.2) - -### 📖 Documentation - -- Documentation: Added mention of adding My Drive as a root [\#2999](https://github.com/pypeclub/OpenPype/pull/2999) -- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951) - -**🆕 New features** - -- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992) -- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978) - -**🚀 Enhancements** - -- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005) -- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001) -- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000) -- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985) -- General: `METADATA\_KEYS` constant as `frozenset` for optimal immutable lookup [\#2980](https://github.com/pypeclub/OpenPype/pull/2980) -- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975) -- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967) - -**🐛 Bug fixes** - - Hosts: Remove path existence checks in 'add\_implementation\_envs' [\#3004](https://github.com/pypeclub/OpenPype/pull/3004) +- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002) - Fix - remove doubled dot in workfile created from template [\#2998](https://github.com/pypeclub/OpenPype/pull/2998) +- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996) - PS: fix renaming subset incorrectly in PS [\#2991](https://github.com/pypeclub/OpenPype/pull/2991) - Fix: 
Disable setuptools auto discovery [\#2990](https://github.com/pypeclub/OpenPype/pull/2990) - AEL: fix opening existing workfile if no scene opened [\#2989](https://github.com/pypeclub/OpenPype/pull/2989) @@ -153,81 +944,969 @@ - General: OIIO conversion for ffmeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958) - Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956) - General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950) +- LogViewer: Don't refresh on initialization [\#2949](https://github.com/pypeclub/OpenPype/pull/2949) +- nuke: python3 compatibility issue with `iteritems` [\#2948](https://github.com/pypeclub/OpenPype/pull/2948) +- General: anatomy data with correct task short key [\#2947](https://github.com/pypeclub/OpenPype/pull/2947) +- SceneInventory: Fix imports in UI [\#2944](https://github.com/pypeclub/OpenPype/pull/2944) +- Slack: add generic exception [\#2941](https://github.com/pypeclub/OpenPype/pull/2941) +- General: Python specific vendor paths on env injection [\#2939](https://github.com/pypeclub/OpenPype/pull/2939) +- General: More fail safe delete old versions [\#2936](https://github.com/pypeclub/OpenPype/pull/2936) +- Settings UI: Collapsed of collapsible wrapper works as expected [\#2934](https://github.com/pypeclub/OpenPype/pull/2934) +- Maya: Do not pass `set` to maya commands \(fixes support for older maya versions\) [\#2932](https://github.com/pypeclub/OpenPype/pull/2932) +- General: Don't print log record on OSError [\#2926](https://github.com/pypeclub/OpenPype/pull/2926) +- Hiero: Fix import of 'register\_event\_callback' [\#2924](https://github.com/pypeclub/OpenPype/pull/2924) +- Flame: centos related debugging [\#2922](https://github.com/pypeclub/OpenPype/pull/2922) +- Ftrack: Missing Ftrack id after editorial publish [\#2905](https://github.com/pypeclub/OpenPype/pull/2905) +- AfterEffects: Fix rendering for single frame in DL [\#2875](https://github.com/pypeclub/OpenPype/pull/2875) + +**🔀 Refactored code** + +- General: Move plugins register and discover [\#2935](https://github.com/pypeclub/OpenPype/pull/2935) +- General: Move Attribute Definitions from pipeline [\#2931](https://github.com/pypeclub/OpenPype/pull/2931) +- General: Removed silo references and terminal splash [\#2927](https://github.com/pypeclub/OpenPype/pull/2927) +- General: Move pipeline constants to OpenPype [\#2918](https://github.com/pypeclub/OpenPype/pull/2918) +- General: Move formatting and workfile functions [\#2914](https://github.com/pypeclub/OpenPype/pull/2914) +- General: Move remaining plugins from avalon [\#2912](https://github.com/pypeclub/OpenPype/pull/2912) **Merged pull requests:** +- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030) - Bump paramiko from 2.9.2 to 2.10.1 [\#2973](https://github.com/pypeclub/OpenPype/pull/2973) - Bump minimist from 1.2.5 to 1.2.6 in /website [\#2954](https://github.com/pypeclub/OpenPype/pull/2954) - Bump node-forge from 1.2.1 to 1.3.0 in /website [\#2953](https://github.com/pypeclub/OpenPype/pull/2953) - Maya - added transparency into review creator [\#2952](https://github.com/pypeclub/OpenPype/pull/2952) +## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.1...3.9.2) + ## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18) -[Full 
Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.1-nightly.3...3.9.1) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.0...3.9.1) + +**🚀 Enhancements** + +- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907) +- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897) +- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892) +- Nuke: ExtractReviewSlate can handle more codes and profiles [\#2879](https://github.com/pypeclub/OpenPype/pull/2879) +- Flame: sequence used for reference video [\#2869](https://github.com/pypeclub/OpenPype/pull/2869) + +**🐛 Bug fixes** + +- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904) +- Fixing gap detection in extract review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902) +- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899) +- SceneInventory: Fix import of load function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894) +- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891) +- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885) +- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884) +- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874) +- Maya: Deformer node ids validation plugin [\#2826](https://github.com/pypeclub/OpenPype/pull/2826) +- Flame Babypublisher optimalization [\#2806](https://github.com/pypeclub/OpenPype/pull/2806) +- hotfix: OIIO tool path - add extension on windows [\#2618](https://github.com/pypeclub/OpenPype/pull/2618) + +**🔀 Refactored code** + +- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889) +- General: Move loader logic from avalon to openpype [\#2886](https://github.com/pypeclub/OpenPype/pull/2886) ## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.0-nightly.9...3.9.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.2...3.9.0) + +**Deprecated:** + +- Houdini: Remove unused code [\#2779](https://github.com/pypeclub/OpenPype/pull/2779) +- Loader: Remove default family states for hosts from code [\#2706](https://github.com/pypeclub/OpenPype/pull/2706) +- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845) + +### 📖 Documentation + +- Documentation: fixed broken links [\#2799](https://github.com/pypeclub/OpenPype/pull/2799) +- Documentation: broken link fix [\#2785](https://github.com/pypeclub/OpenPype/pull/2785) +- Documentation: link fixes [\#2772](https://github.com/pypeclub/OpenPype/pull/2772) +- Update docusaurus to latest version [\#2760](https://github.com/pypeclub/OpenPype/pull/2760) +- Various testing updates [\#2726](https://github.com/pypeclub/OpenPype/pull/2726) +- documentation: add example to `repack-version` command [\#2669](https://github.com/pypeclub/OpenPype/pull/2669) +- Update docusaurus [\#2639](https://github.com/pypeclub/OpenPype/pull/2639) +- Documentation: Fixed relative links [\#2621](https://github.com/pypeclub/OpenPype/pull/2621) +- Documentation: Change Photoshop & AfterEffects plugin path 
[\#2878](https://github.com/pypeclub/OpenPype/pull/2878) + +**🆕 New features** + +- Flame: loading clips to reels [\#2622](https://github.com/pypeclub/OpenPype/pull/2622) +- General: Store settings by OpenPype version [\#2570](https://github.com/pypeclub/OpenPype/pull/2570) + +**🚀 Enhancements** + +- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841) +- General: Set context environments for non host applications [\#2803](https://github.com/pypeclub/OpenPype/pull/2803) +- Houdini: Remove duplicate ValidateOutputNode plug-in [\#2780](https://github.com/pypeclub/OpenPype/pull/2780) +- Tray publisher: New Tray Publisher host \(beta\) [\#2778](https://github.com/pypeclub/OpenPype/pull/2778) +- Slack: Added regex for filtering on subset names [\#2775](https://github.com/pypeclub/OpenPype/pull/2775) +- Houdini: Implement Reset Frame Range [\#2770](https://github.com/pypeclub/OpenPype/pull/2770) +- Pyblish Pype: Remove redundant new line in installed fonts printing [\#2758](https://github.com/pypeclub/OpenPype/pull/2758) +- Flame: use Shot Name on segment for asset name [\#2751](https://github.com/pypeclub/OpenPype/pull/2751) +- Flame: adding validator source clip [\#2746](https://github.com/pypeclub/OpenPype/pull/2746) +- Work Files: Preserve subversion comment of current filename by default [\#2734](https://github.com/pypeclub/OpenPype/pull/2734) +- Maya: set Deadline job/batch name to original source workfile name instead of published workfile [\#2733](https://github.com/pypeclub/OpenPype/pull/2733) +- Ftrack: Disable ftrack module by default [\#2732](https://github.com/pypeclub/OpenPype/pull/2732) +- Project Manager: Disable add task, add asset and save button when not in a project [\#2727](https://github.com/pypeclub/OpenPype/pull/2727) +- dropbox handle big file [\#2718](https://github.com/pypeclub/OpenPype/pull/2718) +- Fusion Move PR: Minor tweaks to Fusion integration [\#2716](https://github.com/pypeclub/OpenPype/pull/2716) +- RoyalRender: Minor enhancements [\#2700](https://github.com/pypeclub/OpenPype/pull/2700) +- Nuke: prerender with review knob [\#2691](https://github.com/pypeclub/OpenPype/pull/2691) +- Maya configurable unit validator [\#2680](https://github.com/pypeclub/OpenPype/pull/2680) +- General: Add settings for CleanUpFarm and disable the plugin by default [\#2679](https://github.com/pypeclub/OpenPype/pull/2679) +- Project Manager: Only allow scroll wheel edits when spinbox is active [\#2678](https://github.com/pypeclub/OpenPype/pull/2678) +- Ftrack: Sync description to assets [\#2670](https://github.com/pypeclub/OpenPype/pull/2670) +- Houdini: Moved to OpenPype [\#2658](https://github.com/pypeclub/OpenPype/pull/2658) +- Maya: Move implementation to OpenPype [\#2649](https://github.com/pypeclub/OpenPype/pull/2649) +- General: FFmpeg conversion also check attribute string length [\#2635](https://github.com/pypeclub/OpenPype/pull/2635) +- Houdini: Load Arnold .ass procedurals into Houdini [\#2606](https://github.com/pypeclub/OpenPype/pull/2606) +- Deadline: Simplify GlobalJobPreLoad logic [\#2605](https://github.com/pypeclub/OpenPype/pull/2605) +- Houdini: Implement Arnold .ass standin extraction from Houdini \(also support .ass.gz\) [\#2603](https://github.com/pypeclub/OpenPype/pull/2603) +- New Publisher: New features and preparations for new standalone publisher [\#2556](https://github.com/pypeclub/OpenPype/pull/2556) +- Fix Maya 2022 Python 3 compatibility [\#2445](https://github.com/pypeclub/OpenPype/pull/2445) +- TVPaint: Use new 
publisher exceptions in validators [\#2435](https://github.com/pypeclub/OpenPype/pull/2435) +- Harmony: Added new style validations for New Publisher [\#2434](https://github.com/pypeclub/OpenPype/pull/2434) +- Aftereffects: New style validations for New publisher [\#2430](https://github.com/pypeclub/OpenPype/pull/2430) +- Farm publishing: New cleanup plugin for Maya renders on farm [\#2390](https://github.com/pypeclub/OpenPype/pull/2390) +- General: Subset name filtering in ExtractReview outpus [\#2872](https://github.com/pypeclub/OpenPype/pull/2872) +- NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867) +- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863) +- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859) +- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837) +- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836) +- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822) +- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817) +- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812) +- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811) +- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805) +- Houdini: Move Houdini Save Current File to beginning of ExtractorOrder [\#2747](https://github.com/pypeclub/OpenPype/pull/2747) +- Global: adding studio name/code to anatomy template formatting data [\#2630](https://github.com/pypeclub/OpenPype/pull/2630) + +**🐛 Bug fixes** + +- Settings UI: Search case sensitivity [\#2810](https://github.com/pypeclub/OpenPype/pull/2810) +- resolve: fixing fusion module loading [\#2802](https://github.com/pypeclub/OpenPype/pull/2802) +- Ftrack: Unset task ids from asset versions before tasks are removed [\#2800](https://github.com/pypeclub/OpenPype/pull/2800) +- Slack: fail gracefully if slack exception [\#2798](https://github.com/pypeclub/OpenPype/pull/2798) +- Flame: Fix version string in default settings [\#2783](https://github.com/pypeclub/OpenPype/pull/2783) +- After Effects: Fix typo in name `afftereffects` -\> `aftereffects` [\#2768](https://github.com/pypeclub/OpenPype/pull/2768) +- Houdini: Fix open last workfile [\#2767](https://github.com/pypeclub/OpenPype/pull/2767) +- Avoid renaming udim indexes [\#2765](https://github.com/pypeclub/OpenPype/pull/2765) +- Maya: Fix `unique_namespace` when in an namespace that is empty [\#2759](https://github.com/pypeclub/OpenPype/pull/2759) +- Loader UI: Fix right click in representation widget [\#2757](https://github.com/pypeclub/OpenPype/pull/2757) +- Harmony: Rendering in Deadline didn't work in other machines than submitter [\#2754](https://github.com/pypeclub/OpenPype/pull/2754) +- Aftereffects 2022 and Deadline [\#2748](https://github.com/pypeclub/OpenPype/pull/2748) +- Flame: bunch of bugs [\#2745](https://github.com/pypeclub/OpenPype/pull/2745) +- Maya: Save current scene on workfile publish [\#2744](https://github.com/pypeclub/OpenPype/pull/2744) +- Version Up: Preserve parts of filename after version number \(like subversion\) on version\_up [\#2741](https://github.com/pypeclub/OpenPype/pull/2741) +- Loader UI: Multiple 
asset selection and underline colors fixed [\#2731](https://github.com/pypeclub/OpenPype/pull/2731) +- General: Fix loading of unused chars in xml format [\#2729](https://github.com/pypeclub/OpenPype/pull/2729) +- TVPaint: Set objectName with members [\#2725](https://github.com/pypeclub/OpenPype/pull/2725) +- General: Don't use 'objectName' from loaded references [\#2715](https://github.com/pypeclub/OpenPype/pull/2715) +- Settings: Studio Project anatomy is queried using right keys [\#2711](https://github.com/pypeclub/OpenPype/pull/2711) +- Local Settings: Additional applications don't break UI [\#2710](https://github.com/pypeclub/OpenPype/pull/2710) +- Maya: Remove some unused code [\#2709](https://github.com/pypeclub/OpenPype/pull/2709) +- Houdini: Fix refactor of Houdini host move for CreateArnoldAss [\#2704](https://github.com/pypeclub/OpenPype/pull/2704) +- LookAssigner: Fix imports after moving code to OpenPype repository [\#2701](https://github.com/pypeclub/OpenPype/pull/2701) +- Multiple hosts: unify menu style across hosts [\#2693](https://github.com/pypeclub/OpenPype/pull/2693) +- Maya Redshift fixes [\#2692](https://github.com/pypeclub/OpenPype/pull/2692) +- Maya: fix fps validation popup [\#2685](https://github.com/pypeclub/OpenPype/pull/2685) +- Houdini Explicitly collect correct frame name even in case of single frame render when `frameStart` is provided [\#2676](https://github.com/pypeclub/OpenPype/pull/2676) +- hiero: fix effect collector name and order [\#2673](https://github.com/pypeclub/OpenPype/pull/2673) +- Maya: Fix menu callbacks [\#2671](https://github.com/pypeclub/OpenPype/pull/2671) +- hiero: removing obsolete unsupported plugin [\#2667](https://github.com/pypeclub/OpenPype/pull/2667) +- Launcher: Fix access to 'data' attribute on actions [\#2659](https://github.com/pypeclub/OpenPype/pull/2659) +- Maya `vrscene` loader fixes [\#2633](https://github.com/pypeclub/OpenPype/pull/2633) +- Houdini: fix usd family in loader and integrators [\#2631](https://github.com/pypeclub/OpenPype/pull/2631) +- Maya: Add only reference node to look family container like with other families [\#2508](https://github.com/pypeclub/OpenPype/pull/2508) +- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877) +- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868) +- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866) +- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864) +- General: ffmpeg was crashing on slate merge [\#2860](https://github.com/pypeclub/OpenPype/pull/2860) +- WebPublisher: Video file was published with one too many frame [\#2858](https://github.com/pypeclub/OpenPype/pull/2858) +- New Publisher: Error dialog got right styles [\#2857](https://github.com/pypeclub/OpenPype/pull/2857) +- General: Fix getattr clalback on dynamic modules [\#2855](https://github.com/pypeclub/OpenPype/pull/2855) +- Nuke: slate resolution to input video resolution [\#2853](https://github.com/pypeclub/OpenPype/pull/2853) +- WebPublisher: Fix username stored in DB [\#2852](https://github.com/pypeclub/OpenPype/pull/2852) +- WebPublisher: Fix wrong number of frames for video file [\#2851](https://github.com/pypeclub/OpenPype/pull/2851) +- Nuke: Fix family test in validate\_write\_legacy to work with stillImage [\#2847](https://github.com/pypeclub/OpenPype/pull/2847) +- Nuke: fix multiple baking profile farm publishing 
[\#2842](https://github.com/pypeclub/OpenPype/pull/2842) +- Blender: Fixed parameters for FBX export of the camera [\#2840](https://github.com/pypeclub/OpenPype/pull/2840) +- Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832) +- Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828) +- Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827) +- Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825) +- Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824) +- General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821) +- Settings UI: Fix "Apply from" action [\#2820](https://github.com/pypeclub/OpenPype/pull/2820) +- Ftrack: Job killer with missing user [\#2819](https://github.com/pypeclub/OpenPype/pull/2819) +- Nuke: Use AVALON\_APP to get value for "app" key [\#2818](https://github.com/pypeclub/OpenPype/pull/2818) +- StandalonePublisher: use dynamic groups in subset names [\#2816](https://github.com/pypeclub/OpenPype/pull/2816) + +**🔀 Refactored code** + +- Ftrack: Moved module one hierarchy level higher [\#2792](https://github.com/pypeclub/OpenPype/pull/2792) +- SyncServer: Moved module one hierarchy level higher [\#2791](https://github.com/pypeclub/OpenPype/pull/2791) +- Royal render: Move module one hierarchy level higher [\#2790](https://github.com/pypeclub/OpenPype/pull/2790) +- Deadline: Move module one hierarchy level higher [\#2789](https://github.com/pypeclub/OpenPype/pull/2789) +- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876) +- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854) +- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848) +- General: Basic event system [\#2846](https://github.com/pypeclub/OpenPype/pull/2846) +- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839) +- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829) +- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823) +- General: Extract template formatting from anatomy [\#2766](https://github.com/pypeclub/OpenPype/pull/2766) + +**Merged pull requests:** + +- Fusion: Moved implementation into OpenPype [\#2713](https://github.com/pypeclub/OpenPype/pull/2713) +- TVPaint: Plugin build without dependencies [\#2705](https://github.com/pypeclub/OpenPype/pull/2705) +- Webpublisher: Photoshop create a beauty png [\#2689](https://github.com/pypeclub/OpenPype/pull/2689) +- Ftrack: Hierarchical attributes are queried properly [\#2682](https://github.com/pypeclub/OpenPype/pull/2682) +- Maya: Add Validate Frame Range settings [\#2661](https://github.com/pypeclub/OpenPype/pull/2661) +- Harmony: move to Openpype [\#2657](https://github.com/pypeclub/OpenPype/pull/2657) +- Maya: cleanup duplicate rendersetup code [\#2642](https://github.com/pypeclub/OpenPype/pull/2642) +- Deadline: Be able to pass Mongo url to job [\#2616](https://github.com/pypeclub/OpenPype/pull/2616) ## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.2-nightly.3...3.8.2) +[Full 
Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.1...3.8.2) + +### 📖 Documentation + +- Cosmetics: Fix common typos in openpype/website [\#2617](https://github.com/pypeclub/OpenPype/pull/2617) + +**🚀 Enhancements** + +- TVPaint: Image loaders also work on review family [\#2638](https://github.com/pypeclub/OpenPype/pull/2638) +- General: Project backup tools [\#2629](https://github.com/pypeclub/OpenPype/pull/2629) +- nuke: adding clear button to write nodes [\#2627](https://github.com/pypeclub/OpenPype/pull/2627) +- Ftrack: Family to Asset type mapping is in settings [\#2602](https://github.com/pypeclub/OpenPype/pull/2602) +- Nuke: load color space from representation data [\#2576](https://github.com/pypeclub/OpenPype/pull/2576) + +**🐛 Bug fixes** + +- Fix pulling of cx\_freeze 6.10 [\#2628](https://github.com/pypeclub/OpenPype/pull/2628) +- Global: fix broken otio review extractor [\#2590](https://github.com/pypeclub/OpenPype/pull/2590) + +**Merged pull requests:** + +- WebPublisher: fix instance duplicates [\#2641](https://github.com/pypeclub/OpenPype/pull/2641) +- Fix - safer pulling of task name for webpublishing from PS [\#2613](https://github.com/pypeclub/OpenPype/pull/2613) ## [3.8.1](https://github.com/pypeclub/OpenPype/tree/3.8.1) (2022-02-01) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.1-nightly.3...3.8.1) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.0...3.8.1) + +**🚀 Enhancements** + +- Webpublisher: Thumbnail extractor [\#2600](https://github.com/pypeclub/OpenPype/pull/2600) +- Loader: Allow to toggle default family filters between "include" or "exclude" filtering [\#2541](https://github.com/pypeclub/OpenPype/pull/2541) +- Launcher: Added context menu to to skip opening last workfile [\#2536](https://github.com/pypeclub/OpenPype/pull/2536) +- Unreal: JSON Layout Loading support [\#2066](https://github.com/pypeclub/OpenPype/pull/2066) + +**🐛 Bug fixes** + +- Release/3.8.0 [\#2619](https://github.com/pypeclub/OpenPype/pull/2619) +- Settings: Enum does not store empty string if has single item to select [\#2615](https://github.com/pypeclub/OpenPype/pull/2615) +- switch distutils to sysconfig for `get_platform()` [\#2594](https://github.com/pypeclub/OpenPype/pull/2594) +- Fix poetry index and speedcopy update [\#2589](https://github.com/pypeclub/OpenPype/pull/2589) +- Webpublisher: Fix - subset names from processed .psd used wrong value for task [\#2586](https://github.com/pypeclub/OpenPype/pull/2586) +- `vrscene` creator Deadline webservice URL handling [\#2580](https://github.com/pypeclub/OpenPype/pull/2580) +- global: track name was failing if duplicated root word in name [\#2568](https://github.com/pypeclub/OpenPype/pull/2568) +- Validate Maya Rig produces no cycle errors [\#2484](https://github.com/pypeclub/OpenPype/pull/2484) + +**Merged pull requests:** + +- Bump pillow from 8.4.0 to 9.0.0 [\#2595](https://github.com/pypeclub/OpenPype/pull/2595) +- Webpublisher: Skip version collect [\#2591](https://github.com/pypeclub/OpenPype/pull/2591) +- build\(deps\): bump pillow from 8.4.0 to 9.0.0 [\#2523](https://github.com/pypeclub/OpenPype/pull/2523) ## [3.8.0](https://github.com/pypeclub/OpenPype/tree/3.8.0) (2022-01-24) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.0-nightly.7...3.8.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.7.0...3.8.0) + +### 📖 Documentation + +- Variable in docs renamed to proper name [\#2546](https://github.com/pypeclub/OpenPype/pull/2546) + +**🆕 New 
features** + +- Flame: extracting segments with trans-coding [\#2547](https://github.com/pypeclub/OpenPype/pull/2547) +- Maya : V-Ray Proxy - load all ABC files via proxy [\#2544](https://github.com/pypeclub/OpenPype/pull/2544) +- Maya to Unreal: Extended static mesh workflow [\#2537](https://github.com/pypeclub/OpenPype/pull/2537) +- Flame: collecting publishable instances [\#2519](https://github.com/pypeclub/OpenPype/pull/2519) +- Flame: create publishable clips [\#2495](https://github.com/pypeclub/OpenPype/pull/2495) +- Flame: OpenTimelineIO Export Modul [\#2398](https://github.com/pypeclub/OpenPype/pull/2398) + +**🚀 Enhancements** + +- Webpublisher: Moved error at the beginning of the log [\#2559](https://github.com/pypeclub/OpenPype/pull/2559) +- Ftrack: Use ApplicationManager to get DJV path [\#2558](https://github.com/pypeclub/OpenPype/pull/2558) +- Webpublisher: Added endpoint to reprocess batch through UI [\#2555](https://github.com/pypeclub/OpenPype/pull/2555) +- Settings: PathInput strip passed string [\#2550](https://github.com/pypeclub/OpenPype/pull/2550) +- Global: Exctract Review anatomy fill data with output name [\#2548](https://github.com/pypeclub/OpenPype/pull/2548) +- Cosmetics: Clean up some cosmetics / typos [\#2542](https://github.com/pypeclub/OpenPype/pull/2542) +- General: Validate if current process OpenPype version is requested version [\#2529](https://github.com/pypeclub/OpenPype/pull/2529) +- General: Be able to use anatomy data in ffmpeg output arguments [\#2525](https://github.com/pypeclub/OpenPype/pull/2525) +- Expose toggle publish plug-in settings for Maya Look Shading Engine Naming [\#2521](https://github.com/pypeclub/OpenPype/pull/2521) +- Photoshop: Move implementation to OpenPype [\#2510](https://github.com/pypeclub/OpenPype/pull/2510) +- TimersManager: Move module one hierarchy higher [\#2501](https://github.com/pypeclub/OpenPype/pull/2501) +- Slack: notifications are sent with Openpype logo and bot name [\#2499](https://github.com/pypeclub/OpenPype/pull/2499) +- Slack: Add review to notification message [\#2498](https://github.com/pypeclub/OpenPype/pull/2498) +- Ftrack: Event handlers settings [\#2496](https://github.com/pypeclub/OpenPype/pull/2496) +- Tools: Fix style and modality of errors in loader and creator [\#2489](https://github.com/pypeclub/OpenPype/pull/2489) +- Maya: Collect 'fps' animation data only for "review" instances [\#2486](https://github.com/pypeclub/OpenPype/pull/2486) +- Project Manager: Remove project button cleanup [\#2482](https://github.com/pypeclub/OpenPype/pull/2482) +- Tools: Be able to change models of tasks and assets widgets [\#2475](https://github.com/pypeclub/OpenPype/pull/2475) +- Publish pype: Reduce publish process defering [\#2464](https://github.com/pypeclub/OpenPype/pull/2464) +- Maya: Improve speed of Collect History logic [\#2460](https://github.com/pypeclub/OpenPype/pull/2460) +- Maya: Validate Rig Controllers - fix Error: in script editor [\#2459](https://github.com/pypeclub/OpenPype/pull/2459) +- Maya: Validate NGONs simplify and speed-up [\#2458](https://github.com/pypeclub/OpenPype/pull/2458) +- Maya: Optimize Validate Locked Normals speed for dense polymeshes [\#2457](https://github.com/pypeclub/OpenPype/pull/2457) +- Maya: Refactor missing \_get\_reference\_node method [\#2455](https://github.com/pypeclub/OpenPype/pull/2455) +- Houdini: Remove broken unique name counter [\#2450](https://github.com/pypeclub/OpenPype/pull/2450) +- Maya: Improve lib.polyConstraint performance when Select tool is not the 
active tool context [\#2447](https://github.com/pypeclub/OpenPype/pull/2447) +- General: Validate third party before build [\#2425](https://github.com/pypeclub/OpenPype/pull/2425) +- Maya : add option to not group reference in ReferenceLoader [\#2383](https://github.com/pypeclub/OpenPype/pull/2383) + +**🐛 Bug fixes** + +- AfterEffects: Fix - removed obsolete import [\#2577](https://github.com/pypeclub/OpenPype/pull/2577) +- General: OpenPype version updates [\#2575](https://github.com/pypeclub/OpenPype/pull/2575) +- Ftrack: Delete action revision [\#2563](https://github.com/pypeclub/OpenPype/pull/2563) +- Webpublisher: ftrack shows incorrect user names [\#2560](https://github.com/pypeclub/OpenPype/pull/2560) +- General: Do not validate version if build does not support it [\#2557](https://github.com/pypeclub/OpenPype/pull/2557) +- Webpublisher: Fixed progress reporting [\#2553](https://github.com/pypeclub/OpenPype/pull/2553) +- Fix Maya AssProxyLoader version switch [\#2551](https://github.com/pypeclub/OpenPype/pull/2551) +- General: Fix install thread in igniter [\#2549](https://github.com/pypeclub/OpenPype/pull/2549) +- Houdini: vdbcache family preserve frame numbers on publish integration + enable validate version for Houdini [\#2535](https://github.com/pypeclub/OpenPype/pull/2535) +- Maya: Fix Load VDB to V-Ray [\#2533](https://github.com/pypeclub/OpenPype/pull/2533) +- Maya: ReferenceLoader fix not unique group name error for attach to root [\#2532](https://github.com/pypeclub/OpenPype/pull/2532) +- Maya: namespaced context go back to original namespace when started from inside a namespace [\#2531](https://github.com/pypeclub/OpenPype/pull/2531) +- Fix create zip tool - path argument [\#2522](https://github.com/pypeclub/OpenPype/pull/2522) +- Maya: Fix Extract Look with space in names [\#2518](https://github.com/pypeclub/OpenPype/pull/2518) +- Fix published frame content for sequence starting with 0 [\#2513](https://github.com/pypeclub/OpenPype/pull/2513) +- Maya: reset empty string attributes correctly to "" instead of "None" [\#2506](https://github.com/pypeclub/OpenPype/pull/2506) +- Improve FusionPreLaunch hook errors [\#2505](https://github.com/pypeclub/OpenPype/pull/2505) +- General: Settings work if OpenPypeVersion is available [\#2494](https://github.com/pypeclub/OpenPype/pull/2494) +- General: PYTHONPATH may break OpenPype dependencies [\#2493](https://github.com/pypeclub/OpenPype/pull/2493) +- General: Modules import function output fix [\#2492](https://github.com/pypeclub/OpenPype/pull/2492) +- AE: fix hiding of alert window below Publish [\#2491](https://github.com/pypeclub/OpenPype/pull/2491) +- Workfiles tool: Files widget show files on first show [\#2488](https://github.com/pypeclub/OpenPype/pull/2488) +- General: Custom template paths filter fix [\#2483](https://github.com/pypeclub/OpenPype/pull/2483) +- Loader: Remove always on top flag in tray [\#2480](https://github.com/pypeclub/OpenPype/pull/2480) +- General: Anatomy does not return root envs as unicode [\#2465](https://github.com/pypeclub/OpenPype/pull/2465) +- Maya: Validate Shape Zero do not keep fixed geometry vertices selected/active after repair [\#2456](https://github.com/pypeclub/OpenPype/pull/2456) + +**Merged pull requests:** + +- AfterEffects: Move implementation to OpenPype [\#2543](https://github.com/pypeclub/OpenPype/pull/2543) +- Maya: Remove Maya Look Assigner check on startup [\#2540](https://github.com/pypeclub/OpenPype/pull/2540) +- build\(deps\): bump shelljs from 0.8.4 to 0.8.5 in /website 
[\#2538](https://github.com/pypeclub/OpenPype/pull/2538) +- build\(deps\): bump follow-redirects from 1.14.4 to 1.14.7 in /website [\#2534](https://github.com/pypeclub/OpenPype/pull/2534) +- Nuke: Merge avalon's implementation into OpenPype [\#2514](https://github.com/pypeclub/OpenPype/pull/2514) +- Maya: Vray fix proxies look assignment [\#2392](https://github.com/pypeclub/OpenPype/pull/2392) +- Bump algoliasearch-helper from 3.4.4 to 3.6.2 in /website [\#2297](https://github.com/pypeclub/OpenPype/pull/2297) ## [3.7.0](https://github.com/pypeclub/OpenPype/tree/3.7.0) (2022-01-04) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.7.0-nightly.14...3.7.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.4...3.7.0) + +**Deprecated:** + +- General: Default modules hierarchy n2 [\#2368](https://github.com/pypeclub/OpenPype/pull/2368) + +### 📖 Documentation + +- docs\[website\]: Add Ellipse Studio \(logo\) as an OpenPype contributor [\#2324](https://github.com/pypeclub/OpenPype/pull/2324) + +**🆕 New features** + +- Settings UI use OpenPype styles [\#2296](https://github.com/pypeclub/OpenPype/pull/2296) +- Store typed version dependencies for workfiles [\#2192](https://github.com/pypeclub/OpenPype/pull/2192) +- OpenPypeV3: add key task type, task shortname and user to path templating construction [\#2157](https://github.com/pypeclub/OpenPype/pull/2157) +- Nuke: Alembic model workflow [\#2140](https://github.com/pypeclub/OpenPype/pull/2140) +- TVPaint: Load workfile from published. [\#1980](https://github.com/pypeclub/OpenPype/pull/1980) + +**🚀 Enhancements** + +- General: Workdir extra folders [\#2462](https://github.com/pypeclub/OpenPype/pull/2462) +- Photoshop: New style validations for New publisher [\#2429](https://github.com/pypeclub/OpenPype/pull/2429) +- General: Environment variables groups [\#2424](https://github.com/pypeclub/OpenPype/pull/2424) +- Unreal: Dynamic menu created in Python [\#2422](https://github.com/pypeclub/OpenPype/pull/2422) +- Settings UI: Hyperlinks to settings [\#2420](https://github.com/pypeclub/OpenPype/pull/2420) +- Modules: JobQueue module moved one hierarchy level higher [\#2419](https://github.com/pypeclub/OpenPype/pull/2419) +- TimersManager: Start timer post launch hook [\#2418](https://github.com/pypeclub/OpenPype/pull/2418) +- General: Run applications as separate processes under linux [\#2408](https://github.com/pypeclub/OpenPype/pull/2408) +- Ftrack: Check existence of object type on recreation [\#2404](https://github.com/pypeclub/OpenPype/pull/2404) +- Enhancement: Global cleanup plugin that explicitly remove paths from context [\#2402](https://github.com/pypeclub/OpenPype/pull/2402) +- General: MongoDB ability to specify replica set groups [\#2401](https://github.com/pypeclub/OpenPype/pull/2401) +- Flame: moving `utility_scripts` to api folder also with `scripts` [\#2385](https://github.com/pypeclub/OpenPype/pull/2385) +- Centos 7 dependency compatibility [\#2384](https://github.com/pypeclub/OpenPype/pull/2384) +- Enhancement: Settings: Use project settings values from another project [\#2382](https://github.com/pypeclub/OpenPype/pull/2382) +- Blender 3: Support auto install for new blender version [\#2377](https://github.com/pypeclub/OpenPype/pull/2377) +- Maya add render image path to settings [\#2375](https://github.com/pypeclub/OpenPype/pull/2375) +- Settings: Webpublisher in hosts enum [\#2367](https://github.com/pypeclub/OpenPype/pull/2367) +- Hiero: python3 compatibility 
[\#2365](https://github.com/pypeclub/OpenPype/pull/2365) +- Burnins: Be able recognize mxf OPAtom format [\#2361](https://github.com/pypeclub/OpenPype/pull/2361) +- Maya: Add is\_static\_image\_plane and is\_in\_all\_views option in imagePlaneLoader [\#2356](https://github.com/pypeclub/OpenPype/pull/2356) +- Local settings: Copyable studio paths [\#2349](https://github.com/pypeclub/OpenPype/pull/2349) +- Assets Widget: Clear model on project change [\#2345](https://github.com/pypeclub/OpenPype/pull/2345) +- General: OpenPype default modules hierarchy [\#2338](https://github.com/pypeclub/OpenPype/pull/2338) +- TVPaint: Move implementation to OpenPype [\#2336](https://github.com/pypeclub/OpenPype/pull/2336) +- General: FFprobe error exception contain original error message [\#2328](https://github.com/pypeclub/OpenPype/pull/2328) +- Resolve: Add experimental button to menu [\#2325](https://github.com/pypeclub/OpenPype/pull/2325) +- Hiero: Add experimental tools action [\#2323](https://github.com/pypeclub/OpenPype/pull/2323) +- Input links: Cleanup and unification of differences [\#2322](https://github.com/pypeclub/OpenPype/pull/2322) +- General: Don't validate vendor bin with executing them [\#2317](https://github.com/pypeclub/OpenPype/pull/2317) +- General: Multilayer EXRs support [\#2315](https://github.com/pypeclub/OpenPype/pull/2315) +- General: Run process log stderr as info log level [\#2309](https://github.com/pypeclub/OpenPype/pull/2309) +- General: Reduce vendor imports [\#2305](https://github.com/pypeclub/OpenPype/pull/2305) +- Tools: Cleanup of unused classes [\#2304](https://github.com/pypeclub/OpenPype/pull/2304) +- Project Manager: Added ability to delete project [\#2298](https://github.com/pypeclub/OpenPype/pull/2298) +- Ftrack: Synchronize input links [\#2287](https://github.com/pypeclub/OpenPype/pull/2287) +- StandalonePublisher: Remove unused plugin ExtractHarmonyZip [\#2277](https://github.com/pypeclub/OpenPype/pull/2277) +- Ftrack: Support multiple reviews [\#2271](https://github.com/pypeclub/OpenPype/pull/2271) +- Ftrack: Remove unused clean component plugin [\#2269](https://github.com/pypeclub/OpenPype/pull/2269) +- Royal Render: Support for rr channels in separate dirs [\#2268](https://github.com/pypeclub/OpenPype/pull/2268) +- Houdini: Add experimental tools action [\#2267](https://github.com/pypeclub/OpenPype/pull/2267) +- Nuke: extract baked review videos presets [\#2248](https://github.com/pypeclub/OpenPype/pull/2248) +- TVPaint: Workers rendering [\#2209](https://github.com/pypeclub/OpenPype/pull/2209) +- OpenPypeV3: Add key parent asset to path templating construction [\#2186](https://github.com/pypeclub/OpenPype/pull/2186) + +**🐛 Bug fixes** + +- TVPaint: Create render layer dialog is in front [\#2471](https://github.com/pypeclub/OpenPype/pull/2471) +- Short Pyblish plugin path [\#2428](https://github.com/pypeclub/OpenPype/pull/2428) +- PS: Introduced settings for invalid characters to use in ValidateNaming plugin [\#2417](https://github.com/pypeclub/OpenPype/pull/2417) +- Settings UI: Breadcrumbs path does not create new entities [\#2416](https://github.com/pypeclub/OpenPype/pull/2416) +- AfterEffects: Variant 2022 is in defaults but missing in schemas [\#2412](https://github.com/pypeclub/OpenPype/pull/2412) +- Nuke: baking representations was not additive [\#2406](https://github.com/pypeclub/OpenPype/pull/2406) +- General: Fix access to environments from default settings [\#2403](https://github.com/pypeclub/OpenPype/pull/2403) +- Fix: Placeholder Input color 
set fix [\#2399](https://github.com/pypeclub/OpenPype/pull/2399) +- Settings: Fix state change of wrapper label [\#2396](https://github.com/pypeclub/OpenPype/pull/2396) +- Flame: fix ftrack publisher [\#2381](https://github.com/pypeclub/OpenPype/pull/2381) +- hiero: solve custom ocio path [\#2379](https://github.com/pypeclub/OpenPype/pull/2379) +- hiero: fix workio and flatten [\#2378](https://github.com/pypeclub/OpenPype/pull/2378) +- Nuke: fixing menu re-drawing during context change [\#2374](https://github.com/pypeclub/OpenPype/pull/2374) +- Webpublisher: Fix assignment of families of TVpaint instances [\#2373](https://github.com/pypeclub/OpenPype/pull/2373) +- Nuke: fixing node name based on switched asset name [\#2369](https://github.com/pypeclub/OpenPype/pull/2369) +- JobQueue: Fix loading of settings [\#2362](https://github.com/pypeclub/OpenPype/pull/2362) +- Tools: Placeholder color [\#2359](https://github.com/pypeclub/OpenPype/pull/2359) +- Launcher: Minimize button on MacOs [\#2355](https://github.com/pypeclub/OpenPype/pull/2355) +- StandalonePublisher: Fix import of constant [\#2354](https://github.com/pypeclub/OpenPype/pull/2354) +- Houdini: Fix HDA creation [\#2350](https://github.com/pypeclub/OpenPype/pull/2350) +- Adobe products show issue [\#2347](https://github.com/pypeclub/OpenPype/pull/2347) +- Maya Look Assigner: Fix Python 3 compatibility [\#2343](https://github.com/pypeclub/OpenPype/pull/2343) +- Remove wrongly used host for hook [\#2342](https://github.com/pypeclub/OpenPype/pull/2342) +- Tools: Use Qt context on tools show [\#2340](https://github.com/pypeclub/OpenPype/pull/2340) +- Flame: Fix default argument value in custom dictionary [\#2339](https://github.com/pypeclub/OpenPype/pull/2339) +- Timers Manager: Disable auto stop timer on linux platform [\#2334](https://github.com/pypeclub/OpenPype/pull/2334) +- nuke: bake preset single input exception [\#2331](https://github.com/pypeclub/OpenPype/pull/2331) +- Hiero: fixing multiple templates at a hierarchy parent [\#2330](https://github.com/pypeclub/OpenPype/pull/2330) +- Fix - provider icons are pulled from a folder [\#2326](https://github.com/pypeclub/OpenPype/pull/2326) +- InputLinks: Typo in "inputLinks" key [\#2314](https://github.com/pypeclub/OpenPype/pull/2314) +- Deadline timeout and logging [\#2312](https://github.com/pypeclub/OpenPype/pull/2312) +- nuke: do not multiply representation on class method [\#2311](https://github.com/pypeclub/OpenPype/pull/2311) +- Workfiles tool: Fix task formatting [\#2306](https://github.com/pypeclub/OpenPype/pull/2306) +- Delivery: Fix delivery paths created on windows [\#2302](https://github.com/pypeclub/OpenPype/pull/2302) +- Maya: Deadline - fix limit groups [\#2295](https://github.com/pypeclub/OpenPype/pull/2295) +- Royal Render: Fix plugin order and OpenPype auto-detection [\#2291](https://github.com/pypeclub/OpenPype/pull/2291) +- New Publisher: Fix mapping of indexes [\#2285](https://github.com/pypeclub/OpenPype/pull/2285) +- Alternate site for site sync doesnt work for sequences [\#2284](https://github.com/pypeclub/OpenPype/pull/2284) +- FFmpeg: Execute ffprobe using list of arguments instead of string command [\#2281](https://github.com/pypeclub/OpenPype/pull/2281) +- Nuke: Anatomy fill data use task as dictionary [\#2278](https://github.com/pypeclub/OpenPype/pull/2278) +- Bug: fix variable name \_asset\_id in workfiles application [\#2274](https://github.com/pypeclub/OpenPype/pull/2274) +- Version handling fixes 
[\#2272](https://github.com/pypeclub/OpenPype/pull/2272) + +**Merged pull requests:** + +- Maya: Replaced PATH usage with vendored oiio path for maketx utility [\#2405](https://github.com/pypeclub/OpenPype/pull/2405) +- \[Fix\]\[MAYA\] Handle message type attribute within CollectLook [\#2394](https://github.com/pypeclub/OpenPype/pull/2394) +- Add validator to check correct version of extension for PS and AE [\#2387](https://github.com/pypeclub/OpenPype/pull/2387) +- Maya: configurable model top level validation [\#2321](https://github.com/pypeclub/OpenPype/pull/2321) +- Create test publish class for After Effects [\#2270](https://github.com/pypeclub/OpenPype/pull/2270) ## [3.6.4](https://github.com/pypeclub/OpenPype/tree/3.6.4) (2021-11-23) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.7.0-nightly.1...3.6.4) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.3...3.6.4) + +**🐛 Bug fixes** + +- Nuke: inventory update removes all loaded read nodes [\#2294](https://github.com/pypeclub/OpenPype/pull/2294) ## [3.6.3](https://github.com/pypeclub/OpenPype/tree/3.6.3) (2021-11-19) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.3-nightly.1...3.6.3) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.2...3.6.3) + +**🐛 Bug fixes** + +- Deadline: Fix publish targets [\#2280](https://github.com/pypeclub/OpenPype/pull/2280) ## [3.6.2](https://github.com/pypeclub/OpenPype/tree/3.6.2) (2021-11-18) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.2-nightly.2...3.6.2) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.1...3.6.2) + +**🚀 Enhancements** + +- Tools: Assets widget [\#2265](https://github.com/pypeclub/OpenPype/pull/2265) +- SceneInventory: Choose loader in asset switcher [\#2262](https://github.com/pypeclub/OpenPype/pull/2262) +- Style: New fonts in OpenPype style [\#2256](https://github.com/pypeclub/OpenPype/pull/2256) +- Tools: SceneInventory in OpenPype [\#2255](https://github.com/pypeclub/OpenPype/pull/2255) +- Tools: Tasks widget [\#2251](https://github.com/pypeclub/OpenPype/pull/2251) +- Tools: Creator in OpenPype [\#2244](https://github.com/pypeclub/OpenPype/pull/2244) +- Added endpoint for configured extensions [\#2221](https://github.com/pypeclub/OpenPype/pull/2221) + +**🐛 Bug fixes** + +- Tools: Parenting of tools in Nuke and Hiero [\#2266](https://github.com/pypeclub/OpenPype/pull/2266) +- limiting validator to specific editorial hosts [\#2264](https://github.com/pypeclub/OpenPype/pull/2264) +- Tools: Select Context dialog attribute fix [\#2261](https://github.com/pypeclub/OpenPype/pull/2261) +- Maya: Render publishing fails on linux [\#2260](https://github.com/pypeclub/OpenPype/pull/2260) +- LookAssigner: Fix tool reopen [\#2259](https://github.com/pypeclub/OpenPype/pull/2259) +- Standalone: editorial not publishing thumbnails on all subsets [\#2258](https://github.com/pypeclub/OpenPype/pull/2258) +- Burnins: Support mxf metadata [\#2247](https://github.com/pypeclub/OpenPype/pull/2247) +- Maya: Support for configurable AOV separator characters [\#2197](https://github.com/pypeclub/OpenPype/pull/2197) +- Maya: texture colorspace modes in looks [\#2195](https://github.com/pypeclub/OpenPype/pull/2195) ## [3.6.1](https://github.com/pypeclub/OpenPype/tree/3.6.1) (2021-11-16) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.1-nightly.1...3.6.1) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.0...3.6.1) + +**🐛 Bug fixes** + +- Loader doesn't 
allow changing of version before loading [\#2254](https://github.com/pypeclub/OpenPype/pull/2254) ## [3.6.0](https://github.com/pypeclub/OpenPype/tree/3.6.0) (2021-11-15) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.0-nightly.6...3.6.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.5.0...3.6.0) + +### 📖 Documentation + +- Add alternative sites for Site Sync [\#2206](https://github.com/pypeclub/OpenPype/pull/2206) +- Add command line way of running site sync server [\#2188](https://github.com/pypeclub/OpenPype/pull/2188) + +**🆕 New features** + +- Add validate active site button to sync queue on a project [\#2176](https://github.com/pypeclub/OpenPype/pull/2176) +- Maya : Colorspace configuration [\#2170](https://github.com/pypeclub/OpenPype/pull/2170) +- Blender: Added support for audio [\#2168](https://github.com/pypeclub/OpenPype/pull/2168) +- Flame: a host basic integration [\#2165](https://github.com/pypeclub/OpenPype/pull/2165) +- Houdini: simple HDA workflow [\#2072](https://github.com/pypeclub/OpenPype/pull/2072) +- Basic Royal Render Integration ✨ [\#2061](https://github.com/pypeclub/OpenPype/pull/2061) +- Camera handling between Blender and Unreal [\#1988](https://github.com/pypeclub/OpenPype/pull/1988) +- switch PyQt5 for PySide2 [\#1744](https://github.com/pypeclub/OpenPype/pull/1744) + +**🚀 Enhancements** + +- Tools: Subset manager in OpenPype [\#2243](https://github.com/pypeclub/OpenPype/pull/2243) +- General: Skip module directories without init file [\#2239](https://github.com/pypeclub/OpenPype/pull/2239) +- General: Static interfaces [\#2238](https://github.com/pypeclub/OpenPype/pull/2238) +- Style: Fix transparent image in style [\#2235](https://github.com/pypeclub/OpenPype/pull/2235) +- Add a "following workfile versioning" option on publish [\#2225](https://github.com/pypeclub/OpenPype/pull/2225) +- Modules: Module can add cli commands [\#2224](https://github.com/pypeclub/OpenPype/pull/2224) +- Webpublisher: Separate webpublisher logic [\#2222](https://github.com/pypeclub/OpenPype/pull/2222) +- Add both side availability on Site Sync sites to Loader [\#2220](https://github.com/pypeclub/OpenPype/pull/2220) +- Tools: Center loader and library loader on show [\#2219](https://github.com/pypeclub/OpenPype/pull/2219) +- Maya : Validate shape zero [\#2212](https://github.com/pypeclub/OpenPype/pull/2212) +- Maya : validate unique names [\#2211](https://github.com/pypeclub/OpenPype/pull/2211) +- Tools: OpenPype stylesheet in workfiles tool [\#2208](https://github.com/pypeclub/OpenPype/pull/2208) +- Ftrack: Replace Queue with deque in event handlers logic [\#2204](https://github.com/pypeclub/OpenPype/pull/2204) +- Tools: New select context dialog [\#2200](https://github.com/pypeclub/OpenPype/pull/2200) +- Maya : Validate mesh ngons [\#2199](https://github.com/pypeclub/OpenPype/pull/2199) +- Dirmap in Nuke [\#2198](https://github.com/pypeclub/OpenPype/pull/2198) +- Delivery: Check 'frame' key in template for sequence delivery [\#2196](https://github.com/pypeclub/OpenPype/pull/2196) +- Settings: Site sync project settings improvement [\#2193](https://github.com/pypeclub/OpenPype/pull/2193) +- Usage of tools code [\#2185](https://github.com/pypeclub/OpenPype/pull/2185) +- Settings: Dictionary based on project roots [\#2184](https://github.com/pypeclub/OpenPype/pull/2184) +- Subset name: Be able to pass asset document to get subset name [\#2179](https://github.com/pypeclub/OpenPype/pull/2179) +- Tools: Experimental tools 
[\#2167](https://github.com/pypeclub/OpenPype/pull/2167) +- Loader: Refactor and use OpenPype stylesheets [\#2166](https://github.com/pypeclub/OpenPype/pull/2166) +- Add loader for linked smart objects in photoshop [\#2149](https://github.com/pypeclub/OpenPype/pull/2149) +- Burnins: DNxHD profiles handling [\#2142](https://github.com/pypeclub/OpenPype/pull/2142) +- Tools: Single access point for host tools [\#2139](https://github.com/pypeclub/OpenPype/pull/2139) + +**🐛 Bug fixes** + +- Ftrack: Sync project ftrack id cache issue [\#2250](https://github.com/pypeclub/OpenPype/pull/2250) +- Ftrack: Session creation and Prepare project [\#2245](https://github.com/pypeclub/OpenPype/pull/2245) +- Added queue for studio processing in PS [\#2237](https://github.com/pypeclub/OpenPype/pull/2237) +- Python 2: Unicode to string conversion [\#2236](https://github.com/pypeclub/OpenPype/pull/2236) +- Fix - enum for color coding in PS [\#2234](https://github.com/pypeclub/OpenPype/pull/2234) +- Pyblish Tool: Fix targets handling [\#2232](https://github.com/pypeclub/OpenPype/pull/2232) +- Ftrack: Base event fix of 'get\_project\_from\_entity' method [\#2214](https://github.com/pypeclub/OpenPype/pull/2214) +- Maya : multiple subsets review broken [\#2210](https://github.com/pypeclub/OpenPype/pull/2210) +- Fix - different command used for Linux and Mac OS [\#2207](https://github.com/pypeclub/OpenPype/pull/2207) +- Tools: Workfiles tool don't use avalon widgets [\#2205](https://github.com/pypeclub/OpenPype/pull/2205) +- Ftrack: Fill missing ftrack id on mongo project [\#2203](https://github.com/pypeclub/OpenPype/pull/2203) +- Project Manager: Fix copying of tasks [\#2191](https://github.com/pypeclub/OpenPype/pull/2191) +- StandalonePublisher: Source validator don't expect representations [\#2190](https://github.com/pypeclub/OpenPype/pull/2190) +- Blender: Fix trying to pack an image when the shader node has no texture [\#2183](https://github.com/pypeclub/OpenPype/pull/2183) +- Maya: review viewport settings [\#2177](https://github.com/pypeclub/OpenPype/pull/2177) +- MacOS: Launching of applications may cause Permissions error [\#2175](https://github.com/pypeclub/OpenPype/pull/2175) +- Maya: Aspect ratio [\#2174](https://github.com/pypeclub/OpenPype/pull/2174) +- Blender: Fix 'Deselect All' with object not in 'Object Mode' [\#2163](https://github.com/pypeclub/OpenPype/pull/2163) +- Tools: Stylesheets are applied after tool show [\#2161](https://github.com/pypeclub/OpenPype/pull/2161) +- Maya: Collect render - fix UNC path support 🐛 [\#2158](https://github.com/pypeclub/OpenPype/pull/2158) +- Maya: Fix hotbox broken by scriptsmenu [\#2151](https://github.com/pypeclub/OpenPype/pull/2151) +- Ftrack: Ignore save warnings exception in Prepare project action [\#2150](https://github.com/pypeclub/OpenPype/pull/2150) +- Loader thumbnails with smooth edges [\#2147](https://github.com/pypeclub/OpenPype/pull/2147) +- Added validator for source files for Standalone Publisher [\#2138](https://github.com/pypeclub/OpenPype/pull/2138) + +**Merged pull requests:** + +- Bump pillow from 8.2.0 to 8.3.2 [\#2162](https://github.com/pypeclub/OpenPype/pull/2162) +- Bump axios from 0.21.1 to 0.21.4 in /website [\#2059](https://github.com/pypeclub/OpenPype/pull/2059) ## [3.5.0](https://github.com/pypeclub/OpenPype/tree/3.5.0) (2021-10-17) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.5.0-nightly.8...3.5.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.1...3.5.0) + +**Deprecated:** + +- Maya: 
Change mayaAscii family to mayaScene [\#2106](https://github.com/pypeclub/OpenPype/pull/2106) + +**🆕 New features** + +- Added project and task into context change message in Maya [\#2131](https://github.com/pypeclub/OpenPype/pull/2131) +- Add ExtractBurnin to photoshop review [\#2124](https://github.com/pypeclub/OpenPype/pull/2124) +- PYPE-1218 - changed namespace to contain subset name in Maya [\#2114](https://github.com/pypeclub/OpenPype/pull/2114) +- Added running configurable disk mapping command before start of OP [\#2091](https://github.com/pypeclub/OpenPype/pull/2091) +- SFTP provider [\#2073](https://github.com/pypeclub/OpenPype/pull/2073) +- Maya: Validate setdress top group [\#2068](https://github.com/pypeclub/OpenPype/pull/2068) +- Maya: Enable publishing render attrib sets \(e.g. V-Ray Displacement\) with model [\#1955](https://github.com/pypeclub/OpenPype/pull/1955) + +**🚀 Enhancements** + +- Maya: make rig validators configurable in settings [\#2137](https://github.com/pypeclub/OpenPype/pull/2137) +- Settings: Updated readme for entity types in settings [\#2132](https://github.com/pypeclub/OpenPype/pull/2132) +- Nuke: unified clip loader [\#2128](https://github.com/pypeclub/OpenPype/pull/2128) +- Settings UI: Project model refreshing and sorting [\#2104](https://github.com/pypeclub/OpenPype/pull/2104) +- Create Read From Rendered - Disable Relative paths by default [\#2093](https://github.com/pypeclub/OpenPype/pull/2093) +- Added choosing different dirmap mapping if workfile synched locally [\#2088](https://github.com/pypeclub/OpenPype/pull/2088) +- General: Remove IdleManager module [\#2084](https://github.com/pypeclub/OpenPype/pull/2084) +- Tray UI: Message box about missing settings defaults [\#2080](https://github.com/pypeclub/OpenPype/pull/2080) +- Tray UI: Show menu where first click happened [\#2079](https://github.com/pypeclub/OpenPype/pull/2079) +- Global: add global validators to settings [\#2078](https://github.com/pypeclub/OpenPype/pull/2078) +- Use CRF for burnin when available [\#2070](https://github.com/pypeclub/OpenPype/pull/2070) +- Project manager: Filter first item after selection of project [\#2069](https://github.com/pypeclub/OpenPype/pull/2069) +- Nuke: Adding `still` image family workflow [\#2064](https://github.com/pypeclub/OpenPype/pull/2064) +- Maya: validate authorized loaded plugins [\#2062](https://github.com/pypeclub/OpenPype/pull/2062) +- Tools: add support for pyenv on windows [\#2051](https://github.com/pypeclub/OpenPype/pull/2051) +- SyncServer: Dropbox Provider [\#1979](https://github.com/pypeclub/OpenPype/pull/1979) +- Burnin: Get data from context with defined keys. [\#1897](https://github.com/pypeclub/OpenPype/pull/1897) +- Timers manager: Get task time [\#1896](https://github.com/pypeclub/OpenPype/pull/1896) +- TVPaint: Option to stop timer on application exit. [\#1887](https://github.com/pypeclub/OpenPype/pull/1887) + +**🐛 Bug fixes** + +- Maya: fix model publishing [\#2130](https://github.com/pypeclub/OpenPype/pull/2130) +- Fix - oiiotool wasn't recognized even if present [\#2129](https://github.com/pypeclub/OpenPype/pull/2129) +- General: Disk mapping group [\#2120](https://github.com/pypeclub/OpenPype/pull/2120) +- Hiero: publishing effect first time makes wrong resources path [\#2115](https://github.com/pypeclub/OpenPype/pull/2115) +- Add startup script for Houdini Core. 
[\#2110](https://github.com/pypeclub/OpenPype/pull/2110) +- TVPaint: Behavior name of loop also accept repeat [\#2109](https://github.com/pypeclub/OpenPype/pull/2109) +- Ftrack: Project settings save custom attributes skip unknown attributes [\#2103](https://github.com/pypeclub/OpenPype/pull/2103) +- Blender: Fix NoneType error when animation\_data is missing for a rig [\#2101](https://github.com/pypeclub/OpenPype/pull/2101) +- Fix broken import in sftp provider [\#2100](https://github.com/pypeclub/OpenPype/pull/2100) +- Global: Fix docstring on publish plugin extract review [\#2097](https://github.com/pypeclub/OpenPype/pull/2097) +- Delivery Action Files Sequence fix [\#2096](https://github.com/pypeclub/OpenPype/pull/2096) +- General: Cloud mongo ca certificate issue [\#2095](https://github.com/pypeclub/OpenPype/pull/2095) +- TVPaint: Creator use context from workfile [\#2087](https://github.com/pypeclub/OpenPype/pull/2087) +- Blender: fix texture missing when publishing blend files [\#2085](https://github.com/pypeclub/OpenPype/pull/2085) +- General: Startup validations oiio tool path fix on linux [\#2083](https://github.com/pypeclub/OpenPype/pull/2083) +- Deadline: Collect deadline server does not check existence of deadline key [\#2082](https://github.com/pypeclub/OpenPype/pull/2082) +- Blender: fixed Curves with modifiers in Rigs [\#2081](https://github.com/pypeclub/OpenPype/pull/2081) +- Nuke UI scaling [\#2077](https://github.com/pypeclub/OpenPype/pull/2077) +- Maya: Fix multi-camera renders [\#2065](https://github.com/pypeclub/OpenPype/pull/2065) +- Fix Sync Queue when project disabled [\#2063](https://github.com/pypeclub/OpenPype/pull/2063) + +**Merged pull requests:** + +- Bump pywin32 from 300 to 301 [\#2086](https://github.com/pypeclub/OpenPype/pull/2086) ## [3.4.1](https://github.com/pypeclub/OpenPype/tree/3.4.1) (2021-09-23) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.4.1-nightly.1...3.4.1) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.0...3.4.1) + +**🆕 New features** + +- Settings: Flag project as deactivated and hide from tools' view [\#2008](https://github.com/pypeclub/OpenPype/pull/2008) + +**🚀 Enhancements** + +- General: Startup validations [\#2054](https://github.com/pypeclub/OpenPype/pull/2054) +- Nuke: proxy mode validator [\#2052](https://github.com/pypeclub/OpenPype/pull/2052) +- Ftrack: Removed ftrack interface [\#2049](https://github.com/pypeclub/OpenPype/pull/2049) +- Settings UI: Deffered set value on entity [\#2044](https://github.com/pypeclub/OpenPype/pull/2044) +- Loader: Families filtering [\#2043](https://github.com/pypeclub/OpenPype/pull/2043) +- Settings UI: Project view enhancements [\#2042](https://github.com/pypeclub/OpenPype/pull/2042) +- Settings for Nuke IncrementScriptVersion [\#2039](https://github.com/pypeclub/OpenPype/pull/2039) +- Loader & Library loader: Use tools from OpenPype [\#2038](https://github.com/pypeclub/OpenPype/pull/2038) +- Adding predefined project folders creation in PM [\#2030](https://github.com/pypeclub/OpenPype/pull/2030) +- WebserverModule: Removed interface of webserver module [\#2028](https://github.com/pypeclub/OpenPype/pull/2028) +- TimersManager: Removed interface of timers manager [\#2024](https://github.com/pypeclub/OpenPype/pull/2024) +- Feature Maya import asset from scene inventory [\#2018](https://github.com/pypeclub/OpenPype/pull/2018) + +**🐛 Bug fixes** + +- Timers manger: Typo fix [\#2058](https://github.com/pypeclub/OpenPype/pull/2058) +- Hiero: Editorial fixes 
[\#2057](https://github.com/pypeclub/OpenPype/pull/2057) +- Differentiate jpg sequences from thumbnail [\#2056](https://github.com/pypeclub/OpenPype/pull/2056) +- FFmpeg: Split command to list does not work [\#2046](https://github.com/pypeclub/OpenPype/pull/2046) +- Removed shell flag in subprocess call [\#2045](https://github.com/pypeclub/OpenPype/pull/2045) + +**Merged pull requests:** + +- Bump prismjs from 1.24.0 to 1.25.0 in /website [\#2050](https://github.com/pypeclub/OpenPype/pull/2050) ## [3.4.0](https://github.com/pypeclub/OpenPype/tree/3.4.0) (2021-09-17) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.4.0-nightly.6...3.4.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.1...3.4.0) + +### 📖 Documentation + +- Documentation: Ftrack launch argsuments update [\#2014](https://github.com/pypeclub/OpenPype/pull/2014) +- Nuke Quick Start / Tutorial [\#1952](https://github.com/pypeclub/OpenPype/pull/1952) +- Houdini: add Camera, Point Cache, Composite, Redshift ROP and VDB Cache support [\#1821](https://github.com/pypeclub/OpenPype/pull/1821) + +**🆕 New features** + +- Nuke: Compatibility with Nuke 13 [\#2003](https://github.com/pypeclub/OpenPype/pull/2003) +- Maya: Add Xgen family support [\#1947](https://github.com/pypeclub/OpenPype/pull/1947) +- Feature/webpublisher backend [\#1876](https://github.com/pypeclub/OpenPype/pull/1876) +- Blender: Improved assets handling [\#1615](https://github.com/pypeclub/OpenPype/pull/1615) + +**🚀 Enhancements** + +- Added possibility to configure of synchronization of workfile version… [\#2041](https://github.com/pypeclub/OpenPype/pull/2041) +- General: Task types in profiles [\#2036](https://github.com/pypeclub/OpenPype/pull/2036) +- Console interpreter: Handle invalid sizes on initialization [\#2022](https://github.com/pypeclub/OpenPype/pull/2022) +- Ftrack: Show OpenPype versions in event server status [\#2019](https://github.com/pypeclub/OpenPype/pull/2019) +- General: Staging icon [\#2017](https://github.com/pypeclub/OpenPype/pull/2017) +- Ftrack: Sync to avalon actions have jobs [\#2015](https://github.com/pypeclub/OpenPype/pull/2015) +- Modules: Connect method is not required [\#2009](https://github.com/pypeclub/OpenPype/pull/2009) +- Settings UI: Number with configurable steps [\#2001](https://github.com/pypeclub/OpenPype/pull/2001) +- Moving project folder structure creation out of ftrack module \#1989 [\#1996](https://github.com/pypeclub/OpenPype/pull/1996) +- Configurable items for providers without Settings [\#1987](https://github.com/pypeclub/OpenPype/pull/1987) +- Global: Example addons [\#1986](https://github.com/pypeclub/OpenPype/pull/1986) +- Standalone Publisher: Extract harmony zip handle workfile template [\#1982](https://github.com/pypeclub/OpenPype/pull/1982) +- Settings UI: Number sliders [\#1978](https://github.com/pypeclub/OpenPype/pull/1978) +- Workfiles: Support more workfile templates [\#1966](https://github.com/pypeclub/OpenPype/pull/1966) +- Launcher: Fix crashes on action click [\#1964](https://github.com/pypeclub/OpenPype/pull/1964) +- Settings: Minor fixes in UI and missing default values [\#1963](https://github.com/pypeclub/OpenPype/pull/1963) +- Blender: Toggle system console works on windows [\#1962](https://github.com/pypeclub/OpenPype/pull/1962) +- Global: Settings defined by Addons/Modules [\#1959](https://github.com/pypeclub/OpenPype/pull/1959) +- CI: change release numbering triggers [\#1954](https://github.com/pypeclub/OpenPype/pull/1954) +- Global: Avalon Host name 
collector [\#1949](https://github.com/pypeclub/OpenPype/pull/1949) +- Global: Define hosts in CollectSceneVersion [\#1948](https://github.com/pypeclub/OpenPype/pull/1948) +- Add face sets to exported alembics [\#1942](https://github.com/pypeclub/OpenPype/pull/1942) +- OpenPype: Add version validation and `--headless` mode and update progress 🔄 [\#1939](https://github.com/pypeclub/OpenPype/pull/1939) +- \#1894 - adds host to template\_name\_profiles for filtering [\#1915](https://github.com/pypeclub/OpenPype/pull/1915) +- Environments: Tool environments in alphabetical order [\#1910](https://github.com/pypeclub/OpenPype/pull/1910) +- Disregard publishing time. [\#1888](https://github.com/pypeclub/OpenPype/pull/1888) +- Dynamic modules [\#1872](https://github.com/pypeclub/OpenPype/pull/1872) + +**🐛 Bug fixes** + +- Workfiles tool: Task selection [\#2040](https://github.com/pypeclub/OpenPype/pull/2040) +- Ftrack: Delete old versions missing settings key [\#2037](https://github.com/pypeclub/OpenPype/pull/2037) +- Nuke: typo on a button [\#2034](https://github.com/pypeclub/OpenPype/pull/2034) +- Hiero: Fix "none" named tags [\#2033](https://github.com/pypeclub/OpenPype/pull/2033) +- FFmpeg: Subprocess arguments as list [\#2032](https://github.com/pypeclub/OpenPype/pull/2032) +- General: Fix Python 2 breaking line [\#2016](https://github.com/pypeclub/OpenPype/pull/2016) +- Bugfix/webpublisher task type [\#2006](https://github.com/pypeclub/OpenPype/pull/2006) +- Nuke thumbnails generated from middle of the sequence [\#1992](https://github.com/pypeclub/OpenPype/pull/1992) +- Nuke: last version from path gets correct version [\#1990](https://github.com/pypeclub/OpenPype/pull/1990) +- nuke, resolve, hiero: precollector order lest then 0.5 [\#1984](https://github.com/pypeclub/OpenPype/pull/1984) +- Last workfile with multiple work templates [\#1981](https://github.com/pypeclub/OpenPype/pull/1981) +- Collectors order [\#1977](https://github.com/pypeclub/OpenPype/pull/1977) +- Stop timer was within validator order range. 
[\#1975](https://github.com/pypeclub/OpenPype/pull/1975) +- Ftrack: arrow submodule has https url source [\#1974](https://github.com/pypeclub/OpenPype/pull/1974) +- Ftrack: Fix hosts attribute in collect ftrack username [\#1972](https://github.com/pypeclub/OpenPype/pull/1972) +- Deadline: Houdini plugins in different hierarchy [\#1970](https://github.com/pypeclub/OpenPype/pull/1970) +- Removed deprecated submodules [\#1967](https://github.com/pypeclub/OpenPype/pull/1967) +- Global: ExtractJpeg can handle filepaths with spaces [\#1961](https://github.com/pypeclub/OpenPype/pull/1961) +- Resolve path when adding to zip [\#1960](https://github.com/pypeclub/OpenPype/pull/1960) + +**Merged pull requests:** + +- Bump url-parse from 1.5.1 to 1.5.3 in /website [\#1958](https://github.com/pypeclub/OpenPype/pull/1958) +- Bump path-parse from 1.0.6 to 1.0.7 in /website [\#1933](https://github.com/pypeclub/OpenPype/pull/1933) ## [3.3.1](https://github.com/pypeclub/OpenPype/tree/3.3.1) (2021-08-20) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.3.1-nightly.1...3.3.1) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.0...3.3.1) + +**🐛 Bug fixes** + +- TVPaint: Fixed rendered frame indexes [\#1946](https://github.com/pypeclub/OpenPype/pull/1946) +- Maya: Menu actions fix [\#1945](https://github.com/pypeclub/OpenPype/pull/1945) +- standalone: editorial shared object problem [\#1941](https://github.com/pypeclub/OpenPype/pull/1941) +- Bugfix nuke deadline app name [\#1928](https://github.com/pypeclub/OpenPype/pull/1928) ## [3.3.0](https://github.com/pypeclub/OpenPype/tree/3.3.0) (2021-08-17) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.3.0-nightly.11...3.3.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.2.0...3.3.0) + +### 📖 Documentation + +- Standalone Publish of textures family [\#1834](https://github.com/pypeclub/OpenPype/pull/1834) + +**🆕 New features** + +- Settings UI: Breadcrumbs in settings [\#1932](https://github.com/pypeclub/OpenPype/pull/1932) +- Maya: Scene patching 🩹on submission to Deadline [\#1923](https://github.com/pypeclub/OpenPype/pull/1923) +- Feature AE local render [\#1901](https://github.com/pypeclub/OpenPype/pull/1901) + +**🚀 Enhancements** + +- Python console interpreter [\#1940](https://github.com/pypeclub/OpenPype/pull/1940) +- Global: Updated logos and Default settings [\#1927](https://github.com/pypeclub/OpenPype/pull/1927) +- Check for missing ✨ Python when using `pyenv` [\#1925](https://github.com/pypeclub/OpenPype/pull/1925) +- Settings: Default values for enum [\#1920](https://github.com/pypeclub/OpenPype/pull/1920) +- Settings UI: Modifiable dict view enhance [\#1919](https://github.com/pypeclub/OpenPype/pull/1919) +- submodules: avalon-core update [\#1911](https://github.com/pypeclub/OpenPype/pull/1911) +- Ftrack: Where I run action enhancement [\#1900](https://github.com/pypeclub/OpenPype/pull/1900) +- Ftrack: Private project server actions [\#1899](https://github.com/pypeclub/OpenPype/pull/1899) +- Support nested studio plugins paths. [\#1898](https://github.com/pypeclub/OpenPype/pull/1898) +- Settings: global validators with options [\#1892](https://github.com/pypeclub/OpenPype/pull/1892) +- Settings: Conditional dict enum positioning [\#1891](https://github.com/pypeclub/OpenPype/pull/1891) +- Expose stop timer through rest api. 
[\#1886](https://github.com/pypeclub/OpenPype/pull/1886) +- TVPaint: Increment workfile [\#1885](https://github.com/pypeclub/OpenPype/pull/1885) +- Allow Multiple Notes to run on tasks. [\#1882](https://github.com/pypeclub/OpenPype/pull/1882) +- Prepare for pyside2 [\#1869](https://github.com/pypeclub/OpenPype/pull/1869) +- Filter hosts in settings host-enum [\#1868](https://github.com/pypeclub/OpenPype/pull/1868) +- Local actions with process identifier [\#1867](https://github.com/pypeclub/OpenPype/pull/1867) +- Workfile tool start at host launch support [\#1865](https://github.com/pypeclub/OpenPype/pull/1865) +- Anatomy schema validation [\#1864](https://github.com/pypeclub/OpenPype/pull/1864) +- Ftrack prepare project structure [\#1861](https://github.com/pypeclub/OpenPype/pull/1861) +- Maya: support for configurable `dirmap` 🗺️ [\#1859](https://github.com/pypeclub/OpenPype/pull/1859) +- Independent general environments [\#1853](https://github.com/pypeclub/OpenPype/pull/1853) +- TVPaint Start Frame [\#1844](https://github.com/pypeclub/OpenPype/pull/1844) +- Ftrack push attributes action adds traceback to job [\#1843](https://github.com/pypeclub/OpenPype/pull/1843) +- Prepare project action enhance [\#1838](https://github.com/pypeclub/OpenPype/pull/1838) +- nuke: settings create missing default subsets [\#1829](https://github.com/pypeclub/OpenPype/pull/1829) +- Update poetry lock [\#1823](https://github.com/pypeclub/OpenPype/pull/1823) +- Settings: settings for plugins [\#1819](https://github.com/pypeclub/OpenPype/pull/1819) +- Settings list can use template or schema as object type [\#1815](https://github.com/pypeclub/OpenPype/pull/1815) +- Maya: Deadline custom settings [\#1797](https://github.com/pypeclub/OpenPype/pull/1797) +- Maya: Shader name validation [\#1762](https://github.com/pypeclub/OpenPype/pull/1762) + +**🐛 Bug fixes** + +- Fix - ftrack family was added incorrectly in some cases [\#1935](https://github.com/pypeclub/OpenPype/pull/1935) +- Fix - Deadline publish on Linux started Tray instead of headless publishing [\#1930](https://github.com/pypeclub/OpenPype/pull/1930) +- Maya: Validate Model Name - repair accident deletion in settings defaults [\#1929](https://github.com/pypeclub/OpenPype/pull/1929) +- Nuke: submit to farm failed due `ftrack` family remove [\#1926](https://github.com/pypeclub/OpenPype/pull/1926) +- Fix - validate takes repre\["files"\] as list all the time [\#1922](https://github.com/pypeclub/OpenPype/pull/1922) +- standalone: validator asset parents [\#1917](https://github.com/pypeclub/OpenPype/pull/1917) +- Nuke: update video file crassing [\#1916](https://github.com/pypeclub/OpenPype/pull/1916) +- Fix - texture validators for workfiles triggers only for textures workfiles [\#1914](https://github.com/pypeclub/OpenPype/pull/1914) +- Settings UI: List order works as expected [\#1906](https://github.com/pypeclub/OpenPype/pull/1906) +- Hiero: loaded clip was not set colorspace from version data [\#1904](https://github.com/pypeclub/OpenPype/pull/1904) +- Pyblish UI: Fix collecting stage processing [\#1903](https://github.com/pypeclub/OpenPype/pull/1903) +- Burnins: Use input's bitrate in h624 [\#1902](https://github.com/pypeclub/OpenPype/pull/1902) +- Bug: fixed python detection [\#1893](https://github.com/pypeclub/OpenPype/pull/1893) +- global: integrate name missing default template [\#1890](https://github.com/pypeclub/OpenPype/pull/1890) +- publisher: editorial plugins fixes [\#1889](https://github.com/pypeclub/OpenPype/pull/1889) +- Normalize path 
returned from Workfiles. [\#1880](https://github.com/pypeclub/OpenPype/pull/1880) +- Workfiles tool event arguments fix [\#1862](https://github.com/pypeclub/OpenPype/pull/1862) +- imageio: fix grouping [\#1856](https://github.com/pypeclub/OpenPype/pull/1856) +- Maya: don't add reference members as connections to the container set 📦 [\#1855](https://github.com/pypeclub/OpenPype/pull/1855) +- publisher: missing version in subset prop [\#1849](https://github.com/pypeclub/OpenPype/pull/1849) +- Ftrack type error fix in sync to avalon event handler [\#1845](https://github.com/pypeclub/OpenPype/pull/1845) +- Nuke: updating effects subset fail [\#1841](https://github.com/pypeclub/OpenPype/pull/1841) +- nuke: write render node skipped with crop [\#1836](https://github.com/pypeclub/OpenPype/pull/1836) +- Project folder structure overrides [\#1813](https://github.com/pypeclub/OpenPype/pull/1813) +- Maya: fix yeti settings path in extractor [\#1809](https://github.com/pypeclub/OpenPype/pull/1809) +- Failsafe for cross project containers. [\#1806](https://github.com/pypeclub/OpenPype/pull/1806) +- Houdini colector formatting keys fix [\#1802](https://github.com/pypeclub/OpenPype/pull/1802) +- Settings error dialog on show [\#1798](https://github.com/pypeclub/OpenPype/pull/1798) +- Application launch stdout/stderr in GUI build [\#1684](https://github.com/pypeclub/OpenPype/pull/1684) +- Nuke: re-use instance nodes output path [\#1577](https://github.com/pypeclub/OpenPype/pull/1577) + +**Merged pull requests:** + +- Fix - make AE workfile publish to Ftrack configurable [\#1937](https://github.com/pypeclub/OpenPype/pull/1937) +- Add support for multiple Deadline ☠️➖ servers [\#1905](https://github.com/pypeclub/OpenPype/pull/1905) +- Maya: add support for `RedshiftNormalMap` node, fix `tx` linear space 🚀 [\#1863](https://github.com/pypeclub/OpenPype/pull/1863) +- Maya: expected files -\> render products ⚙️ overhaul [\#1812](https://github.com/pypeclub/OpenPype/pull/1812) +- PS, AE - send actual context when another webserver is running [\#1811](https://github.com/pypeclub/OpenPype/pull/1811) ## [3.2.0](https://github.com/pypeclub/OpenPype/tree/3.2.0) (2021-07-13) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.2.0-nightly.7...3.2.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.4...3.2.0) + +### 📖 Documentation + +- Fix: staging and `--use-version` option [\#1786](https://github.com/pypeclub/OpenPype/pull/1786) +- Subset template and TVPaint subset template docs [\#1717](https://github.com/pypeclub/OpenPype/pull/1717) +- Overscan color extract review [\#1701](https://github.com/pypeclub/OpenPype/pull/1701) + +**🚀 Enhancements** + +- Nuke: ftrack family plugin settings preset [\#1805](https://github.com/pypeclub/OpenPype/pull/1805) +- Standalone publisher last project [\#1799](https://github.com/pypeclub/OpenPype/pull/1799) +- Ftrack Multiple notes as server action [\#1795](https://github.com/pypeclub/OpenPype/pull/1795) +- Settings conditional dict [\#1777](https://github.com/pypeclub/OpenPype/pull/1777) +- Settings application use python 2 only where needed [\#1776](https://github.com/pypeclub/OpenPype/pull/1776) +- Settings UI copy/paste [\#1769](https://github.com/pypeclub/OpenPype/pull/1769) +- Workfile tool widths [\#1766](https://github.com/pypeclub/OpenPype/pull/1766) +- Push hierarchical attributes care about task parent changes [\#1763](https://github.com/pypeclub/OpenPype/pull/1763) +- Application executables with environment variables 
[\#1757](https://github.com/pypeclub/OpenPype/pull/1757) +- Deadline: Nuke submission additional attributes [\#1756](https://github.com/pypeclub/OpenPype/pull/1756) +- Settings schema without prefill [\#1753](https://github.com/pypeclub/OpenPype/pull/1753) +- Settings Hosts enum [\#1739](https://github.com/pypeclub/OpenPype/pull/1739) +- Validate containers settings [\#1736](https://github.com/pypeclub/OpenPype/pull/1736) +- PS - added loader from sequence [\#1726](https://github.com/pypeclub/OpenPype/pull/1726) +- Autoupdate launcher [\#1725](https://github.com/pypeclub/OpenPype/pull/1725) +- Toggle Ftrack upload in StandalonePublisher [\#1708](https://github.com/pypeclub/OpenPype/pull/1708) +- Nuke: Prerender Frame Range by default [\#1699](https://github.com/pypeclub/OpenPype/pull/1699) +- Smoother edges of color triangle [\#1695](https://github.com/pypeclub/OpenPype/pull/1695) + +**🐛 Bug fixes** + +- nuke: fixing wrong name of family folder when `used existing frames` [\#1803](https://github.com/pypeclub/OpenPype/pull/1803) +- Collect ftrack family bugs [\#1801](https://github.com/pypeclub/OpenPype/pull/1801) +- Invitee email can be None which break the Ftrack commit. [\#1788](https://github.com/pypeclub/OpenPype/pull/1788) +- Otio unrelated error on import [\#1782](https://github.com/pypeclub/OpenPype/pull/1782) +- FFprobe streams order [\#1775](https://github.com/pypeclub/OpenPype/pull/1775) +- Fix - single file files are str only, cast it to list to count properly [\#1772](https://github.com/pypeclub/OpenPype/pull/1772) +- Environments in app executable for MacOS [\#1768](https://github.com/pypeclub/OpenPype/pull/1768) +- Project specific environments [\#1767](https://github.com/pypeclub/OpenPype/pull/1767) +- Settings UI with refresh button [\#1764](https://github.com/pypeclub/OpenPype/pull/1764) +- Standalone publisher thumbnail extractor fix [\#1761](https://github.com/pypeclub/OpenPype/pull/1761) +- Anatomy others templates don't cause crash [\#1758](https://github.com/pypeclub/OpenPype/pull/1758) +- Backend acre module commit update [\#1745](https://github.com/pypeclub/OpenPype/pull/1745) +- hiero: precollect instances failing when audio selected [\#1743](https://github.com/pypeclub/OpenPype/pull/1743) +- Hiero: creator instance error [\#1742](https://github.com/pypeclub/OpenPype/pull/1742) +- Nuke: fixing render creator for no selection format failing [\#1741](https://github.com/pypeclub/OpenPype/pull/1741) +- StandalonePublisher: failing collector for editorial [\#1738](https://github.com/pypeclub/OpenPype/pull/1738) +- Local settings UI crash on missing defaults [\#1737](https://github.com/pypeclub/OpenPype/pull/1737) +- TVPaint white background on thumbnail [\#1735](https://github.com/pypeclub/OpenPype/pull/1735) +- Ftrack missing custom attribute message [\#1734](https://github.com/pypeclub/OpenPype/pull/1734) +- Launcher project changes [\#1733](https://github.com/pypeclub/OpenPype/pull/1733) +- Ftrack sync status [\#1732](https://github.com/pypeclub/OpenPype/pull/1732) +- TVPaint use layer name for default variant [\#1724](https://github.com/pypeclub/OpenPype/pull/1724) +- Default subset template for TVPaint review and workfile families [\#1716](https://github.com/pypeclub/OpenPype/pull/1716) +- Maya: Extract review hotfix [\#1714](https://github.com/pypeclub/OpenPype/pull/1714) +- Settings: Imageio improving granularity [\#1711](https://github.com/pypeclub/OpenPype/pull/1711) +- Application without executables [\#1679](https://github.com/pypeclub/OpenPype/pull/1679) +- 
Unreal: launching on Linux [\#1672](https://github.com/pypeclub/OpenPype/pull/1672) + +**Merged pull requests:** + +- Bump prismjs from 1.23.0 to 1.24.0 in /website [\#1773](https://github.com/pypeclub/OpenPype/pull/1773) +- TVPaint ftrack family [\#1755](https://github.com/pypeclub/OpenPype/pull/1755) ## [2.18.4](https://github.com/pypeclub/OpenPype/tree/2.18.4) (2021-06-24) @@ -235,7 +1914,7 @@ ## [2.18.3](https://github.com/pypeclub/OpenPype/tree/2.18.3) (2021-06-23) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.2.0-nightly.2...2.18.3) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.2...2.18.3) ## [2.18.2](https://github.com/pypeclub/OpenPype/tree/2.18.2) (2021-06-16) @@ -243,9 +1922,47 @@ ## [3.1.0](https://github.com/pypeclub/OpenPype/tree/3.1.0) (2021-06-15) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.1.0-nightly.4...3.1.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.0.0...3.1.0) -# Changelog +### 📖 Documentation + +- Feature Slack integration [\#1657](https://github.com/pypeclub/OpenPype/pull/1657) + +**🚀 Enhancements** + +- Log Viewer with OpenPype style [\#1703](https://github.com/pypeclub/OpenPype/pull/1703) +- Scrolling in OpenPype info widget [\#1702](https://github.com/pypeclub/OpenPype/pull/1702) +- OpenPype style in modules [\#1694](https://github.com/pypeclub/OpenPype/pull/1694) +- Sort applications and tools alphabetically in Settings UI [\#1689](https://github.com/pypeclub/OpenPype/pull/1689) +- \#683 - Validate Frame Range in Standalone Publisher [\#1683](https://github.com/pypeclub/OpenPype/pull/1683) +- Hiero: old container versions identify with red color [\#1682](https://github.com/pypeclub/OpenPype/pull/1682) +- Project Manger: Default name column width [\#1669](https://github.com/pypeclub/OpenPype/pull/1669) +- Remove outline in stylesheet [\#1667](https://github.com/pypeclub/OpenPype/pull/1667) +- TVPaint: Creator take layer name as default value for subset variant [\#1663](https://github.com/pypeclub/OpenPype/pull/1663) +- TVPaint custom subset template [\#1662](https://github.com/pypeclub/OpenPype/pull/1662) +- Editorial: conform assets validator [\#1659](https://github.com/pypeclub/OpenPype/pull/1659) +- Nuke - Publish simplification [\#1653](https://github.com/pypeclub/OpenPype/pull/1653) +- \#1333 - added tooltip hints to Pyblish buttons [\#1649](https://github.com/pypeclub/OpenPype/pull/1649) + +**🐛 Bug fixes** + +- Nuke: broken publishing rendered frames [\#1707](https://github.com/pypeclub/OpenPype/pull/1707) +- Standalone publisher Thumbnail export args [\#1705](https://github.com/pypeclub/OpenPype/pull/1705) +- Bad zip can break OpenPype start [\#1691](https://github.com/pypeclub/OpenPype/pull/1691) +- Hiero: published whole edit mov [\#1687](https://github.com/pypeclub/OpenPype/pull/1687) +- Ftrack subprocess handle of stdout/stderr [\#1675](https://github.com/pypeclub/OpenPype/pull/1675) +- Settings list race condifiton and mutable dict list conversion [\#1671](https://github.com/pypeclub/OpenPype/pull/1671) +- Mac launch arguments fix [\#1660](https://github.com/pypeclub/OpenPype/pull/1660) +- Fix missing dbm python module [\#1652](https://github.com/pypeclub/OpenPype/pull/1652) +- Transparent branches in view on Mac [\#1648](https://github.com/pypeclub/OpenPype/pull/1648) +- Add asset on task item [\#1646](https://github.com/pypeclub/OpenPype/pull/1646) +- Project manager save and queue [\#1645](https://github.com/pypeclub/OpenPype/pull/1645) +- New project anatomy 
values [\#1644](https://github.com/pypeclub/OpenPype/pull/1644) +- Farm publishing: check if published items do exist [\#1573](https://github.com/pypeclub/OpenPype/pull/1573) + +**Merged pull requests:** + +- Bump normalize-url from 4.5.0 to 4.5.1 in /website [\#1686](https://github.com/pypeclub/OpenPype/pull/1686) ## [3.0.0](https://github.com/pypeclub/openpype/tree/3.0.0) @@ -258,12 +1975,12 @@ - Easy to add Application versions. - Per Project Environment and plugin management. - Robust profile system for creating reviewables and burnins, with filtering based on Application, Task and data family. -- Configurable publish plugins. +- Configurable publish plugins. - Options to make any validator or extractor, optional or disabled. - Color Management is now unified under anatomy settings. - Subset naming and grouping is fully configurable. - All project attributes can now be set directly in OpenPype settings. -- Studio Setting can be locked to prevent unwanted artist changes. +- Studio Setting can be locked to prevent unwanted artist changes. - You can now add per project and per task type templates for workfile initialization in most hosts. - Too many other individual configurable option to list in this changelog :) @@ -1021,8 +2738,6 @@ - Standalone Publisher: getting fps from context instead of nonexistent entity [\#729](https://github.com/pypeclub/pype/pull/729) -# Changelog - ## [2.13.6](https://github.com/pypeclub/pype/tree/2.13.6) (2020-11-15) [Full Changelog](https://github.com/pypeclub/pype/compare/2.13.5...2.13.6) @@ -1812,9 +3527,4 @@ A large cleanup release. Most of the change are under the hood. - _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner - - - - - - +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/HISTORY.md b/HISTORY.md index 032f876aa3..f6cc74e114 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,1934 @@ # Changelog +## [3.14.5](https://github.com/pypeclub/OpenPype/tree/3.14.5) (2022-10-24) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.4...3.14.5) + +**🚀 Enhancements** + +- Maya: add OBJ extractor to model family [\#4021](https://github.com/pypeclub/OpenPype/pull/4021) +- Publish report viewer tool [\#4010](https://github.com/pypeclub/OpenPype/pull/4010) +- Nuke | Global: adding custom tags representation filtering [\#4009](https://github.com/pypeclub/OpenPype/pull/4009) +- Publisher: Create context has shared data for collection phase [\#3995](https://github.com/pypeclub/OpenPype/pull/3995) +- Resolve: updating to v18 compatibility [\#3986](https://github.com/pypeclub/OpenPype/pull/3986) + +**🐛 Bug fixes** + +- TrayPublisher: Fix missing argument [\#4019](https://github.com/pypeclub/OpenPype/pull/4019) +- General: Fix python 2 compatibility of ffmpeg and oiio tools discovery [\#4011](https://github.com/pypeclub/OpenPype/pull/4011) + +**🔀 Refactored code** + +- Maya: Removed unused imports [\#4008](https://github.com/pypeclub/OpenPype/pull/4008) +- Unreal: Fix import of moved function [\#4007](https://github.com/pypeclub/OpenPype/pull/4007) +- Houdini: Change import of RepairAction [\#4005](https://github.com/pypeclub/OpenPype/pull/4005) +- Nuke/Hiero: Refactor openpype.api imports [\#4000](https://github.com/pypeclub/OpenPype/pull/4000) +- TVPaint: Defined with HostBase [\#3994](https://github.com/pypeclub/OpenPype/pull/3994) + +**Merged pull requests:** + +- Unreal: Remove redundant Creator stub 
[\#4012](https://github.com/pypeclub/OpenPype/pull/4012) +- Unreal: add `uproject` extension to Unreal project template [\#4004](https://github.com/pypeclub/OpenPype/pull/4004) +- Unreal: fix order of includes [\#4002](https://github.com/pypeclub/OpenPype/pull/4002) +- Fusion: Implement backwards compatibility \(+/- Fusion 17.2\) [\#3958](https://github.com/pypeclub/OpenPype/pull/3958) + +## [3.14.4](https://github.com/pypeclub/OpenPype/tree/3.14.4) (2022-10-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.3...3.14.4) + +**🆕 New features** + +- Webpublisher: use max next published version number for all items in batch [\#3961](https://github.com/pypeclub/OpenPype/pull/3961) +- General: Control Thumbnail integration via explicit configuration profiles [\#3951](https://github.com/pypeclub/OpenPype/pull/3951) + +**🚀 Enhancements** + +- Publisher: Multiselection in card view [\#3993](https://github.com/pypeclub/OpenPype/pull/3993) +- TrayPublisher: Original Basename cause crash too early [\#3990](https://github.com/pypeclub/OpenPype/pull/3990) +- Tray Publisher: add `originalBasename` data to simple creators [\#3988](https://github.com/pypeclub/OpenPype/pull/3988) +- General: Custom paths to ffmpeg and OpenImageIO tools [\#3982](https://github.com/pypeclub/OpenPype/pull/3982) +- Integrate: Preserve existing subset group if instance does not set it for new version [\#3976](https://github.com/pypeclub/OpenPype/pull/3976) +- Publisher: Prepare publisher controller for remote publishing [\#3972](https://github.com/pypeclub/OpenPype/pull/3972) +- Maya: new style dataclasses in maya deadline submitter plugin [\#3968](https://github.com/pypeclub/OpenPype/pull/3968) +- Maya: Define preffered Qt bindings for Qt.py and qtpy [\#3963](https://github.com/pypeclub/OpenPype/pull/3963) +- Settings: Move imageio from project anatomy to project settings \[pypeclub\] [\#3959](https://github.com/pypeclub/OpenPype/pull/3959) +- TrayPublisher: Extract thumbnail for other families [\#3952](https://github.com/pypeclub/OpenPype/pull/3952) +- Publisher: Pass instance to subset name method on update [\#3949](https://github.com/pypeclub/OpenPype/pull/3949) +- General: Set root environments before DCC launch [\#3947](https://github.com/pypeclub/OpenPype/pull/3947) +- Refactor: changed legacy way to update database for Hero version integrate [\#3941](https://github.com/pypeclub/OpenPype/pull/3941) +- Maya: Moved plugin from global to maya [\#3939](https://github.com/pypeclub/OpenPype/pull/3939) +- Publisher: Create dialog is part of main window [\#3936](https://github.com/pypeclub/OpenPype/pull/3936) +- Fusion: Implement Alembic and FBX mesh loader [\#3927](https://github.com/pypeclub/OpenPype/pull/3927) + +**🐛 Bug fixes** + +- TrayPublisher: Disable sequences in batch mov creator [\#3996](https://github.com/pypeclub/OpenPype/pull/3996) +- Fix - tags might be missing on representation [\#3985](https://github.com/pypeclub/OpenPype/pull/3985) +- Resolve: Fix usage of functions from lib [\#3983](https://github.com/pypeclub/OpenPype/pull/3983) +- Maya: remove invalid prefix token for non-multipart outputs [\#3981](https://github.com/pypeclub/OpenPype/pull/3981) +- Ftrack: Fix schema cache for Python 2 [\#3980](https://github.com/pypeclub/OpenPype/pull/3980) +- Maya: add object to attr.s declaration [\#3973](https://github.com/pypeclub/OpenPype/pull/3973) +- Maya: Deadline OutputFilePath hack regression for Renderman [\#3950](https://github.com/pypeclub/OpenPype/pull/3950) +- Houdini: Fix validate 
workfile paths for non-parm file references [\#3948](https://github.com/pypeclub/OpenPype/pull/3948) +- Photoshop: missed sync published version of workfile with workfile [\#3946](https://github.com/pypeclub/OpenPype/pull/3946) +- Maya: Set default value for RenderSetupIncludeLights option [\#3944](https://github.com/pypeclub/OpenPype/pull/3944) +- Maya: fix regression of Renderman Deadline hack [\#3943](https://github.com/pypeclub/OpenPype/pull/3943) +- Kitsu: 2 fixes, nb\_frames and Shot type error [\#3940](https://github.com/pypeclub/OpenPype/pull/3940) +- Tray: Change order of attribute changes [\#3938](https://github.com/pypeclub/OpenPype/pull/3938) +- AttributeDefs: Fix crashing multivalue of files widget [\#3937](https://github.com/pypeclub/OpenPype/pull/3937) +- General: Fix links query on hero version [\#3900](https://github.com/pypeclub/OpenPype/pull/3900) +- Publisher: Files Drag n Drop cleanup [\#3888](https://github.com/pypeclub/OpenPype/pull/3888) + +**🔀 Refactored code** + +- Flame: Import lib functions from lib [\#3992](https://github.com/pypeclub/OpenPype/pull/3992) +- General: Fix deprecated warning in legacy creator [\#3978](https://github.com/pypeclub/OpenPype/pull/3978) +- Blender: Remove openpype api imports [\#3977](https://github.com/pypeclub/OpenPype/pull/3977) +- General: Use direct import of resources [\#3964](https://github.com/pypeclub/OpenPype/pull/3964) +- General: Direct settings imports [\#3934](https://github.com/pypeclub/OpenPype/pull/3934) +- General: import 'Logger' from 'openpype.lib' [\#3926](https://github.com/pypeclub/OpenPype/pull/3926) +- General: Remove deprecated functions from lib [\#3907](https://github.com/pypeclub/OpenPype/pull/3907) + +**Merged pull requests:** + +- Maya + Yeti: Load Yeti Cache fix frame number recognition [\#3942](https://github.com/pypeclub/OpenPype/pull/3942) +- Fusion: Implement callbacks to Fusion's event system thread [\#3928](https://github.com/pypeclub/OpenPype/pull/3928) +- Photoshop: create single frame image in Ftrack as review [\#3908](https://github.com/pypeclub/OpenPype/pull/3908) + +## [3.14.3](https://github.com/pypeclub/OpenPype/tree/3.14.3) (2022-10-03) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.2...3.14.3) + +**🚀 Enhancements** + +- Publisher: Enhancement proposals [\#3897](https://github.com/pypeclub/OpenPype/pull/3897) + +**🐛 Bug fixes** + +- Maya: Fix Render single camera validator [\#3929](https://github.com/pypeclub/OpenPype/pull/3929) +- Flame: loading multilayer exr to batch/reel is working [\#3901](https://github.com/pypeclub/OpenPype/pull/3901) +- Hiero: Fix inventory check on launch [\#3895](https://github.com/pypeclub/OpenPype/pull/3895) +- WebPublisher: Fix import after refactor [\#3891](https://github.com/pypeclub/OpenPype/pull/3891) + +**🔀 Refactored code** + +- Maya: Remove unused 'openpype.api' imports in plugins [\#3925](https://github.com/pypeclub/OpenPype/pull/3925) +- Resolve: Use new Extractor location [\#3918](https://github.com/pypeclub/OpenPype/pull/3918) +- Unreal: Use new Extractor location [\#3917](https://github.com/pypeclub/OpenPype/pull/3917) +- Flame: Use new Extractor location [\#3916](https://github.com/pypeclub/OpenPype/pull/3916) +- Houdini: Use new Extractor location [\#3894](https://github.com/pypeclub/OpenPype/pull/3894) +- Harmony: Use new Extractor location [\#3893](https://github.com/pypeclub/OpenPype/pull/3893) + +**Merged pull requests:** + +- Maya: Fix Scene Inventory possibly starting off-screen due to maya preferences 
[\#3923](https://github.com/pypeclub/OpenPype/pull/3923) + +## [3.14.2](https://github.com/pypeclub/OpenPype/tree/3.14.2) (2022-09-12) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.1...3.14.2) + +### 📖 Documentation + +- Documentation: Anatomy templates [\#3618](https://github.com/pypeclub/OpenPype/pull/3618) + +**🆕 New features** + +- Nuke: Build workfile by template [\#3763](https://github.com/pypeclub/OpenPype/pull/3763) +- Houdini: Publishing workfiles [\#3697](https://github.com/pypeclub/OpenPype/pull/3697) +- Global: making collect audio plugin global [\#3679](https://github.com/pypeclub/OpenPype/pull/3679) + +**🚀 Enhancements** + +- Flame: Adding Creator's retimed shot and handles switch [\#3826](https://github.com/pypeclub/OpenPype/pull/3826) +- Flame: OpenPype submenu to batch and media manager [\#3825](https://github.com/pypeclub/OpenPype/pull/3825) +- General: Better pixmap scaling [\#3809](https://github.com/pypeclub/OpenPype/pull/3809) +- Photoshop: attempt to speed up ExtractImage [\#3793](https://github.com/pypeclub/OpenPype/pull/3793) +- SyncServer: Added cli commands for sync server [\#3765](https://github.com/pypeclub/OpenPype/pull/3765) +- Kitsu: Drop 'entities root' setting. [\#3739](https://github.com/pypeclub/OpenPype/pull/3739) +- git: update gitignore [\#3722](https://github.com/pypeclub/OpenPype/pull/3722) +- Blender: Publisher collect workfile representation [\#3670](https://github.com/pypeclub/OpenPype/pull/3670) +- Maya: move set render settings menu entry [\#3669](https://github.com/pypeclub/OpenPype/pull/3669) +- Scene Inventory: Maya add actions to select from or to scene [\#3659](https://github.com/pypeclub/OpenPype/pull/3659) +- Scene Inventory: Add subsetGroup column [\#3658](https://github.com/pypeclub/OpenPype/pull/3658) + +**🐛 Bug fixes** + +- General: Fix Pattern access in client code [\#3828](https://github.com/pypeclub/OpenPype/pull/3828) +- Launcher: Skip opening last work file works for groups [\#3822](https://github.com/pypeclub/OpenPype/pull/3822) +- Maya: Publishing data key change [\#3811](https://github.com/pypeclub/OpenPype/pull/3811) +- Igniter: Fix status handling when version is already installed [\#3804](https://github.com/pypeclub/OpenPype/pull/3804) +- Resolve: Addon import is Python 2 compatible [\#3798](https://github.com/pypeclub/OpenPype/pull/3798) +- Hiero: retimed clip publishing is working [\#3792](https://github.com/pypeclub/OpenPype/pull/3792) +- nuke: validate write node is not failing due wrong type [\#3780](https://github.com/pypeclub/OpenPype/pull/3780) +- Fix - changed format of version string in pyproject.toml [\#3777](https://github.com/pypeclub/OpenPype/pull/3777) +- Ftrack status fix typo prgoress -\> progress [\#3761](https://github.com/pypeclub/OpenPype/pull/3761) +- Fix version resolution [\#3757](https://github.com/pypeclub/OpenPype/pull/3757) +- Maya: `containerise` dont skip empty values [\#3674](https://github.com/pypeclub/OpenPype/pull/3674) + +**🔀 Refactored code** + +- Photoshop: Use new Extractor location [\#3789](https://github.com/pypeclub/OpenPype/pull/3789) +- Blender: Use new Extractor location [\#3787](https://github.com/pypeclub/OpenPype/pull/3787) +- AfterEffects: Use new Extractor location [\#3784](https://github.com/pypeclub/OpenPype/pull/3784) +- General: Remove unused teshost [\#3773](https://github.com/pypeclub/OpenPype/pull/3773) +- General: Copied 'Extractor' plugin to publish pipeline [\#3771](https://github.com/pypeclub/OpenPype/pull/3771) +- General: Move queries of 
asset and representation links [\#3770](https://github.com/pypeclub/OpenPype/pull/3770) +- General: Move create project folders to pipeline [\#3768](https://github.com/pypeclub/OpenPype/pull/3768) +- General: Create project function moved to client code [\#3766](https://github.com/pypeclub/OpenPype/pull/3766) +- Maya: Refactor submit deadline to use AbstractSubmitDeadline [\#3759](https://github.com/pypeclub/OpenPype/pull/3759) +- General: Change publish template settings location [\#3755](https://github.com/pypeclub/OpenPype/pull/3755) +- General: Move hostdirname functionality into host [\#3749](https://github.com/pypeclub/OpenPype/pull/3749) +- General: Move publish utils to pipeline [\#3745](https://github.com/pypeclub/OpenPype/pull/3745) +- Houdini: Define houdini as addon [\#3735](https://github.com/pypeclub/OpenPype/pull/3735) +- Fusion: Defined fusion as addon [\#3733](https://github.com/pypeclub/OpenPype/pull/3733) +- Flame: Defined flame as addon [\#3732](https://github.com/pypeclub/OpenPype/pull/3732) +- Resolve: Define resolve as addon [\#3727](https://github.com/pypeclub/OpenPype/pull/3727) + +**Merged pull requests:** + +- Standalone Publisher: Ignore empty labels, then still use name like other asset models [\#3779](https://github.com/pypeclub/OpenPype/pull/3779) +- Kitsu - sync\_all\_project - add list ignore\_projects [\#3776](https://github.com/pypeclub/OpenPype/pull/3776) + +## [3.14.1](https://github.com/pypeclub/OpenPype/tree/3.14.1) (2022-08-30) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.0...3.14.1) + +### 📖 Documentation + +- Documentation: Few updates [\#3698](https://github.com/pypeclub/OpenPype/pull/3698) +- Documentation: Settings development [\#3660](https://github.com/pypeclub/OpenPype/pull/3660) + +**🆕 New features** + +- Webpublisher:change create flatten image into tri state [\#3678](https://github.com/pypeclub/OpenPype/pull/3678) +- Blender: validators code correction with settings and defaults [\#3662](https://github.com/pypeclub/OpenPype/pull/3662) + +**🚀 Enhancements** + +- General: Thumbnail can use project roots [\#3750](https://github.com/pypeclub/OpenPype/pull/3750) +- Settings: Remove settings lock on tray exit [\#3720](https://github.com/pypeclub/OpenPype/pull/3720) +- General: Added helper getters to modules manager [\#3712](https://github.com/pypeclub/OpenPype/pull/3712) +- Unreal: Define unreal as module and use host class [\#3701](https://github.com/pypeclub/OpenPype/pull/3701) +- Settings: Lock settings UI session [\#3700](https://github.com/pypeclub/OpenPype/pull/3700) +- General: Benevolent context label collector [\#3686](https://github.com/pypeclub/OpenPype/pull/3686) +- Ftrack: Store ftrack entities on hierarchy integration to instances [\#3677](https://github.com/pypeclub/OpenPype/pull/3677) +- Ftrack: More logs related to auto sync value change [\#3671](https://github.com/pypeclub/OpenPype/pull/3671) +- Blender: ops refresh manager after process events [\#3663](https://github.com/pypeclub/OpenPype/pull/3663) + +**🐛 Bug fixes** + +- Maya: Fix typo in getPanel argument `with_focus` -\> `withFocus` [\#3753](https://github.com/pypeclub/OpenPype/pull/3753) +- General: Smaller fixes of imports [\#3748](https://github.com/pypeclub/OpenPype/pull/3748) +- General: Logger tweaks [\#3741](https://github.com/pypeclub/OpenPype/pull/3741) +- Nuke: missing job dependency if multiple bake streams [\#3737](https://github.com/pypeclub/OpenPype/pull/3737) +- Nuke: color-space settings from anatomy is working 
[\#3721](https://github.com/pypeclub/OpenPype/pull/3721) +- Settings: Fix studio default anatomy save [\#3716](https://github.com/pypeclub/OpenPype/pull/3716) +- Maya: Use project name instead of project code [\#3709](https://github.com/pypeclub/OpenPype/pull/3709) +- Settings: Fix project overrides save [\#3708](https://github.com/pypeclub/OpenPype/pull/3708) +- Workfiles tool: Fix published workfile filtering [\#3704](https://github.com/pypeclub/OpenPype/pull/3704) +- PS, AE: Provide default variant value for workfile subset [\#3703](https://github.com/pypeclub/OpenPype/pull/3703) +- RoyalRender: handle host name that is not set [\#3695](https://github.com/pypeclub/OpenPype/pull/3695) +- Flame: retime is working on clip publishing [\#3684](https://github.com/pypeclub/OpenPype/pull/3684) +- Webpublisher: added check for empty context [\#3682](https://github.com/pypeclub/OpenPype/pull/3682) + +**🔀 Refactored code** + +- General: Move delivery logic to pipeline [\#3751](https://github.com/pypeclub/OpenPype/pull/3751) +- General: Host addons cleanup [\#3744](https://github.com/pypeclub/OpenPype/pull/3744) +- Webpublisher: Webpublisher is used as addon [\#3740](https://github.com/pypeclub/OpenPype/pull/3740) +- Photoshop: Defined photoshop as addon [\#3736](https://github.com/pypeclub/OpenPype/pull/3736) +- Harmony: Defined harmony as addon [\#3734](https://github.com/pypeclub/OpenPype/pull/3734) +- General: Module interfaces cleanup [\#3731](https://github.com/pypeclub/OpenPype/pull/3731) +- AfterEffects: Move AE functions from general lib [\#3730](https://github.com/pypeclub/OpenPype/pull/3730) +- Blender: Define blender as module [\#3729](https://github.com/pypeclub/OpenPype/pull/3729) +- AfterEffects: Define AfterEffects as module [\#3728](https://github.com/pypeclub/OpenPype/pull/3728) +- General: Replace PypeLogger with Logger [\#3725](https://github.com/pypeclub/OpenPype/pull/3725) +- Nuke: Define nuke as module [\#3724](https://github.com/pypeclub/OpenPype/pull/3724) +- General: Move subset name functionality [\#3723](https://github.com/pypeclub/OpenPype/pull/3723) +- General: Move creators plugin getter [\#3714](https://github.com/pypeclub/OpenPype/pull/3714) +- General: Move constants from lib to client [\#3713](https://github.com/pypeclub/OpenPype/pull/3713) +- Loader: Subset groups using client operations [\#3710](https://github.com/pypeclub/OpenPype/pull/3710) +- TVPaint: Defined as module [\#3707](https://github.com/pypeclub/OpenPype/pull/3707) +- StandalonePublisher: Define StandalonePublisher as module [\#3706](https://github.com/pypeclub/OpenPype/pull/3706) +- TrayPublisher: Define TrayPublisher as module [\#3705](https://github.com/pypeclub/OpenPype/pull/3705) +- General: Move context specific functions to context tools [\#3702](https://github.com/pypeclub/OpenPype/pull/3702) + +**Merged pull requests:** + +- Hiero: Define hiero as module [\#3717](https://github.com/pypeclub/OpenPype/pull/3717) +- Deadline: better logging for DL webservice failures [\#3694](https://github.com/pypeclub/OpenPype/pull/3694) +- Photoshop: resize saved images in ExtractReview for ffmpeg [\#3676](https://github.com/pypeclub/OpenPype/pull/3676) +- Nuke: Validation refactory to new publisher [\#3567](https://github.com/pypeclub/OpenPype/pull/3567) + +## [3.14.0](https://github.com/pypeclub/OpenPype/tree/3.14.0) (2022-08-18) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.13.0...3.14.0) + +**🆕 New features** + +- Maya: Build workfile by template 
[\#3578](https://github.com/pypeclub/OpenPype/pull/3578) +- Maya: Implementation of JSON layout for Unreal workflow [\#3353](https://github.com/pypeclub/OpenPype/pull/3353) +- Maya: Build workfile by template [\#3315](https://github.com/pypeclub/OpenPype/pull/3315) + +**🚀 Enhancements** + +- Ftrack: Additional component metadata [\#3685](https://github.com/pypeclub/OpenPype/pull/3685) +- Ftrack: Set task status on farm publishing [\#3680](https://github.com/pypeclub/OpenPype/pull/3680) +- Ftrack: Set task status on task creation in integrate hierarchy [\#3675](https://github.com/pypeclub/OpenPype/pull/3675) +- Maya: Disable rendering of all lights for render instances submitted through Deadline. [\#3661](https://github.com/pypeclub/OpenPype/pull/3661) +- General: Optimized OCIO configs [\#3650](https://github.com/pypeclub/OpenPype/pull/3650) + +**🐛 Bug fixes** + +- General: Switch from hero version to versioned works [\#3691](https://github.com/pypeclub/OpenPype/pull/3691) +- General: Fix finding of last version [\#3656](https://github.com/pypeclub/OpenPype/pull/3656) +- General: Extract Review can scale with pixel aspect ratio [\#3644](https://github.com/pypeclub/OpenPype/pull/3644) +- Maya: Refactor moved usage of CreateRender settings [\#3643](https://github.com/pypeclub/OpenPype/pull/3643) +- General: Hero version representations have full context [\#3638](https://github.com/pypeclub/OpenPype/pull/3638) +- Nuke: color settings for render write node is working now [\#3632](https://github.com/pypeclub/OpenPype/pull/3632) +- Maya: FBX support for update in reference loader [\#3631](https://github.com/pypeclub/OpenPype/pull/3631) + +**🔀 Refactored code** + +- General: Use client projects getter [\#3673](https://github.com/pypeclub/OpenPype/pull/3673) +- Resolve: Match folder structure to other hosts [\#3653](https://github.com/pypeclub/OpenPype/pull/3653) +- Maya: Hosts as modules [\#3647](https://github.com/pypeclub/OpenPype/pull/3647) +- TimersManager: Plugins are in timers manager module [\#3639](https://github.com/pypeclub/OpenPype/pull/3639) +- General: Move workfiles functions into pipeline [\#3637](https://github.com/pypeclub/OpenPype/pull/3637) +- General: Workfiles builder using query functions [\#3598](https://github.com/pypeclub/OpenPype/pull/3598) + +**Merged pull requests:** + +- Deadline: Global job pre load is not Pype 2 compatible [\#3666](https://github.com/pypeclub/OpenPype/pull/3666) +- Maya: Remove unused get current renderer logic [\#3645](https://github.com/pypeclub/OpenPype/pull/3645) +- Kitsu|Fix: Movie project type fails & first loop children names [\#3636](https://github.com/pypeclub/OpenPype/pull/3636) +- fix the bug of failing to extract look when UDIMs format used in AiImage [\#3628](https://github.com/pypeclub/OpenPype/pull/3628) + +## [3.13.0](https://github.com/pypeclub/OpenPype/tree/3.13.0) (2022-08-09) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.2...3.13.0) + +**🆕 New features** + +- Support for multiple installed versions - 3.13 [\#3605](https://github.com/pypeclub/OpenPype/pull/3605) +- Traypublisher: simple editorial publishing [\#3492](https://github.com/pypeclub/OpenPype/pull/3492) + +**🚀 Enhancements** + +- Editorial: Mix audio use side file for ffmpeg filters [\#3630](https://github.com/pypeclub/OpenPype/pull/3630) +- Ftrack: Comment template can contain optional keys [\#3615](https://github.com/pypeclub/OpenPype/pull/3615) +- Ftrack: Add more metadata to ftrack components 
[\#3612](https://github.com/pypeclub/OpenPype/pull/3612) +- General: Add context to pyblish context [\#3594](https://github.com/pypeclub/OpenPype/pull/3594) +- Kitsu: Shot&Sequence name with prefix over appends [\#3593](https://github.com/pypeclub/OpenPype/pull/3593) +- Photoshop: implemented {layer} placeholder in subset template [\#3591](https://github.com/pypeclub/OpenPype/pull/3591) +- General: Python module appdirs from git [\#3589](https://github.com/pypeclub/OpenPype/pull/3589) +- Ftrack: Update ftrack api to 2.3.3 [\#3588](https://github.com/pypeclub/OpenPype/pull/3588) +- General: New Integrator small fixes [\#3583](https://github.com/pypeclub/OpenPype/pull/3583) +- Maya: Render Creator has configurable options. [\#3097](https://github.com/pypeclub/OpenPype/pull/3097) + +**🐛 Bug fixes** + +- Maya: fix aov separator in Redshift [\#3625](https://github.com/pypeclub/OpenPype/pull/3625) +- Fix for multi-version build on Mac [\#3622](https://github.com/pypeclub/OpenPype/pull/3622) +- Ftrack: Sync hierarchical attributes can handle newly created entities [\#3621](https://github.com/pypeclub/OpenPype/pull/3621) +- General: Extract review aspect ratio scale is calculated by ffmpeg [\#3620](https://github.com/pypeclub/OpenPype/pull/3620) +- Maya: Fix types of default settings [\#3617](https://github.com/pypeclub/OpenPype/pull/3617) +- Integrator: Don't force to have dot before frame [\#3611](https://github.com/pypeclub/OpenPype/pull/3611) +- AfterEffects: refactored integrate doesn't work for multi frame publishes [\#3610](https://github.com/pypeclub/OpenPype/pull/3610) +- Maya look data contents fails with custom attribute on group [\#3607](https://github.com/pypeclub/OpenPype/pull/3607) +- TrayPublisher: Fix wrong conflict merge [\#3600](https://github.com/pypeclub/OpenPype/pull/3600) +- Bugfix: Add OCIO as submodule to prepare for handling `maketx` color space conversion. 
[\#3590](https://github.com/pypeclub/OpenPype/pull/3590) +- Fix general settings environment variables resolution [\#3587](https://github.com/pypeclub/OpenPype/pull/3587) +- Editorial publishing workflow improvements [\#3580](https://github.com/pypeclub/OpenPype/pull/3580) +- General: Update imports in start script [\#3579](https://github.com/pypeclub/OpenPype/pull/3579) +- Nuke: render family integration consistency [\#3576](https://github.com/pypeclub/OpenPype/pull/3576) +- Ftrack: Handle missing published path in integrator [\#3570](https://github.com/pypeclub/OpenPype/pull/3570) +- Nuke: publish existing frames with slate with correct range [\#3555](https://github.com/pypeclub/OpenPype/pull/3555) + +**🔀 Refactored code** + +- General: Plugin settings handled by plugins [\#3623](https://github.com/pypeclub/OpenPype/pull/3623) +- General: Naive implementation of document create, update, delete [\#3601](https://github.com/pypeclub/OpenPype/pull/3601) +- General: Use query functions in general code [\#3596](https://github.com/pypeclub/OpenPype/pull/3596) +- General: Separate extraction of template data into more functions [\#3574](https://github.com/pypeclub/OpenPype/pull/3574) +- General: Lib cleanup [\#3571](https://github.com/pypeclub/OpenPype/pull/3571) + +**Merged pull requests:** + +- Webpublisher: timeout for PS studio processing [\#3619](https://github.com/pypeclub/OpenPype/pull/3619) +- Core: translated validate\_containers.py into New publisher style [\#3614](https://github.com/pypeclub/OpenPype/pull/3614) +- Enable write color sets on animation publish automatically [\#3582](https://github.com/pypeclub/OpenPype/pull/3582) + +## [3.12.2](https://github.com/pypeclub/OpenPype/tree/3.12.2) (2022-07-27) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.1...3.12.2) + +### 📖 Documentation + +- Update website with more studios [\#3554](https://github.com/pypeclub/OpenPype/pull/3554) +- Documentation: Update publishing dev docs [\#3549](https://github.com/pypeclub/OpenPype/pull/3549) + +**🚀 Enhancements** + +- General: Global thumbnail extractor is ready for more cases [\#3561](https://github.com/pypeclub/OpenPype/pull/3561) +- Maya: add additional validators to Settings [\#3540](https://github.com/pypeclub/OpenPype/pull/3540) +- General: Interactive console in cli [\#3526](https://github.com/pypeclub/OpenPype/pull/3526) +- Ftrack: Automatic daily review session creation can define trigger hour [\#3516](https://github.com/pypeclub/OpenPype/pull/3516) +- Ftrack: add source into Note [\#3509](https://github.com/pypeclub/OpenPype/pull/3509) +- Ftrack: Trigger custom ftrack topic of project structure creation [\#3506](https://github.com/pypeclub/OpenPype/pull/3506) +- Settings UI: Add extract to file action on project view [\#3505](https://github.com/pypeclub/OpenPype/pull/3505) +- Add pack and unpack convenience scripts [\#3502](https://github.com/pypeclub/OpenPype/pull/3502) +- General: Event system [\#3499](https://github.com/pypeclub/OpenPype/pull/3499) +- NewPublisher: Keep plugins with mismatch target in report [\#3498](https://github.com/pypeclub/OpenPype/pull/3498) +- Nuke: load clip with options from settings [\#3497](https://github.com/pypeclub/OpenPype/pull/3497) +- TrayPublisher: implemented render\_mov\_batch [\#3486](https://github.com/pypeclub/OpenPype/pull/3486) +- Migrate basic families to the new Tray Publisher [\#3469](https://github.com/pypeclub/OpenPype/pull/3469) +- Enhance powershell build scripts 
[\#1827](https://github.com/pypeclub/OpenPype/pull/1827) + +**🐛 Bug fixes** + +- Maya: fix Review image plane attribute [\#3569](https://github.com/pypeclub/OpenPype/pull/3569) +- Maya: Fix animated attributes \(ie. overscan\) on loaded cameras breaking review publishing. [\#3562](https://github.com/pypeclub/OpenPype/pull/3562) +- NewPublisher: Python 2 compatible html escape [\#3559](https://github.com/pypeclub/OpenPype/pull/3559) +- Remove invalid submodules from `/vendor` [\#3557](https://github.com/pypeclub/OpenPype/pull/3557) +- General: Remove hosts filter on integrator plugins [\#3556](https://github.com/pypeclub/OpenPype/pull/3556) +- Settings: Clean default values of environments [\#3550](https://github.com/pypeclub/OpenPype/pull/3550) +- Module interfaces: Fix import error [\#3547](https://github.com/pypeclub/OpenPype/pull/3547) +- Workfiles tool: Show of tool and it's flags [\#3539](https://github.com/pypeclub/OpenPype/pull/3539) +- General: Create workfile documents works again [\#3538](https://github.com/pypeclub/OpenPype/pull/3538) +- Additional fixes for powershell scripts [\#3525](https://github.com/pypeclub/OpenPype/pull/3525) +- Maya: Added wrapper around cmds.setAttr [\#3523](https://github.com/pypeclub/OpenPype/pull/3523) +- Nuke: double slate [\#3521](https://github.com/pypeclub/OpenPype/pull/3521) +- General: Fix hash of centos oiio archive [\#3519](https://github.com/pypeclub/OpenPype/pull/3519) +- Maya: Renderman display output fix [\#3514](https://github.com/pypeclub/OpenPype/pull/3514) +- TrayPublisher: Simple creation enhancements and fixes [\#3513](https://github.com/pypeclub/OpenPype/pull/3513) +- NewPublisher: Publish attributes are properly collected [\#3510](https://github.com/pypeclub/OpenPype/pull/3510) +- TrayPublisher: Make sure host name is filled [\#3504](https://github.com/pypeclub/OpenPype/pull/3504) +- NewPublisher: Groups work and enum multivalue [\#3501](https://github.com/pypeclub/OpenPype/pull/3501) + +**🔀 Refactored code** + +- General: Use query functions in integrator [\#3563](https://github.com/pypeclub/OpenPype/pull/3563) +- General: Mongo core connection moved to client [\#3531](https://github.com/pypeclub/OpenPype/pull/3531) +- Refactor Integrate Asset [\#3530](https://github.com/pypeclub/OpenPype/pull/3530) +- General: Client docstrings cleanup [\#3529](https://github.com/pypeclub/OpenPype/pull/3529) +- General: Move load related functions into pipeline [\#3527](https://github.com/pypeclub/OpenPype/pull/3527) +- General: Get current context document functions [\#3522](https://github.com/pypeclub/OpenPype/pull/3522) +- Kitsu: Use query function from client [\#3496](https://github.com/pypeclub/OpenPype/pull/3496) +- TimersManager: Use query functions [\#3495](https://github.com/pypeclub/OpenPype/pull/3495) +- Deadline: Use query functions [\#3466](https://github.com/pypeclub/OpenPype/pull/3466) +- Refactor Integrate Asset [\#2898](https://github.com/pypeclub/OpenPype/pull/2898) + +**Merged pull requests:** + +- Maya: fix active pane loss [\#3566](https://github.com/pypeclub/OpenPype/pull/3566) + +## [3.12.1](https://github.com/pypeclub/OpenPype/tree/3.12.1) (2022-07-13) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.0...3.12.1) + +### 📖 Documentation + +- Docs: Added minimal permissions for MongoDB [\#3441](https://github.com/pypeclub/OpenPype/pull/3441) + +**🆕 New features** + +- Maya: Add VDB to Arnold loader [\#3433](https://github.com/pypeclub/OpenPype/pull/3433) + +**🚀 Enhancements** + +- TrayPublisher: Added 
more options for grouping of instances [\#3494](https://github.com/pypeclub/OpenPype/pull/3494) +- NewPublisher: Align creator attributes from top to bottom [\#3487](https://github.com/pypeclub/OpenPype/pull/3487) +- NewPublisher: Added ability to use label of instance [\#3484](https://github.com/pypeclub/OpenPype/pull/3484) +- General: Creator Plugins have access to project [\#3476](https://github.com/pypeclub/OpenPype/pull/3476) +- General: Better arguments order in creator init [\#3475](https://github.com/pypeclub/OpenPype/pull/3475) +- Ftrack: Trigger custom ftrack events on project creation and preparation [\#3465](https://github.com/pypeclub/OpenPype/pull/3465) +- Windows installer: Clean old files and add version subfolder [\#3445](https://github.com/pypeclub/OpenPype/pull/3445) +- Blender: Bugfix - Set fps properly on open [\#3426](https://github.com/pypeclub/OpenPype/pull/3426) +- Hiero: Add custom scripts menu [\#3425](https://github.com/pypeclub/OpenPype/pull/3425) +- Blender: pre pyside install for all platforms [\#3400](https://github.com/pypeclub/OpenPype/pull/3400) +- Maya: Add additional playblast options to review Extractor. [\#3384](https://github.com/pypeclub/OpenPype/pull/3384) +- Maya: Ability to set resolution for playblasts from asset, and override through review instance. [\#3360](https://github.com/pypeclub/OpenPype/pull/3360) +- Maya: Redshift Volume Loader Implement update, remove, switch + fix vdb sequence support [\#3197](https://github.com/pypeclub/OpenPype/pull/3197) +- Maya: Implement `iter_visible_nodes_in_range` for extracting Alembics [\#3100](https://github.com/pypeclub/OpenPype/pull/3100) + +**🐛 Bug fixes** + +- TrayPublisher: Keep use instance label in list view [\#3493](https://github.com/pypeclub/OpenPype/pull/3493) +- General: Extract review use first frame of input sequence [\#3491](https://github.com/pypeclub/OpenPype/pull/3491) +- General: Fix Plist loading for application launch [\#3485](https://github.com/pypeclub/OpenPype/pull/3485) +- Nuke: Workfile tools open on start [\#3479](https://github.com/pypeclub/OpenPype/pull/3479) +- New Publisher: Disabled context change allows creation [\#3478](https://github.com/pypeclub/OpenPype/pull/3478) +- General: thumbnail extractor fix [\#3474](https://github.com/pypeclub/OpenPype/pull/3474) +- Kitsu: bugfix with sync-service ans publish plugins [\#3473](https://github.com/pypeclub/OpenPype/pull/3473) +- Flame: solved problem with multi-selected loading [\#3470](https://github.com/pypeclub/OpenPype/pull/3470) +- General: Fix query function in update logic [\#3468](https://github.com/pypeclub/OpenPype/pull/3468) +- Resolve: removed few bugs [\#3464](https://github.com/pypeclub/OpenPype/pull/3464) +- General: Delete old versions is safer when ftrack is disabled [\#3462](https://github.com/pypeclub/OpenPype/pull/3462) +- Nuke: fixing metadata slate TC difference [\#3455](https://github.com/pypeclub/OpenPype/pull/3455) +- Nuke: prerender reviewable fails [\#3450](https://github.com/pypeclub/OpenPype/pull/3450) +- Maya: fix hashing in Python 3 for tile rendering [\#3447](https://github.com/pypeclub/OpenPype/pull/3447) +- LogViewer: Escape html characters in log message [\#3443](https://github.com/pypeclub/OpenPype/pull/3443) +- Nuke: Slate frame is integrated [\#3427](https://github.com/pypeclub/OpenPype/pull/3427) +- Maya: Camera extra data - additional fix for \#3304 [\#3386](https://github.com/pypeclub/OpenPype/pull/3386) +- Maya: Handle excluding `model` family from frame range validator. 
[\#3370](https://github.com/pypeclub/OpenPype/pull/3370) + +**🔀 Refactored code** + +- Maya: Merge animation + pointcache extractor logic [\#3461](https://github.com/pypeclub/OpenPype/pull/3461) +- Maya: Re-use `maintained_time` from lib [\#3460](https://github.com/pypeclub/OpenPype/pull/3460) +- General: Use query functions in global plugins [\#3459](https://github.com/pypeclub/OpenPype/pull/3459) +- Clockify: Use query functions in clockify actions [\#3458](https://github.com/pypeclub/OpenPype/pull/3458) +- General: Use query functions in rest api calls [\#3457](https://github.com/pypeclub/OpenPype/pull/3457) +- General: Use query functions in openpype lib functions [\#3454](https://github.com/pypeclub/OpenPype/pull/3454) +- General: Use query functions in load utils [\#3446](https://github.com/pypeclub/OpenPype/pull/3446) +- General: Move publish plugin and publish render abstractions [\#3442](https://github.com/pypeclub/OpenPype/pull/3442) +- General: Use Anatomy after move to pipeline [\#3436](https://github.com/pypeclub/OpenPype/pull/3436) +- General: Anatomy moved to pipeline [\#3435](https://github.com/pypeclub/OpenPype/pull/3435) +- Fusion: Use client query functions [\#3380](https://github.com/pypeclub/OpenPype/pull/3380) +- Resolve: Use client query functions [\#3379](https://github.com/pypeclub/OpenPype/pull/3379) +- General: Host implementation defined with class [\#3337](https://github.com/pypeclub/OpenPype/pull/3337) + +## [3.12.0](https://github.com/pypeclub/OpenPype/tree/3.12.0) (2022-06-28) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.1...3.12.0) + +### 📖 Documentation + +- Fix typo in documentation: pyenv on mac [\#3417](https://github.com/pypeclub/OpenPype/pull/3417) +- Linux: update OIIO package [\#3401](https://github.com/pypeclub/OpenPype/pull/3401) + +**🆕 New features** + +- Shotgrid: Add production beta of shotgrid integration [\#2921](https://github.com/pypeclub/OpenPype/pull/2921) + +**🚀 Enhancements** + +- Webserver: Added CORS middleware [\#3422](https://github.com/pypeclub/OpenPype/pull/3422) +- Attribute Defs UI: Files widget show what is allowed to drop in [\#3411](https://github.com/pypeclub/OpenPype/pull/3411) +- General: Add ability to change user value for templates [\#3366](https://github.com/pypeclub/OpenPype/pull/3366) +- Hosts: More options for in-host callbacks [\#3357](https://github.com/pypeclub/OpenPype/pull/3357) +- Multiverse: expose some settings to GUI [\#3350](https://github.com/pypeclub/OpenPype/pull/3350) +- Maya: Allow more data to be published along camera 🎥 [\#3304](https://github.com/pypeclub/OpenPype/pull/3304) +- Add root keys and project keys to create starting folder [\#2755](https://github.com/pypeclub/OpenPype/pull/2755) + +**🐛 Bug fixes** + +- NewPublisher: Fix subset name change on change of creator plugin [\#3420](https://github.com/pypeclub/OpenPype/pull/3420) +- Bug: fix invalid avalon import [\#3418](https://github.com/pypeclub/OpenPype/pull/3418) +- Nuke: Fix keyword argument in query function [\#3414](https://github.com/pypeclub/OpenPype/pull/3414) +- Houdini: fix loading and updating vbd/bgeo sequences [\#3408](https://github.com/pypeclub/OpenPype/pull/3408) +- Nuke: Collect representation files based on Write [\#3407](https://github.com/pypeclub/OpenPype/pull/3407) +- General: Filter representations before integration start [\#3398](https://github.com/pypeclub/OpenPype/pull/3398) +- Maya: look collector typo [\#3392](https://github.com/pypeclub/OpenPype/pull/3392) +- TVPaint: Make sure exit code 
is set to not None [\#3382](https://github.com/pypeclub/OpenPype/pull/3382) +- Maya: vray device aspect ratio fix [\#3381](https://github.com/pypeclub/OpenPype/pull/3381) +- Flame: bunch of publishing issues [\#3377](https://github.com/pypeclub/OpenPype/pull/3377) +- Harmony: added unc path to zifile command in Harmony [\#3372](https://github.com/pypeclub/OpenPype/pull/3372) +- Standalone: settings improvements [\#3355](https://github.com/pypeclub/OpenPype/pull/3355) +- Nuke: Load full model hierarchy by default [\#3328](https://github.com/pypeclub/OpenPype/pull/3328) +- Nuke: multiple baking streams with correct slate [\#3245](https://github.com/pypeclub/OpenPype/pull/3245) +- Maya: fix image prefix warning in validator [\#3128](https://github.com/pypeclub/OpenPype/pull/3128) + +**🔀 Refactored code** + +- Unreal: Use client query functions [\#3421](https://github.com/pypeclub/OpenPype/pull/3421) +- General: Move editorial lib to pipeline [\#3419](https://github.com/pypeclub/OpenPype/pull/3419) +- Kitsu: renaming to plural func sync\_all\_projects [\#3397](https://github.com/pypeclub/OpenPype/pull/3397) +- Houdini: Use client query functions [\#3395](https://github.com/pypeclub/OpenPype/pull/3395) +- Hiero: Use client query functions [\#3393](https://github.com/pypeclub/OpenPype/pull/3393) +- Nuke: Use client query functions [\#3391](https://github.com/pypeclub/OpenPype/pull/3391) +- Maya: Use client query functions [\#3385](https://github.com/pypeclub/OpenPype/pull/3385) +- Harmony: Use client query functions [\#3378](https://github.com/pypeclub/OpenPype/pull/3378) +- Celaction: Use client query functions [\#3376](https://github.com/pypeclub/OpenPype/pull/3376) +- Photoshop: Use client query functions [\#3375](https://github.com/pypeclub/OpenPype/pull/3375) +- AfterEffects: Use client query functions [\#3374](https://github.com/pypeclub/OpenPype/pull/3374) +- TVPaint: Use client query functions [\#3340](https://github.com/pypeclub/OpenPype/pull/3340) +- Ftrack: Use client query functions [\#3339](https://github.com/pypeclub/OpenPype/pull/3339) +- Standalone Publisher: Use client query functions [\#3330](https://github.com/pypeclub/OpenPype/pull/3330) + +**Merged pull requests:** + +- Sync Queue: Added far future value for null values for dates [\#3371](https://github.com/pypeclub/OpenPype/pull/3371) +- Maya - added support for single frame playblast review [\#3369](https://github.com/pypeclub/OpenPype/pull/3369) +- Houdini: Implement Redshift Proxy Export [\#3196](https://github.com/pypeclub/OpenPype/pull/3196) + +## [3.11.1](https://github.com/pypeclub/OpenPype/tree/3.11.1) (2022-06-20) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.0...3.11.1) + +**🆕 New features** + +- Flame: custom export temp folder [\#3346](https://github.com/pypeclub/OpenPype/pull/3346) +- Nuke: removing third-party plugins [\#3344](https://github.com/pypeclub/OpenPype/pull/3344) + +**🚀 Enhancements** + +- Pyblish Pype: Hiding/Close issues [\#3367](https://github.com/pypeclub/OpenPype/pull/3367) +- Ftrack: Removed requirement of pypeclub role from default settings [\#3354](https://github.com/pypeclub/OpenPype/pull/3354) +- Kitsu: Prevent crash on missing frames information [\#3352](https://github.com/pypeclub/OpenPype/pull/3352) +- Ftrack: Open browser from tray [\#3320](https://github.com/pypeclub/OpenPype/pull/3320) +- Enhancement: More control over thumbnail processing. 
[\#3259](https://github.com/pypeclub/OpenPype/pull/3259) + +**🐛 Bug fixes** + +- Nuke: bake streams with slate on farm [\#3368](https://github.com/pypeclub/OpenPype/pull/3368) +- Harmony: audio validator has wrong logic [\#3364](https://github.com/pypeclub/OpenPype/pull/3364) +- Nuke: Fix missing variable in extract thumbnail [\#3363](https://github.com/pypeclub/OpenPype/pull/3363) +- Nuke: Fix precollect writes [\#3361](https://github.com/pypeclub/OpenPype/pull/3361) +- AE- fix validate\_scene\_settings and renderLocal [\#3358](https://github.com/pypeclub/OpenPype/pull/3358) +- deadline: fixing misidentification of revieables [\#3356](https://github.com/pypeclub/OpenPype/pull/3356) +- General: Create only one thumbnail per instance [\#3351](https://github.com/pypeclub/OpenPype/pull/3351) +- nuke: adding extract thumbnail settings 3.10 [\#3347](https://github.com/pypeclub/OpenPype/pull/3347) +- General: Fix last version function [\#3345](https://github.com/pypeclub/OpenPype/pull/3345) +- Deadline: added OPENPYPE\_MONGO to filter [\#3336](https://github.com/pypeclub/OpenPype/pull/3336) +- Nuke: fixing farm publishing if review is disabled [\#3306](https://github.com/pypeclub/OpenPype/pull/3306) +- Maya: Fix Yeti errors on Create, Publish and Load [\#3198](https://github.com/pypeclub/OpenPype/pull/3198) + +**🔀 Refactored code** + +- Webpublisher: Use client query functions [\#3333](https://github.com/pypeclub/OpenPype/pull/3333) + +## [3.11.0](https://github.com/pypeclub/OpenPype/tree/3.11.0) (2022-06-17) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.10.0...3.11.0) + +### 📖 Documentation + +- Documentation: Add app key to template documentation [\#3299](https://github.com/pypeclub/OpenPype/pull/3299) +- doc: adding royal render and multiverse to the web site [\#3285](https://github.com/pypeclub/OpenPype/pull/3285) +- Module: Kitsu module [\#2650](https://github.com/pypeclub/OpenPype/pull/2650) + +**🆕 New features** + +- Multiverse: fixed composition write, full docs, cosmetics [\#3178](https://github.com/pypeclub/OpenPype/pull/3178) + +**🚀 Enhancements** + +- Settings: Settings can be extracted from UI [\#3323](https://github.com/pypeclub/OpenPype/pull/3323) +- updated poetry installation source [\#3316](https://github.com/pypeclub/OpenPype/pull/3316) +- Ftrack: Action to easily create daily review session [\#3310](https://github.com/pypeclub/OpenPype/pull/3310) +- TVPaint: Extractor use mark in/out range to render [\#3309](https://github.com/pypeclub/OpenPype/pull/3309) +- Ftrack: Delivery action can work on ReviewSessions [\#3307](https://github.com/pypeclub/OpenPype/pull/3307) +- Maya: Look assigner UI improvements [\#3298](https://github.com/pypeclub/OpenPype/pull/3298) +- Ftrack: Action to transfer values of hierarchical attributes [\#3284](https://github.com/pypeclub/OpenPype/pull/3284) +- Maya: better handling of legacy review subsets names [\#3269](https://github.com/pypeclub/OpenPype/pull/3269) +- General: Updated windows oiio tool [\#3268](https://github.com/pypeclub/OpenPype/pull/3268) +- Unreal: add support for skeletalMesh and staticMesh to loaders [\#3267](https://github.com/pypeclub/OpenPype/pull/3267) +- Maya: reference loaders could store placeholder in referenced url [\#3264](https://github.com/pypeclub/OpenPype/pull/3264) +- TVPaint: Init file for TVPaint worker also handle guideline images [\#3250](https://github.com/pypeclub/OpenPype/pull/3250) +- Nuke: Change default icon path in settings [\#3247](https://github.com/pypeclub/OpenPype/pull/3247) +- 
Maya: publishing of animation and pointcache on a farm [\#3225](https://github.com/pypeclub/OpenPype/pull/3225) +- Maya: Look assigner UI improvements [\#3208](https://github.com/pypeclub/OpenPype/pull/3208) +- Nuke: add pointcache and animation to loader [\#3186](https://github.com/pypeclub/OpenPype/pull/3186) +- Nuke: Add a gizmo menu [\#3172](https://github.com/pypeclub/OpenPype/pull/3172) +- Support for Unreal 5 [\#3122](https://github.com/pypeclub/OpenPype/pull/3122) + +**🐛 Bug fixes** + +- General: Handle empty source key on instance [\#3342](https://github.com/pypeclub/OpenPype/pull/3342) +- Houdini: Fix Houdini VDB manage update wrong file attribute name [\#3322](https://github.com/pypeclub/OpenPype/pull/3322) +- Nuke: anatomy compatibility issue hacks [\#3321](https://github.com/pypeclub/OpenPype/pull/3321) +- hiero: otio p3 compatibility issue - metadata on effect use update 3.11 [\#3314](https://github.com/pypeclub/OpenPype/pull/3314) +- General: Vendorized modules for Python 2 and update poetry lock [\#3305](https://github.com/pypeclub/OpenPype/pull/3305) +- Fix - added local targets to install host [\#3303](https://github.com/pypeclub/OpenPype/pull/3303) +- Settings: Add missing default settings for nuke gizmo [\#3301](https://github.com/pypeclub/OpenPype/pull/3301) +- Maya: Fix swapped width and height in reviews [\#3300](https://github.com/pypeclub/OpenPype/pull/3300) +- Maya: point cache publish handles Maya instances [\#3297](https://github.com/pypeclub/OpenPype/pull/3297) +- Global: extract review slate issues [\#3286](https://github.com/pypeclub/OpenPype/pull/3286) +- Webpublisher: return only active projects in ProjectsEndpoint [\#3281](https://github.com/pypeclub/OpenPype/pull/3281) +- Hiero: add support for task tags 3.10.x [\#3279](https://github.com/pypeclub/OpenPype/pull/3279) +- General: Fix Oiio tool path resolving [\#3278](https://github.com/pypeclub/OpenPype/pull/3278) +- Maya: Fix udim support for e.g. 
uppercase \ tag [\#3266](https://github.com/pypeclub/OpenPype/pull/3266) +- Nuke: bake reformat was failing on string type [\#3261](https://github.com/pypeclub/OpenPype/pull/3261) +- Maya: hotfix Pxr multitexture in looks [\#3260](https://github.com/pypeclub/OpenPype/pull/3260) +- Unreal: Fix Camera Loading if Layout is missing [\#3255](https://github.com/pypeclub/OpenPype/pull/3255) +- Unreal: Fixed Animation loading in UE5 [\#3240](https://github.com/pypeclub/OpenPype/pull/3240) +- Unreal: Fixed Render creation in UE5 [\#3239](https://github.com/pypeclub/OpenPype/pull/3239) +- Unreal: Fixed Camera loading in UE5 [\#3238](https://github.com/pypeclub/OpenPype/pull/3238) +- Flame: debugging [\#3224](https://github.com/pypeclub/OpenPype/pull/3224) +- add silent audio to slate [\#3162](https://github.com/pypeclub/OpenPype/pull/3162) +- Add timecode to slate [\#2929](https://github.com/pypeclub/OpenPype/pull/2929) + +**🔀 Refactored code** + +- Blender: Use client query functions [\#3331](https://github.com/pypeclub/OpenPype/pull/3331) +- General: Define query functions [\#3288](https://github.com/pypeclub/OpenPype/pull/3288) + +**Merged pull requests:** + +- Maya: add pointcache family to gpu cache loader [\#3318](https://github.com/pypeclub/OpenPype/pull/3318) +- Maya look: skip empty file attributes [\#3274](https://github.com/pypeclub/OpenPype/pull/3274) + +## [3.10.0](https://github.com/pypeclub/OpenPype/tree/3.10.0) (2022-05-26) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.8...3.10.0) + +### 📖 Documentation + +- Docs: add all-contributors config and initial list [\#3094](https://github.com/pypeclub/OpenPype/pull/3094) +- Nuke docs with videos [\#3052](https://github.com/pypeclub/OpenPype/pull/3052) + +**🆕 New features** + +- General: OpenPype modules publish plugins are registered in host [\#3180](https://github.com/pypeclub/OpenPype/pull/3180) +- General: Creator plugins from addons can be registered [\#3179](https://github.com/pypeclub/OpenPype/pull/3179) +- Ftrack: Single image reviewable [\#3157](https://github.com/pypeclub/OpenPype/pull/3157) +- Nuke: Expose write attributes to settings [\#3123](https://github.com/pypeclub/OpenPype/pull/3123) +- Hiero: Initial frame publish support [\#3106](https://github.com/pypeclub/OpenPype/pull/3106) +- Unreal: Render Publishing [\#2917](https://github.com/pypeclub/OpenPype/pull/2917) +- AfterEffects: Implemented New Publisher [\#2838](https://github.com/pypeclub/OpenPype/pull/2838) +- Unreal: Rendering implementation [\#2410](https://github.com/pypeclub/OpenPype/pull/2410) + +**🚀 Enhancements** + +- Maya: FBX camera export [\#3253](https://github.com/pypeclub/OpenPype/pull/3253) +- General: updating common vendor `scriptmenu` to 1.5.2 [\#3246](https://github.com/pypeclub/OpenPype/pull/3246) +- Project Manager: Allow to paste Tasks into multiple assets at the same time [\#3226](https://github.com/pypeclub/OpenPype/pull/3226) +- Project manager: Sped up project load [\#3216](https://github.com/pypeclub/OpenPype/pull/3216) +- Loader UI: Speed issues of loader with sync server [\#3199](https://github.com/pypeclub/OpenPype/pull/3199) +- Looks: add basic support for Renderman [\#3190](https://github.com/pypeclub/OpenPype/pull/3190) +- Maya: added clean\_import option to Import loader [\#3181](https://github.com/pypeclub/OpenPype/pull/3181) +- Add the scripts menu definition to nuke [\#3168](https://github.com/pypeclub/OpenPype/pull/3168) +- Maya: add maya 2023 to default applications 
[\#3167](https://github.com/pypeclub/OpenPype/pull/3167) +- Compressed bgeo publishing in SAP and Houdini loader [\#3153](https://github.com/pypeclub/OpenPype/pull/3153) +- General: Add 'dataclasses' to required python modules [\#3149](https://github.com/pypeclub/OpenPype/pull/3149) +- Hooks: Tweak logging grammar [\#3147](https://github.com/pypeclub/OpenPype/pull/3147) +- Nuke: settings for reformat node in CreateWriteRender node [\#3143](https://github.com/pypeclub/OpenPype/pull/3143) +- Houdini: Add loader for alembic through Alembic Archive node [\#3140](https://github.com/pypeclub/OpenPype/pull/3140) +- Publisher: UI Modifications and fixes [\#3139](https://github.com/pypeclub/OpenPype/pull/3139) +- General: Simplified OP modules/addons import [\#3137](https://github.com/pypeclub/OpenPype/pull/3137) +- Terminal: Tweak coloring of TrayModuleManager logging enabled states [\#3133](https://github.com/pypeclub/OpenPype/pull/3133) +- General: Cleanup some Loader docstrings [\#3131](https://github.com/pypeclub/OpenPype/pull/3131) +- Nuke: render instance with subset name filtered overrides [\#3117](https://github.com/pypeclub/OpenPype/pull/3117) +- Unreal: Layout and Camera update and remove functions reimplemented and improvements [\#3116](https://github.com/pypeclub/OpenPype/pull/3116) +- Settings: Remove environment groups from settings [\#3115](https://github.com/pypeclub/OpenPype/pull/3115) +- TVPaint: Match renderlayer key with other hosts [\#3110](https://github.com/pypeclub/OpenPype/pull/3110) +- Ftrack: AssetVersion status on publish [\#3108](https://github.com/pypeclub/OpenPype/pull/3108) +- Tray publisher: Simple families from settings [\#3105](https://github.com/pypeclub/OpenPype/pull/3105) +- Local Settings UI: Overlay messages on save and reset [\#3104](https://github.com/pypeclub/OpenPype/pull/3104) +- General: Remove repos related logic [\#3087](https://github.com/pypeclub/OpenPype/pull/3087) +- Standalone publisher: add support for bgeo and vdb [\#3080](https://github.com/pypeclub/OpenPype/pull/3080) +- Houdini: Fix FPS + outdated content pop-ups [\#3079](https://github.com/pypeclub/OpenPype/pull/3079) +- General: Add global log verbose arguments [\#3070](https://github.com/pypeclub/OpenPype/pull/3070) +- Flame: extract presets distribution [\#3063](https://github.com/pypeclub/OpenPype/pull/3063) +- Update collect\_render.py [\#3055](https://github.com/pypeclub/OpenPype/pull/3055) +- SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983) +- Maya: Implement Hardware Renderer 2.0 support for Render Products [\#2611](https://github.com/pypeclub/OpenPype/pull/2611) + +**🐛 Bug fixes** + +- nuke: use framerange issue [\#3254](https://github.com/pypeclub/OpenPype/pull/3254) +- Ftrack: Chunk sizes for queries has minimal condition [\#3244](https://github.com/pypeclub/OpenPype/pull/3244) +- Maya: renderman displays needs to be filtered [\#3242](https://github.com/pypeclub/OpenPype/pull/3242) +- Ftrack: Validate that the user exists on ftrack [\#3237](https://github.com/pypeclub/OpenPype/pull/3237) +- Maya: Fix support for multiple resolutions [\#3236](https://github.com/pypeclub/OpenPype/pull/3236) +- TVPaint: Look for more groups than 12 [\#3228](https://github.com/pypeclub/OpenPype/pull/3228) +- Hiero: debugging frame range and other 3.10 [\#3222](https://github.com/pypeclub/OpenPype/pull/3222) +- Project Manager: Fix persistent editors on project change 
[\#3218](https://github.com/pypeclub/OpenPype/pull/3218) +- Deadline: instance data overwrite fix [\#3214](https://github.com/pypeclub/OpenPype/pull/3214) +- Ftrack: Push hierarchical attributes action works [\#3210](https://github.com/pypeclub/OpenPype/pull/3210) +- Standalone Publisher: Always create new representation for thumbnail [\#3203](https://github.com/pypeclub/OpenPype/pull/3203) +- Photoshop: skip collector when automatic testing [\#3202](https://github.com/pypeclub/OpenPype/pull/3202) +- Nuke: render/workfile version sync doesn't work on farm [\#3185](https://github.com/pypeclub/OpenPype/pull/3185) +- Ftrack: Review image only if there are no mp4 reviews [\#3183](https://github.com/pypeclub/OpenPype/pull/3183) +- Ftrack: Locations deepcopy issue [\#3177](https://github.com/pypeclub/OpenPype/pull/3177) +- General: Avoid creating multiple thumbnails [\#3176](https://github.com/pypeclub/OpenPype/pull/3176) +- General/Hiero: better clip duration calculation [\#3169](https://github.com/pypeclub/OpenPype/pull/3169) +- General: Oiio conversion for ffmpeg checks for invalid characters [\#3166](https://github.com/pypeclub/OpenPype/pull/3166) +- Fix for attaching render to subset [\#3164](https://github.com/pypeclub/OpenPype/pull/3164) +- Harmony: fixed missing task name in render instance [\#3163](https://github.com/pypeclub/OpenPype/pull/3163) +- Ftrack: Action delete old versions formatting works [\#3152](https://github.com/pypeclub/OpenPype/pull/3152) +- Deadline: fix the output directory [\#3144](https://github.com/pypeclub/OpenPype/pull/3144) +- General: New Session schema [\#3141](https://github.com/pypeclub/OpenPype/pull/3141) +- General: Missing version on headless mode crash properly [\#3136](https://github.com/pypeclub/OpenPype/pull/3136) +- TVPaint: Composite layers in reversed order [\#3135](https://github.com/pypeclub/OpenPype/pull/3135) +- Nuke: fixing default settings for workfile builder loaders [\#3120](https://github.com/pypeclub/OpenPype/pull/3120) +- Nuke: fix anatomy imageio regex default [\#3119](https://github.com/pypeclub/OpenPype/pull/3119) +- General: Python 3 compatibility in queries [\#3112](https://github.com/pypeclub/OpenPype/pull/3112) +- General: TemplateResult can be copied [\#3099](https://github.com/pypeclub/OpenPype/pull/3099) +- General: Collect loaded versions skips not existing representations [\#3095](https://github.com/pypeclub/OpenPype/pull/3095) +- RoyalRender Control Submission - AVALON\_APP\_NAME default [\#3091](https://github.com/pypeclub/OpenPype/pull/3091) +- Ftrack: Update Create Folders action [\#3089](https://github.com/pypeclub/OpenPype/pull/3089) +- Maya: Collect Render fix any render cameras check [\#3088](https://github.com/pypeclub/OpenPype/pull/3088) +- Project Manager: Avoid unnecessary updates of asset documents [\#3083](https://github.com/pypeclub/OpenPype/pull/3083) +- Standalone publisher: Fix plugins install [\#3077](https://github.com/pypeclub/OpenPype/pull/3077) +- General: Extract review sequence is not converted with same names [\#3076](https://github.com/pypeclub/OpenPype/pull/3076) +- Webpublisher: Use variant value [\#3068](https://github.com/pypeclub/OpenPype/pull/3068) +- Nuke: Add aov matching even for remainder and prerender [\#3060](https://github.com/pypeclub/OpenPype/pull/3060) +- Fix support for Renderman in Maya [\#3006](https://github.com/pypeclub/OpenPype/pull/3006) + +**🔀 Refactored code** + +- Avalon repo removed from Jobs workflow [\#3193](https://github.com/pypeclub/OpenPype/pull/3193) +- General: 
Remove remaining imports from avalon [\#3130](https://github.com/pypeclub/OpenPype/pull/3130) +- General: Move mongo db logic and remove avalon repository [\#3066](https://github.com/pypeclub/OpenPype/pull/3066) +- General: Move host install [\#3009](https://github.com/pypeclub/OpenPype/pull/3009) + +**Merged pull requests:** + +- Harmony: message length in 21.1 [\#3257](https://github.com/pypeclub/OpenPype/pull/3257) +- Harmony: 21.1 fix [\#3249](https://github.com/pypeclub/OpenPype/pull/3249) +- Maya: added jpg to filter for Image Plane Loader [\#3223](https://github.com/pypeclub/OpenPype/pull/3223) +- Webpublisher: replace space by underscore in subset names [\#3160](https://github.com/pypeclub/OpenPype/pull/3160) +- StandalonePublisher: removed Extract Background plugins [\#3093](https://github.com/pypeclub/OpenPype/pull/3093) +- Nuke: added suspend\_publish knob [\#3078](https://github.com/pypeclub/OpenPype/pull/3078) +- Bump async from 2.6.3 to 2.6.4 in /website [\#3065](https://github.com/pypeclub/OpenPype/pull/3065) +- SiteSync: Download all workfile inputs [\#2966](https://github.com/pypeclub/OpenPype/pull/2966) +- Photoshop: New Publisher [\#2933](https://github.com/pypeclub/OpenPype/pull/2933) +- Bump pillow from 9.0.0 to 9.0.1 [\#2880](https://github.com/pypeclub/OpenPype/pull/2880) +- AfterEffects: Allow configuration of default variant via Settings [\#2856](https://github.com/pypeclub/OpenPype/pull/2856) + +## [3.9.8](https://github.com/pypeclub/OpenPype/tree/3.9.8) (2022-05-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.7...3.9.8) + +## [3.9.7](https://github.com/pypeclub/OpenPype/tree/3.9.7) (2022-05-11) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.6...3.9.7) + +## [3.9.6](https://github.com/pypeclub/OpenPype/tree/3.9.6) (2022-05-03) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.5...3.9.6) + +## [3.9.5](https://github.com/pypeclub/OpenPype/tree/3.9.5) (2022-04-25) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...3.9.5) + +## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.3...3.9.4) + +### 📖 Documentation + +- Documentation: more info about Tasks [\#3062](https://github.com/pypeclub/OpenPype/pull/3062) +- Documentation: Python requirements to 3.7.9 [\#3035](https://github.com/pypeclub/OpenPype/pull/3035) +- Website Docs: Remove unused pages [\#2974](https://github.com/pypeclub/OpenPype/pull/2974) + +**🆕 New features** + +- General: Local overrides for environment variables [\#3045](https://github.com/pypeclub/OpenPype/pull/3045) +- Flame: Flare integration preparation [\#2928](https://github.com/pypeclub/OpenPype/pull/2928) + +**🚀 Enhancements** + +- TVPaint: Added init file for worker to triggers missing sound file dialog [\#3053](https://github.com/pypeclub/OpenPype/pull/3053) +- Ftrack: Custom attributes can be filled in slate values [\#3036](https://github.com/pypeclub/OpenPype/pull/3036) +- Resolve environment variable in google drive credential path [\#3008](https://github.com/pypeclub/OpenPype/pull/3008) + +**🐛 Bug fixes** + +- GitHub: Updated push-protected action in github workflow [\#3064](https://github.com/pypeclub/OpenPype/pull/3064) +- Nuke: Typos in imports from Nuke implementation [\#3061](https://github.com/pypeclub/OpenPype/pull/3061) +- Hotfix: fixing deadline job publishing [\#3059](https://github.com/pypeclub/OpenPype/pull/3059) +- General: Extract Review handle 
invalid characters for ffmpeg [\#3050](https://github.com/pypeclub/OpenPype/pull/3050) +- Slate Review: Support to keep format on slate concatenation [\#3049](https://github.com/pypeclub/OpenPype/pull/3049) +- Webpublisher: fix processing of workfile [\#3048](https://github.com/pypeclub/OpenPype/pull/3048) +- Ftrack: Integrate ftrack api fix [\#3044](https://github.com/pypeclub/OpenPype/pull/3044) +- Webpublisher - removed wrong hardcoded family [\#3043](https://github.com/pypeclub/OpenPype/pull/3043) +- LibraryLoader: Use current project for asset query in families filter [\#3042](https://github.com/pypeclub/OpenPype/pull/3042) +- SiteSync: Providers ignore that site is disabled [\#3041](https://github.com/pypeclub/OpenPype/pull/3041) +- Unreal: Creator import fixes [\#3040](https://github.com/pypeclub/OpenPype/pull/3040) +- SiteSync: fix transitive alternate sites, fix dropdown in Local Settings [\#3018](https://github.com/pypeclub/OpenPype/pull/3018) +- Maya: invalid review flag on rendered AOVs [\#2915](https://github.com/pypeclub/OpenPype/pull/2915) + +**Merged pull requests:** + +- Deadline: reworked pools assignment [\#3051](https://github.com/pypeclub/OpenPype/pull/3051) +- Houdini: Avoid ImportError on `hdefereval` when Houdini runs without UI [\#2987](https://github.com/pypeclub/OpenPype/pull/2987) + +## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.2...3.9.3) + +### 📖 Documentation + +- Documentation: Added mention of adding My Drive as a root [\#2999](https://github.com/pypeclub/OpenPype/pull/2999) +- Website Docs: Manager Ftrack fix broken links [\#2979](https://github.com/pypeclub/OpenPype/pull/2979) +- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951) +- Documentation: New publisher develop docs [\#2896](https://github.com/pypeclub/OpenPype/pull/2896) + +**🆕 New features** + +- Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027) +- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992) +- Publishing textures for Unreal [\#2988](https://github.com/pypeclub/OpenPype/pull/2988) +- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978) +- Multiverse: Initial Support [\#2908](https://github.com/pypeclub/OpenPype/pull/2908) + +**🚀 Enhancements** + +- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011) +- Ftrack: Add more options for note text of integrate ftrack note [\#3025](https://github.com/pypeclub/OpenPype/pull/3025) +- Console Interpreter: Changed how console splitter size are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016) +- Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015) +- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005) +- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001) +- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000) +- Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995) +- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985) +- General: `METADATA_KEYS` constant as `frozenset` for optimal immutable lookup 
[\#2980](https://github.com/pypeclub/OpenPype/pull/2980) +- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975) +- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967) +- Slack: Added configurable maximum file size of review upload to Slack [\#2945](https://github.com/pypeclub/OpenPype/pull/2945) +- NewPublisher: Prepared implementation of optional pyblish plugin [\#2943](https://github.com/pypeclub/OpenPype/pull/2943) +- TVPaint: Extractor to convert PNG into EXR [\#2942](https://github.com/pypeclub/OpenPype/pull/2942) +- Workfiles tool: Save as published workfiles [\#2937](https://github.com/pypeclub/OpenPype/pull/2937) +- Workfiles: Open published workfiles [\#2925](https://github.com/pypeclub/OpenPype/pull/2925) +- General: Default modules loaded dynamically [\#2923](https://github.com/pypeclub/OpenPype/pull/2923) +- CI: change the version bump logic [\#2919](https://github.com/pypeclub/OpenPype/pull/2919) +- Deadline: Add headless argument [\#2916](https://github.com/pypeclub/OpenPype/pull/2916) +- Nuke: Add no-audio Tag [\#2911](https://github.com/pypeclub/OpenPype/pull/2911) +- Ftrack: Fill workfile in custom attribute [\#2906](https://github.com/pypeclub/OpenPype/pull/2906) +- Nuke: improving readability [\#2903](https://github.com/pypeclub/OpenPype/pull/2903) +- Settings UI: Add simple tooltips for settings entities [\#2901](https://github.com/pypeclub/OpenPype/pull/2901) + +**🐛 Bug fixes** + +- General: Fix validate asset docs plug-in filename and class name [\#3029](https://github.com/pypeclub/OpenPype/pull/3029) +- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033) +- Settings UI: Version column can be extended so version are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032) +- General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028) +- Harmony: Added creating subset name for workfile from template [\#3024](https://github.com/pypeclub/OpenPype/pull/3024) +- AfterEffects: Added creating subset name for workfile from template [\#3023](https://github.com/pypeclub/OpenPype/pull/3023) +- General: Add example addons to ignored [\#3022](https://github.com/pypeclub/OpenPype/pull/3022) +- Maya: Remove missing import [\#3017](https://github.com/pypeclub/OpenPype/pull/3017) +- Ftrack: multiple reviewable componets [\#3012](https://github.com/pypeclub/OpenPype/pull/3012) +- Tray publisher: Fixes after code movement [\#3010](https://github.com/pypeclub/OpenPype/pull/3010) +- Hosts: Remove path existence checks in 'add\_implementation\_envs' [\#3004](https://github.com/pypeclub/OpenPype/pull/3004) +- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002) +- Fix - remove doubled dot in workfile created from template [\#2998](https://github.com/pypeclub/OpenPype/pull/2998) +- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996) +- PS: fix renaming subset incorrectly in PS [\#2991](https://github.com/pypeclub/OpenPype/pull/2991) +- Fix: Disable setuptools auto discovery [\#2990](https://github.com/pypeclub/OpenPype/pull/2990) +- AEL: fix opening existing workfile if no scene opened [\#2989](https://github.com/pypeclub/OpenPype/pull/2989) +- Maya: Don't do hardlinks on windows for look publishing [\#2986](https://github.com/pypeclub/OpenPype/pull/2986) +- Settings UI: Fix version completer 
on linux [\#2981](https://github.com/pypeclub/OpenPype/pull/2981) +- Photoshop: Fix creation of subset names in PS review and workfile [\#2969](https://github.com/pypeclub/OpenPype/pull/2969) +- Slack: Added default for review\_upload\_limit for Slack [\#2965](https://github.com/pypeclub/OpenPype/pull/2965) +- General: OIIO conversion for ffmeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958) +- Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956) +- General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950) +- LogViewer: Don't refresh on initialization [\#2949](https://github.com/pypeclub/OpenPype/pull/2949) +- nuke: python3 compatibility issue with `iteritems` [\#2948](https://github.com/pypeclub/OpenPype/pull/2948) +- General: anatomy data with correct task short key [\#2947](https://github.com/pypeclub/OpenPype/pull/2947) +- SceneInventory: Fix imports in UI [\#2944](https://github.com/pypeclub/OpenPype/pull/2944) +- Slack: add generic exception [\#2941](https://github.com/pypeclub/OpenPype/pull/2941) +- General: Python specific vendor paths on env injection [\#2939](https://github.com/pypeclub/OpenPype/pull/2939) +- General: More fail safe delete old versions [\#2936](https://github.com/pypeclub/OpenPype/pull/2936) +- Settings UI: Collapsed of collapsible wrapper works as expected [\#2934](https://github.com/pypeclub/OpenPype/pull/2934) +- Maya: Do not pass `set` to maya commands \(fixes support for older maya versions\) [\#2932](https://github.com/pypeclub/OpenPype/pull/2932) +- General: Don't print log record on OSError [\#2926](https://github.com/pypeclub/OpenPype/pull/2926) +- Hiero: Fix import of 'register\_event\_callback' [\#2924](https://github.com/pypeclub/OpenPype/pull/2924) +- Flame: centos related debugging [\#2922](https://github.com/pypeclub/OpenPype/pull/2922) +- Ftrack: Missing Ftrack id after editorial publish [\#2905](https://github.com/pypeclub/OpenPype/pull/2905) +- AfterEffects: Fix rendering for single frame in DL [\#2875](https://github.com/pypeclub/OpenPype/pull/2875) + +**🔀 Refactored code** + +- General: Move plugins register and discover [\#2935](https://github.com/pypeclub/OpenPype/pull/2935) +- General: Move Attribute Definitions from pipeline [\#2931](https://github.com/pypeclub/OpenPype/pull/2931) +- General: Removed silo references and terminal splash [\#2927](https://github.com/pypeclub/OpenPype/pull/2927) +- General: Move pipeline constants to OpenPype [\#2918](https://github.com/pypeclub/OpenPype/pull/2918) +- General: Move formatting and workfile functions [\#2914](https://github.com/pypeclub/OpenPype/pull/2914) +- General: Move remaining plugins from avalon [\#2912](https://github.com/pypeclub/OpenPype/pull/2912) + +**Merged pull requests:** + +- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030) +- Bump paramiko from 2.9.2 to 2.10.1 [\#2973](https://github.com/pypeclub/OpenPype/pull/2973) +- Bump minimist from 1.2.5 to 1.2.6 in /website [\#2954](https://github.com/pypeclub/OpenPype/pull/2954) +- Bump node-forge from 1.2.1 to 1.3.0 in /website [\#2953](https://github.com/pypeclub/OpenPype/pull/2953) +- Maya - added transparency into review creator [\#2952](https://github.com/pypeclub/OpenPype/pull/2952) + +## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.1...3.9.2) + +## 
[3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.0...3.9.1) + +**🚀 Enhancements** + +- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907) +- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897) +- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892) +- Nuke: ExtractReviewSlate can handle more codes and profiles [\#2879](https://github.com/pypeclub/OpenPype/pull/2879) +- Flame: sequence used for reference video [\#2869](https://github.com/pypeclub/OpenPype/pull/2869) + +**🐛 Bug fixes** + +- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904) +- Fixing gap detection in extract review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902) +- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899) +- SceneInventory: Fix import of load function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894) +- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891) +- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885) +- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884) +- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874) +- Maya: Deformer node ids validation plugin [\#2826](https://github.com/pypeclub/OpenPype/pull/2826) +- Flame Babypublisher optimalization [\#2806](https://github.com/pypeclub/OpenPype/pull/2806) +- hotfix: OIIO tool path - add extension on windows [\#2618](https://github.com/pypeclub/OpenPype/pull/2618) + +**🔀 Refactored code** + +- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889) +- General: Move loader logic from avalon to openpype [\#2886](https://github.com/pypeclub/OpenPype/pull/2886) + +## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.2...3.9.0) + +**Deprecated:** + +- Houdini: Remove unused code [\#2779](https://github.com/pypeclub/OpenPype/pull/2779) +- Loader: Remove default family states for hosts from code [\#2706](https://github.com/pypeclub/OpenPype/pull/2706) +- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845) + +### 📖 Documentation + +- Documentation: fixed broken links [\#2799](https://github.com/pypeclub/OpenPype/pull/2799) +- Documentation: broken link fix [\#2785](https://github.com/pypeclub/OpenPype/pull/2785) +- Documentation: link fixes [\#2772](https://github.com/pypeclub/OpenPype/pull/2772) +- Update docusaurus to latest version [\#2760](https://github.com/pypeclub/OpenPype/pull/2760) +- Various testing updates [\#2726](https://github.com/pypeclub/OpenPype/pull/2726) +- documentation: add example to `repack-version` command [\#2669](https://github.com/pypeclub/OpenPype/pull/2669) +- Update docusaurus [\#2639](https://github.com/pypeclub/OpenPype/pull/2639) +- Documentation: Fixed relative links [\#2621](https://github.com/pypeclub/OpenPype/pull/2621) +- Documentation: Change Photoshop & AfterEffects plugin path [\#2878](https://github.com/pypeclub/OpenPype/pull/2878) + +**🆕 New features** + +- Flame: loading clips to reels 
[\#2622](https://github.com/pypeclub/OpenPype/pull/2622) +- General: Store settings by OpenPype version [\#2570](https://github.com/pypeclub/OpenPype/pull/2570) + +**🚀 Enhancements** + +- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841) +- General: Set context environments for non host applications [\#2803](https://github.com/pypeclub/OpenPype/pull/2803) +- Houdini: Remove duplicate ValidateOutputNode plug-in [\#2780](https://github.com/pypeclub/OpenPype/pull/2780) +- Tray publisher: New Tray Publisher host \(beta\) [\#2778](https://github.com/pypeclub/OpenPype/pull/2778) +- Slack: Added regex for filtering on subset names [\#2775](https://github.com/pypeclub/OpenPype/pull/2775) +- Houdini: Implement Reset Frame Range [\#2770](https://github.com/pypeclub/OpenPype/pull/2770) +- Pyblish Pype: Remove redundant new line in installed fonts printing [\#2758](https://github.com/pypeclub/OpenPype/pull/2758) +- Flame: use Shot Name on segment for asset name [\#2751](https://github.com/pypeclub/OpenPype/pull/2751) +- Flame: adding validator source clip [\#2746](https://github.com/pypeclub/OpenPype/pull/2746) +- Work Files: Preserve subversion comment of current filename by default [\#2734](https://github.com/pypeclub/OpenPype/pull/2734) +- Maya: set Deadline job/batch name to original source workfile name instead of published workfile [\#2733](https://github.com/pypeclub/OpenPype/pull/2733) +- Ftrack: Disable ftrack module by default [\#2732](https://github.com/pypeclub/OpenPype/pull/2732) +- Project Manager: Disable add task, add asset and save button when not in a project [\#2727](https://github.com/pypeclub/OpenPype/pull/2727) +- dropbox handle big file [\#2718](https://github.com/pypeclub/OpenPype/pull/2718) +- Fusion Move PR: Minor tweaks to Fusion integration [\#2716](https://github.com/pypeclub/OpenPype/pull/2716) +- RoyalRender: Minor enhancements [\#2700](https://github.com/pypeclub/OpenPype/pull/2700) +- Nuke: prerender with review knob [\#2691](https://github.com/pypeclub/OpenPype/pull/2691) +- Maya configurable unit validator [\#2680](https://github.com/pypeclub/OpenPype/pull/2680) +- General: Add settings for CleanUpFarm and disable the plugin by default [\#2679](https://github.com/pypeclub/OpenPype/pull/2679) +- Project Manager: Only allow scroll wheel edits when spinbox is active [\#2678](https://github.com/pypeclub/OpenPype/pull/2678) +- Ftrack: Sync description to assets [\#2670](https://github.com/pypeclub/OpenPype/pull/2670) +- Houdini: Moved to OpenPype [\#2658](https://github.com/pypeclub/OpenPype/pull/2658) +- Maya: Move implementation to OpenPype [\#2649](https://github.com/pypeclub/OpenPype/pull/2649) +- General: FFmpeg conversion also check attribute string length [\#2635](https://github.com/pypeclub/OpenPype/pull/2635) +- Houdini: Load Arnold .ass procedurals into Houdini [\#2606](https://github.com/pypeclub/OpenPype/pull/2606) +- Deadline: Simplify GlobalJobPreLoad logic [\#2605](https://github.com/pypeclub/OpenPype/pull/2605) +- Houdini: Implement Arnold .ass standin extraction from Houdini \(also support .ass.gz\) [\#2603](https://github.com/pypeclub/OpenPype/pull/2603) +- New Publisher: New features and preparations for new standalone publisher [\#2556](https://github.com/pypeclub/OpenPype/pull/2556) +- Fix Maya 2022 Python 3 compatibility [\#2445](https://github.com/pypeclub/OpenPype/pull/2445) +- TVPaint: Use new publisher exceptions in validators [\#2435](https://github.com/pypeclub/OpenPype/pull/2435) +- Harmony: Added new style 
validations for New Publisher [\#2434](https://github.com/pypeclub/OpenPype/pull/2434) +- Aftereffects: New style validations for New publisher [\#2430](https://github.com/pypeclub/OpenPype/pull/2430) +- Farm publishing: New cleanup plugin for Maya renders on farm [\#2390](https://github.com/pypeclub/OpenPype/pull/2390) +- General: Subset name filtering in ExtractReview outpus [\#2872](https://github.com/pypeclub/OpenPype/pull/2872) +- NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867) +- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863) +- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859) +- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837) +- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836) +- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822) +- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817) +- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812) +- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811) +- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805) +- Houdini: Move Houdini Save Current File to beginning of ExtractorOrder [\#2747](https://github.com/pypeclub/OpenPype/pull/2747) +- Global: adding studio name/code to anatomy template formatting data [\#2630](https://github.com/pypeclub/OpenPype/pull/2630) + +**🐛 Bug fixes** + +- Settings UI: Search case sensitivity [\#2810](https://github.com/pypeclub/OpenPype/pull/2810) +- resolve: fixing fusion module loading [\#2802](https://github.com/pypeclub/OpenPype/pull/2802) +- Ftrack: Unset task ids from asset versions before tasks are removed [\#2800](https://github.com/pypeclub/OpenPype/pull/2800) +- Slack: fail gracefully if slack exception [\#2798](https://github.com/pypeclub/OpenPype/pull/2798) +- Flame: Fix version string in default settings [\#2783](https://github.com/pypeclub/OpenPype/pull/2783) +- After Effects: Fix typo in name `afftereffects` -\> `aftereffects` [\#2768](https://github.com/pypeclub/OpenPype/pull/2768) +- Houdini: Fix open last workfile [\#2767](https://github.com/pypeclub/OpenPype/pull/2767) +- Avoid renaming udim indexes [\#2765](https://github.com/pypeclub/OpenPype/pull/2765) +- Maya: Fix `unique_namespace` when in an namespace that is empty [\#2759](https://github.com/pypeclub/OpenPype/pull/2759) +- Loader UI: Fix right click in representation widget [\#2757](https://github.com/pypeclub/OpenPype/pull/2757) +- Harmony: Rendering in Deadline didn't work in other machines than submitter [\#2754](https://github.com/pypeclub/OpenPype/pull/2754) +- Aftereffects 2022 and Deadline [\#2748](https://github.com/pypeclub/OpenPype/pull/2748) +- Flame: bunch of bugs [\#2745](https://github.com/pypeclub/OpenPype/pull/2745) +- Maya: Save current scene on workfile publish [\#2744](https://github.com/pypeclub/OpenPype/pull/2744) +- Version Up: Preserve parts of filename after version number \(like subversion\) on version\_up [\#2741](https://github.com/pypeclub/OpenPype/pull/2741) +- Loader UI: Multiple asset selection and underline colors fixed [\#2731](https://github.com/pypeclub/OpenPype/pull/2731) +- General: Fix 
loading of unused chars in xml format [\#2729](https://github.com/pypeclub/OpenPype/pull/2729) +- TVPaint: Set objectName with members [\#2725](https://github.com/pypeclub/OpenPype/pull/2725) +- General: Don't use 'objectName' from loaded references [\#2715](https://github.com/pypeclub/OpenPype/pull/2715) +- Settings: Studio Project anatomy is queried using right keys [\#2711](https://github.com/pypeclub/OpenPype/pull/2711) +- Local Settings: Additional applications don't break UI [\#2710](https://github.com/pypeclub/OpenPype/pull/2710) +- Maya: Remove some unused code [\#2709](https://github.com/pypeclub/OpenPype/pull/2709) +- Houdini: Fix refactor of Houdini host move for CreateArnoldAss [\#2704](https://github.com/pypeclub/OpenPype/pull/2704) +- LookAssigner: Fix imports after moving code to OpenPype repository [\#2701](https://github.com/pypeclub/OpenPype/pull/2701) +- Multiple hosts: unify menu style across hosts [\#2693](https://github.com/pypeclub/OpenPype/pull/2693) +- Maya Redshift fixes [\#2692](https://github.com/pypeclub/OpenPype/pull/2692) +- Maya: fix fps validation popup [\#2685](https://github.com/pypeclub/OpenPype/pull/2685) +- Houdini: Explicitly collect correct frame name even in case of single frame render when `frameStart` is provided [\#2676](https://github.com/pypeclub/OpenPype/pull/2676) +- hiero: fix effect collector name and order [\#2673](https://github.com/pypeclub/OpenPype/pull/2673) +- Maya: Fix menu callbacks [\#2671](https://github.com/pypeclub/OpenPype/pull/2671) +- hiero: removing obsolete unsupported plugin [\#2667](https://github.com/pypeclub/OpenPype/pull/2667) +- Launcher: Fix access to 'data' attribute on actions [\#2659](https://github.com/pypeclub/OpenPype/pull/2659) +- Maya `vrscene` loader fixes [\#2633](https://github.com/pypeclub/OpenPype/pull/2633) +- Houdini: fix usd family in loader and integrators [\#2631](https://github.com/pypeclub/OpenPype/pull/2631) +- Maya: Add only reference node to look family container like with other families [\#2508](https://github.com/pypeclub/OpenPype/pull/2508) +- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877) +- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868) +- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866) +- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864) +- General: ffmpeg was crashing on slate merge [\#2860](https://github.com/pypeclub/OpenPype/pull/2860) +- WebPublisher: Video file was published with one too many frames [\#2858](https://github.com/pypeclub/OpenPype/pull/2858) +- New Publisher: Error dialog got right styles [\#2857](https://github.com/pypeclub/OpenPype/pull/2857) +- General: Fix getattr callback on dynamic modules [\#2855](https://github.com/pypeclub/OpenPype/pull/2855) +- Nuke: slate resolution to input video resolution [\#2853](https://github.com/pypeclub/OpenPype/pull/2853) +- WebPublisher: Fix username stored in DB [\#2852](https://github.com/pypeclub/OpenPype/pull/2852) +- WebPublisher: Fix wrong number of frames for video file [\#2851](https://github.com/pypeclub/OpenPype/pull/2851) +- Nuke: Fix family test in validate\_write\_legacy to work with stillImage [\#2847](https://github.com/pypeclub/OpenPype/pull/2847) +- Nuke: fix multiple baking profile farm publishing [\#2842](https://github.com/pypeclub/OpenPype/pull/2842) +- Blender: Fixed parameters for FBX export of the camera 
[\#2840](https://github.com/pypeclub/OpenPype/pull/2840) +- Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832) +- Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828) +- Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827) +- Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825) +- Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824) +- General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821) +- Settings UI: Fix "Apply from" action [\#2820](https://github.com/pypeclub/OpenPype/pull/2820) +- Ftrack: Job killer with missing user [\#2819](https://github.com/pypeclub/OpenPype/pull/2819) +- Nuke: Use AVALON\_APP to get value for "app" key [\#2818](https://github.com/pypeclub/OpenPype/pull/2818) +- StandalonePublisher: use dynamic groups in subset names [\#2816](https://github.com/pypeclub/OpenPype/pull/2816) + +**🔀 Refactored code** + +- Ftrack: Moved module one hierarchy level higher [\#2792](https://github.com/pypeclub/OpenPype/pull/2792) +- SyncServer: Moved module one hierarchy level higher [\#2791](https://github.com/pypeclub/OpenPype/pull/2791) +- Royal render: Move module one hierarchy level higher [\#2790](https://github.com/pypeclub/OpenPype/pull/2790) +- Deadline: Move module one hierarchy level higher [\#2789](https://github.com/pypeclub/OpenPype/pull/2789) +- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876) +- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854) +- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848) +- General: Basic event system [\#2846](https://github.com/pypeclub/OpenPype/pull/2846) +- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839) +- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829) +- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823) +- General: Extract template formatting from anatomy [\#2766](https://github.com/pypeclub/OpenPype/pull/2766) + +**Merged pull requests:** + +- Fusion: Moved implementation into OpenPype [\#2713](https://github.com/pypeclub/OpenPype/pull/2713) +- TVPaint: Plugin build without dependencies [\#2705](https://github.com/pypeclub/OpenPype/pull/2705) +- Webpublisher: Photoshop create a beauty png [\#2689](https://github.com/pypeclub/OpenPype/pull/2689) +- Ftrack: Hierarchical attributes are queried properly [\#2682](https://github.com/pypeclub/OpenPype/pull/2682) +- Maya: Add Validate Frame Range settings [\#2661](https://github.com/pypeclub/OpenPype/pull/2661) +- Harmony: move to Openpype [\#2657](https://github.com/pypeclub/OpenPype/pull/2657) +- Maya: cleanup duplicate rendersetup code [\#2642](https://github.com/pypeclub/OpenPype/pull/2642) +- Deadline: Be able to pass Mongo url to job [\#2616](https://github.com/pypeclub/OpenPype/pull/2616) + +## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.1...3.8.2) + +### 📖 Documentation + +- Cosmetics: Fix common typos in openpype/website [\#2617](https://github.com/pypeclub/OpenPype/pull/2617) + +**🚀 
Enhancements** + +- TVPaint: Image loaders also work on review family [\#2638](https://github.com/pypeclub/OpenPype/pull/2638) +- General: Project backup tools [\#2629](https://github.com/pypeclub/OpenPype/pull/2629) +- nuke: adding clear button to write nodes [\#2627](https://github.com/pypeclub/OpenPype/pull/2627) +- Ftrack: Family to Asset type mapping is in settings [\#2602](https://github.com/pypeclub/OpenPype/pull/2602) +- Nuke: load color space from representation data [\#2576](https://github.com/pypeclub/OpenPype/pull/2576) + +**🐛 Bug fixes** + +- Fix pulling of cx\_freeze 6.10 [\#2628](https://github.com/pypeclub/OpenPype/pull/2628) +- Global: fix broken otio review extractor [\#2590](https://github.com/pypeclub/OpenPype/pull/2590) + +**Merged pull requests:** + +- WebPublisher: fix instance duplicates [\#2641](https://github.com/pypeclub/OpenPype/pull/2641) +- Fix - safer pulling of task name for webpublishing from PS [\#2613](https://github.com/pypeclub/OpenPype/pull/2613) + +## [3.8.1](https://github.com/pypeclub/OpenPype/tree/3.8.1) (2022-02-01) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.0...3.8.1) + +**🚀 Enhancements** + +- Webpublisher: Thumbnail extractor [\#2600](https://github.com/pypeclub/OpenPype/pull/2600) +- Loader: Allow to toggle default family filters between "include" or "exclude" filtering [\#2541](https://github.com/pypeclub/OpenPype/pull/2541) +- Launcher: Added context menu to to skip opening last workfile [\#2536](https://github.com/pypeclub/OpenPype/pull/2536) +- Unreal: JSON Layout Loading support [\#2066](https://github.com/pypeclub/OpenPype/pull/2066) + +**🐛 Bug fixes** + +- Release/3.8.0 [\#2619](https://github.com/pypeclub/OpenPype/pull/2619) +- Settings: Enum does not store empty string if has single item to select [\#2615](https://github.com/pypeclub/OpenPype/pull/2615) +- switch distutils to sysconfig for `get_platform()` [\#2594](https://github.com/pypeclub/OpenPype/pull/2594) +- Fix poetry index and speedcopy update [\#2589](https://github.com/pypeclub/OpenPype/pull/2589) +- Webpublisher: Fix - subset names from processed .psd used wrong value for task [\#2586](https://github.com/pypeclub/OpenPype/pull/2586) +- `vrscene` creator Deadline webservice URL handling [\#2580](https://github.com/pypeclub/OpenPype/pull/2580) +- global: track name was failing if duplicated root word in name [\#2568](https://github.com/pypeclub/OpenPype/pull/2568) +- Validate Maya Rig produces no cycle errors [\#2484](https://github.com/pypeclub/OpenPype/pull/2484) + +**Merged pull requests:** + +- Bump pillow from 8.4.0 to 9.0.0 [\#2595](https://github.com/pypeclub/OpenPype/pull/2595) +- Webpublisher: Skip version collect [\#2591](https://github.com/pypeclub/OpenPype/pull/2591) +- build\(deps\): bump pillow from 8.4.0 to 9.0.0 [\#2523](https://github.com/pypeclub/OpenPype/pull/2523) + +## [3.8.0](https://github.com/pypeclub/OpenPype/tree/3.8.0) (2022-01-24) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.7.0...3.8.0) + +### 📖 Documentation + +- Variable in docs renamed to proper name [\#2546](https://github.com/pypeclub/OpenPype/pull/2546) + +**🆕 New features** + +- Flame: extracting segments with trans-coding [\#2547](https://github.com/pypeclub/OpenPype/pull/2547) +- Maya : V-Ray Proxy - load all ABC files via proxy [\#2544](https://github.com/pypeclub/OpenPype/pull/2544) +- Maya to Unreal: Extended static mesh workflow [\#2537](https://github.com/pypeclub/OpenPype/pull/2537) +- Flame: collecting publishable instances 
[\#2519](https://github.com/pypeclub/OpenPype/pull/2519) +- Flame: create publishable clips [\#2495](https://github.com/pypeclub/OpenPype/pull/2495) +- Flame: OpenTimelineIO Export Module [\#2398](https://github.com/pypeclub/OpenPype/pull/2398) + +**🚀 Enhancements** + +- Webpublisher: Moved error to the beginning of the log [\#2559](https://github.com/pypeclub/OpenPype/pull/2559) +- Ftrack: Use ApplicationManager to get DJV path [\#2558](https://github.com/pypeclub/OpenPype/pull/2558) +- Webpublisher: Added endpoint to reprocess batch through UI [\#2555](https://github.com/pypeclub/OpenPype/pull/2555) +- Settings: PathInput strip passed string [\#2550](https://github.com/pypeclub/OpenPype/pull/2550) +- Global: Extract Review anatomy fill data with output name [\#2548](https://github.com/pypeclub/OpenPype/pull/2548) +- Cosmetics: Clean up some cosmetics / typos [\#2542](https://github.com/pypeclub/OpenPype/pull/2542) +- General: Validate if current process OpenPype version is requested version [\#2529](https://github.com/pypeclub/OpenPype/pull/2529) +- General: Be able to use anatomy data in ffmpeg output arguments [\#2525](https://github.com/pypeclub/OpenPype/pull/2525) +- Expose toggle publish plug-in settings for Maya Look Shading Engine Naming [\#2521](https://github.com/pypeclub/OpenPype/pull/2521) +- Photoshop: Move implementation to OpenPype [\#2510](https://github.com/pypeclub/OpenPype/pull/2510) +- TimersManager: Move module one hierarchy higher [\#2501](https://github.com/pypeclub/OpenPype/pull/2501) +- Slack: notifications are sent with Openpype logo and bot name [\#2499](https://github.com/pypeclub/OpenPype/pull/2499) +- Slack: Add review to notification message [\#2498](https://github.com/pypeclub/OpenPype/pull/2498) +- Ftrack: Event handlers settings [\#2496](https://github.com/pypeclub/OpenPype/pull/2496) +- Tools: Fix style and modality of errors in loader and creator [\#2489](https://github.com/pypeclub/OpenPype/pull/2489) +- Maya: Collect 'fps' animation data only for "review" instances [\#2486](https://github.com/pypeclub/OpenPype/pull/2486) +- Project Manager: Remove project button cleanup [\#2482](https://github.com/pypeclub/OpenPype/pull/2482) +- Tools: Be able to change models of tasks and assets widgets [\#2475](https://github.com/pypeclub/OpenPype/pull/2475) +- Publish pype: Reduce publish process deferring [\#2464](https://github.com/pypeclub/OpenPype/pull/2464) +- Maya: Improve speed of Collect History logic [\#2460](https://github.com/pypeclub/OpenPype/pull/2460) +- Maya: Validate Rig Controllers - fix Error: in script editor [\#2459](https://github.com/pypeclub/OpenPype/pull/2459) +- Maya: Validate NGONs simplify and speed-up [\#2458](https://github.com/pypeclub/OpenPype/pull/2458) +- Maya: Optimize Validate Locked Normals speed for dense polymeshes [\#2457](https://github.com/pypeclub/OpenPype/pull/2457) +- Maya: Refactor missing \_get\_reference\_node method [\#2455](https://github.com/pypeclub/OpenPype/pull/2455) +- Houdini: Remove broken unique name counter [\#2450](https://github.com/pypeclub/OpenPype/pull/2450) +- Maya: Improve lib.polyConstraint performance when Select tool is not the active tool context [\#2447](https://github.com/pypeclub/OpenPype/pull/2447) +- General: Validate third party before build [\#2425](https://github.com/pypeclub/OpenPype/pull/2425) +- Maya : add option to not group reference in ReferenceLoader [\#2383](https://github.com/pypeclub/OpenPype/pull/2383) + +**🐛 Bug fixes** + +- AfterEffects: Fix - removed obsolete import 
[\#2577](https://github.com/pypeclub/OpenPype/pull/2577) +- General: OpenPype version updates [\#2575](https://github.com/pypeclub/OpenPype/pull/2575) +- Ftrack: Delete action revision [\#2563](https://github.com/pypeclub/OpenPype/pull/2563) +- Webpublisher: ftrack shows incorrect user names [\#2560](https://github.com/pypeclub/OpenPype/pull/2560) +- General: Do not validate version if build does not support it [\#2557](https://github.com/pypeclub/OpenPype/pull/2557) +- Webpublisher: Fixed progress reporting [\#2553](https://github.com/pypeclub/OpenPype/pull/2553) +- Fix Maya AssProxyLoader version switch [\#2551](https://github.com/pypeclub/OpenPype/pull/2551) +- General: Fix install thread in igniter [\#2549](https://github.com/pypeclub/OpenPype/pull/2549) +- Houdini: vdbcache family preserve frame numbers on publish integration + enable validate version for Houdini [\#2535](https://github.com/pypeclub/OpenPype/pull/2535) +- Maya: Fix Load VDB to V-Ray [\#2533](https://github.com/pypeclub/OpenPype/pull/2533) +- Maya: ReferenceLoader fix not unique group name error for attach to root [\#2532](https://github.com/pypeclub/OpenPype/pull/2532) +- Maya: namespaced context go back to original namespace when started from inside a namespace [\#2531](https://github.com/pypeclub/OpenPype/pull/2531) +- Fix create zip tool - path argument [\#2522](https://github.com/pypeclub/OpenPype/pull/2522) +- Maya: Fix Extract Look with space in names [\#2518](https://github.com/pypeclub/OpenPype/pull/2518) +- Fix published frame content for sequence starting with 0 [\#2513](https://github.com/pypeclub/OpenPype/pull/2513) +- Maya: reset empty string attributes correctly to "" instead of "None" [\#2506](https://github.com/pypeclub/OpenPype/pull/2506) +- Improve FusionPreLaunch hook errors [\#2505](https://github.com/pypeclub/OpenPype/pull/2505) +- General: Settings work if OpenPypeVersion is available [\#2494](https://github.com/pypeclub/OpenPype/pull/2494) +- General: PYTHONPATH may break OpenPype dependencies [\#2493](https://github.com/pypeclub/OpenPype/pull/2493) +- General: Modules import function output fix [\#2492](https://github.com/pypeclub/OpenPype/pull/2492) +- AE: fix hiding of alert window below Publish [\#2491](https://github.com/pypeclub/OpenPype/pull/2491) +- Workfiles tool: Files widget show files on first show [\#2488](https://github.com/pypeclub/OpenPype/pull/2488) +- General: Custom template paths filter fix [\#2483](https://github.com/pypeclub/OpenPype/pull/2483) +- Loader: Remove always on top flag in tray [\#2480](https://github.com/pypeclub/OpenPype/pull/2480) +- General: Anatomy does not return root envs as unicode [\#2465](https://github.com/pypeclub/OpenPype/pull/2465) +- Maya: Validate Shape Zero do not keep fixed geometry vertices selected/active after repair [\#2456](https://github.com/pypeclub/OpenPype/pull/2456) + +**Merged pull requests:** + +- AfterEffects: Move implementation to OpenPype [\#2543](https://github.com/pypeclub/OpenPype/pull/2543) +- Maya: Remove Maya Look Assigner check on startup [\#2540](https://github.com/pypeclub/OpenPype/pull/2540) +- build\(deps\): bump shelljs from 0.8.4 to 0.8.5 in /website [\#2538](https://github.com/pypeclub/OpenPype/pull/2538) +- build\(deps\): bump follow-redirects from 1.14.4 to 1.14.7 in /website [\#2534](https://github.com/pypeclub/OpenPype/pull/2534) +- Nuke: Merge avalon's implementation into OpenPype [\#2514](https://github.com/pypeclub/OpenPype/pull/2514) +- Maya: Vray fix proxies look assignment 
[\#2392](https://github.com/pypeclub/OpenPype/pull/2392) +- Bump algoliasearch-helper from 3.4.4 to 3.6.2 in /website [\#2297](https://github.com/pypeclub/OpenPype/pull/2297) + +## [3.7.0](https://github.com/pypeclub/OpenPype/tree/3.7.0) (2022-01-04) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.4...3.7.0) + +**Deprecated:** + +- General: Default modules hierarchy n2 [\#2368](https://github.com/pypeclub/OpenPype/pull/2368) + +### 📖 Documentation + +- docs\[website\]: Add Ellipse Studio \(logo\) as an OpenPype contributor [\#2324](https://github.com/pypeclub/OpenPype/pull/2324) + +**🆕 New features** + +- Settings UI use OpenPype styles [\#2296](https://github.com/pypeclub/OpenPype/pull/2296) +- Store typed version dependencies for workfiles [\#2192](https://github.com/pypeclub/OpenPype/pull/2192) +- OpenPypeV3: add key task type, task shortname and user to path templating construction [\#2157](https://github.com/pypeclub/OpenPype/pull/2157) +- Nuke: Alembic model workflow [\#2140](https://github.com/pypeclub/OpenPype/pull/2140) +- TVPaint: Load workfile from published. [\#1980](https://github.com/pypeclub/OpenPype/pull/1980) + +**🚀 Enhancements** + +- General: Workdir extra folders [\#2462](https://github.com/pypeclub/OpenPype/pull/2462) +- Photoshop: New style validations for New publisher [\#2429](https://github.com/pypeclub/OpenPype/pull/2429) +- General: Environment variables groups [\#2424](https://github.com/pypeclub/OpenPype/pull/2424) +- Unreal: Dynamic menu created in Python [\#2422](https://github.com/pypeclub/OpenPype/pull/2422) +- Settings UI: Hyperlinks to settings [\#2420](https://github.com/pypeclub/OpenPype/pull/2420) +- Modules: JobQueue module moved one hierarchy level higher [\#2419](https://github.com/pypeclub/OpenPype/pull/2419) +- TimersManager: Start timer post launch hook [\#2418](https://github.com/pypeclub/OpenPype/pull/2418) +- General: Run applications as separate processes under linux [\#2408](https://github.com/pypeclub/OpenPype/pull/2408) +- Ftrack: Check existence of object type on recreation [\#2404](https://github.com/pypeclub/OpenPype/pull/2404) +- Enhancement: Global cleanup plugin that explicitly remove paths from context [\#2402](https://github.com/pypeclub/OpenPype/pull/2402) +- General: MongoDB ability to specify replica set groups [\#2401](https://github.com/pypeclub/OpenPype/pull/2401) +- Flame: moving `utility_scripts` to api folder also with `scripts` [\#2385](https://github.com/pypeclub/OpenPype/pull/2385) +- Centos 7 dependency compatibility [\#2384](https://github.com/pypeclub/OpenPype/pull/2384) +- Enhancement: Settings: Use project settings values from another project [\#2382](https://github.com/pypeclub/OpenPype/pull/2382) +- Blender 3: Support auto install for new blender version [\#2377](https://github.com/pypeclub/OpenPype/pull/2377) +- Maya add render image path to settings [\#2375](https://github.com/pypeclub/OpenPype/pull/2375) +- Settings: Webpublisher in hosts enum [\#2367](https://github.com/pypeclub/OpenPype/pull/2367) +- Hiero: python3 compatibility [\#2365](https://github.com/pypeclub/OpenPype/pull/2365) +- Burnins: Be able recognize mxf OPAtom format [\#2361](https://github.com/pypeclub/OpenPype/pull/2361) +- Maya: Add is\_static\_image\_plane and is\_in\_all\_views option in imagePlaneLoader [\#2356](https://github.com/pypeclub/OpenPype/pull/2356) +- Local settings: Copyable studio paths [\#2349](https://github.com/pypeclub/OpenPype/pull/2349) +- Assets Widget: Clear model on project change 
[\#2345](https://github.com/pypeclub/OpenPype/pull/2345) +- General: OpenPype default modules hierarchy [\#2338](https://github.com/pypeclub/OpenPype/pull/2338) +- TVPaint: Move implementation to OpenPype [\#2336](https://github.com/pypeclub/OpenPype/pull/2336) +- General: FFprobe error exception contain original error message [\#2328](https://github.com/pypeclub/OpenPype/pull/2328) +- Resolve: Add experimental button to menu [\#2325](https://github.com/pypeclub/OpenPype/pull/2325) +- Hiero: Add experimental tools action [\#2323](https://github.com/pypeclub/OpenPype/pull/2323) +- Input links: Cleanup and unification of differences [\#2322](https://github.com/pypeclub/OpenPype/pull/2322) +- General: Don't validate vendor bin with executing them [\#2317](https://github.com/pypeclub/OpenPype/pull/2317) +- General: Multilayer EXRs support [\#2315](https://github.com/pypeclub/OpenPype/pull/2315) +- General: Run process log stderr as info log level [\#2309](https://github.com/pypeclub/OpenPype/pull/2309) +- General: Reduce vendor imports [\#2305](https://github.com/pypeclub/OpenPype/pull/2305) +- Tools: Cleanup of unused classes [\#2304](https://github.com/pypeclub/OpenPype/pull/2304) +- Project Manager: Added ability to delete project [\#2298](https://github.com/pypeclub/OpenPype/pull/2298) +- Ftrack: Synchronize input links [\#2287](https://github.com/pypeclub/OpenPype/pull/2287) +- StandalonePublisher: Remove unused plugin ExtractHarmonyZip [\#2277](https://github.com/pypeclub/OpenPype/pull/2277) +- Ftrack: Support multiple reviews [\#2271](https://github.com/pypeclub/OpenPype/pull/2271) +- Ftrack: Remove unused clean component plugin [\#2269](https://github.com/pypeclub/OpenPype/pull/2269) +- Royal Render: Support for rr channels in separate dirs [\#2268](https://github.com/pypeclub/OpenPype/pull/2268) +- Houdini: Add experimental tools action [\#2267](https://github.com/pypeclub/OpenPype/pull/2267) +- Nuke: extract baked review videos presets [\#2248](https://github.com/pypeclub/OpenPype/pull/2248) +- TVPaint: Workers rendering [\#2209](https://github.com/pypeclub/OpenPype/pull/2209) +- OpenPypeV3: Add key parent asset to path templating construction [\#2186](https://github.com/pypeclub/OpenPype/pull/2186) + +**🐛 Bug fixes** + +- TVPaint: Create render layer dialog is in front [\#2471](https://github.com/pypeclub/OpenPype/pull/2471) +- Short Pyblish plugin path [\#2428](https://github.com/pypeclub/OpenPype/pull/2428) +- PS: Introduced settings for invalid characters to use in ValidateNaming plugin [\#2417](https://github.com/pypeclub/OpenPype/pull/2417) +- Settings UI: Breadcrumbs path does not create new entities [\#2416](https://github.com/pypeclub/OpenPype/pull/2416) +- AfterEffects: Variant 2022 is in defaults but missing in schemas [\#2412](https://github.com/pypeclub/OpenPype/pull/2412) +- Nuke: baking representations was not additive [\#2406](https://github.com/pypeclub/OpenPype/pull/2406) +- General: Fix access to environments from default settings [\#2403](https://github.com/pypeclub/OpenPype/pull/2403) +- Fix: Placeholder Input color set fix [\#2399](https://github.com/pypeclub/OpenPype/pull/2399) +- Settings: Fix state change of wrapper label [\#2396](https://github.com/pypeclub/OpenPype/pull/2396) +- Flame: fix ftrack publisher [\#2381](https://github.com/pypeclub/OpenPype/pull/2381) +- hiero: solve custom ocio path [\#2379](https://github.com/pypeclub/OpenPype/pull/2379) +- hiero: fix workio and flatten [\#2378](https://github.com/pypeclub/OpenPype/pull/2378) +- Nuke: fixing 
menu re-drawing during context change [\#2374](https://github.com/pypeclub/OpenPype/pull/2374) +- Webpublisher: Fix assignment of families of TVpaint instances [\#2373](https://github.com/pypeclub/OpenPype/pull/2373) +- Nuke: fixing node name based on switched asset name [\#2369](https://github.com/pypeclub/OpenPype/pull/2369) +- JobQueue: Fix loading of settings [\#2362](https://github.com/pypeclub/OpenPype/pull/2362) +- Tools: Placeholder color [\#2359](https://github.com/pypeclub/OpenPype/pull/2359) +- Launcher: Minimize button on MacOs [\#2355](https://github.com/pypeclub/OpenPype/pull/2355) +- StandalonePublisher: Fix import of constant [\#2354](https://github.com/pypeclub/OpenPype/pull/2354) +- Houdini: Fix HDA creation [\#2350](https://github.com/pypeclub/OpenPype/pull/2350) +- Adobe products show issue [\#2347](https://github.com/pypeclub/OpenPype/pull/2347) +- Maya Look Assigner: Fix Python 3 compatibility [\#2343](https://github.com/pypeclub/OpenPype/pull/2343) +- Remove wrongly used host for hook [\#2342](https://github.com/pypeclub/OpenPype/pull/2342) +- Tools: Use Qt context on tools show [\#2340](https://github.com/pypeclub/OpenPype/pull/2340) +- Flame: Fix default argument value in custom dictionary [\#2339](https://github.com/pypeclub/OpenPype/pull/2339) +- Timers Manager: Disable auto stop timer on linux platform [\#2334](https://github.com/pypeclub/OpenPype/pull/2334) +- nuke: bake preset single input exception [\#2331](https://github.com/pypeclub/OpenPype/pull/2331) +- Hiero: fixing multiple templates at a hierarchy parent [\#2330](https://github.com/pypeclub/OpenPype/pull/2330) +- Fix - provider icons are pulled from a folder [\#2326](https://github.com/pypeclub/OpenPype/pull/2326) +- InputLinks: Typo in "inputLinks" key [\#2314](https://github.com/pypeclub/OpenPype/pull/2314) +- Deadline timeout and logging [\#2312](https://github.com/pypeclub/OpenPype/pull/2312) +- nuke: do not multiply representation on class method [\#2311](https://github.com/pypeclub/OpenPype/pull/2311) +- Workfiles tool: Fix task formatting [\#2306](https://github.com/pypeclub/OpenPype/pull/2306) +- Delivery: Fix delivery paths created on windows [\#2302](https://github.com/pypeclub/OpenPype/pull/2302) +- Maya: Deadline - fix limit groups [\#2295](https://github.com/pypeclub/OpenPype/pull/2295) +- Royal Render: Fix plugin order and OpenPype auto-detection [\#2291](https://github.com/pypeclub/OpenPype/pull/2291) +- New Publisher: Fix mapping of indexes [\#2285](https://github.com/pypeclub/OpenPype/pull/2285) +- Alternate site for site sync doesnt work for sequences [\#2284](https://github.com/pypeclub/OpenPype/pull/2284) +- FFmpeg: Execute ffprobe using list of arguments instead of string command [\#2281](https://github.com/pypeclub/OpenPype/pull/2281) +- Nuke: Anatomy fill data use task as dictionary [\#2278](https://github.com/pypeclub/OpenPype/pull/2278) +- Bug: fix variable name \_asset\_id in workfiles application [\#2274](https://github.com/pypeclub/OpenPype/pull/2274) +- Version handling fixes [\#2272](https://github.com/pypeclub/OpenPype/pull/2272) + +**Merged pull requests:** + +- Maya: Replaced PATH usage with vendored oiio path for maketx utility [\#2405](https://github.com/pypeclub/OpenPype/pull/2405) +- \[Fix\]\[MAYA\] Handle message type attribute within CollectLook [\#2394](https://github.com/pypeclub/OpenPype/pull/2394) +- Add validator to check correct version of extension for PS and AE [\#2387](https://github.com/pypeclub/OpenPype/pull/2387) +- Maya: configurable model top level 
validation [\#2321](https://github.com/pypeclub/OpenPype/pull/2321) +- Create test publish class for After Effects [\#2270](https://github.com/pypeclub/OpenPype/pull/2270) + +## [3.6.4](https://github.com/pypeclub/OpenPype/tree/3.6.4) (2021-11-23) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.3...3.6.4) + +**🐛 Bug fixes** + +- Nuke: inventory update removes all loaded read nodes [\#2294](https://github.com/pypeclub/OpenPype/pull/2294) + +## [3.6.3](https://github.com/pypeclub/OpenPype/tree/3.6.3) (2021-11-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.2...3.6.3) + +**🐛 Bug fixes** + +- Deadline: Fix publish targets [\#2280](https://github.com/pypeclub/OpenPype/pull/2280) + +## [3.6.2](https://github.com/pypeclub/OpenPype/tree/3.6.2) (2021-11-18) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.1...3.6.2) + +**🚀 Enhancements** + +- Tools: Assets widget [\#2265](https://github.com/pypeclub/OpenPype/pull/2265) +- SceneInventory: Choose loader in asset switcher [\#2262](https://github.com/pypeclub/OpenPype/pull/2262) +- Style: New fonts in OpenPype style [\#2256](https://github.com/pypeclub/OpenPype/pull/2256) +- Tools: SceneInventory in OpenPype [\#2255](https://github.com/pypeclub/OpenPype/pull/2255) +- Tools: Tasks widget [\#2251](https://github.com/pypeclub/OpenPype/pull/2251) +- Tools: Creator in OpenPype [\#2244](https://github.com/pypeclub/OpenPype/pull/2244) +- Added endpoint for configured extensions [\#2221](https://github.com/pypeclub/OpenPype/pull/2221) + +**🐛 Bug fixes** + +- Tools: Parenting of tools in Nuke and Hiero [\#2266](https://github.com/pypeclub/OpenPype/pull/2266) +- limiting validator to specific editorial hosts [\#2264](https://github.com/pypeclub/OpenPype/pull/2264) +- Tools: Select Context dialog attribute fix [\#2261](https://github.com/pypeclub/OpenPype/pull/2261) +- Maya: Render publishing fails on linux [\#2260](https://github.com/pypeclub/OpenPype/pull/2260) +- LookAssigner: Fix tool reopen [\#2259](https://github.com/pypeclub/OpenPype/pull/2259) +- Standalone: editorial not publishing thumbnails on all subsets [\#2258](https://github.com/pypeclub/OpenPype/pull/2258) +- Burnins: Support mxf metadata [\#2247](https://github.com/pypeclub/OpenPype/pull/2247) +- Maya: Support for configurable AOV separator characters [\#2197](https://github.com/pypeclub/OpenPype/pull/2197) +- Maya: texture colorspace modes in looks [\#2195](https://github.com/pypeclub/OpenPype/pull/2195) + +## [3.6.1](https://github.com/pypeclub/OpenPype/tree/3.6.1) (2021-11-16) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.0...3.6.1) + +**🐛 Bug fixes** + +- Loader doesn't allow changing of version before loading [\#2254](https://github.com/pypeclub/OpenPype/pull/2254) + +## [3.6.0](https://github.com/pypeclub/OpenPype/tree/3.6.0) (2021-11-15) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.5.0...3.6.0) + +### 📖 Documentation + +- Add alternative sites for Site Sync [\#2206](https://github.com/pypeclub/OpenPype/pull/2206) +- Add command line way of running site sync server [\#2188](https://github.com/pypeclub/OpenPype/pull/2188) + +**🆕 New features** + +- Add validate active site button to sync queue on a project [\#2176](https://github.com/pypeclub/OpenPype/pull/2176) +- Maya : Colorspace configuration [\#2170](https://github.com/pypeclub/OpenPype/pull/2170) +- Blender: Added support for audio [\#2168](https://github.com/pypeclub/OpenPype/pull/2168) +- Flame: a host basic integration 
[\#2165](https://github.com/pypeclub/OpenPype/pull/2165) +- Houdini: simple HDA workflow [\#2072](https://github.com/pypeclub/OpenPype/pull/2072) +- Basic Royal Render Integration ✨ [\#2061](https://github.com/pypeclub/OpenPype/pull/2061) +- Camera handling between Blender and Unreal [\#1988](https://github.com/pypeclub/OpenPype/pull/1988) +- switch PyQt5 for PySide2 [\#1744](https://github.com/pypeclub/OpenPype/pull/1744) + +**🚀 Enhancements** + +- Tools: Subset manager in OpenPype [\#2243](https://github.com/pypeclub/OpenPype/pull/2243) +- General: Skip module directories without init file [\#2239](https://github.com/pypeclub/OpenPype/pull/2239) +- General: Static interfaces [\#2238](https://github.com/pypeclub/OpenPype/pull/2238) +- Style: Fix transparent image in style [\#2235](https://github.com/pypeclub/OpenPype/pull/2235) +- Add a "following workfile versioning" option on publish [\#2225](https://github.com/pypeclub/OpenPype/pull/2225) +- Modules: Module can add cli commands [\#2224](https://github.com/pypeclub/OpenPype/pull/2224) +- Webpublisher: Separate webpublisher logic [\#2222](https://github.com/pypeclub/OpenPype/pull/2222) +- Add both side availability on Site Sync sites to Loader [\#2220](https://github.com/pypeclub/OpenPype/pull/2220) +- Tools: Center loader and library loader on show [\#2219](https://github.com/pypeclub/OpenPype/pull/2219) +- Maya : Validate shape zero [\#2212](https://github.com/pypeclub/OpenPype/pull/2212) +- Maya : validate unique names [\#2211](https://github.com/pypeclub/OpenPype/pull/2211) +- Tools: OpenPype stylesheet in workfiles tool [\#2208](https://github.com/pypeclub/OpenPype/pull/2208) +- Ftrack: Replace Queue with deque in event handlers logic [\#2204](https://github.com/pypeclub/OpenPype/pull/2204) +- Tools: New select context dialog [\#2200](https://github.com/pypeclub/OpenPype/pull/2200) +- Maya : Validate mesh ngons [\#2199](https://github.com/pypeclub/OpenPype/pull/2199) +- Dirmap in Nuke [\#2198](https://github.com/pypeclub/OpenPype/pull/2198) +- Delivery: Check 'frame' key in template for sequence delivery [\#2196](https://github.com/pypeclub/OpenPype/pull/2196) +- Settings: Site sync project settings improvement [\#2193](https://github.com/pypeclub/OpenPype/pull/2193) +- Usage of tools code [\#2185](https://github.com/pypeclub/OpenPype/pull/2185) +- Settings: Dictionary based on project roots [\#2184](https://github.com/pypeclub/OpenPype/pull/2184) +- Subset name: Be able to pass asset document to get subset name [\#2179](https://github.com/pypeclub/OpenPype/pull/2179) +- Tools: Experimental tools [\#2167](https://github.com/pypeclub/OpenPype/pull/2167) +- Loader: Refactor and use OpenPype stylesheets [\#2166](https://github.com/pypeclub/OpenPype/pull/2166) +- Add loader for linked smart objects in photoshop [\#2149](https://github.com/pypeclub/OpenPype/pull/2149) +- Burnins: DNxHD profiles handling [\#2142](https://github.com/pypeclub/OpenPype/pull/2142) +- Tools: Single access point for host tools [\#2139](https://github.com/pypeclub/OpenPype/pull/2139) + +**🐛 Bug fixes** + +- Ftrack: Sync project ftrack id cache issue [\#2250](https://github.com/pypeclub/OpenPype/pull/2250) +- Ftrack: Session creation and Prepare project [\#2245](https://github.com/pypeclub/OpenPype/pull/2245) +- Added queue for studio processing in PS [\#2237](https://github.com/pypeclub/OpenPype/pull/2237) +- Python 2: Unicode to string conversion [\#2236](https://github.com/pypeclub/OpenPype/pull/2236) +- Fix - enum for color coding in PS 
[\#2234](https://github.com/pypeclub/OpenPype/pull/2234) +- Pyblish Tool: Fix targets handling [\#2232](https://github.com/pypeclub/OpenPype/pull/2232) +- Ftrack: Base event fix of 'get\_project\_from\_entity' method [\#2214](https://github.com/pypeclub/OpenPype/pull/2214) +- Maya : multiple subsets review broken [\#2210](https://github.com/pypeclub/OpenPype/pull/2210) +- Fix - different command used for Linux and Mac OS [\#2207](https://github.com/pypeclub/OpenPype/pull/2207) +- Tools: Workfiles tool don't use avalon widgets [\#2205](https://github.com/pypeclub/OpenPype/pull/2205) +- Ftrack: Fill missing ftrack id on mongo project [\#2203](https://github.com/pypeclub/OpenPype/pull/2203) +- Project Manager: Fix copying of tasks [\#2191](https://github.com/pypeclub/OpenPype/pull/2191) +- StandalonePublisher: Source validator don't expect representations [\#2190](https://github.com/pypeclub/OpenPype/pull/2190) +- Blender: Fix trying to pack an image when the shader node has no texture [\#2183](https://github.com/pypeclub/OpenPype/pull/2183) +- Maya: review viewport settings [\#2177](https://github.com/pypeclub/OpenPype/pull/2177) +- MacOS: Launching of applications may cause Permissions error [\#2175](https://github.com/pypeclub/OpenPype/pull/2175) +- Maya: Aspect ratio [\#2174](https://github.com/pypeclub/OpenPype/pull/2174) +- Blender: Fix 'Deselect All' with object not in 'Object Mode' [\#2163](https://github.com/pypeclub/OpenPype/pull/2163) +- Tools: Stylesheets are applied after tool show [\#2161](https://github.com/pypeclub/OpenPype/pull/2161) +- Maya: Collect render - fix UNC path support 🐛 [\#2158](https://github.com/pypeclub/OpenPype/pull/2158) +- Maya: Fix hotbox broken by scriptsmenu [\#2151](https://github.com/pypeclub/OpenPype/pull/2151) +- Ftrack: Ignore save warnings exception in Prepare project action [\#2150](https://github.com/pypeclub/OpenPype/pull/2150) +- Loader thumbnails with smooth edges [\#2147](https://github.com/pypeclub/OpenPype/pull/2147) +- Added validator for source files for Standalone Publisher [\#2138](https://github.com/pypeclub/OpenPype/pull/2138) + +**Merged pull requests:** + +- Bump pillow from 8.2.0 to 8.3.2 [\#2162](https://github.com/pypeclub/OpenPype/pull/2162) +- Bump axios from 0.21.1 to 0.21.4 in /website [\#2059](https://github.com/pypeclub/OpenPype/pull/2059) + +## [3.5.0](https://github.com/pypeclub/OpenPype/tree/3.5.0) (2021-10-17) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.1...3.5.0) + +**Deprecated:** + +- Maya: Change mayaAscii family to mayaScene [\#2106](https://github.com/pypeclub/OpenPype/pull/2106) + +**🆕 New features** + +- Added project and task into context change message in Maya [\#2131](https://github.com/pypeclub/OpenPype/pull/2131) +- Add ExtractBurnin to photoshop review [\#2124](https://github.com/pypeclub/OpenPype/pull/2124) +- PYPE-1218 - changed namespace to contain subset name in Maya [\#2114](https://github.com/pypeclub/OpenPype/pull/2114) +- Added running configurable disk mapping command before start of OP [\#2091](https://github.com/pypeclub/OpenPype/pull/2091) +- SFTP provider [\#2073](https://github.com/pypeclub/OpenPype/pull/2073) +- Maya: Validate setdress top group [\#2068](https://github.com/pypeclub/OpenPype/pull/2068) +- Maya: Enable publishing render attrib sets \(e.g. 
V-Ray Displacement\) with model [\#1955](https://github.com/pypeclub/OpenPype/pull/1955) + +**🚀 Enhancements** + +- Maya: make rig validators configurable in settings [\#2137](https://github.com/pypeclub/OpenPype/pull/2137) +- Settings: Updated readme for entity types in settings [\#2132](https://github.com/pypeclub/OpenPype/pull/2132) +- Nuke: unified clip loader [\#2128](https://github.com/pypeclub/OpenPype/pull/2128) +- Settings UI: Project model refreshing and sorting [\#2104](https://github.com/pypeclub/OpenPype/pull/2104) +- Create Read From Rendered - Disable Relative paths by default [\#2093](https://github.com/pypeclub/OpenPype/pull/2093) +- Added choosing different dirmap mapping if workfile synched locally [\#2088](https://github.com/pypeclub/OpenPype/pull/2088) +- General: Remove IdleManager module [\#2084](https://github.com/pypeclub/OpenPype/pull/2084) +- Tray UI: Message box about missing settings defaults [\#2080](https://github.com/pypeclub/OpenPype/pull/2080) +- Tray UI: Show menu where first click happened [\#2079](https://github.com/pypeclub/OpenPype/pull/2079) +- Global: add global validators to settings [\#2078](https://github.com/pypeclub/OpenPype/pull/2078) +- Use CRF for burnin when available [\#2070](https://github.com/pypeclub/OpenPype/pull/2070) +- Project manager: Filter first item after selection of project [\#2069](https://github.com/pypeclub/OpenPype/pull/2069) +- Nuke: Adding `still` image family workflow [\#2064](https://github.com/pypeclub/OpenPype/pull/2064) +- Maya: validate authorized loaded plugins [\#2062](https://github.com/pypeclub/OpenPype/pull/2062) +- Tools: add support for pyenv on windows [\#2051](https://github.com/pypeclub/OpenPype/pull/2051) +- SyncServer: Dropbox Provider [\#1979](https://github.com/pypeclub/OpenPype/pull/1979) +- Burnin: Get data from context with defined keys. [\#1897](https://github.com/pypeclub/OpenPype/pull/1897) +- Timers manager: Get task time [\#1896](https://github.com/pypeclub/OpenPype/pull/1896) +- TVPaint: Option to stop timer on application exit. [\#1887](https://github.com/pypeclub/OpenPype/pull/1887) + +**🐛 Bug fixes** + +- Maya: fix model publishing [\#2130](https://github.com/pypeclub/OpenPype/pull/2130) +- Fix - oiiotool wasn't recognized even if present [\#2129](https://github.com/pypeclub/OpenPype/pull/2129) +- General: Disk mapping group [\#2120](https://github.com/pypeclub/OpenPype/pull/2120) +- Hiero: publishing effect first time makes wrong resources path [\#2115](https://github.com/pypeclub/OpenPype/pull/2115) +- Add startup script for Houdini Core. 
[\#2110](https://github.com/pypeclub/OpenPype/pull/2110) +- TVPaint: Behavior name of loop also accepts repeat [\#2109](https://github.com/pypeclub/OpenPype/pull/2109) +- Ftrack: Project settings save custom attributes skip unknown attributes [\#2103](https://github.com/pypeclub/OpenPype/pull/2103) +- Blender: Fix NoneType error when animation\_data is missing for a rig [\#2101](https://github.com/pypeclub/OpenPype/pull/2101) +- Fix broken import in sftp provider [\#2100](https://github.com/pypeclub/OpenPype/pull/2100) +- Global: Fix docstring on publish plugin extract review [\#2097](https://github.com/pypeclub/OpenPype/pull/2097) +- Delivery Action Files Sequence fix [\#2096](https://github.com/pypeclub/OpenPype/pull/2096) +- General: Cloud mongo ca certificate issue [\#2095](https://github.com/pypeclub/OpenPype/pull/2095) +- TVPaint: Creator use context from workfile [\#2087](https://github.com/pypeclub/OpenPype/pull/2087) +- Blender: fix texture missing when publishing blend files [\#2085](https://github.com/pypeclub/OpenPype/pull/2085) +- General: Startup validations oiio tool path fix on linux [\#2083](https://github.com/pypeclub/OpenPype/pull/2083) +- Deadline: Collect deadline server does not check existence of deadline key [\#2082](https://github.com/pypeclub/OpenPype/pull/2082) +- Blender: fixed Curves with modifiers in Rigs [\#2081](https://github.com/pypeclub/OpenPype/pull/2081) +- Nuke UI scaling [\#2077](https://github.com/pypeclub/OpenPype/pull/2077) +- Maya: Fix multi-camera renders [\#2065](https://github.com/pypeclub/OpenPype/pull/2065) +- Fix Sync Queue when project disabled [\#2063](https://github.com/pypeclub/OpenPype/pull/2063) + +**Merged pull requests:** + +- Bump pywin32 from 300 to 301 [\#2086](https://github.com/pypeclub/OpenPype/pull/2086) + +## [3.4.1](https://github.com/pypeclub/OpenPype/tree/3.4.1) (2021-09-23) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.0...3.4.1) + +**🆕 New features** + +- Settings: Flag project as deactivated and hide from tools' view [\#2008](https://github.com/pypeclub/OpenPype/pull/2008) + +**🚀 Enhancements** + +- General: Startup validations [\#2054](https://github.com/pypeclub/OpenPype/pull/2054) +- Nuke: proxy mode validator [\#2052](https://github.com/pypeclub/OpenPype/pull/2052) +- Ftrack: Removed ftrack interface [\#2049](https://github.com/pypeclub/OpenPype/pull/2049) +- Settings UI: Deferred set value on entity [\#2044](https://github.com/pypeclub/OpenPype/pull/2044) +- Loader: Families filtering [\#2043](https://github.com/pypeclub/OpenPype/pull/2043) +- Settings UI: Project view enhancements [\#2042](https://github.com/pypeclub/OpenPype/pull/2042) +- Settings for Nuke IncrementScriptVersion [\#2039](https://github.com/pypeclub/OpenPype/pull/2039) +- Loader & Library loader: Use tools from OpenPype [\#2038](https://github.com/pypeclub/OpenPype/pull/2038) +- Adding predefined project folders creation in PM [\#2030](https://github.com/pypeclub/OpenPype/pull/2030) +- WebserverModule: Removed interface of webserver module [\#2028](https://github.com/pypeclub/OpenPype/pull/2028) +- TimersManager: Removed interface of timers manager [\#2024](https://github.com/pypeclub/OpenPype/pull/2024) +- Feature Maya import asset from scene inventory [\#2018](https://github.com/pypeclub/OpenPype/pull/2018) + +**🐛 Bug fixes** + +- Timers manager: Typo fix [\#2058](https://github.com/pypeclub/OpenPype/pull/2058) +- Hiero: Editorial fixes [\#2057](https://github.com/pypeclub/OpenPype/pull/2057) +- Differentiate jpg sequences 
from thumbnail [\#2056](https://github.com/pypeclub/OpenPype/pull/2056) +- FFmpeg: Split command to list does not work [\#2046](https://github.com/pypeclub/OpenPype/pull/2046) +- Removed shell flag in subprocess call [\#2045](https://github.com/pypeclub/OpenPype/pull/2045) + +**Merged pull requests:** + +- Bump prismjs from 1.24.0 to 1.25.0 in /website [\#2050](https://github.com/pypeclub/OpenPype/pull/2050) + +## [3.4.0](https://github.com/pypeclub/OpenPype/tree/3.4.0) (2021-09-17) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.1...3.4.0) + +### 📖 Documentation + +- Documentation: Ftrack launch argsuments update [\#2014](https://github.com/pypeclub/OpenPype/pull/2014) +- Nuke Quick Start / Tutorial [\#1952](https://github.com/pypeclub/OpenPype/pull/1952) +- Houdini: add Camera, Point Cache, Composite, Redshift ROP and VDB Cache support [\#1821](https://github.com/pypeclub/OpenPype/pull/1821) + +**🆕 New features** + +- Nuke: Compatibility with Nuke 13 [\#2003](https://github.com/pypeclub/OpenPype/pull/2003) +- Maya: Add Xgen family support [\#1947](https://github.com/pypeclub/OpenPype/pull/1947) +- Feature/webpublisher backend [\#1876](https://github.com/pypeclub/OpenPype/pull/1876) +- Blender: Improved assets handling [\#1615](https://github.com/pypeclub/OpenPype/pull/1615) + +**🚀 Enhancements** + +- Added possibility to configure of synchronization of workfile version… [\#2041](https://github.com/pypeclub/OpenPype/pull/2041) +- General: Task types in profiles [\#2036](https://github.com/pypeclub/OpenPype/pull/2036) +- Console interpreter: Handle invalid sizes on initialization [\#2022](https://github.com/pypeclub/OpenPype/pull/2022) +- Ftrack: Show OpenPype versions in event server status [\#2019](https://github.com/pypeclub/OpenPype/pull/2019) +- General: Staging icon [\#2017](https://github.com/pypeclub/OpenPype/pull/2017) +- Ftrack: Sync to avalon actions have jobs [\#2015](https://github.com/pypeclub/OpenPype/pull/2015) +- Modules: Connect method is not required [\#2009](https://github.com/pypeclub/OpenPype/pull/2009) +- Settings UI: Number with configurable steps [\#2001](https://github.com/pypeclub/OpenPype/pull/2001) +- Moving project folder structure creation out of ftrack module \#1989 [\#1996](https://github.com/pypeclub/OpenPype/pull/1996) +- Configurable items for providers without Settings [\#1987](https://github.com/pypeclub/OpenPype/pull/1987) +- Global: Example addons [\#1986](https://github.com/pypeclub/OpenPype/pull/1986) +- Standalone Publisher: Extract harmony zip handle workfile template [\#1982](https://github.com/pypeclub/OpenPype/pull/1982) +- Settings UI: Number sliders [\#1978](https://github.com/pypeclub/OpenPype/pull/1978) +- Workfiles: Support more workfile templates [\#1966](https://github.com/pypeclub/OpenPype/pull/1966) +- Launcher: Fix crashes on action click [\#1964](https://github.com/pypeclub/OpenPype/pull/1964) +- Settings: Minor fixes in UI and missing default values [\#1963](https://github.com/pypeclub/OpenPype/pull/1963) +- Blender: Toggle system console works on windows [\#1962](https://github.com/pypeclub/OpenPype/pull/1962) +- Global: Settings defined by Addons/Modules [\#1959](https://github.com/pypeclub/OpenPype/pull/1959) +- CI: change release numbering triggers [\#1954](https://github.com/pypeclub/OpenPype/pull/1954) +- Global: Avalon Host name collector [\#1949](https://github.com/pypeclub/OpenPype/pull/1949) +- Global: Define hosts in CollectSceneVersion [\#1948](https://github.com/pypeclub/OpenPype/pull/1948) +- Add 
face sets to exported alembics [\#1942](https://github.com/pypeclub/OpenPype/pull/1942) +- OpenPype: Add version validation and `--headless` mode and update progress 🔄 [\#1939](https://github.com/pypeclub/OpenPype/pull/1939) +- \#1894 - adds host to template\_name\_profiles for filtering [\#1915](https://github.com/pypeclub/OpenPype/pull/1915) +- Environments: Tool environments in alphabetical order [\#1910](https://github.com/pypeclub/OpenPype/pull/1910) +- Disregard publishing time. [\#1888](https://github.com/pypeclub/OpenPype/pull/1888) +- Dynamic modules [\#1872](https://github.com/pypeclub/OpenPype/pull/1872) + +**🐛 Bug fixes** + +- Workfiles tool: Task selection [\#2040](https://github.com/pypeclub/OpenPype/pull/2040) +- Ftrack: Delete old versions missing settings key [\#2037](https://github.com/pypeclub/OpenPype/pull/2037) +- Nuke: typo on a button [\#2034](https://github.com/pypeclub/OpenPype/pull/2034) +- Hiero: Fix "none" named tags [\#2033](https://github.com/pypeclub/OpenPype/pull/2033) +- FFmpeg: Subprocess arguments as list [\#2032](https://github.com/pypeclub/OpenPype/pull/2032) +- General: Fix Python 2 breaking line [\#2016](https://github.com/pypeclub/OpenPype/pull/2016) +- Bugfix/webpublisher task type [\#2006](https://github.com/pypeclub/OpenPype/pull/2006) +- Nuke thumbnails generated from middle of the sequence [\#1992](https://github.com/pypeclub/OpenPype/pull/1992) +- Nuke: last version from path gets correct version [\#1990](https://github.com/pypeclub/OpenPype/pull/1990) +- nuke, resolve, hiero: precollector order lest then 0.5 [\#1984](https://github.com/pypeclub/OpenPype/pull/1984) +- Last workfile with multiple work templates [\#1981](https://github.com/pypeclub/OpenPype/pull/1981) +- Collectors order [\#1977](https://github.com/pypeclub/OpenPype/pull/1977) +- Stop timer was within validator order range. 
[\#1975](https://github.com/pypeclub/OpenPype/pull/1975) +- Ftrack: arrow submodule has https url source [\#1974](https://github.com/pypeclub/OpenPype/pull/1974) +- Ftrack: Fix hosts attribute in collect ftrack username [\#1972](https://github.com/pypeclub/OpenPype/pull/1972) +- Deadline: Houdini plugins in different hierarchy [\#1970](https://github.com/pypeclub/OpenPype/pull/1970) +- Removed deprecated submodules [\#1967](https://github.com/pypeclub/OpenPype/pull/1967) +- Global: ExtractJpeg can handle filepaths with spaces [\#1961](https://github.com/pypeclub/OpenPype/pull/1961) +- Resolve path when adding to zip [\#1960](https://github.com/pypeclub/OpenPype/pull/1960) + +**Merged pull requests:** + +- Bump url-parse from 1.5.1 to 1.5.3 in /website [\#1958](https://github.com/pypeclub/OpenPype/pull/1958) +- Bump path-parse from 1.0.6 to 1.0.7 in /website [\#1933](https://github.com/pypeclub/OpenPype/pull/1933) + +## [3.3.1](https://github.com/pypeclub/OpenPype/tree/3.3.1) (2021-08-20) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.0...3.3.1) + +**🐛 Bug fixes** + +- TVPaint: Fixed rendered frame indexes [\#1946](https://github.com/pypeclub/OpenPype/pull/1946) +- Maya: Menu actions fix [\#1945](https://github.com/pypeclub/OpenPype/pull/1945) +- standalone: editorial shared object problem [\#1941](https://github.com/pypeclub/OpenPype/pull/1941) +- Bugfix nuke deadline app name [\#1928](https://github.com/pypeclub/OpenPype/pull/1928) + +## [3.3.0](https://github.com/pypeclub/OpenPype/tree/3.3.0) (2021-08-17) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.2.0...3.3.0) + +### 📖 Documentation + +- Standalone Publish of textures family [\#1834](https://github.com/pypeclub/OpenPype/pull/1834) + +**🆕 New features** + +- Settings UI: Breadcrumbs in settings [\#1932](https://github.com/pypeclub/OpenPype/pull/1932) +- Maya: Scene patching 🩹on submission to Deadline [\#1923](https://github.com/pypeclub/OpenPype/pull/1923) +- Feature AE local render [\#1901](https://github.com/pypeclub/OpenPype/pull/1901) + +**🚀 Enhancements** + +- Python console interpreter [\#1940](https://github.com/pypeclub/OpenPype/pull/1940) +- Global: Updated logos and Default settings [\#1927](https://github.com/pypeclub/OpenPype/pull/1927) +- Check for missing ✨ Python when using `pyenv` [\#1925](https://github.com/pypeclub/OpenPype/pull/1925) +- Settings: Default values for enum [\#1920](https://github.com/pypeclub/OpenPype/pull/1920) +- Settings UI: Modifiable dict view enhance [\#1919](https://github.com/pypeclub/OpenPype/pull/1919) +- submodules: avalon-core update [\#1911](https://github.com/pypeclub/OpenPype/pull/1911) +- Ftrack: Where I run action enhancement [\#1900](https://github.com/pypeclub/OpenPype/pull/1900) +- Ftrack: Private project server actions [\#1899](https://github.com/pypeclub/OpenPype/pull/1899) +- Support nested studio plugins paths. [\#1898](https://github.com/pypeclub/OpenPype/pull/1898) +- Settings: global validators with options [\#1892](https://github.com/pypeclub/OpenPype/pull/1892) +- Settings: Conditional dict enum positioning [\#1891](https://github.com/pypeclub/OpenPype/pull/1891) +- Expose stop timer through rest api. [\#1886](https://github.com/pypeclub/OpenPype/pull/1886) +- TVPaint: Increment workfile [\#1885](https://github.com/pypeclub/OpenPype/pull/1885) +- Allow Multiple Notes to run on tasks. 
[\#1882](https://github.com/pypeclub/OpenPype/pull/1882) +- Prepare for pyside2 [\#1869](https://github.com/pypeclub/OpenPype/pull/1869) +- Filter hosts in settings host-enum [\#1868](https://github.com/pypeclub/OpenPype/pull/1868) +- Local actions with process identifier [\#1867](https://github.com/pypeclub/OpenPype/pull/1867) +- Workfile tool start at host launch support [\#1865](https://github.com/pypeclub/OpenPype/pull/1865) +- Anatomy schema validation [\#1864](https://github.com/pypeclub/OpenPype/pull/1864) +- Ftrack prepare project structure [\#1861](https://github.com/pypeclub/OpenPype/pull/1861) +- Maya: support for configurable `dirmap` 🗺️ [\#1859](https://github.com/pypeclub/OpenPype/pull/1859) +- Independent general environments [\#1853](https://github.com/pypeclub/OpenPype/pull/1853) +- TVPaint Start Frame [\#1844](https://github.com/pypeclub/OpenPype/pull/1844) +- Ftrack push attributes action adds traceback to job [\#1843](https://github.com/pypeclub/OpenPype/pull/1843) +- Prepare project action enhance [\#1838](https://github.com/pypeclub/OpenPype/pull/1838) +- nuke: settings create missing default subsets [\#1829](https://github.com/pypeclub/OpenPype/pull/1829) +- Update poetry lock [\#1823](https://github.com/pypeclub/OpenPype/pull/1823) +- Settings: settings for plugins [\#1819](https://github.com/pypeclub/OpenPype/pull/1819) +- Settings list can use template or schema as object type [\#1815](https://github.com/pypeclub/OpenPype/pull/1815) +- Maya: Deadline custom settings [\#1797](https://github.com/pypeclub/OpenPype/pull/1797) +- Maya: Shader name validation [\#1762](https://github.com/pypeclub/OpenPype/pull/1762) + +**🐛 Bug fixes** + +- Fix - ftrack family was added incorrectly in some cases [\#1935](https://github.com/pypeclub/OpenPype/pull/1935) +- Fix - Deadline publish on Linux started Tray instead of headless publishing [\#1930](https://github.com/pypeclub/OpenPype/pull/1930) +- Maya: Validate Model Name - repair accident deletion in settings defaults [\#1929](https://github.com/pypeclub/OpenPype/pull/1929) +- Nuke: submit to farm failed due `ftrack` family remove [\#1926](https://github.com/pypeclub/OpenPype/pull/1926) +- Fix - validate takes repre\["files"\] as list all the time [\#1922](https://github.com/pypeclub/OpenPype/pull/1922) +- standalone: validator asset parents [\#1917](https://github.com/pypeclub/OpenPype/pull/1917) +- Nuke: update video file crassing [\#1916](https://github.com/pypeclub/OpenPype/pull/1916) +- Fix - texture validators for workfiles triggers only for textures workfiles [\#1914](https://github.com/pypeclub/OpenPype/pull/1914) +- Settings UI: List order works as expected [\#1906](https://github.com/pypeclub/OpenPype/pull/1906) +- Hiero: loaded clip was not set colorspace from version data [\#1904](https://github.com/pypeclub/OpenPype/pull/1904) +- Pyblish UI: Fix collecting stage processing [\#1903](https://github.com/pypeclub/OpenPype/pull/1903) +- Burnins: Use input's bitrate in h624 [\#1902](https://github.com/pypeclub/OpenPype/pull/1902) +- Bug: fixed python detection [\#1893](https://github.com/pypeclub/OpenPype/pull/1893) +- global: integrate name missing default template [\#1890](https://github.com/pypeclub/OpenPype/pull/1890) +- publisher: editorial plugins fixes [\#1889](https://github.com/pypeclub/OpenPype/pull/1889) +- Normalize path returned from Workfiles. 
[\#1880](https://github.com/pypeclub/OpenPype/pull/1880) +- Workfiles tool event arguments fix [\#1862](https://github.com/pypeclub/OpenPype/pull/1862) +- imageio: fix grouping [\#1856](https://github.com/pypeclub/OpenPype/pull/1856) +- Maya: don't add reference members as connections to the container set 📦 [\#1855](https://github.com/pypeclub/OpenPype/pull/1855) +- publisher: missing version in subset prop [\#1849](https://github.com/pypeclub/OpenPype/pull/1849) +- Ftrack type error fix in sync to avalon event handler [\#1845](https://github.com/pypeclub/OpenPype/pull/1845) +- Nuke: updating effects subset fail [\#1841](https://github.com/pypeclub/OpenPype/pull/1841) +- nuke: write render node skipped with crop [\#1836](https://github.com/pypeclub/OpenPype/pull/1836) +- Project folder structure overrides [\#1813](https://github.com/pypeclub/OpenPype/pull/1813) +- Maya: fix yeti settings path in extractor [\#1809](https://github.com/pypeclub/OpenPype/pull/1809) +- Failsafe for cross project containers. [\#1806](https://github.com/pypeclub/OpenPype/pull/1806) +- Houdini colector formatting keys fix [\#1802](https://github.com/pypeclub/OpenPype/pull/1802) +- Settings error dialog on show [\#1798](https://github.com/pypeclub/OpenPype/pull/1798) +- Application launch stdout/stderr in GUI build [\#1684](https://github.com/pypeclub/OpenPype/pull/1684) +- Nuke: re-use instance nodes output path [\#1577](https://github.com/pypeclub/OpenPype/pull/1577) + +**Merged pull requests:** + +- Fix - make AE workfile publish to Ftrack configurable [\#1937](https://github.com/pypeclub/OpenPype/pull/1937) +- Add support for multiple Deadline ☠️➖ servers [\#1905](https://github.com/pypeclub/OpenPype/pull/1905) +- Maya: add support for `RedshiftNormalMap` node, fix `tx` linear space 🚀 [\#1863](https://github.com/pypeclub/OpenPype/pull/1863) +- Maya: expected files -\> render products ⚙️ overhaul [\#1812](https://github.com/pypeclub/OpenPype/pull/1812) +- PS, AE - send actual context when another webserver is running [\#1811](https://github.com/pypeclub/OpenPype/pull/1811) + +## [3.2.0](https://github.com/pypeclub/OpenPype/tree/3.2.0) (2021-07-13) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.4...3.2.0) + +### 📖 Documentation + +- Fix: staging and `--use-version` option [\#1786](https://github.com/pypeclub/OpenPype/pull/1786) +- Subset template and TVPaint subset template docs [\#1717](https://github.com/pypeclub/OpenPype/pull/1717) +- Overscan color extract review [\#1701](https://github.com/pypeclub/OpenPype/pull/1701) + +**🚀 Enhancements** + +- Nuke: ftrack family plugin settings preset [\#1805](https://github.com/pypeclub/OpenPype/pull/1805) +- Standalone publisher last project [\#1799](https://github.com/pypeclub/OpenPype/pull/1799) +- Ftrack Multiple notes as server action [\#1795](https://github.com/pypeclub/OpenPype/pull/1795) +- Settings conditional dict [\#1777](https://github.com/pypeclub/OpenPype/pull/1777) +- Settings application use python 2 only where needed [\#1776](https://github.com/pypeclub/OpenPype/pull/1776) +- Settings UI copy/paste [\#1769](https://github.com/pypeclub/OpenPype/pull/1769) +- Workfile tool widths [\#1766](https://github.com/pypeclub/OpenPype/pull/1766) +- Push hierarchical attributes care about task parent changes [\#1763](https://github.com/pypeclub/OpenPype/pull/1763) +- Application executables with environment variables [\#1757](https://github.com/pypeclub/OpenPype/pull/1757) +- Deadline: Nuke submission additional attributes 
[\#1756](https://github.com/pypeclub/OpenPype/pull/1756) +- Settings schema without prefill [\#1753](https://github.com/pypeclub/OpenPype/pull/1753) +- Settings Hosts enum [\#1739](https://github.com/pypeclub/OpenPype/pull/1739) +- Validate containers settings [\#1736](https://github.com/pypeclub/OpenPype/pull/1736) +- PS - added loader from sequence [\#1726](https://github.com/pypeclub/OpenPype/pull/1726) +- Autoupdate launcher [\#1725](https://github.com/pypeclub/OpenPype/pull/1725) +- Toggle Ftrack upload in StandalonePublisher [\#1708](https://github.com/pypeclub/OpenPype/pull/1708) +- Nuke: Prerender Frame Range by default [\#1699](https://github.com/pypeclub/OpenPype/pull/1699) +- Smoother edges of color triangle [\#1695](https://github.com/pypeclub/OpenPype/pull/1695) + +**🐛 Bug fixes** + +- nuke: fixing wrong name of family folder when `used existing frames` [\#1803](https://github.com/pypeclub/OpenPype/pull/1803) +- Collect ftrack family bugs [\#1801](https://github.com/pypeclub/OpenPype/pull/1801) +- Invitee email can be None which break the Ftrack commit. [\#1788](https://github.com/pypeclub/OpenPype/pull/1788) +- Otio unrelated error on import [\#1782](https://github.com/pypeclub/OpenPype/pull/1782) +- FFprobe streams order [\#1775](https://github.com/pypeclub/OpenPype/pull/1775) +- Fix - single file files are str only, cast it to list to count properly [\#1772](https://github.com/pypeclub/OpenPype/pull/1772) +- Environments in app executable for MacOS [\#1768](https://github.com/pypeclub/OpenPype/pull/1768) +- Project specific environments [\#1767](https://github.com/pypeclub/OpenPype/pull/1767) +- Settings UI with refresh button [\#1764](https://github.com/pypeclub/OpenPype/pull/1764) +- Standalone publisher thumbnail extractor fix [\#1761](https://github.com/pypeclub/OpenPype/pull/1761) +- Anatomy others templates don't cause crash [\#1758](https://github.com/pypeclub/OpenPype/pull/1758) +- Backend acre module commit update [\#1745](https://github.com/pypeclub/OpenPype/pull/1745) +- hiero: precollect instances failing when audio selected [\#1743](https://github.com/pypeclub/OpenPype/pull/1743) +- Hiero: creator instance error [\#1742](https://github.com/pypeclub/OpenPype/pull/1742) +- Nuke: fixing render creator for no selection format failing [\#1741](https://github.com/pypeclub/OpenPype/pull/1741) +- StandalonePublisher: failing collector for editorial [\#1738](https://github.com/pypeclub/OpenPype/pull/1738) +- Local settings UI crash on missing defaults [\#1737](https://github.com/pypeclub/OpenPype/pull/1737) +- TVPaint white background on thumbnail [\#1735](https://github.com/pypeclub/OpenPype/pull/1735) +- Ftrack missing custom attribute message [\#1734](https://github.com/pypeclub/OpenPype/pull/1734) +- Launcher project changes [\#1733](https://github.com/pypeclub/OpenPype/pull/1733) +- Ftrack sync status [\#1732](https://github.com/pypeclub/OpenPype/pull/1732) +- TVPaint use layer name for default variant [\#1724](https://github.com/pypeclub/OpenPype/pull/1724) +- Default subset template for TVPaint review and workfile families [\#1716](https://github.com/pypeclub/OpenPype/pull/1716) +- Maya: Extract review hotfix [\#1714](https://github.com/pypeclub/OpenPype/pull/1714) +- Settings: Imageio improving granularity [\#1711](https://github.com/pypeclub/OpenPype/pull/1711) +- Application without executables [\#1679](https://github.com/pypeclub/OpenPype/pull/1679) +- Unreal: launching on Linux [\#1672](https://github.com/pypeclub/OpenPype/pull/1672) + +**Merged pull 
requests:** + +- Bump prismjs from 1.23.0 to 1.24.0 in /website [\#1773](https://github.com/pypeclub/OpenPype/pull/1773) +- TVPaint ftrack family [\#1755](https://github.com/pypeclub/OpenPype/pull/1755) + +## [2.18.4](https://github.com/pypeclub/OpenPype/tree/2.18.4) (2021-06-24) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.3...2.18.4) + +## [2.18.3](https://github.com/pypeclub/OpenPype/tree/2.18.3) (2021-06-23) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.2...2.18.3) + +## [2.18.2](https://github.com/pypeclub/OpenPype/tree/2.18.2) (2021-06-16) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.1.0...2.18.2) + +## [3.1.0](https://github.com/pypeclub/OpenPype/tree/3.1.0) (2021-06-15) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.0.0...3.1.0) + +### 📖 Documentation + +- Feature Slack integration [\#1657](https://github.com/pypeclub/OpenPype/pull/1657) + +**🚀 Enhancements** + +- Log Viewer with OpenPype style [\#1703](https://github.com/pypeclub/OpenPype/pull/1703) +- Scrolling in OpenPype info widget [\#1702](https://github.com/pypeclub/OpenPype/pull/1702) +- OpenPype style in modules [\#1694](https://github.com/pypeclub/OpenPype/pull/1694) +- Sort applications and tools alphabetically in Settings UI [\#1689](https://github.com/pypeclub/OpenPype/pull/1689) +- \#683 - Validate Frame Range in Standalone Publisher [\#1683](https://github.com/pypeclub/OpenPype/pull/1683) +- Hiero: old container versions identify with red color [\#1682](https://github.com/pypeclub/OpenPype/pull/1682) +- Project Manger: Default name column width [\#1669](https://github.com/pypeclub/OpenPype/pull/1669) +- Remove outline in stylesheet [\#1667](https://github.com/pypeclub/OpenPype/pull/1667) +- TVPaint: Creator take layer name as default value for subset variant [\#1663](https://github.com/pypeclub/OpenPype/pull/1663) +- TVPaint custom subset template [\#1662](https://github.com/pypeclub/OpenPype/pull/1662) +- Editorial: conform assets validator [\#1659](https://github.com/pypeclub/OpenPype/pull/1659) +- Nuke - Publish simplification [\#1653](https://github.com/pypeclub/OpenPype/pull/1653) +- \#1333 - added tooltip hints to Pyblish buttons [\#1649](https://github.com/pypeclub/OpenPype/pull/1649) + +**🐛 Bug fixes** + +- Nuke: broken publishing rendered frames [\#1707](https://github.com/pypeclub/OpenPype/pull/1707) +- Standalone publisher Thumbnail export args [\#1705](https://github.com/pypeclub/OpenPype/pull/1705) +- Bad zip can break OpenPype start [\#1691](https://github.com/pypeclub/OpenPype/pull/1691) +- Hiero: published whole edit mov [\#1687](https://github.com/pypeclub/OpenPype/pull/1687) +- Ftrack subprocess handle of stdout/stderr [\#1675](https://github.com/pypeclub/OpenPype/pull/1675) +- Settings list race condifiton and mutable dict list conversion [\#1671](https://github.com/pypeclub/OpenPype/pull/1671) +- Mac launch arguments fix [\#1660](https://github.com/pypeclub/OpenPype/pull/1660) +- Fix missing dbm python module [\#1652](https://github.com/pypeclub/OpenPype/pull/1652) +- Transparent branches in view on Mac [\#1648](https://github.com/pypeclub/OpenPype/pull/1648) +- Add asset on task item [\#1646](https://github.com/pypeclub/OpenPype/pull/1646) +- Project manager save and queue [\#1645](https://github.com/pypeclub/OpenPype/pull/1645) +- New project anatomy values [\#1644](https://github.com/pypeclub/OpenPype/pull/1644) +- Farm publishing: check if published items do exist 
[\#1573](https://github.com/pypeclub/OpenPype/pull/1573) + +**Merged pull requests:** + +- Bump normalize-url from 4.5.0 to 4.5.1 in /website [\#1686](https://github.com/pypeclub/OpenPype/pull/1686) + ## [3.0.0](https://github.com/pypeclub/openpype/tree/3.0.0) @@ -11,12 +1940,12 @@ - Easy to add Application versions. - Per Project Environment and plugin management. - Robust profile system for creating reviewables and burnins, with filtering based on Application, Task and data family. -- Configurable publish plugins. +- Configurable publish plugins. - Options to make any validator or extractor, optional or disabled. - Color Management is now unified under anatomy settings. - Subset naming and grouping is fully configurable. - All project attributes can now be set directly in OpenPype settings. -- Studio Setting can be locked to prevent unwanted artist changes. +- Studio Setting can be locked to prevent unwanted artist changes. - You can now add per project and per task type templates for workfile initialization in most hosts. - Too many other individual configurable option to list in this changelog :) @@ -774,8 +2703,6 @@ - Standalone Publisher: getting fps from context instead of nonexistent entity [\#729](https://github.com/pypeclub/pype/pull/729) -# Changelog - ## [2.13.6](https://github.com/pypeclub/pype/tree/2.13.6) (2020-11-15) [Full Changelog](https://github.com/pypeclub/pype/compare/2.13.5...2.13.6) @@ -1565,10 +3492,4 @@ A large cleanup release. Most of the change are under the hood. - _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* - - \* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/README.md b/README.md index b6966adbc4..a3d3cf1dbb 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -[![All Contributors](https://img.shields.io/badge/all_contributors-26-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-27-orange.svg?style=flat-square)](#contributors-) OpenPype ==== @@ -41,7 +41,7 @@ It can be built and ran on all common platforms. We develop and test on the foll - **Linux** - **Ubuntu** 20.04 LTS - **Centos** 7 -- **Mac OSX** +- **Mac OSX** - **10.15** Catalina - **11.1** Big Sur (using Rosetta2) @@ -287,6 +287,14 @@ To run tests, execute `.\tools\run_tests(.ps1|.sh)`. **Note that it needs existing virtual environment.** + +Developer tools +------------- + +In case you wish to add your own tools to `.\tools` folder without git tracking, it is possible by adding it with `dev_*` suffix (example: `dev_clear_pyc(.ps1|.sh)`). + + + ## Contributors ✨ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): @@ -328,6 +336,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
Malthaldar

💻
Sven Neve

💻
zafrs

💻 +
Félix David

💻 📖
diff --git a/common/openpype_common/distribution/README.md b/common/openpype_common/distribution/README.md
new file mode 100644
index 0000000000..212eb267b8
--- /dev/null
+++ b/common/openpype_common/distribution/README.md
@@ -0,0 +1,18 @@
+Addon distribution tool
+------------------------
+
+Code in this folder is the backend portion of the Addon distribution logic for the v4 server.
+
+Each host and module will be a separate Addon in the future. Each v4 server can run a different set of Addons.
+
+The client (running on the artist machine) first asks the v4 server for a list of enabled addons.
+(It expects a list of JSON documents matching the `addon_distribution.py:AddonInfo` object.)
+Next it checks whether each enabled addon version is already present in the local folder. If a version of
+an addon is missing, the client uses the information in the addon to download the zip file
+(from http/shared local disk/git) and unzip it.
+
+A required part of addon distribution will be the sharing of dependencies (Python libraries, utilities), which is not part of this folder.
+
+The location of this folder might change in the future, as a client will be required to add this folder to `sys.path` reliably.
+
+This code needs to be as independent of OpenPype code as possible!
\ No newline at end of file
diff --git a/openpype/hosts/testhost/__init__.py b/common/openpype_common/distribution/__init__.py
similarity index 100%
rename from openpype/hosts/testhost/__init__.py
rename to common/openpype_common/distribution/__init__.py
diff --git a/common/openpype_common/distribution/addon_distribution.py b/common/openpype_common/distribution/addon_distribution.py
new file mode 100644
index 0000000000..5e48639dec
--- /dev/null
+++ b/common/openpype_common/distribution/addon_distribution.py
@@ -0,0 +1,208 @@
+import os
+from enum import Enum
+from abc import abstractmethod
+import attr
+import logging
+import requests
+import platform
+import shutil
+
+from .file_handler import RemoteFileHandler
+from .addon_info import AddonInfo
+
+
+class UpdateState(Enum):
+    EXISTS = "exists"
+    UPDATED = "updated"
+    FAILED = "failed"
+
+
+class AddonDownloader:
+    log = logging.getLogger(__name__)
+
+    def __init__(self):
+        self._downloaders = {}
+
+    def register_format(self, downloader_type, downloader):
+        self._downloaders[downloader_type.value] = downloader
+
+    def get_downloader(self, downloader_type):
+        downloader = self._downloaders.get(downloader_type)
+        if not downloader:
+            raise ValueError(f"{downloader_type} not implemented")
+        return downloader()
+
+    @classmethod
+    @abstractmethod
+    def download(cls, source, destination):
+        """Downloads addon source and returns local path to the zip file.
+
+        Args:
+            source (dict): {"type": "http", "url": "https://...", ...}
+            destination (str): local folder to unzip
+        Returns:
+            (str) local path to addon zip file
+        """
+        pass
+
+    @classmethod
+    def check_hash(cls, addon_path, addon_hash):
+        """Compares 'hash' of downloaded 'addon_path' file.
+
+        Args:
+            addon_path (str): local path to addon zip file
+            addon_hash (str): sha256 hash of zip file
+        Raises:
+            ValueError if hashes don't match
+        """
+        if not os.path.exists(addon_path):
+            raise ValueError(f"{addon_path} doesn't exist.")
+        if not RemoteFileHandler.check_integrity(addon_path,
+                                                 addon_hash,
+                                                 hash_type="sha256"):
+            raise ValueError(f"{addon_path} doesn't match expected hash.")
+
+    @classmethod
+    def unzip(cls, addon_zip_path, destination):
+        """Unzips local 'addon_zip_path' to 'destination'.
+ + Args: + addon_zip_path (str): local path to addon zip file + destination (str): local folder to unzip + """ + RemoteFileHandler.unzip(addon_zip_path, destination) + os.remove(addon_zip_path) + + @classmethod + def remove(cls, addon_url): + pass + + +class OSAddonDownloader(AddonDownloader): + + @classmethod + def download(cls, source, destination): + # OS doesnt need to download, unzip directly + addon_url = source["path"].get(platform.system().lower()) + if not os.path.exists(addon_url): + raise ValueError("{} is not accessible".format(addon_url)) + return addon_url + + +class HTTPAddonDownloader(AddonDownloader): + CHUNK_SIZE = 100000 + + @classmethod + def download(cls, source, destination): + source_url = source["url"] + cls.log.debug(f"Downloading {source_url} to {destination}") + file_name = os.path.basename(destination) + _, ext = os.path.splitext(file_name) + if (ext.replace(".", '') not + in set(RemoteFileHandler.IMPLEMENTED_ZIP_FORMATS)): + file_name += ".zip" + RemoteFileHandler.download_url(source_url, + destination, + filename=file_name) + + return os.path.join(destination, file_name) + + +def get_addons_info(server_endpoint): + """Returns list of addon information from Server""" + # TODO temp + # addon_info = AddonInfo( + # **{"name": "openpype_slack", + # "version": "1.0.0", + # "addon_url": "c:/projects/openpype_slack_1.0.0.zip", + # "type": UrlType.FILESYSTEM, + # "hash": "4be25eb6215e91e5894d3c5475aeb1e379d081d3f5b43b4ee15b0891cf5f5658"}) # noqa + # + # http_addon = AddonInfo( + # **{"name": "openpype_slack", + # "version": "1.0.0", + # "addon_url": "https://drive.google.com/file/d/1TcuV8c2OV8CcbPeWi7lxOdqWsEqQNPYy/view?usp=sharing", # noqa + # "type": UrlType.HTTP, + # "hash": "4be25eb6215e91e5894d3c5475aeb1e379d081d3f5b43b4ee15b0891cf5f5658"}) # noqa + + response = requests.get(server_endpoint) + if not response.ok: + raise Exception(response.text) + + addons_info = [] + for addon in response.json(): + addons_info.append(AddonInfo(**addon)) + return addons_info + + +def update_addon_state(addon_infos, destination_folder, factory, + log=None): + """Loops through all 'addon_infos', compares local version, unzips. + + Loops through server provided list of dictionaries with information about + available addons. Looks if each addon is already present and deployed. + If isn't, addon zip gets downloaded and unzipped into 'destination_folder'. + Args: + addon_infos (list of AddonInfo) + destination_folder (str): local path + factory (AddonDownloader): factory to get appropriate downloader per + addon type + log (logging.Logger) + Returns: + (dict): {"addon_full_name": UpdateState.value + (eg. 
"exists"|"updated"|"failed") + """ + if not log: + log = logging.getLogger(__name__) + + download_states = {} + for addon in addon_infos: + full_name = "{}_{}".format(addon.name, addon.version) + addon_dest = os.path.join(destination_folder, full_name) + + if os.path.isdir(addon_dest): + log.debug(f"Addon version folder {addon_dest} already exists.") + download_states[full_name] = UpdateState.EXISTS.value + continue + + for source in addon.sources: + download_states[full_name] = UpdateState.FAILED.value + try: + downloader = factory.get_downloader(source.type) + zip_file_path = downloader.download(attr.asdict(source), + addon_dest) + downloader.check_hash(zip_file_path, addon.hash) + downloader.unzip(zip_file_path, addon_dest) + download_states[full_name] = UpdateState.UPDATED.value + break + except Exception: + log.warning(f"Error happened during updating {addon.name}", + exc_info=True) + if os.path.isdir(addon_dest): + log.debug(f"Cleaning {addon_dest}") + shutil.rmtree(addon_dest) + + return download_states + + +def check_addons(server_endpoint, addon_folder, downloaders): + """Main entry point to compare existing addons with those on server. + + Args: + server_endpoint (str): url to v4 server endpoint + addon_folder (str): local dir path for addons + downloaders (AddonDownloader): factory of downloaders + + Raises: + (RuntimeError) if any addon failed update + """ + addons_info = get_addons_info(server_endpoint) + result = update_addon_state(addons_info, + addon_folder, + downloaders) + if UpdateState.FAILED.value in result.values(): + raise RuntimeError(f"Unable to update some addons {result}") + + +def cli(*args): + raise NotImplementedError diff --git a/common/openpype_common/distribution/addon_info.py b/common/openpype_common/distribution/addon_info.py new file mode 100644 index 0000000000..00ece11f3b --- /dev/null +++ b/common/openpype_common/distribution/addon_info.py @@ -0,0 +1,80 @@ +import attr +from enum import Enum + + +class UrlType(Enum): + HTTP = "http" + GIT = "git" + FILESYSTEM = "filesystem" + + +@attr.s +class MultiPlatformPath(object): + windows = attr.ib(default=None) + linux = attr.ib(default=None) + darwin = attr.ib(default=None) + + +@attr.s +class AddonSource(object): + type = attr.ib() + + +@attr.s +class LocalAddonSource(AddonSource): + path = attr.ib(default=attr.Factory(MultiPlatformPath)) + + +@attr.s +class WebAddonSource(AddonSource): + url = attr.ib(default=None) + + +@attr.s +class VersionData(object): + version_data = attr.ib(default=None) + + +@attr.s +class AddonInfo(object): + """Object matching json payload from Server""" + name = attr.ib() + version = attr.ib() + title = attr.ib(default=None) + sources = attr.ib(default=attr.Factory(dict)) + hash = attr.ib(default=None) + description = attr.ib(default=None) + license = attr.ib(default=None) + authors = attr.ib(default=None) + + @classmethod + def from_dict(cls, data): + sources = [] + + production_version = data.get("productionVersion") + if not production_version: + return + + # server payload contains info about all versions + # active addon must have 'productionVersion' and matching version info + version_data = data.get("versions", {})[production_version] + + for source in version_data.get("clientSourceInfo", []): + if source.get("type") == UrlType.FILESYSTEM.value: + source_addon = LocalAddonSource(type=source["type"], + path=source["path"]) + if source.get("type") == UrlType.HTTP.value: + source_addon = WebAddonSource(type=source["type"], + url=source["url"]) + + sources.append(source_addon) 
+ + return cls(name=data.get("name"), + version=production_version, + sources=sources, + hash=data.get("hash"), + description=data.get("description"), + title=data.get("title"), + license=data.get("license"), + authors=data.get("authors")) + diff --git a/tests/lib/file_handler.py b/common/openpype_common/distribution/file_handler.py similarity index 83% rename from tests/lib/file_handler.py rename to common/openpype_common/distribution/file_handler.py index ee3abc6ecb..f585c77632 100644 --- a/tests/lib/file_handler.py +++ b/common/openpype_common/distribution/file_handler.py @@ -21,7 +21,7 @@ class RemoteFileHandler: 'tar.gz', 'tar.xz', 'tar.bz2'] @staticmethod - def calculate_md5(fpath, chunk_size): + def calculate_md5(fpath, chunk_size=10000): md5 = hashlib.md5() with open(fpath, 'rb') as f: for chunk in iter(lambda: f.read(chunk_size), b''): @@ -33,17 +33,45 @@ class RemoteFileHandler: return md5 == RemoteFileHandler.calculate_md5(fpath, **kwargs) @staticmethod - def check_integrity(fpath, md5=None): + def calculate_sha256(fpath): + """Calculate sha256 for content of the file. + + Args: + fpath (str): Path to file. + + Returns: + str: hex encoded sha256 + + """ + h = hashlib.sha256() + b = bytearray(128 * 1024) + mv = memoryview(b) + with open(fpath, 'rb', buffering=0) as f: + for n in iter(lambda: f.readinto(mv), 0): + h.update(mv[:n]) + return h.hexdigest() + + @staticmethod + def check_sha256(fpath, sha256, **kwargs): + return sha256 == RemoteFileHandler.calculate_sha256(fpath, **kwargs) + + @staticmethod + def check_integrity(fpath, hash_value=None, hash_type=None): if not os.path.isfile(fpath): return False - if md5 is None: + if hash_value is None: return True - return RemoteFileHandler.check_md5(fpath, md5) + if not hash_type: + raise ValueError("Provide hash type, md5 or sha256") + if hash_type == 'md5': + return RemoteFileHandler.check_md5(fpath, hash_value) + if hash_type == "sha256": + return RemoteFileHandler.check_sha256(fpath, hash_value) @staticmethod def download_url( url, root, filename=None, - md5=None, max_redirect_hops=3 + sha256=None, max_redirect_hops=3 ): """Download a file from a url and place it in root. Args: @@ -51,7 +79,7 @@ class RemoteFileHandler: root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the basename of the URL - md5 (str, optional): MD5 checksum of the download. + sha256 (str, optional): sha256 checksum of the download. 
If None, do not check max_redirect_hops (int, optional): Maximum number of redirect hops allowed @@ -64,7 +92,8 @@ class RemoteFileHandler: os.makedirs(root, exist_ok=True) # check if file is already present locally - if RemoteFileHandler.check_integrity(fpath, md5): + if RemoteFileHandler.check_integrity(fpath, + sha256, hash_type="sha256"): print('Using downloaded and verified file: ' + fpath) return @@ -76,7 +105,7 @@ class RemoteFileHandler: file_id = RemoteFileHandler._get_google_drive_file_id(url) if file_id is not None: return RemoteFileHandler.download_file_from_google_drive( - file_id, root, filename, md5) + file_id, root, filename, sha256) # download the file try: @@ -92,20 +121,21 @@ class RemoteFileHandler: raise e # check integrity of downloaded file - if not RemoteFileHandler.check_integrity(fpath, md5): + if not RemoteFileHandler.check_integrity(fpath, + sha256, hash_type="sha256"): raise RuntimeError("File not found or corrupted.") @staticmethod def download_file_from_google_drive(file_id, root, filename=None, - md5=None): + sha256=None): """Download a Google Drive file from and place it in root. Args: file_id (str): id of file to be downloaded root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the id of the file. - md5 (str, optional): MD5 checksum of the download. + sha256 (str, optional): sha256 checksum of the download. If None, do not check """ # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url # noqa @@ -119,8 +149,8 @@ class RemoteFileHandler: os.makedirs(root, exist_ok=True) - if os.path.isfile(fpath) and RemoteFileHandler.check_integrity(fpath, - md5): + if os.path.isfile(fpath) and RemoteFileHandler.check_integrity( + fpath, sha256, hash_type="sha256"): print('Using downloaded and verified file: ' + fpath) else: session = requests.Session() diff --git a/common/openpype_common/distribution/tests/test_addon_distributtion.py b/common/openpype_common/distribution/tests/test_addon_distributtion.py new file mode 100644 index 0000000000..765ea0596a --- /dev/null +++ b/common/openpype_common/distribution/tests/test_addon_distributtion.py @@ -0,0 +1,167 @@ +import pytest +import attr +import tempfile + +from common.openpype_common.distribution.addon_distribution import ( + AddonDownloader, + OSAddonDownloader, + HTTPAddonDownloader, + AddonInfo, + update_addon_state, + UpdateState +) +from common.openpype_common.distribution.addon_info import UrlType + + +@pytest.fixture +def addon_downloader(): + addon_downloader = AddonDownloader() + addon_downloader.register_format(UrlType.FILESYSTEM, OSAddonDownloader) + addon_downloader.register_format(UrlType.HTTP, HTTPAddonDownloader) + + yield addon_downloader + + +@pytest.fixture +def http_downloader(addon_downloader): + yield addon_downloader.get_downloader(UrlType.HTTP.value) + + +@pytest.fixture +def temp_folder(): + yield tempfile.mkdtemp() + + +@pytest.fixture +def sample_addon_info(): + addon_info = { + "versions": { + "1.0.0": { + "clientPyproject": { + "tool": { + "poetry": { + "dependencies": { + "nxtools": "^1.6", + "orjson": "^3.6.7", + "typer": "^0.4.1", + "email-validator": "^1.1.3", + "python": "^3.10", + "fastapi": "^0.73.0" + } + } + } + }, + "hasSettings": True, + "clientSourceInfo": [ + { + "type": "http", + "url": "https://drive.google.com/file/d/1TcuV8c2OV8CcbPeWi7lxOdqWsEqQNPYy/view?usp=sharing" # noqa + }, + { + "type": "filesystem", + "path": { + "windows": ["P:/sources/some_file.zip", + 
"W:/sources/some_file.zip"], # noqa + "linux": ["/mnt/srv/sources/some_file.zip"], + "darwin": ["/Volumes/srv/sources/some_file.zip"] + } + } + ], + "frontendScopes": { + "project": { + "sidebar": "hierarchy" + } + } + } + }, + "description": "", + "title": "Slack addon", + "name": "openpype_slack", + "productionVersion": "1.0.0", + "hash": "4be25eb6215e91e5894d3c5475aeb1e379d081d3f5b43b4ee15b0891cf5f5658" # noqa + } + yield addon_info + + +def test_register(printer): + addon_downloader = AddonDownloader() + + assert len(addon_downloader._downloaders) == 0, "Contains registered" + + addon_downloader.register_format(UrlType.FILESYSTEM, OSAddonDownloader) + assert len(addon_downloader._downloaders) == 1, "Should contain one" + + +def test_get_downloader(printer, addon_downloader): + assert addon_downloader.get_downloader(UrlType.FILESYSTEM.value), "Should find" # noqa + + with pytest.raises(ValueError): + addon_downloader.get_downloader("unknown"), "Shouldn't find" + + +def test_addon_info(printer, sample_addon_info): + """Tests parsing of expected payload from v4 server into AadonInfo.""" + valid_minimum = { + "name": "openpype_slack", + "productionVersion": "1.0.0", + "versions": { + "1.0.0": { + "clientSourceInfo": [ + { + "type": "filesystem", + "path": { + "windows": [ + "P:/sources/some_file.zip", + "W:/sources/some_file.zip"], + "linux": [ + "/mnt/srv/sources/some_file.zip"], + "darwin": [ + "/Volumes/srv/sources/some_file.zip"] # noqa + } + } + ] + } + } + } + + assert AddonInfo.from_dict(valid_minimum), "Missing required fields" + + valid_minimum["versions"].pop("1.0.0") + with pytest.raises(KeyError): + assert not AddonInfo.from_dict(valid_minimum), "Must fail without version data" # noqa + + valid_minimum.pop("productionVersion") + assert not AddonInfo.from_dict( + valid_minimum), "none if not productionVersion" # noqa + + addon = AddonInfo.from_dict(sample_addon_info) + assert addon, "Should be created" + assert addon.name == "openpype_slack", "Incorrect name" + assert addon.version == "1.0.0", "Incorrect version" + + with pytest.raises(TypeError): + assert addon["name"], "Dict approach not implemented" + + addon_as_dict = attr.asdict(addon) + assert addon_as_dict["name"], "Dict approach should work" + + +def test_update_addon_state(printer, sample_addon_info, + temp_folder, addon_downloader): + """Tests possible cases of addon update.""" + addon_info = AddonInfo.from_dict(sample_addon_info) + orig_hash = addon_info.hash + + addon_info.hash = "brokenhash" + result = update_addon_state([addon_info], temp_folder, addon_downloader) + assert result["openpype_slack_1.0.0"] == UpdateState.FAILED.value, \ + "Update should failed because of wrong hash" + + addon_info.hash = orig_hash + result = update_addon_state([addon_info], temp_folder, addon_downloader) + assert result["openpype_slack_1.0.0"] == UpdateState.UPDATED.value, \ + "Addon should have been updated" + + result = update_addon_state([addon_info], temp_folder, addon_downloader) + assert result["openpype_slack_1.0.0"] == UpdateState.EXISTS.value, \ + "Addon should already exist" diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index 08333885c0..077f56d769 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -63,7 +63,8 @@ class OpenPypeVersion(semver.VersionInfo): """ staging = False path = None - _VERSION_REGEX = 
re.compile(r"(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$") # noqa: E501 + # this should match any string complying with https://semver.org/ + _VERSION_REGEX = re.compile(r"(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P[a-zA-Z\d\-.]*))?(?:\+(?P[a-zA-Z\d\-.]*))?") # noqa: E501 _installed_version = None def __init__(self, *args, **kwargs): @@ -122,7 +123,7 @@ class OpenPypeVersion(semver.VersionInfo): if self.staging: if kwargs.get("build"): if "staging" not in kwargs.get("build"): - kwargs["build"] = "{}-staging".format(kwargs.get("build")) + kwargs["build"] = f"{kwargs.get('build')}-staging" else: kwargs["build"] = "staging" @@ -136,8 +137,7 @@ class OpenPypeVersion(semver.VersionInfo): return bool(result and self.staging == other.staging) def __repr__(self): - return "<{}: {} - path={}>".format( - self.__class__.__name__, str(self), self.path) + return f"<{self.__class__.__name__}: {str(self)} - path={self.path}>" def __lt__(self, other: OpenPypeVersion): result = super().__lt__(other) @@ -212,6 +212,8 @@ class OpenPypeVersion(semver.VersionInfo): OpenPypeVersion: of detected or None. """ + # strip .zip ext if present + string = re.sub(r"\.zip$", "", string, flags=re.IGNORECASE) m = re.search(OpenPypeVersion._VERSION_REGEX, string) if not m: return None @@ -232,10 +234,7 @@ class OpenPypeVersion(semver.VersionInfo): return openpype_version def __hash__(self): - if self.path: - return hash(self.path) - else: - return hash(str(self)) + return hash(self.path) if self.path else hash(str(self)) @staticmethod def is_version_in_dir( @@ -384,7 +383,8 @@ class OpenPypeVersion(semver.VersionInfo): @classmethod def get_local_versions( - cls, production: bool = None, staging: bool = None + cls, production: bool = None, + staging: bool = None ) -> List: """Get all versions available on this machine. @@ -394,6 +394,10 @@ class OpenPypeVersion(semver.VersionInfo): Args: production (bool): Return production versions. staging (bool): Return staging versions. + + Returns: + list: of compatible versions available on the machine. + """ # Return all local versions if arguments are set to None if production is None and staging is None: @@ -410,10 +414,10 @@ class OpenPypeVersion(semver.VersionInfo): if not production and not staging: return [] + # DEPRECATED: backwards compatible way to look for versions in root dir_to_search = Path(user_data_dir("openpype", "pypeclub")) - versions = OpenPypeVersion.get_versions_from_directory( - dir_to_search - ) + versions = OpenPypeVersion.get_versions_from_directory(dir_to_search) + filtered_versions = [] for version in versions: if version.is_staging(): @@ -425,7 +429,8 @@ class OpenPypeVersion(semver.VersionInfo): @classmethod def get_remote_versions( - cls, production: bool = None, staging: bool = None + cls, production: bool = None, + staging: bool = None ) -> List: """Get all versions available in OpenPype Path. @@ -435,6 +440,7 @@ class OpenPypeVersion(semver.VersionInfo): Args: production (bool): Return production versions. staging (bool): Return staging versions. 
+ """ # Return all local versions if arguments are set to None if production is None and staging is None: @@ -469,6 +475,7 @@ class OpenPypeVersion(semver.VersionInfo): return [] versions = cls.get_versions_from_directory(dir_to_search) + filtered_versions = [] for version in versions: if version.is_staging(): @@ -479,7 +486,8 @@ class OpenPypeVersion(semver.VersionInfo): return list(sorted(set(filtered_versions))) @staticmethod - def get_versions_from_directory(openpype_dir: Path) -> List: + def get_versions_from_directory( + openpype_dir: Path) -> List: """Get all detected OpenPype versions in directory. Args: @@ -492,15 +500,22 @@ class OpenPypeVersion(semver.VersionInfo): ValueError: if invalid path is specified. """ + openpype_versions = [] if not openpype_dir.exists() and not openpype_dir.is_dir(): - raise ValueError("specified directory is invalid") + return openpype_versions - _openpype_versions = [] # iterate over directory in first level and find all that might # contain OpenPype. for item in openpype_dir.iterdir(): + # if the item is directory with major.minor version, dive deeper - # if file, strip extension, in case of dir not. + if item.is_dir() and re.match(r"^\d+\.\d+$", item.name): + _versions = OpenPypeVersion.get_versions_from_directory( + item) + if _versions: + openpype_versions += _versions + + # if file exists, strip extension, in case of dir don't. name = item.name if item.is_dir() else item.stem result = OpenPypeVersion.version_in_str(name) @@ -519,9 +534,9 @@ class OpenPypeVersion(semver.VersionInfo): continue detected_version.path = item - _openpype_versions.append(detected_version) + openpype_versions.append(detected_version) - return sorted(_openpype_versions) + return sorted(openpype_versions) @staticmethod def get_installed_version_str() -> str: @@ -550,13 +565,13 @@ class OpenPypeVersion(semver.VersionInfo): staging: bool = False, local: bool = None, remote: bool = None - ) -> OpenPypeVersion: - """Get latest available version. + ) -> Union[OpenPypeVersion, None]: + """Get the latest available version. The version does not contain information about path and source. - This is utility version to get latest version from all found. Build - version is not listed if staging is enabled. + This is utility version to get the latest version from all found. + Build version is not listed if staging is enabled. Arguments 'local' and 'remote' define if local and remote repository versions are used. All versions are used if both are not set (or set @@ -568,6 +583,10 @@ class OpenPypeVersion(semver.VersionInfo): staging (bool, optional): List staging versions if True. local (bool, optional): List local versions if True. remote (bool, optional): List remote versions if True. + + Returns: + Latest OpenPypeVersion or None + """ if local is None and remote is None: local = True @@ -621,6 +640,21 @@ class OpenPypeVersion(semver.VersionInfo): return None return OpenPypeVersion(version=result) + def is_compatible(self, version: OpenPypeVersion): + """Test build compatibility. + + This will simply compare major and minor versions (ignoring patch + and the rest). + + Args: + version (OpenPypeVersion): Version to check compatibility with. + + Returns: + bool: if the version is compatible + + """ + return self.major == version.major and self.minor == version.minor + class BootstrapRepos: """Class for bootstrapping local OpenPype installation. 
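The `is_compatible` helper added in the hunk above reduces build compatibility to a major/minor comparison. As a rough illustration of that rule only (not part of the patch), here is a minimal sketch using the `semver` package that `OpenPypeVersion` subclasses; the version strings are made up for the example:

```python
import semver

def is_compatible(a: semver.VersionInfo, b: semver.VersionInfo) -> bool:
    # Mirrors the rule above: only major and minor matter;
    # patch, pre-release and build metadata are ignored.
    return a.major == b.major and a.minor == b.minor

installed = semver.VersionInfo.parse("3.14.2")  # hypothetical installed build
print(is_compatible(installed, semver.VersionInfo.parse("3.14.9-nightly.1")))  # True
print(is_compatible(installed, semver.VersionInfo.parse("3.15.0")))            # False
```

Under this rule a 3.15.x zip found on the network share is filtered out by a 3.14.x build, which is what the compatibility filtering added to `InstallThread` later in this diff relies on.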
@@ -714,9 +748,9 @@ class BootstrapRepos: self, repo_dir: Path = None) -> Union[OpenPypeVersion, None]: """Copy zip created from OpenPype repositories to user data dir. - This detect OpenPype version either in local "live" OpenPype + This detects OpenPype version either in local "live" OpenPype repository or in user provided path. Then it will zip it in temporary - directory and finally it will move it to destination which is user + directory, and finally it will move it to destination which is user data directory. Existing files will be replaced. Args: @@ -727,7 +761,7 @@ class BootstrapRepos: """ # if repo dir is not set, we detect local "live" OpenPype repository - # version and use it as a source. Otherwise repo_dir is user + # version and use it as a source. Otherwise, repo_dir is user # entered location. if repo_dir: version = self.get_version(repo_dir) @@ -741,8 +775,9 @@ class BootstrapRepos: return # create destination directory - if not self.data_dir.exists(): - self.data_dir.mkdir(parents=True) + destination = self.data_dir / f"{installed_version.major}.{installed_version.minor}" # noqa + if not destination.exists(): + destination.mkdir(parents=True) # create zip inside temporary directory. with tempfile.TemporaryDirectory() as temp_dir: @@ -770,7 +805,9 @@ class BootstrapRepos: Path to moved zip on success. """ - destination = self.data_dir / zip_file.name + version = OpenPypeVersion.version_in_str(zip_file.name) + destination_dir = self.data_dir / f"{version.major}.{version.minor}" + destination = destination_dir / zip_file.name if destination.exists(): self._print( @@ -781,8 +818,15 @@ class BootstrapRepos: except Exception as e: self._print(str(e), LOG_ERROR, exc_info=True) return None + if not destination_dir.exists(): + destination_dir.mkdir(parents=True) + elif not destination_dir.is_dir(): + self._print( + "Destination exists but is not directory.", LOG_ERROR) + return None + try: - shutil.move(zip_file.as_posix(), self.data_dir.as_posix()) + shutil.move(zip_file.as_posix(), destination_dir.as_posix()) except shutil.Error as e: self._print(str(e), LOG_ERROR, exc_info=True) return None @@ -995,6 +1039,16 @@ class BootstrapRepos: @staticmethod def _validate_dir(path: Path) -> tuple: + """Validate checksums in a given path. + + Args: + path (Path): path to folder to validate. + + Returns: + tuple(bool, str): returns status and reason as a bool + and str in a tuple. + + """ checksums_file = Path(path / "checksums") if not checksums_file.exists(): # FIXME: This should be set to False sometimes in the future @@ -1076,11 +1130,24 @@ class BootstrapRepos: sys.path.insert(0, directory.as_posix()) @staticmethod - def find_openpype_version(version, staging): + def find_openpype_version( + version: Union[str, OpenPypeVersion], + staging: bool + ) -> Union[OpenPypeVersion, None]: + """Find location of specified OpenPype version. + + Args: + version (Union[str, OpenPypeVersion): Version to find. + staging (bool): Filter staging versions. + + Returns: + requested OpenPypeVersion. 
+ + """ + installed_version = OpenPypeVersion.get_installed_version() if isinstance(version, str): version = OpenPypeVersion(version=version) - installed_version = OpenPypeVersion.get_installed_version() if installed_version == version: return installed_version @@ -1107,7 +1174,18 @@ class BootstrapRepos: return None @staticmethod - def find_latest_openpype_version(staging): + def find_latest_openpype_version( + staging: bool + ) -> Union[OpenPypeVersion, None]: + """Find the latest available OpenPype version in all location. + + Args: + staging (bool): True to look for staging versions. + + Returns: + Latest OpenPype version on None if nothing was found. + + """ installed_version = OpenPypeVersion.get_installed_version() local_versions = OpenPypeVersion.get_local_versions( staging=staging @@ -1138,7 +1216,8 @@ class BootstrapRepos: self, openpype_path: Union[Path, str] = None, staging: bool = False, - include_zips: bool = False) -> Union[List[OpenPypeVersion], None]: + include_zips: bool = False + ) -> Union[List[OpenPypeVersion], None]: """Get ordered dict of detected OpenPype version. Resolution order for OpenPype is following: @@ -1172,30 +1251,38 @@ class BootstrapRepos: ("Finding OpenPype in non-filesystem locations is" " not implemented yet.")) - dir_to_search = self.data_dir - user_versions = self.get_openpype_versions(self.data_dir, staging) - # if we have openpype_path specified, search only there. + # if checks bellow for OPENPYPE_PATH and registry fails, use data_dir + # DEPRECATED: lookup in root of this folder is deprecated in favour + # of major.minor sub-folders. + dirs_to_search = [self.data_dir] + if openpype_path: - dir_to_search = openpype_path + dirs_to_search = [openpype_path] + elif os.getenv("OPENPYPE_PATH") \ + and Path(os.getenv("OPENPYPE_PATH")).exists(): + # first try OPENPYPE_PATH and if that is not available, + # try registry. + dirs_to_search = [Path(os.getenv("OPENPYPE_PATH"))] else: - if os.getenv("OPENPYPE_PATH"): - if Path(os.getenv("OPENPYPE_PATH")).exists(): - dir_to_search = Path(os.getenv("OPENPYPE_PATH")) - else: - try: - registry_dir = Path( - str(self.registry.get_item("openPypePath"))) - if registry_dir.exists(): - dir_to_search = registry_dir + try: + registry_dir = Path( + str(self.registry.get_item("openPypePath"))) + if registry_dir.exists(): + dirs_to_search = [registry_dir] - except ValueError: - # nothing found in registry, we'll use data dir - pass + except ValueError: + # nothing found in registry, we'll use data dir + pass - openpype_versions = self.get_openpype_versions(dir_to_search, staging) - openpype_versions += user_versions + openpype_versions = [] + for dir_to_search in dirs_to_search: + try: + openpype_versions += self.get_openpype_versions( + dir_to_search, staging) + except ValueError: + # location is invalid, skip it + pass - # remove zip file version if needed. 
if not include_zips: openpype_versions = [ v for v in openpype_versions if v.path.suffix != ".zip" @@ -1308,9 +1395,8 @@ class BootstrapRepos: raise ValueError( f"version {version} is not associated with any file") - destination = self.data_dir / version.path.stem - if destination.exists(): - assert destination.is_dir() + destination = self.data_dir / f"{version.major}.{version.minor}" / version.path.stem # noqa + if destination.exists() and destination.is_dir(): try: shutil.rmtree(destination) except OSError as e: @@ -1379,7 +1465,7 @@ class BootstrapRepos: else: dir_name = openpype_version.path.stem - destination = self.data_dir / dir_name + destination = self.data_dir / f"{openpype_version.major}.{openpype_version.minor}" / dir_name # noqa # test if destination directory already exist, if so lets delete it. if destination.exists() and force: @@ -1557,9 +1643,10 @@ class BootstrapRepos: return False return True - def get_openpype_versions(self, - openpype_dir: Path, - staging: bool = False) -> list: + def get_openpype_versions( + self, + openpype_dir: Path, + staging: bool = False) -> list: """Get all detected OpenPype versions in directory. Args: @@ -1574,14 +1661,20 @@ class BootstrapRepos: """ if not openpype_dir.exists() and not openpype_dir.is_dir(): - raise ValueError("specified directory is invalid") + raise ValueError(f"specified directory {openpype_dir} is invalid") - _openpype_versions = [] + openpype_versions = [] # iterate over directory in first level and find all that might # contain OpenPype. for item in openpype_dir.iterdir(): + # if the item is directory with major.minor version, dive deeper + if item.is_dir() and re.match(r"^\d+\.\d+$", item.name): + _versions = self.get_openpype_versions( + item, staging=staging) + if _versions: + openpype_versions += _versions - # if file, strip extension, in case of dir not. + # if it is file, strip extension, in case of dir don't. name = item.name if item.is_dir() else item.stem result = OpenPypeVersion.version_in_str(name) @@ -1601,12 +1694,12 @@ class BootstrapRepos: detected_version.path = item if staging and detected_version.is_staging(): - _openpype_versions.append(detected_version) + openpype_versions.append(detected_version) if not staging and not detected_version.is_staging(): - _openpype_versions.append(detected_version) + openpype_versions.append(detected_version) - return sorted(_openpype_versions) + return sorted(openpype_versions) class OpenPypeVersionExists(Exception): diff --git a/igniter/install_dialog.py b/igniter/install_dialog.py index b09529f5c5..65ddd58735 100644 --- a/igniter/install_dialog.py +++ b/igniter/install_dialog.py @@ -388,8 +388,11 @@ class InstallDialog(QtWidgets.QDialog): install_thread.start() def _installation_finished(self): + # TODO we should find out why status can be set to 'None'? + # - 'InstallThread.run' should handle all cases so not sure where + # that come from status = self._install_thread.result() - if status >= 0: + if status is not None and status >= 0: self._update_progress(100) QtWidgets.QApplication.processEvents() self.done(3) diff --git a/igniter/install_thread.py b/igniter/install_thread.py index 8e31f8cb8f..0cccf664e7 100644 --- a/igniter/install_thread.py +++ b/igniter/install_thread.py @@ -62,7 +62,7 @@ class InstallThread(QThread): progress_callback=self.set_progress, message=self.message) local_version = OpenPypeVersion.get_installed_version_str() - # if user did entered nothing, we install OpenPype from local version. 
+ # if user did enter nothing, we install OpenPype from local version. # zip content of `repos`, copy it to user data dir and append # version to it. if not self._path: @@ -93,6 +93,23 @@ class InstallThread(QThread): detected = bs.find_openpype(include_zips=True) if detected: + if not OpenPypeVersion.get_installed_version().is_compatible( + detected[-1]): + self.message.emit(( + f"Latest detected version {detected[-1]} " + "is not compatible with the currently running " + f"{local_version}" + ), True) + self.message.emit(( + "Filtering detected versions to compatible ones..." + ), False) + + detected = [ + version for version in detected + if version.is_compatible( + OpenPypeVersion.get_installed_version()) + ] + if OpenPypeVersion( version=local_version, path=Path()) < detected[-1]: self.message.emit(( diff --git a/igniter/tools.py b/igniter/tools.py index 57159b5e52..a9d592acf0 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -21,6 +21,11 @@ class OpenPypeVersionNotFound(Exception): pass +class OpenPypeVersionIncompatible(Exception): + """OpenPype version is not compatible with the installed one (build).""" + pass + + def should_add_certificate_path_to_mongo_url(mongo_url): """Check if should add ca certificate to mongo url. diff --git a/inno_setup.iss b/inno_setup.iss index ead9907955..fa050ef1d6 100644 --- a/inno_setup.iss +++ b/inno_setup.iss @@ -18,7 +18,8 @@ AppPublisher=Orbi Tools s.r.o AppPublisherURL=http://pype.club AppSupportURL=http://pype.club AppUpdatesURL=http://pype.club -DefaultDirName={autopf}\{#MyAppName} +DefaultDirName={autopf}\{#MyAppName}\{#AppVer} +UsePreviousAppDir=no DisableProgramGroupPage=yes OutputBaseFilename={#MyAppName}-{#AppVer}-install AllowCancelDuringInstall=yes @@ -27,7 +28,7 @@ AllowCancelDuringInstall=yes PrivilegesRequiredOverridesAllowed=dialog SetupIconFile=igniter\openpype.ico OutputDir=build\ -Compression=lzma +Compression=lzma2 SolidCompression=yes WizardStyle=modern @@ -37,6 +38,11 @@ Name: "english"; MessagesFile: "compiler:Default.isl" [Tasks] Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked +[InstallDelete] +; clean everything in previous installation folder +Type: filesandordirs; Name: "{app}\*" + + [Files] Source: "build\{#build}\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs ; NOTE: Don't use "Flags: ignoreversion" on any shared system files diff --git a/openpype/action.py b/openpype/action.py index 50741875e4..de9cdee010 100644 --- a/openpype/action.py +++ b/openpype/action.py @@ -1,44 +1,82 @@ -# absolute_import is needed to counter the `module has no cmds error` in Maya -from __future__ import absolute_import - +import warnings +import functools import pyblish.api -def get_errored_instances_from_context(context): - - instances = list() - for result in context.data["results"]: - if result["instance"] is None: - # When instance is None we are on the "context" result - continue - - if result["error"]: - instances.append(result["instance"]) - - return instances +class ActionDeprecatedWarning(DeprecationWarning): + pass -def get_errored_plugins_from_data(context): - """Get all failed validation plugins - - Args: - context (object): - - Returns: - list of plugins which failed during validation +def deprecated(new_destination): + """Mark functions as deprecated. + It will result in a warning being emitted when the function is used. 
""" - plugins = list() - results = context.data.get("results", []) - for result in results: - if result["success"] is True: - continue - plugins.append(result["plugin"]) + func = None + if callable(new_destination): + func = new_destination + new_destination = None - return plugins + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", ActionDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=ActionDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) +@deprecated("openpype.pipeline.publish.get_errored_instances_from_context") +def get_errored_instances_from_context(context): + """ + Deprecated: + Since 3.14.* will be removed in 3.16.* or later. + """ + + from openpype.pipeline.publish import get_errored_instances_from_context + + return get_errored_instances_from_context(context) + + +@deprecated("openpype.pipeline.publish.get_errored_plugins_from_context") +def get_errored_plugins_from_data(context): + """ + Deprecated: + Since 3.14.* will be removed in 3.16.* or later. + """ + + from openpype.pipeline.publish import get_errored_plugins_from_context + + return get_errored_plugins_from_context(context) + + +# 'RepairAction' and 'RepairContextAction' were moved to +# 'openpype.pipeline.publish' please change you imports. +# There is no "reasonable" way hot mark these classes as deprecated to show +# warning of wrong import. 
+# Deprecated since 3.14.* will be removed in 3.16.* class RepairAction(pyblish.api.Action): """Repairs the action @@ -65,6 +103,7 @@ class RepairAction(pyblish.api.Action): plugin.repair(instance) +# Deprecated since 3.14.* will be removed in 3.16.* class RepairContextAction(pyblish.api.Action): """Repairs the action diff --git a/openpype/api.py b/openpype/api.py index 9ce745b653..b60cd21d2b 100644 --- a/openpype/api.py +++ b/openpype/api.py @@ -9,13 +9,12 @@ from .settings import ( ) from .lib import ( PypeLogger, + Logger, Anatomy, - config, execute, run_subprocess, version_up, get_asset, - get_hierarchy, get_workdir_data, get_version_from_path, get_last_version_from_path, @@ -49,7 +48,6 @@ from .plugin import ( ValidateContentsOrder, ValidateSceneOrder, ValidateMeshOrder, - ValidationException ) # temporary fix, might @@ -59,8 +57,6 @@ from .action import ( RepairContextAction ) -# for backward compatibility with Pype 2 -Logger = PypeLogger __all__ = [ "get_system_settings", @@ -75,7 +71,6 @@ __all__ = [ "PypeLogger", "Logger", "Anatomy", - "config", "execute", "get_default_components", "ApplicationManager", @@ -96,12 +91,9 @@ __all__ = [ "RepairAction", "RepairContextAction", - "ValidationException", - # get contextual data "version_up", "get_asset", - "get_hierarchy", "get_workdir_data", "get_version_from_path", "get_last_version_from_path", diff --git a/openpype/cli.py b/openpype/cli.py index 2aa4a46929..d24cd4a872 100644 --- a/openpype/cli.py +++ b/openpype/cli.py @@ -2,7 +2,7 @@ """Package for handling pype command line arguments.""" import os import sys - +import code import click # import sys @@ -40,18 +40,6 @@ def settings(dev): PypeCommands().launch_settings_gui(dev) -@main.command() -def standalonepublisher(): - """Show Pype Standalone publisher UI.""" - PypeCommands().launch_standalone_publisher() - - -@main.command() -def traypublisher(): - """Show new OpenPype Standalone publisher UI.""" - PypeCommands().launch_traypublisher() - - @main.command() def tray(): """Launch pype tray. @@ -289,6 +277,13 @@ def projectmanager(): PypeCommands().launch_project_manager() +@main.command(context_settings={"ignore_unknown_options": True}) +def publish_report_viewer(): + from openpype.tools.publisher.publish_report_viewer import main + + sys.exit(main()) + + @main.command() @click.argument("output_path") @click.option("--project", help="Define project context") @@ -424,3 +419,45 @@ def pack_project(project, dirpath): def unpack_project(zipfile, root): """Create a package of project with all files and database dump.""" PypeCommands().unpack_project(zipfile, root) + + +@main.command() +def interactive(): + """Interative (Python like) console. + + Helpfull command not only for development to directly work with python + interpreter. + + Warning: + Executable 'openpype_gui' on windows won't work. 
+ """ + + from openpype.version import __version__ + + banner = "OpenPype {}\nPython {} on {}".format( + __version__, sys.version, sys.platform + ) + code.interact(banner) + + +@main.command() +@click.option("--build", help="Print only build version", + is_flag=True, default=False) +def version(build): + """Print OpenPype version.""" + + from openpype.version import __version__ + from igniter.bootstrap_repos import BootstrapRepos, OpenPypeVersion + from pathlib import Path + import os + + if getattr(sys, 'frozen', False): + local_version = BootstrapRepos.get_version( + Path(os.getenv("OPENPYPE_ROOT"))) + else: + local_version = OpenPypeVersion.get_installed_version_str() + + if build: + print(local_version) + return + print(f"{__version__} (booted: {local_version})") diff --git a/openpype/client/__init__.py b/openpype/client/__init__.py new file mode 100644 index 0000000000..7831afd8ad --- /dev/null +++ b/openpype/client/__init__.py @@ -0,0 +1,108 @@ +from .mongo import ( + OpenPypeMongoConnection, +) + +from .entities import ( + get_projects, + get_project, + get_whole_project, + + get_asset_by_id, + get_asset_by_name, + get_assets, + get_archived_assets, + get_asset_ids_with_subsets, + + get_subset_by_id, + get_subset_by_name, + get_subsets, + get_subset_families, + + get_version_by_id, + get_version_by_name, + get_versions, + get_hero_version_by_id, + get_hero_version_by_subset_id, + get_hero_versions, + get_last_versions, + get_last_version_by_subset_id, + get_last_version_by_subset_name, + get_output_link_versions, + + version_is_latest, + + get_representation_by_id, + get_representation_by_name, + get_representations, + get_representation_parents, + get_representations_parents, + get_archived_representations, + + get_thumbnail, + get_thumbnails, + get_thumbnail_id_from_source, + + get_workfile_info, +) + +from .entity_links import ( + get_linked_asset_ids, + get_linked_assets, + get_linked_representation_id, +) + +from .operations import ( + create_project, +) + + +__all__ = ( + "OpenPypeMongoConnection", + + "get_projects", + "get_project", + "get_whole_project", + + "get_asset_by_id", + "get_asset_by_name", + "get_assets", + "get_archived_assets", + "get_asset_ids_with_subsets", + + "get_subset_by_id", + "get_subset_by_name", + "get_subsets", + "get_subset_families", + + "get_version_by_id", + "get_version_by_name", + "get_versions", + "get_hero_version_by_id", + "get_hero_version_by_subset_id", + "get_hero_versions", + "get_last_versions", + "get_last_version_by_subset_id", + "get_last_version_by_subset_name", + "get_output_link_versions", + + "version_is_latest", + + "get_representation_by_id", + "get_representation_by_name", + "get_representations", + "get_representation_parents", + "get_representations_parents", + "get_archived_representations", + + "get_thumbnail", + "get_thumbnails", + "get_thumbnail_id_from_source", + + "get_workfile_info", + + "get_linked_asset_ids", + "get_linked_assets", + "get_linked_representation_id", + + "create_project", +) diff --git a/openpype/client/entities.py b/openpype/client/entities.py new file mode 100644 index 0000000000..c415be8816 --- /dev/null +++ b/openpype/client/entities.py @@ -0,0 +1,1502 @@ +"""Unclear if these will have public functions like these. + +Goal is that most of functions here are called on (or with) an object +that has project name as a context (e.g. on 'ProjectEntity'?). + ++ We will need more specific functions doing wery specific queires really fast. 
+""" + +import re +import collections + +import six +from bson.objectid import ObjectId + +from .mongo import get_project_database, get_project_connection + +PatternType = type(re.compile("")) + + +def _prepare_fields(fields, required_fields=None): + if not fields: + return None + + output = { + field: True + for field in fields + } + if "_id" not in output: + output["_id"] = True + + if required_fields: + for key in required_fields: + output[key] = True + return output + + +def convert_id(in_id): + """Helper function for conversion of id from string to ObjectId. + + Args: + in_id (Union[str, ObjectId, Any]): Entity id that should be converted + to right type for queries. + + Returns: + Union[ObjectId, Any]: Converted ids to ObjectId or in type. + """ + + if isinstance(in_id, six.string_types): + return ObjectId(in_id) + return in_id + + +def convert_ids(in_ids): + """Helper function for conversion of ids from string to ObjectId. + + Args: + in_ids (Iterable[Union[str, ObjectId, Any]]): List of entity ids that + should be converted to right type for queries. + + Returns: + List[ObjectId]: Converted ids to ObjectId. + """ + + _output = set() + for in_id in in_ids: + if in_id is not None: + _output.add(convert_id(in_id)) + return list(_output) + + +def get_projects(active=True, inactive=False, fields=None): + mongodb = get_project_database() + for project_name in mongodb.collection_names(): + if project_name in ("system.indexes",): + continue + project_doc = get_project( + project_name, active=active, inactive=inactive, fields=fields + ) + if project_doc is not None: + yield project_doc + + +def get_project(project_name, active=True, inactive=True, fields=None): + # Skip if both are disabled + if not active and not inactive: + return None + + query_filter = {"type": "project"} + # Keep query untouched if both should be available + if active and inactive: + pass + + # Add filter to keep only active + elif active: + query_filter["$or"] = [ + {"data.active": {"$exists": False}}, + {"data.active": True}, + ] + + # Add filter to keep only inactive + elif inactive: + query_filter["$or"] = [ + {"data.active": {"$exists": False}}, + {"data.active": False}, + ] + + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def get_whole_project(project_name): + """Receive all documents from project. + + Helper that can be used to get all document from whole project. For example + for backups etc. + + Returns: + Cursor: Query cursor as iterable which returns all documents from + project collection. + """ + + conn = get_project_connection(project_name) + return conn.find({}) + + +def get_asset_by_id(project_name, asset_id, fields=None): + """Receive asset data by it's id. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_id (Union[str, ObjectId]): Asset's id. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + dict: Asset entity data. + None: Asset was not found by id. + """ + + asset_id = convert_id(asset_id) + if not asset_id: + return None + + query_filter = {"type": "asset", "_id": asset_id} + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def get_asset_by_name(project_name, asset_name, fields=None): + """Receive asset data by it's name. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_name (str): Asset's name. 
+ fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + dict: Asset entity data. + None: Asset was not found by name. + """ + + if not asset_name: + return None + + query_filter = {"type": "asset", "name": asset_name} + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +# NOTE this could be just public function? +# - any better variable name instead of 'standard'? +# - same approach can be used for rest of types +def _get_assets( + project_name, + asset_ids=None, + asset_names=None, + parent_ids=None, + standard=True, + archived=False, + fields=None +): + """Assets for specified project by passed filters. + + Passed filters (ids and names) are always combined so all conditions must + match. + + To receive all assets from project just keep filters empty. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should + be found. + asset_names (Iterable[str]): Name assets that should be found. + parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. + standard (bool): Query standart assets (type 'asset'). + archived (bool): Query archived assets (type 'archived_asset'). + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Query cursor as iterable which returns asset documents matching + passed filters. + """ + + asset_types = [] + if standard: + asset_types.append("asset") + if archived: + asset_types.append("archived_asset") + + if not asset_types: + return [] + + if len(asset_types) == 1: + query_filter = {"type": asset_types[0]} + else: + query_filter = {"type": {"$in": asset_types}} + + if asset_ids is not None: + asset_ids = convert_ids(asset_ids) + if not asset_ids: + return [] + query_filter["_id"] = {"$in": asset_ids} + + if asset_names is not None: + if not asset_names: + return [] + query_filter["name"] = {"$in": list(asset_names)} + + if parent_ids is not None: + parent_ids = convert_ids(parent_ids) + if not parent_ids: + return [] + query_filter["data.visualParent"] = {"$in": parent_ids} + + conn = get_project_connection(project_name) + + return conn.find(query_filter, _prepare_fields(fields)) + + +def get_assets( + project_name, + asset_ids=None, + asset_names=None, + parent_ids=None, + archived=False, + fields=None +): + """Assets for specified project by passed filters. + + Passed filters (ids and names) are always combined so all conditions must + match. + + To receive all assets from project just keep filters empty. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should + be found. + asset_names (Iterable[str]): Name assets that should be found. + parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. + archived (bool): Add also archived assets. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Query cursor as iterable which returns asset documents matching + passed filters. + """ + + return _get_assets( + project_name, + asset_ids, + asset_names, + parent_ids, + True, + archived, + fields + ) + + +def get_archived_assets( + project_name, + asset_ids=None, + asset_names=None, + parent_ids=None, + fields=None +): + """Archived assets for specified project by passed filters. 
+ + Passed filters (ids and names) are always combined so all conditions must + match. + + To receive all archived assets from project just keep filters empty. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should + be found. + asset_names (Iterable[str]): Name assets that should be found. + parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Query cursor as iterable which returns asset documents matching + passed filters. + """ + + return _get_assets( + project_name, asset_ids, asset_names, parent_ids, False, True, fields + ) + + +def get_asset_ids_with_subsets(project_name, asset_ids=None): + """Find out which assets have existing subsets. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_ids (Iterable[Union[str, ObjectId]]): Look only for entered + asset ids. + + Returns: + Iterable[ObjectId]: Asset ids that have existing subsets. + """ + + subset_query = { + "type": "subset" + } + if asset_ids is not None: + asset_ids = convert_ids(asset_ids) + if not asset_ids: + return [] + subset_query["parent"] = {"$in": asset_ids} + + conn = get_project_connection(project_name) + result = conn.aggregate([ + { + "$match": subset_query + }, + { + "$group": { + "_id": "$parent", + "count": {"$sum": 1} + } + } + ]) + asset_ids_with_subsets = [] + for item in result: + asset_id = item["_id"] + count = item["count"] + if count > 0: + asset_ids_with_subsets.append(asset_id) + return asset_ids_with_subsets + + +def get_subset_by_id(project_name, subset_id, fields=None): + """Single subset entity data by it's id. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_id (Union[str, ObjectId]): Id of subset which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If subset with specified filters was not found. + Dict: Subset document which can be reduced to specified 'fields'. + """ + + subset_id = convert_id(subset_id) + if not subset_id: + return None + + query_filters = {"type": "subset", "_id": subset_id} + conn = get_project_connection(project_name) + return conn.find_one(query_filters, _prepare_fields(fields)) + + +def get_subset_by_name(project_name, subset_name, asset_id, fields=None): + """Single subset entity data by it's name and it's version id. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_name (str): Name of subset. + asset_id (Union[str, ObjectId]): Id of parent asset. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Union[None, Dict[str, Any]]: None if subset with specified filters was + not found or dict subset document which can be reduced to + specified 'fields'. 
+ + """ + if not subset_name: + return None + + asset_id = convert_id(asset_id) + if not asset_id: + return None + + query_filters = { + "type": "subset", + "name": subset_name, + "parent": asset_id + } + conn = get_project_connection(project_name) + return conn.find_one(query_filters, _prepare_fields(fields)) + + +def get_subsets( + project_name, + subset_ids=None, + subset_names=None, + asset_ids=None, + names_by_asset_ids=None, + archived=False, + fields=None +): + """Subset entities data from one project filtered by entered filters. + + Filters are additive (all conditions must pass to return subset). + + Args: + project_name (str): Name of project where to look for queried entities. + subset_ids (Iterable[Union[str, ObjectId]]): Subset ids that should be + queried. Filter ignored if 'None' is passed. + subset_names (Iterable[str]): Subset names that should be queried. + Filter ignored if 'None' is passed. + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids under which + should look for the subsets. Filter ignored if 'None' is passed. + names_by_asset_ids (dict[ObjectId, List[str]]): Complex filtering + using asset ids and list of subset names under the asset. + archived (bool): Look for archived subsets too. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Iterable cursor yielding all matching subsets. + """ + + subset_types = ["subset"] + if archived: + subset_types.append("archived_subset") + + if len(subset_types) == 1: + query_filter = {"type": subset_types[0]} + else: + query_filter = {"type": {"$in": subset_types}} + + if asset_ids is not None: + asset_ids = convert_ids(asset_ids) + if not asset_ids: + return [] + query_filter["parent"] = {"$in": asset_ids} + + if subset_ids is not None: + subset_ids = convert_ids(subset_ids) + if not subset_ids: + return [] + query_filter["_id"] = {"$in": subset_ids} + + if subset_names is not None: + if not subset_names: + return [] + query_filter["name"] = {"$in": list(subset_names)} + + if names_by_asset_ids is not None: + or_query = [] + for asset_id, names in names_by_asset_ids.items(): + if asset_id and names: + or_query.append({ + "parent": convert_id(asset_id), + "name": {"$in": list(names)} + }) + if not or_query: + return [] + query_filter["$or"] = or_query + + conn = get_project_connection(project_name) + return conn.find(query_filter, _prepare_fields(fields)) + + +def get_subset_families(project_name, subset_ids=None): + """Set of main families of subsets. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_ids (Iterable[Union[str, ObjectId]]): Subset ids that should + be queried. All subsets from project are used if 'None' is passed. + + Returns: + set[str]: Main families of matching subsets. + """ + + subset_filter = { + "type": "subset" + } + if subset_ids is not None: + if not subset_ids: + return set() + subset_filter["_id"] = {"$in": list(subset_ids)} + + conn = get_project_connection(project_name) + result = list(conn.aggregate([ + {"$match": subset_filter}, + {"$project": { + "family": {"$arrayElemAt": ["$data.families", 0]} + }}, + {"$group": { + "_id": "family_group", + "families": {"$addToSet": "$family"} + }} + ])) + if result: + return set(result[0]["families"]) + return set() + + +def get_version_by_id(project_name, version_id, fields=None): + """Single version entity data by it's id. + + Args: + project_name (str): Name of project where to look for queried entities. 
+ version_id (Union[str, ObjectId]): Id of version which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If version with specified filters was not found. + Dict: Version document which can be reduced to specified 'fields'. + """ + + version_id = convert_id(version_id) + if not version_id: + return None + + query_filter = { + "type": {"$in": ["version", "hero_version"]}, + "_id": version_id + } + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def get_version_by_name(project_name, version, subset_id, fields=None): + """Single version entity data by it's name and subset id. + + Args: + project_name (str): Name of project where to look for queried entities. + version (int): name of version entity (it's version). + subset_id (Union[str, ObjectId]): Id of version which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If version with specified filters was not found. + Dict: Version document which can be reduced to specified 'fields'. + """ + + subset_id = convert_id(subset_id) + if not subset_id: + return None + + conn = get_project_connection(project_name) + query_filter = { + "type": "version", + "parent": subset_id, + "name": version + } + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def version_is_latest(project_name, version_id): + """Is version the latest from it's subset. + + Note: + Hero versions are considered as latest. + + Todo: + Maybe raise exception when version was not found? + + Args: + project_name (str):Name of project where to look for queried entities. + version_id (Union[str, ObjectId]): Version id which is checked. + + Returns: + bool: True if is latest version from subset else False. + """ + + version_id = convert_id(version_id) + if not version_id: + return False + version_doc = get_version_by_id( + project_name, version_id, fields=["_id", "type", "parent"] + ) + # What to do when version is not found? 
+ if not version_doc: + return False + + if version_doc["type"] == "hero_version": + return True + + last_version = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) + return last_version["_id"] == version_id + + +def _get_versions( + project_name, + subset_ids=None, + version_ids=None, + versions=None, + standard=True, + hero=False, + fields=None +): + version_types = [] + if standard: + version_types.append("version") + + if hero: + version_types.append("hero_version") + + if not version_types: + return [] + elif len(version_types) == 1: + query_filter = {"type": version_types[0]} + else: + query_filter = {"type": {"$in": version_types}} + + if subset_ids is not None: + subset_ids = convert_ids(subset_ids) + if not subset_ids: + return [] + query_filter["parent"] = {"$in": subset_ids} + + if version_ids is not None: + version_ids = convert_ids(version_ids) + if not version_ids: + return [] + query_filter["_id"] = {"$in": version_ids} + + if versions is not None: + versions = list(versions) + if not versions: + return [] + + if len(versions) == 1: + query_filter["name"] = versions[0] + else: + query_filter["name"] = {"$in": versions} + + conn = get_project_connection(project_name) + + return conn.find(query_filter, _prepare_fields(fields)) + + +def get_versions( + project_name, + version_ids=None, + subset_ids=None, + versions=None, + hero=False, + fields=None +): + """Version entities data from one project filtered by entered filters. + + Filters are additive (all conditions must pass to return subset). + + Args: + project_name (str): Name of project where to look for queried entities. + version_ids (Iterable[Union[str, ObjectId]]): Version ids that will + be queried. Filter ignored if 'None' is passed. + subset_ids (Iterable[str]): Subset ids that will be queried. + Filter ignored if 'None' is passed. + versions (Iterable[int]): Version names (as integers). + Filter ignored if 'None' is passed. + hero (bool): Look also for hero versions. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Iterable cursor yielding all matching versions. + """ + + return _get_versions( + project_name, + subset_ids, + version_ids, + versions, + standard=True, + hero=hero, + fields=fields + ) + + +def get_hero_version_by_subset_id(project_name, subset_id, fields=None): + """Hero version by subset id. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_id (Union[str, ObjectId]): Subset id under which + is hero version. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If hero version for passed subset id does not exists. + Dict: Hero version entity data. + """ + + subset_id = convert_id(subset_id) + if not subset_id: + return None + + versions = list(_get_versions( + project_name, + subset_ids=[subset_id], + standard=False, + hero=True, + fields=fields + )) + if versions: + return versions[0] + return None + + +def get_hero_version_by_id(project_name, version_id, fields=None): + """Hero version by it's id. + + Args: + project_name (str): Name of project where to look for queried entities. + version_id (Union[str, ObjectId]): Hero version id. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If hero version with passed id was not found. + Dict: Hero version entity data. 
+ """ + + version_id = convert_id(version_id) + if not version_id: + return None + + versions = list(_get_versions( + project_name, + version_ids=[version_id], + standard=False, + hero=True, + fields=fields + )) + if versions: + return versions[0] + return None + + +def get_hero_versions( + project_name, + subset_ids=None, + version_ids=None, + fields=None +): + """Hero version entities data from one project filtered by entered filters. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_ids (Iterable[Union[str, ObjectId]]): Subset ids for which + should look for hero versions. Filter ignored if 'None' is passed. + version_ids (Iterable[Union[str, ObjectId]]): Hero version ids. Filter + ignored if 'None' is passed. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor|list: Iterable yielding hero versions matching passed filters. + """ + + return _get_versions( + project_name, + subset_ids, + version_ids, + standard=False, + hero=True, + fields=fields + ) + + +def get_output_link_versions(project_name, version_id, fields=None): + """Versions where passed version was used as input. + + Question: + Not 100% sure about the usage of the function so the name and docstring + maybe does not match what it does? + + Args: + project_name (str): Name of project where to look for queried entities. + version_id (Union[str, ObjectId]): Version id which can be used + as input link for other versions. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Iterable: Iterable cursor yielding versions that are used as input + links for passed version. + """ + + version_id = convert_id(version_id) + if not version_id: + return [] + + conn = get_project_connection(project_name) + # Does make sense to look for hero versions? + query_filter = { + "type": "version", + "data.inputLinks.id": version_id + } + return conn.find(query_filter, _prepare_fields(fields)) + + +def get_last_versions(project_name, subset_ids, fields=None): + """Latest versions for entered subset_ids. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_ids (Iterable[Union[str, ObjectId]]): List of subset ids. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + dict[ObjectId, int]: Key is subset id and value is last version name. 
+ """ + + subset_ids = convert_ids(subset_ids) + if not subset_ids: + return {} + + if fields is not None: + fields = list(fields) + if not fields: + return {} + + # Avoid double query if only name and _id are requested + name_needed = False + limit_query = False + if fields: + fields_s = set(fields) + if "name" in fields_s: + name_needed = True + fields_s.remove("name") + + for field in ("_id", "parent"): + if field in fields_s: + fields_s.remove(field) + limit_query = len(fields_s) == 0 + + group_item = { + "_id": "$parent", + "_version_id": {"$last": "$_id"} + } + # Add name if name is needed (only for limit query) + if name_needed: + group_item["name"] = {"$last": "$name"} + + aggregation_pipeline = [ + # Find all versions of those subsets + {"$match": { + "type": "version", + "parent": {"$in": subset_ids} + }}, + # Sorting versions all together + {"$sort": {"name": 1}}, + # Group them by "parent", but only take the last + {"$group": group_item} + ] + + conn = get_project_connection(project_name) + aggregate_result = conn.aggregate(aggregation_pipeline) + if limit_query: + output = {} + for item in aggregate_result: + subset_id = item["_id"] + item_data = {"_id": item["_version_id"], "parent": subset_id} + if name_needed: + item_data["name"] = item["name"] + output[subset_id] = item_data + return output + + version_ids = [ + doc["_version_id"] + for doc in aggregate_result + ] + + fields = _prepare_fields(fields, ["parent"]) + + version_docs = get_versions( + project_name, version_ids=version_ids, fields=fields + ) + + return { + version_doc["parent"]: version_doc + for version_doc in version_docs + } + + +def get_last_version_by_subset_id(project_name, subset_id, fields=None): + """Last version for passed subset id. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_id (Union[str, ObjectId]): Id of version which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If version with specified filters was not found. + Dict: Version document which can be reduced to specified 'fields'. + """ + + subset_id = convert_id(subset_id) + if not subset_id: + return None + + last_versions = get_last_versions( + project_name, subset_ids=[subset_id], fields=fields + ) + return last_versions.get(subset_id) + + +def get_last_version_by_subset_name( + project_name, subset_name, asset_id=None, asset_name=None, fields=None +): + """Last version for passed subset name under asset id/name. + + It is required to pass 'asset_id' or 'asset_name'. Asset id is recommended + if is available. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_name (str): Name of subset. + asset_id (Union[str, ObjectId]): Asset id which is parent of passed + subset name. + asset_name (str): Asset name which is parent of passed subset name. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If version with specified filters was not found. + Dict: Version document which can be reduced to specified 'fields'. 
+ """ + + if not asset_id and not asset_name: + return None + + if not asset_id: + asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"]) + if not asset_doc: + return None + asset_id = asset_doc["_id"] + subset_doc = get_subset_by_name( + project_name, subset_name, asset_id, fields=["_id"] + ) + if not subset_doc: + return None + return get_last_version_by_subset_id( + project_name, subset_doc["_id"], fields=fields + ) + + +def get_representation_by_id(project_name, representation_id, fields=None): + """Representation entity data by it's id. + + Args: + project_name (str): Name of project where to look for queried entities. + representation_id (Union[str, ObjectId]): Representation id. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If representation with specified filters was not found. + Dict: Representation entity data which can be reduced + to specified 'fields'. + """ + + if not representation_id: + return None + + repre_types = ["representation", "archived_representation"] + query_filter = { + "type": {"$in": repre_types} + } + if representation_id is not None: + query_filter["_id"] = convert_id(representation_id) + + conn = get_project_connection(project_name) + + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def get_representation_by_name( + project_name, representation_name, version_id, fields=None +): + """Representation entity data by it's name and it's version id. + + Args: + project_name (str): Name of project where to look for queried entities. + representation_name (str): Representation name. + version_id (Union[str, ObjectId]): Id of parent version entity. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If representation with specified filters was not found. + Dict: Representation entity data which can be reduced + to specified 'fields'. 
+ """ + + version_id = convert_id(version_id) + if not version_id or not representation_name: + return None + repre_types = ["representation", "archived_representations"] + query_filter = { + "type": {"$in": repre_types}, + "name": representation_name, + "parent": version_id + } + + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def _flatten_dict(data): + flatten_queue = collections.deque() + flatten_queue.append(data) + output = {} + while flatten_queue: + item = flatten_queue.popleft() + for key, value in item.items(): + if not isinstance(value, dict): + output[key] = value + continue + + tmp = {} + for subkey, subvalue in value.items(): + new_key = "{}.{}".format(key, subkey) + tmp[new_key] = subvalue + flatten_queue.append(tmp) + return output + + +def _regex_filters(filters): + output = [] + for key, value in filters.items(): + regexes = [] + a_values = [] + if isinstance(value, PatternType): + regexes.append(value) + elif isinstance(value, (list, tuple, set)): + for item in value: + if isinstance(item, PatternType): + regexes.append(item) + else: + a_values.append(item) + else: + a_values.append(value) + + key_filters = [] + if len(a_values) == 1: + key_filters.append({key: a_values[0]}) + elif a_values: + key_filters.append({key: {"$in": a_values}}) + + for regex in regexes: + key_filters.append({key: {"$regex": regex}}) + + if len(key_filters) == 1: + output.append(key_filters[0]) + else: + output.append({"$or": key_filters}) + + return output + + +def _get_representations( + project_name, + representation_ids, + representation_names, + version_ids, + context_filters, + names_by_version_ids, + standard, + archived, + fields +): + default_output = [] + repre_types = [] + if standard: + repre_types.append("representation") + if archived: + repre_types.append("archived_representation") + + if not repre_types: + return default_output + + if len(repre_types) == 1: + query_filter = {"type": repre_types[0]} + else: + query_filter = {"type": {"$in": repre_types}} + + if representation_ids is not None: + representation_ids = convert_ids(representation_ids) + if not representation_ids: + return default_output + query_filter["_id"] = {"$in": representation_ids} + + if representation_names is not None: + if not representation_names: + return default_output + query_filter["name"] = {"$in": list(representation_names)} + + if version_ids is not None: + version_ids = convert_ids(version_ids) + if not version_ids: + return default_output + query_filter["parent"] = {"$in": version_ids} + + or_queries = [] + if names_by_version_ids is not None: + or_query = [] + for version_id, names in names_by_version_ids.items(): + if version_id and names: + or_query.append({ + "parent": convert_id(version_id), + "name": {"$in": list(names)} + }) + if not or_query: + return default_output + or_queries.append(or_query) + + if context_filters is not None: + if not context_filters: + return [] + _flatten_filters = _flatten_dict(context_filters) + flatten_filters = {} + for key, value in _flatten_filters.items(): + if not key.startswith("context"): + key = "context.{}".format(key) + flatten_filters[key] = value + + for item in _regex_filters(flatten_filters): + for key, value in item.items(): + if key != "$or": + query_filter[key] = value + + elif value: + or_queries.append(value) + + if len(or_queries) == 1: + query_filter["$or"] = or_queries[0] + elif or_queries: + and_query = [] + for or_query in or_queries: + if isinstance(or_query, list): + or_query 
= {"$or": or_query} + and_query.append(or_query) + query_filter["$and"] = and_query + + conn = get_project_connection(project_name) + + return conn.find(query_filter, _prepare_fields(fields)) + + +def get_representations( + project_name, + representation_ids=None, + representation_names=None, + version_ids=None, + context_filters=None, + names_by_version_ids=None, + archived=False, + standard=True, + fields=None +): + """Representaion entities data from one project filtered by filters. + + Filters are additive (all conditions must pass to return subset). + + Args: + project_name (str): Name of project where to look for queried entities. + representation_ids (Iterable[Union[str, ObjectId]]): Representation ids + used as filter. Filter ignored if 'None' is passed. + representation_names (Iterable[str]): Representations names used + as filter. Filter ignored if 'None' is passed. + version_ids (Iterable[str]): Subset ids used as parent filter. Filter + ignored if 'None' is passed. + context_filters (Dict[str, List[str, PatternType]]): Filter by + representation context fields. + names_by_version_ids (dict[ObjectId, list[str]]): Complex filtering + using version ids and list of names under the version. + archived (bool): Output will also contain archived representations. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Iterable cursor yielding all matching representations. + """ + + return _get_representations( + project_name=project_name, + representation_ids=representation_ids, + representation_names=representation_names, + version_ids=version_ids, + context_filters=context_filters, + names_by_version_ids=names_by_version_ids, + standard=True, + archived=archived, + fields=fields + ) + + +def get_archived_representations( + project_name, + representation_ids=None, + representation_names=None, + version_ids=None, + context_filters=None, + names_by_version_ids=None, + fields=None +): + """Archived representaion entities data from project with applied filters. + + Filters are additive (all conditions must pass to return subset). + + Args: + project_name (str): Name of project where to look for queried entities. + representation_ids (Iterable[Union[str, ObjectId]]): Representation ids + used as filter. Filter ignored if 'None' is passed. + representation_names (Iterable[str]): Representations names used + as filter. Filter ignored if 'None' is passed. + version_ids (Iterable[str]): Subset ids used as parent filter. Filter + ignored if 'None' is passed. + context_filters (Dict[str, List[str, PatternType]]): Filter by + representation context fields. + names_by_version_ids (dict[ObjectId, List[str]]): Complex filtering + using version ids and list of names under the version. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Iterable cursor yielding all matching representations. + """ + + return _get_representations( + project_name=project_name, + representation_ids=representation_ids, + representation_names=representation_names, + version_ids=version_ids, + context_filters=context_filters, + names_by_version_ids=names_by_version_ids, + standard=False, + archived=True, + fields=fields + ) + + +def get_representations_parents(project_name, representations): + """Prepare parents of representation entities. + + Each item of returned dictionary contains version, subset, asset + and project in that order. 
+ + Args: + project_name (str): Name of project where to look for queried entities. + representations (List[dict]): Representation entities with at least + '_id' and 'parent' keys. + + Returns: + dict[ObjectId, tuple]: Parents by representation id. + """ + + repre_docs_by_version_id = collections.defaultdict(list) + version_docs_by_version_id = {} + version_docs_by_subset_id = collections.defaultdict(list) + subset_docs_by_subset_id = {} + subset_docs_by_asset_id = collections.defaultdict(list) + output = {} + for repre_doc in representations: + repre_id = repre_doc["_id"] + version_id = repre_doc["parent"] + output[repre_id] = (None, None, None, None) + repre_docs_by_version_id[version_id].append(repre_doc) + + version_docs = get_versions( + project_name, + version_ids=repre_docs_by_version_id.keys(), + hero=True + ) + for version_doc in version_docs: + version_id = version_doc["_id"] + subset_id = version_doc["parent"] + version_docs_by_version_id[version_id] = version_doc + version_docs_by_subset_id[subset_id].append(version_doc) + + subset_docs = get_subsets( + project_name, subset_ids=version_docs_by_subset_id.keys() + ) + for subset_doc in subset_docs: + subset_id = subset_doc["_id"] + asset_id = subset_doc["parent"] + subset_docs_by_subset_id[subset_id] = subset_doc + subset_docs_by_asset_id[asset_id].append(subset_doc) + + asset_docs = get_assets( + project_name, asset_ids=subset_docs_by_asset_id.keys() + ) + asset_docs_by_id = { + asset_doc["_id"]: asset_doc + for asset_doc in asset_docs + } + + project_doc = get_project(project_name) + + for version_id, repre_docs in repre_docs_by_version_id.items(): + asset_doc = None + subset_doc = None + version_doc = version_docs_by_version_id.get(version_id) + if version_doc: + subset_id = version_doc["parent"] + subset_doc = subset_docs_by_subset_id.get(subset_id) + if subset_doc: + asset_id = subset_doc["parent"] + asset_doc = asset_docs_by_id.get(asset_id) + + for repre_doc in repre_docs: + repre_id = repre_doc["_id"] + output[repre_id] = ( + version_doc, subset_doc, asset_doc, project_doc + ) + return output + + +def get_representation_parents(project_name, representation): + """Prepare parents of representation entity. + + Each item of returned dictionary contains version, subset, asset + and project in that order. + + Args: + project_name (str): Name of project where to look for queried entities. + representation (dict): Representation entities with at least + '_id' and 'parent' keys. + + Returns: + dict[ObjectId, tuple]: Parents by representation id. + """ + + if not representation: + return None + + repre_id = representation["_id"] + parents_by_repre_id = get_representations_parents( + project_name, [representation] + ) + return parents_by_repre_id[repre_id] + + +def get_thumbnail_id_from_source(project_name, src_type, src_id): + """Receive thumbnail id from source entity. + + Args: + project_name (str): Name of project where to look for queried entities. + src_type (str): Type of source entity ('asset', 'version'). + src_id (Union[str, ObjectId]): Id of source entity. + + Returns: + ObjectId: Thumbnail id assigned to entity. + None: If Source entity does not have any thumbnail id assigned. 
+    """
+
+    if not src_type or not src_id:
+        return None
+
+    query_filter = {"_id": convert_id(src_id)}
+
+    conn = get_project_connection(project_name)
+    src_doc = conn.find_one(query_filter, {"data.thumbnail_id"})
+    if src_doc:
+        return src_doc.get("data", {}).get("thumbnail_id")
+    return None
+
+
+def get_thumbnails(project_name, thumbnail_ids, fields=None):
+    """Receive thumbnails entity data.
+
+    Thumbnail entity can be used to receive binary content of thumbnail based
+    on its content and ThumbnailResolvers.
+
+    Args:
+        project_name (str): Name of project where to look for queried entities.
+        thumbnail_ids (Iterable[Union[str, ObjectId]]): Ids of thumbnail
+            entities.
+        fields (Iterable[str]): Fields that should be returned. All fields are
+            returned if 'None' is passed.
+
+    Returns:
+        cursor: Cursor of queried documents.
+    """
+
+    if thumbnail_ids:
+        thumbnail_ids = convert_ids(thumbnail_ids)
+
+    if not thumbnail_ids:
+        return []
+    query_filter = {
+        "type": "thumbnail",
+        "_id": {"$in": thumbnail_ids}
+    }
+    conn = get_project_connection(project_name)
+    return conn.find(query_filter, _prepare_fields(fields))
+
+
+def get_thumbnail(project_name, thumbnail_id, fields=None):
+    """Receive thumbnail entity data.
+
+    Args:
+        project_name (str): Name of project where to look for queried entities.
+        thumbnail_id (Union[str, ObjectId]): Id of thumbnail entity.
+        fields (Iterable[str]): Fields that should be returned. All fields are
+            returned if 'None' is passed.
+
+    Returns:
+        None: If thumbnail with specified id was not found.
+        Dict: Thumbnail entity data which can be reduced to specified 'fields'.
+    """
+
+    if not thumbnail_id:
+        return None
+    query_filter = {"type": "thumbnail", "_id": convert_id(thumbnail_id)}
+    conn = get_project_connection(project_name)
+    return conn.find_one(query_filter, _prepare_fields(fields))
+
+
+def get_workfile_info(
+    project_name, asset_id, task_name, filename, fields=None
+):
+    """Document with workfile information.
+
+    Warning:
+        Query is based on filename and context which does not mean it will
+        always find the right and expected result. The information has
+        limited usage and is not recommended as a source of information
+        about the workfile.
+
+    Args:
+        project_name (str): Name of project where to look for queried entities.
+        asset_id (Union[str, ObjectId]): Id of asset entity.
+        task_name (str): Task name on asset.
+        fields (Iterable[str]): Fields that should be returned. All fields are
+            returned if 'None' is passed.
+ """ + + if not asset_id or not task_name or not filename: + return None + + query_filter = { + "type": "workfile", + "parent": convert_id(asset_id), + "task_name": task_name, + "filename": filename + } + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +""" +## Custom data storage: +- Settings - OP settings overrides and local settings +- Logging - logs from Logger +- Webpublisher - jobs +- Ftrack - events +- Maya - Shaders + - openpype/hosts/maya/api/shader_definition_editor.py + - openpype/hosts/maya/plugins/publish/validate_model_name.py + +## Global publish plugins +- openpype/plugins/publish/extract_hierarchy_avalon.py + Create: + - asset + Update: + - asset + +## Lib +- openpype/lib/avalon_context.py + Update: + - workfile data +- openpype/lib/project_backpack.py + Update: + - project +""" diff --git a/openpype/client/entity_links.py b/openpype/client/entity_links.py new file mode 100644 index 0000000000..e42ac58aff --- /dev/null +++ b/openpype/client/entity_links.py @@ -0,0 +1,241 @@ +from .mongo import get_project_connection +from .entities import ( + get_assets, + get_asset_by_id, + get_version_by_id, + get_representation_by_id, + convert_id, +) + + +def get_linked_asset_ids(project_name, asset_doc=None, asset_id=None): + """Extract linked asset ids from asset document. + + One of asset document or asset id must be passed. + + Note: + Asset links now works only from asset to assets. + + Args: + asset_doc (dict): Asset document from DB. + + Returns: + List[Union[ObjectId, str]]: Asset ids of input links. + """ + + output = [] + if not asset_doc and not asset_id: + return output + + if not asset_doc: + asset_doc = get_asset_by_id( + project_name, asset_id, fields=["data.inputLinks"] + ) + + input_links = asset_doc["data"].get("inputLinks") + if not input_links: + return output + + for item in input_links: + # Backwards compatibility for "_id" key which was replaced with + # "id" + if "_id" in item: + link_id = item["_id"] + else: + link_id = item["id"] + output.append(link_id) + return output + + +def get_linked_assets( + project_name, asset_doc=None, asset_id=None, fields=None +): + """Return linked assets based on passed asset document. + + One of asset document or asset id must be passed. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_doc (Dict[str, Any]): Asset document from database. + asset_id (Union[ObjectId, str]): Asset id. Can be used instead of + asset document. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + List[Dict[str, Any]]: Asset documents of input links for passed + asset doc. + """ + + if not asset_doc: + if not asset_id: + return [] + asset_doc = get_asset_by_id( + project_name, + asset_id, + fields=["data.inputLinks"] + ) + if not asset_doc: + return [] + + link_ids = get_linked_asset_ids(project_name, asset_doc=asset_doc) + if not link_ids: + return [] + + return list(get_assets(project_name, asset_ids=link_ids, fields=fields)) + + +def get_linked_representation_id( + project_name, repre_doc=None, repre_id=None, link_type=None, max_depth=None +): + """Returns list of linked ids of particular type (if provided). + + One of representation document or representation id must be passed. + Note: + Representation links now works only from representation through version + back to representations. + + Args: + project_name (str): Name of project where look for links. 
+ repre_doc (Dict[str, Any]): Representation document. + repre_id (Union[ObjectId, str]): Representation id. + link_type (str): Type of link (e.g. 'reference', ...). + max_depth (int): Limit recursion level. Default: 0 + + Returns: + List[ObjectId] Linked representation ids. + """ + + if repre_doc: + repre_id = repre_doc["_id"] + + if repre_id: + repre_id = convert_id(repre_id) + + if not repre_id and not repre_doc: + return [] + + version_id = None + if repre_doc: + version_id = repre_doc.get("parent") + + if not version_id: + repre_doc = get_representation_by_id( + project_name, repre_id, fields=["parent"] + ) + version_id = repre_doc["parent"] + + if not version_id: + return [] + + version_doc = get_version_by_id( + project_name, version_id, fields=["type", "version_id"] + ) + if version_doc["type"] == "hero_version": + version_id = version_doc["version_id"] + + if max_depth is None: + max_depth = 0 + + match = { + "_id": version_id, + # Links are not stored to hero versions at this moment so filter + # is limited to just versions + "type": "version" + } + + graph_lookup = { + "from": project_name, + "startWith": "$data.inputLinks.id", + "connectFromField": "data.inputLinks.id", + "connectToField": "_id", + "as": "outputs_recursive", + "depthField": "depth" + } + if max_depth != 0: + # We offset by -1 since 0 basically means no recursion + # but the recursion only happens after the initial lookup + # for outputs. + graph_lookup["maxDepth"] = max_depth - 1 + + query_pipeline = [ + # Match + {"$match": match}, + # Recursive graph lookup for inputs + {"$graphLookup": graph_lookup} + ] + + conn = get_project_connection(project_name) + result = conn.aggregate(query_pipeline) + referenced_version_ids = _process_referenced_pipeline_result( + result, link_type + ) + if not referenced_version_ids: + return [] + + ref_ids = conn.distinct( + "_id", + filter={ + "parent": {"$in": list(referenced_version_ids)}, + "type": "representation" + } + ) + + return list(ref_ids) + + +def _process_referenced_pipeline_result(result, link_type): + """Filters result from pipeline for particular link_type. + + Pipeline cannot use link_type directly in a query. 
+ + Returns: + (list) + """ + + referenced_version_ids = set() + correctly_linked_ids = set() + for item in result: + input_links = item.get("data", {}).get("inputLinks") + if not input_links: + continue + + _filter_input_links( + input_links, + link_type, + correctly_linked_ids + ) + + # outputs_recursive in random order, sort by depth + outputs_recursive = item.get("outputs_recursive") + if not outputs_recursive: + continue + + for output in sorted(outputs_recursive, key=lambda o: o["depth"]): + output_links = output.get("data", {}).get("inputLinks") + if not output_links: + continue + + # Leaf + if output["_id"] not in correctly_linked_ids: + continue + + _filter_input_links( + output_links, + link_type, + correctly_linked_ids + ) + + referenced_version_ids.add(output["_id"]) + + return referenced_version_ids + + +def _filter_input_links(input_links, link_type, correctly_linked_ids): + for input_link in input_links: + if link_type and input_link["type"] != link_type: + continue + + link_id = input_link.get("id") or input_link.get("_id") + if link_id is not None: + correctly_linked_ids.add(link_id) diff --git a/openpype/client/mongo.py b/openpype/client/mongo.py new file mode 100644 index 0000000000..72acbc5476 --- /dev/null +++ b/openpype/client/mongo.py @@ -0,0 +1,235 @@ +import os +import sys +import time +import logging +import pymongo +import certifi + +if sys.version_info[0] == 2: + from urlparse import urlparse, parse_qs +else: + from urllib.parse import urlparse, parse_qs + + +class MongoEnvNotSet(Exception): + pass + + +def _decompose_url(url): + """Decompose mongo url to basic components. + + Used for creation of MongoHandler which expect mongo url components as + separated kwargs. Components are at the end not used as we're setting + connection directly this is just a dumb components for MongoHandler + validation pass. + """ + + # Use first url from passed url + # - this is because it is possible to pass multiple urls for multiple + # replica sets which would crash on urlparse otherwise + # - please don't use comma in username of password + url = url.split(",")[0] + components = { + "scheme": None, + "host": None, + "port": None, + "username": None, + "password": None, + "auth_db": None + } + + result = urlparse(url) + if result.scheme is None: + _url = "mongodb://{}".format(url) + result = urlparse(_url) + + components["scheme"] = result.scheme + components["host"] = result.hostname + try: + components["port"] = result.port + except ValueError: + raise RuntimeError("invalid port specified") + components["username"] = result.username + components["password"] = result.password + + try: + components["auth_db"] = parse_qs(result.query)['authSource'][0] + except KeyError: + # no auth db provided, mongo will use the one we are connecting to + pass + + return components + + +def get_default_components(): + mongo_url = os.environ.get("OPENPYPE_MONGO") + if mongo_url is None: + raise MongoEnvNotSet( + "URL for Mongo logging connection is not set." + ) + return _decompose_url(mongo_url) + + +def should_add_certificate_path_to_mongo_url(mongo_url): + """Check if should add ca certificate to mongo url. + + Since 30.9.2021 cloud mongo requires newer certificates that are not + available on most of workstation. This adds path to certifi certificate + which is valid for it. To add the certificate path url must have scheme + 'mongodb+srv' or has 'ssl=true' or 'tls=true' in url query. 
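+
+    Example (an illustrative sketch; the URLs below are hypothetical)::
+
+        >>> should_add_certificate_path_to_mongo_url(
+        ...     "mongodb+srv://user:pass@cluster.example.net")
+        True
+        >>> should_add_certificate_path_to_mongo_url(
+        ...     "mongodb://server:27017/?ssl=true")
+        True
+        >>> should_add_certificate_path_to_mongo_url(
+        ...     "mongodb://server:27017/?ssl=true&tlsCAFile=/certs/ca.pem")
+        False
+        >>> should_add_certificate_path_to_mongo_url("mongodb://server:27017")
+        False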
+ """ + + parsed = urlparse(mongo_url) + query = parse_qs(parsed.query) + lowered_query_keys = set(key.lower() for key in query.keys()) + add_certificate = False + # Check if url 'ssl' or 'tls' are set to 'true' + for key in ("ssl", "tls"): + if key in query and "true" in query["ssl"]: + add_certificate = True + break + + # Check if url contains 'mongodb+srv' + if not add_certificate and parsed.scheme == "mongodb+srv": + add_certificate = True + + # Check if url does already contain certificate path + if add_certificate and "tlscafile" in lowered_query_keys: + add_certificate = False + + return add_certificate + + +def validate_mongo_connection(mongo_uri): + """Check if provided mongodb URL is valid. + + Args: + mongo_uri (str): URL to validate. + + Raises: + ValueError: When port in mongo uri is not valid. + pymongo.errors.InvalidURI: If passed mongo is invalid. + pymongo.errors.ServerSelectionTimeoutError: If connection timeout + passed so probably couldn't connect to mongo server. + + """ + + client = OpenPypeMongoConnection.create_connection( + mongo_uri, retry_attempts=1 + ) + client.close() + + +class OpenPypeMongoConnection: + """Singleton MongoDB connection. + + Keeps MongoDB connections by url. + """ + + mongo_clients = {} + log = logging.getLogger("OpenPypeMongoConnection") + + @staticmethod + def get_default_mongo_url(): + return os.environ["OPENPYPE_MONGO"] + + @classmethod + def get_mongo_client(cls, mongo_url=None): + if mongo_url is None: + mongo_url = cls.get_default_mongo_url() + + connection = cls.mongo_clients.get(mongo_url) + if connection: + # Naive validation of existing connection + try: + connection.server_info() + with connection.start_session(): + pass + except Exception: + connection = None + + if not connection: + cls.log.debug("Creating mongo connection to {}".format(mongo_url)) + connection = cls.create_connection(mongo_url) + cls.mongo_clients[mongo_url] = connection + + return connection + + @classmethod + def create_connection(cls, mongo_url, timeout=None, retry_attempts=None): + parsed = urlparse(mongo_url) + # Force validation of scheme + if parsed.scheme not in ["mongodb", "mongodb+srv"]: + raise pymongo.errors.InvalidURI(( + "Invalid URI scheme:" + " URI must begin with 'mongodb://' or 'mongodb+srv://'" + )) + + if timeout is None: + timeout = int(os.environ.get("AVALON_TIMEOUT") or 1000) + + kwargs = { + "serverSelectionTimeoutMS": timeout + } + if should_add_certificate_path_to_mongo_url(mongo_url): + kwargs["ssl_ca_certs"] = certifi.where() + + mongo_client = pymongo.MongoClient(mongo_url, **kwargs) + + if retry_attempts is None: + retry_attempts = 3 + + elif not retry_attempts: + retry_attempts = 1 + + last_exc = None + valid = False + t1 = time.time() + for attempt in range(1, retry_attempts + 1): + try: + mongo_client.server_info() + with mongo_client.start_session(): + pass + valid = True + break + + except Exception as exc: + last_exc = exc + if attempt < retry_attempts: + cls.log.warning( + "Attempt {} failed. Retrying... ".format(attempt) + ) + time.sleep(1) + + if not valid: + raise last_exc + + cls.log.info("Connected to {}, delay {:.3f}s".format( + mongo_url, time.time() - t1 + )) + return mongo_client + + +def get_project_database(): + db_name = os.environ.get("AVALON_DB") or "avalon" + return OpenPypeMongoConnection.get_mongo_client()[db_name] + + +def get_project_connection(project_name): + """Direct access to mongo collection. + + We're trying to avoid using direct access to mongo. 
This should be used
+    only for Create, Update and Remove operations until api calls for them
+    are implemented.
+
+    Args:
+        project_name (str): Project name for which collection should be
+            returned.
+
+    Returns:
+        pymongo.Collection: Collection related to passed project.
+    """
+
+    if not project_name:
+        raise ValueError("Invalid project name {}".format(str(project_name)))
+    return get_project_database()[project_name]
diff --git a/openpype/client/notes.md b/openpype/client/notes.md
new file mode 100644
index 0000000000..a261b86eca
--- /dev/null
+++ b/openpype/client/notes.md
@@ -0,0 +1,39 @@
+# Client functionality
+## Reason
+Preparation for the OpenPype v4 server. The goal is to remove direct mongo calls from the code and prepare it for a different source of data, thinking about database calls less as mongo calls and more universally. For that purpose a simple wrapper around database calls was implemented so pymongo specific code is not used directly.
+
+The current goal is not to make a universal database model which could be easily replaced with any different source of data, but to get as close to that as possible. The current implementation of OpenPype is too tightly connected to pymongo and its abilities, so we're trying to get closer with long term changes that can be used even in the current state.
+
+## Queries
+Query functions don't use the full potential of mongo queries, like very specific queries based on subdictionaries or unknown structures. We try to avoid these calls as much as possible because they probably won't be available in the future. If it's really necessary a new function can be added, but only if it's reasonable for the overall logic. All query functions were moved to `~/client/entities.py`. Each function has arguments with available filters and a possible reduction of returned keys for each entity.
+
+## Changes
+Changes are a little bit more complicated. Mongo has many options for how an update can happen, which had to be reduced, and at this stage it would be complicated to validate values which are created or updated, so there is almost no automation at this point. Changes can be made using the operations available in `~/client/operations.py`. Each operation requires a project name and an entity type, and may require operation specific data.
+
+### Create
+Create operations expect already prepared document data. For that purpose there are functions creating skeletal structures of documents (they do not fill all required data); except for `_id` all data should be right. Existence of the entity is not validated, so if the same creation operation is sent n times it will create the entity n times, which can cause issues.
+
+### Update
+Update operations require an entity id and the keys that should be changed; the update dictionary must have the form {"key": value}. If a value should be set in a nested dictionary, the key must contain all subkeys joined with a dot `.` (e.g. `{"data": {"fps": 25}}` -> `{"data.fps": 25}`). To simplify creation of update dictionaries there are prepared functions which do that for you; their names follow the template `prepare_<entity type>_update_data` - they work by comparing the previous document with the new document. If a function for a requested entity type is missing, it is because we didn't need it yet and it requires implementation.
+
+### Delete
+Delete operations need an entity id. The entity will be deleted from mongo. A short usage example of the operations follows.
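+
+### Example
+A minimal sketch of how the operations above can be chained together. It is only illustrative - the project name, ids and values are hypothetical and nothing is validated before `commit` is called.
+
+```python
+from bson.objectid import ObjectId
+
+from openpype.client.operations import OperationsSession, new_asset_document
+
+session = OperationsSession()
+
+# Create: caller prepares the skeleton document and registers it.
+# In practice 'project_id' and 'parent_id' come from existing documents.
+project_id = ObjectId()
+parent_id = ObjectId()
+asset_doc = new_asset_document("chair", project_id, parent_id, ["assets"])
+session.create_entity("my_project", "asset", asset_doc)
+
+# Update: nested keys are flattened with '.' (e.g. {"data": {"fps": 25}}).
+session.update_entity(
+    "my_project", "asset", asset_doc["_id"], {"data.fps": 25}
+)
+
+# Delete: only the entity id is needed.
+session.delete_entity("my_project", "asset", asset_doc["_id"])
+
+# Nothing is sent to the database until the session is committed.
+session.commit()
+```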
+
+
+## What (probably) won't be replaced
+Some parts of the code still use direct mongo calls. In most cases these are very specific calls that are module specific or whose usage will completely change in the future.
+- Mongo calls that are not project specific (out of the `avalon` collection) will be removed or will have to use a different mechanism for how the data is stored. At this moment this is related to OpenPype settings and logs, ftrack server events and some other data.
+- Sync server queries. They're complex and very specific to the sync server module. Their replacement will require specific calls to the OpenPype server in v4, thus their abstraction with a wrapper is irrelevant and would complicate production in v3.
+- Project managers (ftrack, kitsu, shotgrid, embedded Project Manager, etc.). Project managers create, update or remove assets in v3, but in v4 they will create folders with a different structure. Wrapping creation of assets would not help to prepare for v4 because of the new data structures. The same can be said about the editorial Extract Hierarchy Avalon plugin which creates the project structure.
+- Code parts that are marked as deprecated in v3 or will be deprecated in v4.
+    - integrate asset legacy publish plugin - already legacy, kept for safety
+    - integrate thumbnail - thumbnails will be stored in a different way in v4
+    - input links - links will be stored in a different way and will have a different mechanism of linking. In v3 links are limited to the same entity type: "asset <-> asset" or "representation <-> representation".
+
+## Known missing replacements
+- change subset group in loader tool
+- integrate subset group
+- query input links in openpype lib
+- create project in openpype lib
+- save/create workfile doc in openpype lib
+- integrate hero version
diff --git a/openpype/client/operations.py b/openpype/client/operations.py
new file mode 100644
index 0000000000..fd639c34a7
--- /dev/null
+++ b/openpype/client/operations.py
@@ -0,0 +1,794 @@
+import re
+import uuid
+import copy
+import collections
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+import six
+from bson.objectid import ObjectId
+from pymongo import DeleteOne, InsertOne, UpdateOne
+
+from .mongo import get_project_connection
+from .entities import get_project
+
+REMOVED_VALUE = object()
+
+PROJECT_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_"
+PROJECT_NAME_REGEX = re.compile(
+    "^[{}]+$".format(PROJECT_NAME_ALLOWED_SYMBOLS)
+)
+
+CURRENT_PROJECT_SCHEMA = "openpype:project-3.0"
+CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0"
+CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0"
+CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0"
+CURRENT_VERSION_SCHEMA = "openpype:version-3.0"
+CURRENT_HERO_VERSION_SCHEMA = "openpype:hero_version-1.0"
+CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0"
+CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0"
+CURRENT_THUMBNAIL_SCHEMA = "openpype:thumbnail-1.0"
+
+
+def _create_or_convert_to_mongo_id(mongo_id):
+    if mongo_id is None:
+        return ObjectId()
+    return ObjectId(mongo_id)
+
+
+def new_project_document(
+    project_name, project_code, config, data=None, entity_id=None
+):
+    """Create skeleton data of project document.
+
+    Args:
+        project_name (str): Name of project. Used as identifier of a project.
+        project_code (str): Shorter version of project name without spaces and
+            special characters (in most cases). Should also be considered
+            a unique name across projects.
+        config (Dict[str, Any]): Project config consisting of roots, templates,
+            applications and other project Anatomy related data.
+        data (Dict[str, Any]): Project data with information about its
+            attributes (e.g. 'fps' etc.) or integration specific keys.
+        entity_id (Union[str, ObjectId]): Predefined id of document.
New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of project document. + """ + + if data is None: + data = {} + + data["code"] = project_code + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "name": project_name, + "type": CURRENT_PROJECT_SCHEMA, + "entity_data": data, + "config": config + } + + +def new_asset_document( + name, project_id, parent_id, parents, data=None, entity_id=None +): + """Create skeleton data of asset document. + + Args: + name (str): Is considered as unique identifier of asset in project. + project_id (Union[str, ObjectId]): Id of project doument. + parent_id (Union[str, ObjectId]): Id of parent asset. + parents (List[str]): List of parent assets names. + data (Dict[str, Any]): Asset document data. Empty dictionary is used + if not passed. Value of 'parent_id' is used to fill 'visualParent'. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of asset document. + """ + + if data is None: + data = {} + if parent_id is not None: + parent_id = ObjectId(parent_id) + data["visualParent"] = parent_id + data["parents"] = parents + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "type": "asset", + "name": name, + "parent": ObjectId(project_id), + "data": data, + "schema": CURRENT_ASSET_DOC_SCHEMA + } + + +def new_subset_document(name, family, asset_id, data=None, entity_id=None): + """Create skeleton data of subset document. + + Args: + name (str): Is considered as unique identifier of subset under asset. + family (str): Subset's family. + asset_id (Union[str, ObjectId]): Id of parent asset. + data (Dict[str, Any]): Subset document data. Empty dictionary is used + if not passed. Value of 'family' is used to fill 'family'. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of subset document. + """ + + if data is None: + data = {} + data["family"] = family + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_SUBSET_SCHEMA, + "type": "subset", + "name": name, + "data": data, + "parent": asset_id + } + + +def new_version_doc(version, subset_id, data=None, entity_id=None): + """Create skeleton data of version document. + + Args: + version (int): Is considered as unique identifier of version + under subset. + subset_id (Union[str, ObjectId]): Id of parent subset. + data (Dict[str, Any]): Version document data. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of version document. + """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_VERSION_SCHEMA, + "type": "version", + "name": int(version), + "parent": subset_id, + "data": data + } + + +def new_hero_version_doc(version_id, subset_id, data=None, entity_id=None): + """Create skeleton data of hero version document. + + Args: + version_id (ObjectId): Is considered as unique identifier of version + under subset. + subset_id (Union[str, ObjectId]): Id of parent subset. + data (Dict[str, Any]): Version document data. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of version document. 
+ """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_HERO_VERSION_SCHEMA, + "type": "hero_version", + "version_id": version_id, + "parent": subset_id, + "data": data + } + + +def new_representation_doc( + name, version_id, context, data=None, entity_id=None +): + """Create skeleton data of asset document. + + Args: + version (int): Is considered as unique identifier of version + under subset. + version_id (Union[str, ObjectId]): Id of parent version. + context (Dict[str, Any]): Representation context used for fill template + of to query. + data (Dict[str, Any]): Representation document data. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of version document. + """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_REPRESENTATION_SCHEMA, + "type": "representation", + "parent": version_id, + "name": name, + "data": data, + + # Imprint shortcut to context for performance reasons. + "context": context + } + + +def new_thumbnail_doc(data=None, entity_id=None): + """Create skeleton data of thumbnail document. + + Args: + data (Dict[str, Any]): Thumbnail document data. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of thumbnail document. + """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "type": "thumbnail", + "schema": CURRENT_THUMBNAIL_SCHEMA, + "data": data + } + + +def new_workfile_info_doc( + filename, asset_id, task_name, files, data=None, entity_id=None +): + """Create skeleton data of workfile info document. + + Workfile document is at this moment used primarily for artist notes. + + Args: + filename (str): Filename of workfile. + asset_id (Union[str, ObjectId]): Id of asset under which workfile live. + task_name (str): Task under which was workfile created. + files (List[str]): List of rootless filepaths related to workfile. + data (Dict[str, Any]): Additional metadata. + + Returns: + Dict[str, Any]: Skeleton of workfile info document. + """ + + if not data: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "type": "workfile", + "parent": ObjectId(asset_id), + "task_name": task_name, + "filename": filename, + "data": data, + "files": files + } + + +def _prepare_update_data(old_doc, new_doc, replace): + changes = {} + for key, value in new_doc.items(): + if key not in old_doc or value != old_doc[key]: + changes[key] = value + + if replace: + for key in old_doc.keys(): + if key not in new_doc: + changes[key] = REMOVED_VALUE + return changes + + +def prepare_subset_update_data(old_doc, new_doc, replace=True): + """Compare two subset documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_version_update_data(old_doc, new_doc, replace=True): + """Compare two version documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. 
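+
+    Example (an illustrative sketch with hypothetical documents; comparison
+    happens on top level keys, so the whole 'data' value is returned)::
+
+        >>> old_doc = {"name": 1, "data": {"fps": 24}}
+        >>> new_doc = {"name": 1, "data": {"fps": 25}}
+        >>> prepare_version_update_data(old_doc, new_doc)
+        {'data': {'fps': 25}}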
+ """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_hero_version_update_data(old_doc, new_doc, replace=True): + """Compare two hero version documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_representation_update_data(old_doc, new_doc, replace=True): + """Compare two representation documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_workfile_info_update_data(old_doc, new_doc, replace=True): + """Compare two workfile info documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +@six.add_metaclass(ABCMeta) +class AbstractOperation(object): + """Base operation class. + + Opration represent a call into database. The call can create, change or + remove data. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + """ + + def __init__(self, project_name, entity_type): + self._project_name = project_name + self._entity_type = entity_type + self._id = str(uuid.uuid4()) + + @property + def project_name(self): + return self._project_name + + @property + def id(self): + """Identifier of operation.""" + + return self._id + + @property + def entity_type(self): + return self._entity_type + + @abstractproperty + def operation_name(self): + """Stringified type of operation.""" + + pass + + @abstractmethod + def to_mongo_operation(self): + """Convert operation to Mongo batch operation.""" + + pass + + def to_data(self): + """Convert opration to data that can be converted to json or others. + + Warning: + Current state returns ObjectId objects which cannot be parsed by + json. + + Returns: + Dict[str, Any]: Description of operation. + """ + + return { + "id": self._id, + "entity_type": self.entity_type, + "project_name": self.project_name, + "operation": self.operation_name + } + + +class CreateOperation(AbstractOperation): + """Opeartion to create an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + data (Dict[str, Any]): Data of entity that will be created. 
+ """ + + operation_name = "create" + + def __init__(self, project_name, entity_type, data): + super(CreateOperation, self).__init__(project_name, entity_type) + + if not data: + data = {} + else: + data = copy.deepcopy(dict(data)) + + if "_id" not in data: + data["_id"] = ObjectId() + else: + data["_id"] = ObjectId(data["_id"]) + + self._entity_id = data["_id"] + self._data = data + + def __setitem__(self, key, value): + self.set_value(key, value) + + def __getitem__(self, key): + return self.data[key] + + def set_value(self, key, value): + self.data[key] = value + + def get(self, key, *args, **kwargs): + return self.data.get(key, *args, **kwargs) + + @property + def entity_id(self): + return self._entity_id + + @property + def data(self): + return self._data + + def to_mongo_operation(self): + return InsertOne(copy.deepcopy(self._data)) + + def to_data(self): + output = super(CreateOperation, self).to_data() + output["data"] = copy.deepcopy(self.data) + return output + + +class UpdateOperation(AbstractOperation): + """Opeartion to update an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + entity_id (Union[str, ObjectId]): Identifier of an entity. + update_data (Dict[str, Any]): Key -> value changes that will be set in + database. If value is set to 'REMOVED_VALUE' the key will be + removed. Only first level of dictionary is checked (on purpose). + """ + + operation_name = "update" + + def __init__(self, project_name, entity_type, entity_id, update_data): + super(UpdateOperation, self).__init__(project_name, entity_type) + + self._entity_id = ObjectId(entity_id) + self._update_data = update_data + + @property + def entity_id(self): + return self._entity_id + + @property + def update_data(self): + return self._update_data + + def to_mongo_operation(self): + unset_data = {} + set_data = {} + for key, value in self._update_data.items(): + if value is REMOVED_VALUE: + unset_data[key] = None + else: + set_data[key] = value + + op_data = {} + if unset_data: + op_data["$unset"] = unset_data + if set_data: + op_data["$set"] = set_data + + if not op_data: + return None + + return UpdateOne( + {"_id": self.entity_id}, + op_data + ) + + def to_data(self): + changes = {} + for key, value in self._update_data.items(): + if value is REMOVED_VALUE: + value = None + changes[key] = value + + output = super(UpdateOperation, self).to_data() + output.update({ + "entity_id": self.entity_id, + "changes": changes + }) + return output + + +class DeleteOperation(AbstractOperation): + """Opeartion to delete an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + entity_id (Union[str, ObjectId]): Entity id that will be removed. + """ + + operation_name = "delete" + + def __init__(self, project_name, entity_type, entity_id): + super(DeleteOperation, self).__init__(project_name, entity_type) + + self._entity_id = ObjectId(entity_id) + + @property + def entity_id(self): + return self._entity_id + + def to_mongo_operation(self): + return DeleteOne({"_id": self.entity_id}) + + def to_data(self): + output = super(DeleteOperation, self).to_data() + output["entity_id"] = self.entity_id + return output + + +class OperationsSession(object): + """Session storing operations that should happen in an order. 
+ + At this moment does not handle anything special can be sonsidered as + stupid list of operations that will happen after each other. If creation + of same entity is there multiple times it's handled in any way and document + values are not validated. + + All operations must be related to single project. + + Args: + project_name (str): Project name to which are operations related. + """ + + def __init__(self): + self._operations = [] + + def add(self, operation): + """Add operation to be processed. + + Args: + operation (BaseOperation): Operation that should be processed. + """ + if not isinstance( + operation, + (CreateOperation, UpdateOperation, DeleteOperation) + ): + raise TypeError("Expected Operation object got {}".format( + str(type(operation)) + )) + + self._operations.append(operation) + + def append(self, operation): + """Add operation to be processed. + + Args: + operation (BaseOperation): Operation that should be processed. + """ + + self.add(operation) + + def extend(self, operations): + """Add operations to be processed. + + Args: + operations (List[BaseOperation]): Operations that should be + processed. + """ + + for operation in operations: + self.add(operation) + + def remove(self, operation): + """Remove operation.""" + + self._operations.remove(operation) + + def clear(self): + """Clear all registered operations.""" + + self._operations = [] + + def to_data(self): + return [ + operation.to_data() + for operation in self._operations + ] + + def commit(self): + """Commit session operations.""" + + operations, self._operations = self._operations, [] + if not operations: + return + + operations_by_project = collections.defaultdict(list) + for operation in operations: + operations_by_project[operation.project_name].append(operation) + + for project_name, operations in operations_by_project.items(): + bulk_writes = [] + for operation in operations: + mongo_op = operation.to_mongo_operation() + if mongo_op is not None: + bulk_writes.append(mongo_op) + + if bulk_writes: + collection = get_project_connection(project_name) + collection.bulk_write(bulk_writes) + + def create_entity(self, project_name, entity_type, data): + """Fast access to 'CreateOperation'. + + Returns: + CreateOperation: Object of update operation. + """ + + operation = CreateOperation(project_name, entity_type, data) + self.add(operation) + return operation + + def update_entity(self, project_name, entity_type, entity_id, update_data): + """Fast access to 'UpdateOperation'. + + Returns: + UpdateOperation: Object of update operation. + """ + + operation = UpdateOperation( + project_name, entity_type, entity_id, update_data + ) + self.add(operation) + return operation + + def delete_entity(self, project_name, entity_type, entity_id): + """Fast access to 'DeleteOperation'. + + Returns: + DeleteOperation: Object of delete operation. + """ + + operation = DeleteOperation(project_name, entity_type, entity_id) + self.add(operation) + return operation + + +def create_project(project_name, project_code, library_project=False): + """Create project using OpenPype settings. + + This project creation function is not validating project document on + creation. It is because project document is created blindly with only + minimum required information about project which is it's name, code, type + and schema. + + Entered project name must be unique and project must not exist yet. + + Note: + This function is here to be OP v4 ready but in v3 has more logic + to do. That's why inner imports are in the body. 
+ + Args: + project_name(str): New project name. Should be unique. + project_code(str): Project's code should be unique too. + library_project(bool): Project is library project. + + Raises: + ValueError: When project name already exists in MongoDB. + + Returns: + dict: Created project document. + """ + + from openpype.settings import ProjectSettings, SaveWarningExc + from openpype.pipeline.schema import validate + + if get_project(project_name, fields=["name"]): + raise ValueError("Project with name \"{}\" already exists".format( + project_name + )) + + if not PROJECT_NAME_REGEX.match(project_name): + raise ValueError(( + "Project name \"{}\" contain invalid characters" + ).format(project_name)) + + project_doc = { + "type": "project", + "name": project_name, + "data": { + "code": project_code, + "library_project": library_project + }, + "schema": CURRENT_PROJECT_SCHEMA + } + + op_session = OperationsSession() + # Insert document with basic data + create_op = op_session.create_entity( + project_name, project_doc["type"], project_doc + ) + op_session.commit() + + # Load ProjectSettings for the project and save it to store all attributes + # and Anatomy + try: + project_settings_entity = ProjectSettings(project_name) + project_settings_entity.save() + except SaveWarningExc as exc: + print(str(exc)) + except Exception: + op_session.delete_entity( + project_name, project_doc["type"], create_op.entity_id + ) + op_session.commit() + raise + + project_doc = get_project(project_name) + + try: + # Validate created project document + validate(project_doc) + except Exception: + # Remove project if is not valid + op_session.delete_entity( + project_name, project_doc["type"], create_op.entity_id + ) + op_session.commit() + raise + + return project_doc diff --git a/openpype/hooks/pre_add_last_workfile_arg.py b/openpype/hooks/pre_add_last_workfile_arg.py index 8edccd48d4..3609620917 100644 --- a/openpype/hooks/pre_add_last_workfile_arg.py +++ b/openpype/hooks/pre_add_last_workfile_arg.py @@ -19,6 +19,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook): "hiero", "houdini", "nukestudio", + "fusion", "blender", "photoshop", "tvpaint", diff --git a/openpype/hooks/pre_copy_last_published_workfile.py b/openpype/hooks/pre_copy_last_published_workfile.py new file mode 100644 index 0000000000..26b43c39cb --- /dev/null +++ b/openpype/hooks/pre_copy_last_published_workfile.py @@ -0,0 +1,177 @@ +import os +import shutil +from time import sleep +from openpype.client.entities import ( + get_last_version_by_subset_id, + get_representations, + get_subsets, +) +from openpype.lib import PreLaunchHook +from openpype.lib.local_settings import get_local_site_id +from openpype.lib.profiles_filtering import filter_profiles +from openpype.pipeline.load.utils import get_representation_path +from openpype.settings.lib import get_project_settings + + +class CopyLastPublishedWorkfile(PreLaunchHook): + """Copy last published workfile as first workfile. + + Prelaunch hook works only if last workfile leads to not existing file. + - That is possible only if it's first version. + """ + + # Before `AddLastWorkfileToLaunchArgs` + order = -1 + app_groups = ["blender", "photoshop", "tvpaint", "aftereffects"] + + def execute(self): + """Check if local workfile doesn't exist, else copy it. 
+ + 1- Check if setting for this feature is enabled + 2- Check if workfile in work area doesn't exist + 3- Check if published workfile exists and is copied locally in publish + 4- Substitute copied published workfile as first workfile + + Returns: + None: This is a void method. + """ + + sync_server = self.modules_manager.get("sync_server") + if not sync_server or not sync_server.enabled: + self.log.debug("Sync server module is not enabled or available") + return + + # Check there is no workfile available + last_workfile = self.data.get("last_workfile_path") + if os.path.exists(last_workfile): + self.log.debug( + "Last workfile exists. Skipping {} process.".format( + self.__class__.__name__ + ) + ) + return + + # Get data + project_name = self.data["project_name"] + task_name = self.data["task_name"] + task_type = self.data["task_type"] + host_name = self.application.host_name + + # Check settings has enabled it + project_settings = get_project_settings(project_name) + profiles = project_settings["global"]["tools"]["Workfiles"][ + "last_workfile_on_startup" + ] + filter_data = { + "tasks": task_name, + "task_types": task_type, + "hosts": host_name, + } + last_workfile_settings = filter_profiles(profiles, filter_data) + use_last_published_workfile = last_workfile_settings.get( + "use_last_published_workfile" + ) + if use_last_published_workfile is None: + self.log.info( + ( + "Seems like old version of settings is used." + ' Can\'t access custom templates in host "{}".'.format( + host_name + ) + ) + ) + return + elif use_last_published_workfile is False: + self.log.info( + ( + 'Project "{}" has turned off to use last published' + ' workfile as first workfile for host "{}"'.format( + project_name, host_name + ) + ) + ) + return + + self.log.info("Trying to fetch last published workfile...") + + project_doc = self.data.get("project_doc") + asset_doc = self.data.get("asset_doc") + anatomy = self.data.get("anatomy") + + # Check it can proceed + if not project_doc and not asset_doc: + return + + # Get subset id + subset_id = next( + ( + subset["_id"] + for subset in get_subsets( + project_name, + asset_ids=[asset_doc["_id"]], + fields=["_id", "data.family", "data.families"], + ) + if subset["data"].get("family") == "workfile" + # Legacy compatibility + or "workfile" in subset["data"].get("families", {}) + ), + None, + ) + if not subset_id: + self.log.debug( + 'No any workfile for asset "{}".'.format(asset_doc["name"]) + ) + return + + # Get workfile representation + last_version_doc = get_last_version_by_subset_id( + project_name, subset_id, fields=["_id"] + ) + if not last_version_doc: + self.log.debug("Subset does not have any versions") + return + + workfile_representation = next( + ( + representation + for representation in get_representations( + project_name, version_ids=[last_version_doc["_id"]] + ) + if representation["context"]["task"]["name"] == task_name + ), + None, + ) + + if not workfile_representation: + self.log.debug( + 'No published workfile for task "{}" and host "{}".'.format( + task_name, host_name + ) + ) + return + + local_site_id = get_local_site_id() + sync_server.add_site( + project_name, + workfile_representation["_id"], + local_site_id, + force=True, + priority=99, + reset_timer=True, + ) + + while not sync_server.is_representation_on_site( + project_name, workfile_representation["_id"], local_site_id + ): + sleep(5) + + # Get paths + published_workfile_path = get_representation_path( + workfile_representation, root=anatomy.roots + ) + local_workfile_dir = 
os.path.dirname(last_workfile) + + # Copy file and substitute path + self.data["last_workfile_path"] = shutil.copy( + published_workfile_path, local_workfile_dir + ) diff --git a/openpype/hooks/pre_copy_template_workfile.py b/openpype/hooks/pre_copy_template_workfile.py index dffac22ee2..70c549919f 100644 --- a/openpype/hooks/pre_copy_template_workfile.py +++ b/openpype/hooks/pre_copy_template_workfile.py @@ -1,11 +1,11 @@ import os import shutil -from openpype.lib import ( - PreLaunchHook, - get_custom_workfile_template_by_context, +from openpype.lib import PreLaunchHook +from openpype.settings import get_project_settings +from openpype.pipeline.workfile import ( + get_custom_workfile_template, get_custom_workfile_template_by_string_context ) -from openpype.settings import get_project_settings class CopyTemplateWorkfile(PreLaunchHook): @@ -54,41 +54,22 @@ class CopyTemplateWorkfile(PreLaunchHook): project_name = self.data["project_name"] asset_name = self.data["asset_name"] task_name = self.data["task_name"] + host_name = self.application.host_name project_settings = get_project_settings(project_name) - host_settings = project_settings[self.application.host_name] - - workfile_builder_settings = host_settings.get("workfile_builder") - if not workfile_builder_settings: - # TODO remove warning when deprecated - self.log.warning(( - "Seems like old version of settings is used." - " Can't access custom templates in host \"{}\"." - ).format(self.application.full_label)) - return - - if not workfile_builder_settings["create_first_version"]: - self.log.info(( - "Project \"{}\" has turned off to create first workfile for" - " application \"{}\"" - ).format(project_name, self.application.full_label)) - return - - # Backwards compatibility - template_profiles = workfile_builder_settings.get("custom_templates") - if not template_profiles: - self.log.info( - "Custom templates are not filled. Skipping template copy." - ) - return project_doc = self.data.get("project_doc") asset_doc = self.data.get("asset_doc") anatomy = self.data.get("anatomy") if project_doc and asset_doc: self.log.debug("Started filtering of custom template paths.") - template_path = get_custom_workfile_template_by_context( - template_profiles, project_doc, asset_doc, task_name, anatomy + template_path = get_custom_workfile_template( + project_doc, + asset_doc, + task_name, + host_name, + anatomy, + project_settings ) else: @@ -96,10 +77,13 @@ class CopyTemplateWorkfile(PreLaunchHook): "Global data collection probably did not execute." " Using backup solution." 
)) - dbcon = self.data.get("dbcon") template_path = get_custom_workfile_template_by_string_context( - template_profiles, project_name, asset_name, task_name, - dbcon, anatomy + project_name, + asset_name, + task_name, + host_name, + anatomy, + project_settings ) if not template_path: diff --git a/openpype/hooks/pre_create_extra_workdir_folders.py b/openpype/hooks/pre_create_extra_workdir_folders.py index d79c5831ee..c5af620c87 100644 --- a/openpype/hooks/pre_create_extra_workdir_folders.py +++ b/openpype/hooks/pre_create_extra_workdir_folders.py @@ -1,8 +1,6 @@ import os -from openpype.lib import ( - PreLaunchHook, - create_workdir_extra_folders -) +from openpype.lib import PreLaunchHook +from openpype.pipeline.workfile import create_workdir_extra_folders class AddLastWorkfileToLaunchArgs(PreLaunchHook): diff --git a/openpype/hooks/pre_global_host_data.py b/openpype/hooks/pre_global_host_data.py index ea5e290d6f..8a178915fb 100644 --- a/openpype/hooks/pre_global_host_data.py +++ b/openpype/hooks/pre_global_host_data.py @@ -1,11 +1,11 @@ -from openpype.api import Anatomy +from openpype.client import get_project, get_asset_by_name from openpype.lib import ( PreLaunchHook, EnvironmentPrepData, prepare_app_environments, prepare_context_environments ) -from openpype.pipeline import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB, Anatomy class GlobalHostDataHook(PreLaunchHook): @@ -70,7 +70,7 @@ class GlobalHostDataHook(PreLaunchHook): self.data["dbcon"] = dbcon # Project document - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) self.data["project_doc"] = project_doc asset_name = self.data.get("asset_name") @@ -80,8 +80,5 @@ class GlobalHostDataHook(PreLaunchHook): ) return - asset_doc = dbcon.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) self.data["asset_doc"] = asset_doc diff --git a/openpype/host/__init__.py b/openpype/host/__init__.py new file mode 100644 index 0000000000..da1237c739 --- /dev/null +++ b/openpype/host/__init__.py @@ -0,0 +1,24 @@ +from .host import ( + HostBase, +) + +from .interfaces import ( + IWorkfileHost, + ILoadHost, + IPublishHost, + INewPublisher, +) + +from .dirmap import HostDirmap + + +__all__ = ( + "HostBase", + + "IWorkfileHost", + "ILoadHost", + "IPublishHost", + "INewPublisher", + + "HostDirmap", +) diff --git a/openpype/host/dirmap.py b/openpype/host/dirmap.py new file mode 100644 index 0000000000..88d68f27bf --- /dev/null +++ b/openpype/host/dirmap.py @@ -0,0 +1,205 @@ +"""Dirmap functionality used in host integrations inside DCCs. + +Idea for current dirmap implementation was used from Maya where is possible to +enter source and destination roots and maya will try each found source +in referenced file replace with each destionation paths. First path which +exists is used. +""" + +import os +from abc import ABCMeta, abstractmethod + +import six + +from openpype.lib import Logger +from openpype.modules import ModulesManager +from openpype.settings import get_project_settings +from openpype.settings.lib import get_site_local_overrides + + +@six.add_metaclass(ABCMeta) +class HostDirmap(object): + """Abstract class for running dirmap on a workfile in a host. + + Dirmap is used to translate paths inside of host workfile from one + OS to another. (Eg. arstist created workfile on Win, different artists + opens same file on Linux.) 
+ + Expects methods to be implemented inside of host: + on_dirmap_enabled: run host code for enabling dirmap + do_dirmap: run host code to do actual remapping + """ + + def __init__( + self, host_name, project_name, project_settings=None, sync_module=None + ): + self.host_name = host_name + self.project_name = project_name + self._project_settings = project_settings + self._sync_module = sync_module # to limit reinit of Modules + self._log = None + self._mapping = None # cache mapping + + @property + def sync_module(self): + if self._sync_module is None: + manager = ModulesManager() + self._sync_module = manager["sync_server"] + return self._sync_module + + @property + def project_settings(self): + if self._project_settings is None: + self._project_settings = get_project_settings(self.project_name) + return self._project_settings + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + @abstractmethod + def on_enable_dirmap(self): + """Run host dependent operation for enabling dirmap if necessary.""" + pass + + @abstractmethod + def dirmap_routine(self, source_path, destination_path): + """Run host dependent remapping from source_path to destination_path""" + pass + + def process_dirmap(self): + # type: (dict) -> None + """Go through all paths in Settings and set them using `dirmap`. + + If artists has Site Sync enabled, take dirmap mapping directly from + Local Settings when artist is syncing workfile locally. + + Args: + project_settings (dict): Settings for current project. + """ + + if not self._mapping: + self._mapping = self.get_mappings(self.project_settings) + if not self._mapping: + return + + self.log.info("Processing directory mapping ...") + self.on_enable_dirmap() + self.log.info("mapping:: {}".format(self._mapping)) + + for k, sp in enumerate(self._mapping["source-path"]): + dst = self._mapping["destination-path"][k] + try: + print("{} -> {}".format(sp, dst)) + self.dirmap_routine(sp, dst) + except IndexError: + # missing corresponding destination path + self.log.error(( + "invalid dirmap mapping, missing corresponding" + " destination directory." + )) + break + except RuntimeError: + self.log.error( + "invalid path {} -> {}, mapping not registered".format( + sp, dst + ) + ) + continue + + def get_mappings(self, project_settings): + """Get translation from source-path to destination-path. + + It checks if Site Sync is enabled and user chose to use local + site, in that case configuration in Local Settings takes precedence + """ + + local_mapping = self._get_local_sync_dirmap(project_settings) + dirmap_label = "{}-dirmap".format(self.host_name) + if ( + not self.project_settings[self.host_name].get(dirmap_label) + and not local_mapping + ): + return {} + mapping_settings = self.project_settings[self.host_name][dirmap_label] + mapping_enabled = mapping_settings["enabled"] or bool(local_mapping) + if not mapping_enabled: + return {} + + mapping = ( + local_mapping + or mapping_settings["paths"] + or {} + ) + + if ( + not mapping + or not mapping.get("destination-path") + or not mapping.get("source-path") + ): + return {} + return mapping + + def _get_local_sync_dirmap(self, project_settings): + """ + Returns dirmap if synch to local project is enabled. + + Only valid mapping is from roots of remote site to local site set + in Local Settings. 
+ + Args: + project_settings (dict) + Returns: + dict : { "source-path": [XXX], "destination-path": [YYYY]} + """ + + mapping = {} + + if not project_settings["global"]["sync_server"]["enabled"]: + return mapping + + project_name = os.getenv("AVALON_PROJECT") + + active_site = self.sync_module.get_local_normalized_site( + self.sync_module.get_active_site(project_name)) + remote_site = self.sync_module.get_local_normalized_site( + self.sync_module.get_remote_site(project_name)) + self.log.debug( + "active {} - remote {}".format(active_site, remote_site) + ) + + if ( + active_site == "local" + and project_name in self.sync_module.get_enabled_projects() + and active_site != remote_site + ): + sync_settings = self.sync_module.get_sync_project_setting( + project_name, + exclude_locals=False, + cached=False) + + active_overrides = get_site_local_overrides( + project_name, active_site) + remote_overrides = get_site_local_overrides( + project_name, remote_site) + + self.log.debug("local overrides {}".format(active_overrides)) + self.log.debug("remote overrides {}".format(remote_overrides)) + for root_name, active_site_dir in active_overrides.items(): + remote_site_dir = ( + remote_overrides.get(root_name) + or sync_settings["sites"][remote_site]["root"][root_name] + ) + if os.path.isdir(active_site_dir): + if "destination-path" not in mapping: + mapping["destination-path"] = [] + mapping["destination-path"].append(active_site_dir) + + if "source-path" not in mapping: + mapping["source-path"] = [] + mapping["source-path"].append(remote_site_dir) + + self.log.debug("local sync mapping:: {}".format(mapping)) + return mapping diff --git a/openpype/host/host.py b/openpype/host/host.py new file mode 100644 index 0000000000..99f7868727 --- /dev/null +++ b/openpype/host/host.py @@ -0,0 +1,162 @@ +import logging +import contextlib +from abc import ABCMeta, abstractproperty +import six + +# NOTE can't import 'typing' because of issues in Maya 2020 +# - shiboken crashes on 'typing' module import + + +@six.add_metaclass(ABCMeta) +class HostBase(object): + """Base of host implementation class. + + Host is pipeline implementation of DCC application. This class should help + to identify what must/should/can be implemented for specific functionality. + + Compared to 'avalon' concept: + What was before considered as functions in host implementation folder. The + host implementation should primarily care about adding ability of creation + (mark subsets to be published) and optionaly about referencing published + representations as containers. + + Host may need extend some functionality like working with workfiles + or loading. Not all host implementations may allow that for those purposes + can be logic extended with implementing functions for the purpose. There + are prepared interfaces to be able identify what must be implemented to + be able use that functionality. + - current statement is that it is not required to inherit from interfaces + but all of the methods are validated (only their existence!) 
+ + # Installation of host before (avalon concept): + ```python + from openpype.pipeline import install_host + import openpype.hosts.maya.api as host + + install_host(host) + ``` + + # Installation of host now: + ```python + from openpype.pipeline import install_host + from openpype.hosts.maya.api import MayaHost + + host = MayaHost() + install_host(host) + ``` + + Todo: + - move content of 'install_host' as method of this class + - register host object + - install legacy_io + - install global plugin paths + - store registered plugin paths to this object + - handle current context (project, asset, task) + - this must be done in many separated steps + - have it's object of host tools instead of using globals + + This implementation will probably change over time when more + functionality and responsibility will be added. + """ + + _log = None + + def __init__(self): + """Initialization of host. + + Register DCC callbacks, host specific plugin paths, targets etc. + (Part of what 'install' did in 'avalon' concept.) + + Note: + At this moment global "installation" must happen before host + installation. Because of this current limitation it is recommended + to implement 'install' method which is triggered after global + 'install'. + """ + + pass + + @property + def log(self): + if self._log is None: + self._log = logging.getLogger(self.__class__.__name__) + return self._log + + @abstractproperty + def name(self): + """Host name.""" + + pass + + def get_current_context(self): + """Get current context information. + + This method should be used to get current context of host. Usage of + this method can be crutial for host implementations in DCCs where + can be opened multiple workfiles at one moment and change of context + can't be catched properly. + + Default implementation returns values from 'legacy_io.Session'. + + Returns: + dict: Context with 3 keys 'project_name', 'asset_name' and + 'task_name'. All of them can be 'None'. + """ + + from openpype.pipeline import legacy_io + + if legacy_io.is_installed(): + legacy_io.install() + + return { + "project_name": legacy_io.Session["AVALON_PROJECT"], + "asset_name": legacy_io.Session["AVALON_ASSET"], + "task_name": legacy_io.Session["AVALON_TASK"] + } + + def get_context_title(self): + """Context title shown for UI purposes. + + Should return current context title if possible. + + Note: + This method is used only for UI purposes so it is possible to + return some logical title for contextless cases. + Is not meant for "Context menu" label. + + Returns: + str: Context title. + None: Default title is used based on UI implementation. + """ + + # Use current context to fill the context title + current_context = self.get_current_context() + project_name = current_context["project_name"] + asset_name = current_context["asset_name"] + task_name = current_context["task_name"] + items = [] + if project_name: + items.append(project_name) + if asset_name: + items.append(asset_name) + if task_name: + items.append(task_name) + if items: + return "/".join(items) + return None + + @contextlib.contextmanager + def maintained_selection(self): + """Some functionlity will happen but selection should stay same. + + This is DCC specific. Some may not allow to implement this ability + that is reason why default implementation is empty context manager. + + Yields: + None: Yield when is ready to restore selected at the end. 
+ """ + + try: + yield + finally: + pass diff --git a/openpype/host/interfaces.py b/openpype/host/interfaces.py new file mode 100644 index 0000000000..3b2df745d1 --- /dev/null +++ b/openpype/host/interfaces.py @@ -0,0 +1,386 @@ +from abc import ABCMeta, abstractmethod +import six + + +class MissingMethodsError(ValueError): + """Exception when host miss some required methods for specific workflow. + + Args: + host (HostBase): Host implementation where are missing methods. + missing_methods (list[str]): List of missing methods. + """ + + def __init__(self, host, missing_methods): + joined_missing = ", ".join( + ['"{}"'.format(item) for item in missing_methods] + ) + host_name = getattr(host, "name", None) + if not host_name: + try: + host_name = host.__file__.replace("\\", "/").split("/")[-3] + except Exception: + host_name = str(host) + message = ( + "Host \"{}\" miss methods {}".format(host_name, joined_missing) + ) + super(MissingMethodsError, self).__init__(message) + + +class ILoadHost: + """Implementation requirements to be able use reference of representations. + + The load plugins can do referencing even without implementation of methods + here, but switch and removement of containers would not be possible. + + Questions: + - Is list container dependency of host or load plugins? + - Should this be directly in HostBase? + - how to find out if referencing is available? + - do we need to know that? + """ + + @staticmethod + def get_missing_load_methods(host): + """Look for missing methods on "old type" host implementation. + + Method is used for validation of implemented functions related to + loading. Checks only existence of methods. + + Args: + Union[ModuleType, HostBase]: Object of host where to look for + required methods. + + Returns: + list[str]: Missing method implementations for loading workflow. + """ + + if isinstance(host, ILoadHost): + return [] + + required = ["ls"] + missing = [] + for name in required: + if not hasattr(host, name): + missing.append(name) + return missing + + @staticmethod + def validate_load_methods(host): + """Validate implemented methods of "old type" host for load workflow. + + Args: + Union[ModuleType, HostBase]: Object of host to validate. + + Raises: + MissingMethodsError: If there are missing methods on host + implementation. + """ + missing = ILoadHost.get_missing_load_methods(host) + if missing: + raise MissingMethodsError(host, missing) + + @abstractmethod + def get_containers(self): + """Retreive referenced containers from scene. + + This can be implemented in hosts where referencing can be used. + + Todo: + Rename function to something more self explanatory. + Suggestion: 'get_containers' + + Returns: + list[dict]: Information about loaded containers. + """ + + pass + + # --- Deprecated method names --- + def ls(self): + """Deprecated variant of 'get_containers'. + + Todo: + Remove when all usages are replaced. + """ + + return self.get_containers() + + +@six.add_metaclass(ABCMeta) +class IWorkfileHost: + """Implementation requirements to be able use workfile utils and tool.""" + + @staticmethod + def get_missing_workfile_methods(host): + """Look for missing methods on "old type" host implementation. + + Method is used for validation of implemented functions related to + workfiles. Checks only existence of methods. + + Args: + Union[ModuleType, HostBase]: Object of host where to look for + required methods. + + Returns: + list[str]: Missing method implementations for workfiles workflow. 
+        """
+
+        if isinstance(host, IWorkfileHost):
+            return []
+
+        required = [
+            "open_file",
+            "save_file",
+            "current_file",
+            "has_unsaved_changes",
+            "file_extensions",
+            "work_root",
+        ]
+        missing = []
+        for name in required:
+            if not hasattr(host, name):
+                missing.append(name)
+        return missing
+
+    @staticmethod
+    def validate_workfile_methods(host):
+        """Validate methods of "old type" host for workfiles workflow.
+
+        Args:
+            Union[ModuleType, HostBase]: Object of host to validate.
+
+        Raises:
+            MissingMethodsError: If there are missing methods on host
+                implementation.
+        """
+
+        missing = IWorkfileHost.get_missing_workfile_methods(host)
+        if missing:
+            raise MissingMethodsError(host, missing)
+
+    @abstractmethod
+    def get_workfile_extensions(self):
+        """Extensions that can be used for saving a workfile.
+
+        Questions:
+            This could potentially use 'HostDefinition'.
+        """
+
+        return []
+
+    @abstractmethod
+    def save_workfile(self, dst_path=None):
+        """Save currently opened scene.
+
+        Args:
+            dst_path (str): Where the current scene should be saved. Or use
+                current path if 'None' is passed.
+        """
+
+        pass
+
+    @abstractmethod
+    def open_workfile(self, filepath):
+        """Open passed filepath in the host.
+
+        Args:
+            filepath (str): Path to workfile.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_current_workfile(self):
+        """Retrieve path to currently opened file.
+
+        Returns:
+            str: Path to file which is currently opened.
+            None: If nothing is opened.
+        """
+
+        return None
+
+    def workfile_has_unsaved_changes(self):
+        """Check if the currently opened workfile has unsaved changes.
+
+        Not all hosts can know if the current scene is saved because the
+        DCC API does not support it.
+
+        Returns:
+            bool: True if the scene has unsaved modifications and False if
+                it is saved.
+            None: Can't tell if the workfile has modifications.
+        """
+
+        return None
+
+    def work_root(self, session):
+        """Modify workdir per host.
+
+        Default implementation keeps workdir untouched.
+
+        Warnings:
+            We must handle this modification in a more sophisticated way
+            because this can't be called outside of the DCC, so opening of
+            the last workfile (calculated before the DCC is launched) is
+            complicated. Also breaking the defined work template is not a
+            good idea.
+            The only place where it is really used and makes sense is Maya,
+            where workspace.mel can modify the subfolders in which to look
+            for maya files.
+
+        Args:
+            session (dict): Session context data.
+
+        Returns:
+            str: Path to new workdir.
+        """
+
+        return session["AVALON_WORKDIR"]
+
+    # --- Deprecated method names ---
+    def file_extensions(self):
+        """Deprecated variant of 'get_workfile_extensions'.
+
+        Todo:
+            Remove when all usages are replaced.
+        """
+        return self.get_workfile_extensions()
+
+    def save_file(self, dst_path=None):
+        """Deprecated variant of 'save_workfile'.
+
+        Todo:
+            Remove when all usages are replaced.
+        """
+
+        self.save_workfile(dst_path)
+
+    def open_file(self, filepath):
+        """Deprecated variant of 'open_workfile'.
+
+        Todo:
+            Remove when all usages are replaced.
+        """
+
+        return self.open_workfile(filepath)
+
+    def current_file(self):
+        """Deprecated variant of 'get_current_workfile'.
+
+        Todo:
+            Remove when all usages are replaced.
+        """
+
+        return self.get_current_workfile()
+
+    def has_unsaved_changes(self):
+        """Deprecated variant of 'workfile_has_unsaved_changes'.
+
+        Todo:
+            Remove when all usages are replaced.
+        """
+
+        return self.workfile_has_unsaved_changes()
+
+
+class IPublishHost:
+    """Functions related to the new creation system in the new publisher.
+
+    The new publisher does not store information only about each created
+    instance but also some global data. At this moment the data are related
+    only to context publish plugins, but that can be extended in the future.
+    """
+
+    @staticmethod
+    def get_missing_publish_methods(host):
+        """Look for missing methods on "old type" host implementation.
+
+        Method is used for validation of implemented functions related to
+        new publish creation. Checks only existence of methods.
+
+        Args:
+            Union[ModuleType, HostBase]: Host module where to look for
+                required methods.
+
+        Returns:
+            list[str]: Missing method implementations for new publisher
+                workflow.
+        """
+
+        if isinstance(host, IPublishHost):
+            return []
+
+        required = [
+            "get_context_data",
+            "update_context_data",
+            "get_context_title",
+            "get_current_context",
+        ]
+        missing = []
+        for name in required:
+            if not hasattr(host, name):
+                missing.append(name)
+        return missing
+
+    @staticmethod
+    def validate_publish_methods(host):
+        """Validate implemented methods of "old type" host.
+
+        Args:
+            Union[ModuleType, HostBase]: Host module to validate.
+
+        Raises:
+            MissingMethodsError: If there are missing methods on host
+                implementation.
+        """
+        missing = IPublishHost.get_missing_publish_methods(host)
+        if missing:
+            raise MissingMethodsError(host, missing)
+
+    @abstractmethod
+    def get_context_data(self):
+        """Get global data related to creation-publishing from workfile.
+
+        These data are not related to any created instance but to the whole
+        publishing context. If they are not saved and returned, each reset
+        of publishing resets all values to the default ones.
+
+        Context data can contain information about enabled/disabled publish
+        plugins or other values that can be filled in by the artist.
+
+        Returns:
+            dict: Context data stored using 'update_context_data'.
+        """
+
+        pass
+
+    @abstractmethod
+    def update_context_data(self, data, changes):
+        """Store global context data to workfile.
+
+        Called when some values in the context data have changed.
+
+        If the values are not stored in a way that 'get_context_data' can
+        return them, each reset of publishing will lose the values filled in
+        by the artist. Best practice is to store the values in the workfile,
+        if possible.
+
+        Args:
+            data (dict): New data as a whole.
+            changes (dict): Only data that have changed. Each value is a
+                tuple with the old and new value.
+        """
+
+        pass
+
+
+class INewPublisher(IPublishHost):
+    """Legacy interface replaced by 'IPublishHost'.
+
+    Deprecated:
+        'INewPublisher' is replaced by 'IPublishHost', please change your
+        imports.
+        There is no "reasonable" way to mark these classes as deprecated
+        and show a warning about the wrong import.
Deprecated since 3.14.* will be + removed in 3.15.* + """ + + pass diff --git a/openpype/hosts/aftereffects/__init__.py b/openpype/hosts/aftereffects/__init__.py index deae48d122..ae750d05b6 100644 --- a/openpype/hosts/aftereffects/__init__.py +++ b/openpype/hosts/aftereffects/__init__.py @@ -1,9 +1,6 @@ -def add_implementation_envs(env, _app): - """Modify environments to contain all required for implementation.""" - defaults = { - "OPENPYPE_LOG_NO_COLORS": "True", - "WEBSOCKET_URL": "ws://localhost:8097/ws/" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value +from .addon import AfterEffectsAddon + + +__all__ = ( + "AfterEffectsAddon", +) diff --git a/openpype/hosts/aftereffects/addon.py b/openpype/hosts/aftereffects/addon.py new file mode 100644 index 0000000000..79df550312 --- /dev/null +++ b/openpype/hosts/aftereffects/addon.py @@ -0,0 +1,22 @@ +from openpype.modules import OpenPypeModule, IHostAddon + + +class AfterEffectsAddon(OpenPypeModule, IHostAddon): + name = "aftereffects" + host_name = "aftereffects" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + """Modify environments to contain all required for implementation.""" + defaults = { + "OPENPYPE_LOG_NO_COLORS": "True", + "WEBSOCKET_URL": "ws://localhost:8097/ws/" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_workfile_extensions(self): + return [".aep"] diff --git a/openpype/hosts/aftereffects/api/launch_logic.py b/openpype/hosts/aftereffects/api/launch_logic.py index 30a3e1f1c3..9c8513fe8c 100644 --- a/openpype/hosts/aftereffects/api/launch_logic.py +++ b/openpype/hosts/aftereffects/api/launch_logic.py @@ -12,6 +12,7 @@ from wsrpc_aiohttp import ( from Qt import QtCore +from openpype.lib import Logger from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools from openpype.tools.adobe_webserver.app import WebServerTool @@ -84,8 +85,6 @@ class ProcessLauncher(QtCore.QObject): @property def log(self): if self._log is None: - from openpype.api import Logger - self._log = Logger.get_logger("{}-launcher".format( self.route_name)) return self._log diff --git a/openpype/hosts/aftereffects/api/lib.py b/openpype/hosts/aftereffects/api/lib.py index ce4cbf09af..8cdf9c407e 100644 --- a/openpype/hosts/aftereffects/api/lib.py +++ b/openpype/hosts/aftereffects/api/lib.py @@ -1,13 +1,16 @@ import os import sys +import re +import json import contextlib import traceback import logging +from functools import partial from Qt import QtWidgets from openpype.pipeline import install_host -from openpype.lib.remote_publish import headless_publish +from openpype.modules import ModulesManager from openpype.tools.utils import host_tools from .launch_logic import ProcessLauncher, get_stub @@ -35,10 +38,18 @@ def main(*subprocess_args): launcher.start() if os.environ.get("HEADLESS_PUBLISH"): - launcher.execute_in_main_thread(lambda: headless_publish( - log, - "CloseAE", - os.environ.get("IS_TEST"))) + manager = ModulesManager() + webpublisher_addon = manager["webpublisher"] + + launcher.execute_in_main_thread( + partial( + webpublisher_addon.headless_publish, + log, + "CloseAE", + os.environ.get("IS_TEST") + ) + ) + elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True): save = False if os.getenv("WORKFILES_SAVE_AS"): @@ -68,3 +79,57 @@ def get_extension_manifest_path(): "CSXS", "manifest.xml" ) + + +def get_unique_layer_name(layers, name): + """ + Gets all layer names and if 
'name' is present in them, increases + suffix by 1 (eg. creates unique layer name - for Loader) + Args: + layers (list): of strings, names only + name (string): checked value + + Returns: + (string): name_00X (without version) + """ + names = {} + for layer in layers: + layer_name = re.sub(r'_\d{3}$', '', layer) + if layer_name in names.keys(): + names[layer_name] = names[layer_name] + 1 + else: + names[layer_name] = 1 + occurrences = names.get(name, 0) + + return "{}_{:0>3d}".format(name, occurrences + 1) + + +def get_background_layers(file_url): + """ + Pulls file name from background json file, enrich with folder url for + AE to be able import files. + + Order is important, follows order in json. + + Args: + file_url (str): abs url of background json + + Returns: + (list): of abs paths to images + """ + with open(file_url) as json_file: + data = json.load(json_file) + + layers = list() + bg_folder = os.path.dirname(file_url) + for child in data['children']: + if child.get("filename"): + layers.append(os.path.join(bg_folder, child.get("filename")). + replace("\\", "/")) + else: + for layer in child['children']: + if layer.get("filename"): + layers.append(os.path.join(bg_folder, + layer.get("filename")). + replace("\\", "/")) + return layers diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py index a428a1470d..7026fe3f05 100644 --- a/openpype/hosts/aftereffects/api/pipeline.py +++ b/openpype/hosts/aftereffects/api/pipeline.py @@ -1,12 +1,10 @@ import os -import sys from Qt import QtWidgets import pyblish.api -from openpype import lib -from openpype.api import Logger +from openpype.lib import Logger, register_event_callback from openpype.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, @@ -15,10 +13,10 @@ from openpype.pipeline import ( AVALON_CONTAINER_ID, legacy_io, ) +from openpype.pipeline.load import any_outdated_containers import openpype.hosts.aftereffects -from openpype.lib import register_event_callback -from .launch_logic import get_stub +from .launch_logic import get_stub, ConnectionNotEstablishedYet log = Logger.get_logger(__name__) @@ -65,14 +63,14 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): instance[0].Visible = new_value -def get_asset_settings(): +def get_asset_settings(asset_doc): """Get settings on current asset from database. Returns: dict: Scene data. """ - asset_data = lib.get_asset()["data"] + asset_data = asset_doc["data"] fps = asset_data.get("fps") frame_start = asset_data.get("frameStart") frame_end = asset_data.get("frameEnd") @@ -111,7 +109,7 @@ def ls(): """ try: stub = get_stub() # only after AfterEffects is up - except lib.ConnectionNotEstablishedYet: + except ConnectionNotEstablishedYet: print("Not connected yet, ignoring") return @@ -136,7 +134,7 @@ def ls(): def check_inventory(): """Checks loaded containers if they are of highest version""" - if not lib.any_outdated(): + if not any_outdated_containers(): return # Warn about outdated containers. 
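A minimal usage sketch of the `get_unique_layer_name` helper added above in `openpype/hosts/aftereffects/api/lib.py`; the layer names are made up for illustration only:

```python
from openpype.hosts.aftereffects.api.lib import get_unique_layer_name

existing_layers = ["hero_001", "hero_002", "prop_001"]

# "hero" already occurs twice -> next free suffix is 003
print(get_unique_layer_name(existing_layers, "hero"))  # hero_003

# "tree" does not occur yet -> first suffix is 001
print(get_unique_layer_name(existing_layers, "tree"))  # tree_001
```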
@@ -284,7 +282,7 @@ def _get_stub(): """ try: stub = get_stub() # only after Photoshop is up - except lib.ConnectionNotEstablishedYet: + except ConnectionNotEstablishedYet: print("Not connected yet, ignoring") return diff --git a/openpype/hosts/aftereffects/api/workio.py b/openpype/hosts/aftereffects/api/workio.py index d6c732285a..18b40af5dc 100644 --- a/openpype/hosts/aftereffects/api/workio.py +++ b/openpype/hosts/aftereffects/api/workio.py @@ -1,12 +1,11 @@ """Host API required Work Files tool""" import os -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS from .launch_logic import get_stub def file_extensions(): - return HOST_WORKFILE_EXTENSIONS["aftereffects"] + return [".aep"] def has_unsaved_changes(): diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index 215c148f37..1019709dd6 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -17,11 +17,8 @@ class RenderCreator(Creator): create_allow_context_change = True - def __init__( - self, create_context, system_settings, project_settings, headless=False - ): - super(RenderCreator, self).__init__(create_context, system_settings, - project_settings, headless) + def __init__(self, project_settings, *args, **kwargs): + super(RenderCreator, self).__init__(project_settings, *args, **kwargs) self._default_variants = (project_settings["aftereffects"] ["create"] ["RenderCreator"] diff --git a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py index 88e55e21b5..f82d15b3c9 100644 --- a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py +++ b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py @@ -1,4 +1,5 @@ import openpype.hosts.aftereffects.api as api +from openpype.client import get_asset_by_name from openpype.pipeline import ( AutoCreator, CreatedInstance, @@ -10,6 +11,8 @@ class AEWorkfileCreator(AutoCreator): identifier = "workfile" family = "workfile" + default_variant = "Main" + def get_instance_attr_defs(self): return [] @@ -34,27 +37,25 @@ class AEWorkfileCreator(AutoCreator): existing_instance = instance break - variant = '' project_name = legacy_io.Session["AVALON_PROJECT"] asset_name = legacy_io.Session["AVALON_ASSET"] task_name = legacy_io.Session["AVALON_TASK"] host_name = legacy_io.Session["AVALON_APP"] if existing_instance is None: - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name + self.default_variant, task_name, asset_doc, + project_name, host_name ) data = { "asset": asset_name, "task": task_name, - "variant": variant + "variant": self.default_variant } data.update(self.get_dynamic_data( - variant, task_name, asset_doc, project_name, host_name + self.default_variant, task_name, asset_doc, + project_name, host_name )) new_instance = CreatedInstance( @@ -69,12 +70,11 @@ class AEWorkfileCreator(AutoCreator): existing_instance["asset"] != asset_name or existing_instance["task"] != task_name ): - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name + self.default_variant, task_name, asset_doc, + 
project_name, host_name ) existing_instance["asset"] = asset_name existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/openpype/hosts/aftereffects/plugins/load/load_background.py b/openpype/hosts/aftereffects/plugins/load/load_background.py index d346df504a..260e780be0 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_background.py +++ b/openpype/hosts/aftereffects/plugins/load/load_background.py @@ -1,14 +1,14 @@ import re -from openpype.lib import ( - get_background_layers, - get_unique_layer_name -) from openpype.pipeline import get_representation_path from openpype.hosts.aftereffects.api import ( AfterEffectsLoader, containerise ) +from openpype.hosts.aftereffects.api.lib import ( + get_background_layers, + get_unique_layer_name, +) class BackgroundLoader(AfterEffectsLoader): diff --git a/openpype/hosts/aftereffects/plugins/load/load_file.py b/openpype/hosts/aftereffects/plugins/load/load_file.py index 6ab69c6bfa..2ddc9825e5 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_file.py +++ b/openpype/hosts/aftereffects/plugins/load/load_file.py @@ -1,12 +1,11 @@ import re -from openpype import lib - from openpype.pipeline import get_representation_path from openpype.hosts.aftereffects.api import ( AfterEffectsLoader, containerise ) +from openpype.hosts.aftereffects.api.lib import get_unique_layer_name class FileLoader(AfterEffectsLoader): @@ -28,7 +27,7 @@ class FileLoader(AfterEffectsLoader): stub = self.get_stub() layers = stub.get_items(comps=True, folders=True, footages=True) existing_layers = [layer.name for layer in layers] - comp_name = lib.get_unique_layer_name( + comp_name = get_unique_layer_name( existing_layers, "{}_{}".format(context["asset"]["name"], name)) import_options = {} @@ -87,7 +86,7 @@ class FileLoader(AfterEffectsLoader): if namespace_from_container != layer_name: layers = stub.get_items(comps=True) existing_layers = [layer.name for layer in layers] - layer_name = lib.get_unique_layer_name( + layer_name = get_unique_layer_name( existing_layers, "{}_{}".format(context["asset"], context["subset"])) else: # switching version - keep same name diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index fa23bf92b0..d444ead6dc 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -6,8 +6,8 @@ import attr import pyblish.api from openpype.settings import get_project_settings -from openpype.lib import abstract_collect_render -from openpype.lib.abstract_collect_render import RenderInstance +from openpype.pipeline import publish +from openpype.pipeline.publish import RenderInstance from openpype.hosts.aftereffects.api import get_stub @@ -21,11 +21,11 @@ class AERenderInstance(RenderInstance): projectEntity = attr.ib(default=None) stagingDir = attr.ib(default=None) app_version = attr.ib(default=None) - publish_attributes = attr.ib(default=None) + publish_attributes = attr.ib(default={}) file_name = attr.ib(default=None) -class CollectAERender(abstract_collect_render.AbstractCollectRender): +class CollectAERender(publish.AbstractCollectRender): order = pyblish.api.CollectorOrder + 0.405 label = "Collect After Effects Render Layers" @@ -90,7 +90,7 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): subset_name = inst.data["subset"] instance = AERenderInstance( - family=family, + family="render", 
families=inst.data.get("families", []), version=version, time="", @@ -102,7 +102,6 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): attachTo=False, setMembers='', publish=True, - renderer='aerender', name=subset_name, resolutionWidth=render_q.width, resolutionHeight=render_q.height, @@ -113,10 +112,9 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): frameStart=frame_start, frameEnd=frame_end, frameStep=1, - toBeRenderedOn='deadline', fps=fps, app_version=app_version, - publish_attributes=inst.data.get("publish_attributes"), + publish_attributes=inst.data.get("publish_attributes", {}), file_name=render_q.file_name ) @@ -138,6 +136,9 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): fam = "render.farm" if fam not in instance.families: instance.families.append(fam) + instance.toBeRenderedOn = "deadline" + instance.renderer = "aerender" + instance.farm = True # to skip integrate instances.append(instance) instances_to_remove.append(inst) diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py index 9cb6900b0a..3c5013b3bd 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py @@ -1,8 +1,8 @@ import os import pyblish.api -from openpype.lib import get_subset_name_with_asset_doc from openpype.pipeline import legacy_io +from openpype.pipeline.create import get_subset_name class CollectWorkfile(pyblish.api.ContextPlugin): @@ -11,6 +11,8 @@ class CollectWorkfile(pyblish.api.ContextPlugin): label = "Collect After Effects Workfile Instance" order = pyblish.api.CollectorOrder + 0.1 + default_variant = "Main" + def process(self, context): existing_instance = None for instance in context: @@ -69,13 +71,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin): # workfile instance family = "workfile" - subset = get_subset_name_with_asset_doc( + subset = get_subset_name( family, - "", + self.default_variant, context.data["anatomyData"]["task"]["name"], context.data["assetEntity"], context.data["anatomyData"]["project"]["name"], - host_name=context.data["hostName"] + host_name=context.data["hostName"], + project_settings=context.data["project_settings"] ) # Create instance instance = context.create_instance(subset) diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py index 7323a0b125..dc65cee61d 100644 --- a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py @@ -2,14 +2,18 @@ import os import sys import six -import openpype.api +from openpype.lib import ( + get_ffmpeg_tool_path, + run_subprocess, +) +from openpype.pipeline import publish from openpype.hosts.aftereffects.api import get_stub -class ExtractLocalRender(openpype.api.Extractor): +class ExtractLocalRender(publish.Extractor): """Render RenderQueue locally.""" - order = openpype.api.Extractor.order - 0.47 + order = publish.Extractor.order - 0.47 label = "Extract Local Render" hosts = ["aftereffects"] families = ["renderLocal", "render.local"] @@ -53,7 +57,7 @@ class ExtractLocalRender(openpype.api.Extractor): instance.data["representations"] = [repre_data] - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") # Generate thumbnail. 
thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") @@ -66,7 +70,7 @@ class ExtractLocalRender(openpype.api.Extractor): ] self.log.debug("Thumbnail args:: {}".format(args)) try: - output = openpype.lib.run_subprocess(args) + output = run_subprocess(args) except TypeError: self.log.warning("Error in creating thumbnail") six.reraise(*sys.exc_info()) diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py b/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py index eb2977309f..343838eb49 100644 --- a/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py +++ b/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py @@ -1,13 +1,13 @@ import pyblish.api -import openpype.api +from openpype.pipeline import publish from openpype.hosts.aftereffects.api import get_stub class ExtractSaveScene(pyblish.api.ContextPlugin): """Save scene before extraction.""" - order = openpype.api.Extractor.order - 0.48 + order = publish.Extractor.order - 0.48 label = "Extract Save Scene" hosts = ["aftereffects"] diff --git a/openpype/hosts/aftereffects/plugins/publish/increment_workfile.py b/openpype/hosts/aftereffects/plugins/publish/increment_workfile.py index 0829355f3b..d8f6ef5d27 100644 --- a/openpype/hosts/aftereffects/plugins/publish/increment_workfile.py +++ b/openpype/hosts/aftereffects/plugins/publish/increment_workfile.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.action import get_errored_plugins_from_data from openpype.lib import version_up +from openpype.pipeline.publish import get_errored_plugins_from_context from openpype.hosts.aftereffects.api import get_stub @@ -18,7 +18,7 @@ class IncrementWorkfile(pyblish.api.InstancePlugin): optional = True def process(self, instance): - errored_plugins = get_errored_plugins_from_data(instance.context) + errored_plugins = get_errored_plugins_from_context(instance.context) if errored_plugins: raise RuntimeError( "Skipping incrementing current file because publishing failed." diff --git a/openpype/hosts/aftereffects/plugins/publish/remove_publish_highlight.py b/openpype/hosts/aftereffects/plugins/publish/remove_publish_highlight.py index 5f3fcc3089..370f916f04 100644 --- a/openpype/hosts/aftereffects/plugins/publish/remove_publish_highlight.py +++ b/openpype/hosts/aftereffects/plugins/publish/remove_publish_highlight.py @@ -1,8 +1,8 @@ -import openpype.api +from openpype.pipeline import publish from openpype.hosts.aftereffects.api import get_stub -class RemovePublishHighlight(openpype.api.Extractor): +class RemovePublishHighlight(publish.Extractor): """Clean utf characters which are not working in DL Published compositions are marked with unicode icon which causes @@ -10,7 +10,7 @@ class RemovePublishHighlight(openpype.api.Extractor): rendering, add it later back to avoid confusion. 
""" - order = openpype.api.Extractor.order - 0.49 # just before save + order = publish.Extractor.order - 0.49 # just before save label = "Clean render comp" hosts = ["aftereffects"] families = ["render.farm"] diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py index 7a9356f020..6c36136b20 100644 --- a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py +++ b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py @@ -1,9 +1,9 @@ import pyblish.api -import openpype.api -from openpype.pipeline import ( +from openpype.pipeline import legacy_io +from openpype.pipeline.publish import ( + ValidateContentsOrder, PublishXmlValidationError, - legacy_io, ) from openpype.hosts.aftereffects.api import get_stub @@ -50,7 +50,7 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin): label = "Validate Instance Asset" hosts = ["aftereffects"] actions = [ValidateInstanceAssetRepair] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder def process(self, instance): instance_asset = instance.data["asset"] diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py index 14e224fdc2..78f98d7445 100644 --- a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py @@ -1,5 +1,9 @@ # -*- coding: utf-8 -*- -"""Validate scene settings.""" +"""Validate scene settings. +Requires: + instance -> assetEntity + instance -> anatomyData +""" import os import re @@ -54,7 +58,7 @@ class ValidateSceneSettings(OptionalPyblishPluginMixin, order = pyblish.api.ValidatorOrder label = "Validate Scene Settings" - families = ["render.farm", "render"] + families = ["render.farm", "render.local", "render"] hosts = ["aftereffects"] optional = True @@ -67,7 +71,8 @@ class ValidateSceneSettings(OptionalPyblishPluginMixin, if not self.is_active(instance.data): return - expected_settings = get_asset_settings() + asset_doc = instance.data["assetEntity"] + expected_settings = get_asset_settings(asset_doc) self.log.info("config from DB::{}".format(expected_settings)) task_name = instance.data["anatomyData"]["task"]["name"] diff --git a/openpype/hosts/blender/__init__.py b/openpype/hosts/blender/__init__.py index 0f27882c7e..2a6603606a 100644 --- a/openpype/hosts/blender/__init__.py +++ b/openpype/hosts/blender/__init__.py @@ -1,52 +1,6 @@ -import os +from .addon import BlenderAddon -def add_implementation_envs(env, _app): - """Modify environments to contain all required for implementation.""" - # Prepare path to implementation script - implementation_user_script_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "blender_addon" - ) - - # Add blender implementation script path to PYTHONPATH - python_path = env.get("PYTHONPATH") or "" - python_path_parts = [ - path - for path in python_path.split(os.pathsep) - if path - ] - python_path_parts.insert(0, implementation_user_script_path) - env["PYTHONPATH"] = os.pathsep.join(python_path_parts) - - # Modify Blender user scripts path - previous_user_scripts = set() - # Implementation path is added to set for easier paths check inside loops - # - will be removed at the end - previous_user_scripts.add(implementation_user_script_path) - - openpype_blender_user_scripts = ( - env.get("OPENPYPE_BLENDER_USER_SCRIPTS") or "" - ) - for path in 
openpype_blender_user_scripts.split(os.pathsep): - if path: - previous_user_scripts.add(os.path.normpath(path)) - - blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or "" - for path in blender_user_scripts.split(os.pathsep): - if path: - previous_user_scripts.add(os.path.normpath(path)) - - # Remove implementation path from user script paths as is set to - # `BLENDER_USER_SCRIPTS` - previous_user_scripts.remove(implementation_user_script_path) - env["BLENDER_USER_SCRIPTS"] = implementation_user_script_path - - # Set custom user scripts env - env["OPENPYPE_BLENDER_USER_SCRIPTS"] = os.pathsep.join( - previous_user_scripts - ) - - # Define Qt binding if not defined - if not env.get("QT_PREFERRED_BINDING"): - env["QT_PREFERRED_BINDING"] = "PySide2" +__all__ = ( + "BlenderAddon", +) diff --git a/openpype/hosts/blender/addon.py b/openpype/hosts/blender/addon.py new file mode 100644 index 0000000000..f1da9b808c --- /dev/null +++ b/openpype/hosts/blender/addon.py @@ -0,0 +1,72 @@ +import os +from openpype.modules import OpenPypeModule, IHostAddon + +BLENDER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class BlenderAddon(OpenPypeModule, IHostAddon): + name = "blender" + host_name = "blender" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + """Modify environments to contain all required for implementation.""" + # Prepare path to implementation script + implementation_user_script_path = os.path.join( + BLENDER_ROOT_DIR, + "blender_addon" + ) + + # Add blender implementation script path to PYTHONPATH + python_path = env.get("PYTHONPATH") or "" + python_path_parts = [ + path + for path in python_path.split(os.pathsep) + if path + ] + python_path_parts.insert(0, implementation_user_script_path) + env["PYTHONPATH"] = os.pathsep.join(python_path_parts) + + # Modify Blender user scripts path + previous_user_scripts = set() + # Implementation path is added to set for easier paths check inside + # loops - will be removed at the end + previous_user_scripts.add(implementation_user_script_path) + + openpype_blender_user_scripts = ( + env.get("OPENPYPE_BLENDER_USER_SCRIPTS") or "" + ) + for path in openpype_blender_user_scripts.split(os.pathsep): + if path: + previous_user_scripts.add(os.path.normpath(path)) + + blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or "" + for path in blender_user_scripts.split(os.pathsep): + if path: + previous_user_scripts.add(os.path.normpath(path)) + + # Remove implementation path from user script paths as is set to + # `BLENDER_USER_SCRIPTS` + previous_user_scripts.remove(implementation_user_script_path) + env["BLENDER_USER_SCRIPTS"] = implementation_user_script_path + + # Set custom user scripts env + env["OPENPYPE_BLENDER_USER_SCRIPTS"] = os.pathsep.join( + previous_user_scripts + ) + + # Define Qt binding if not defined + if not env.get("QT_PREFERRED_BINDING"): + env["QT_PREFERRED_BINDING"] = "PySide2" + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(BLENDER_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".blend"] diff --git a/openpype/hosts/blender/api/action.py b/openpype/hosts/blender/api/action.py index 09ef76326e..fe0833e39f 100644 --- a/openpype/hosts/blender/api/action.py +++ b/openpype/hosts/blender/api/action.py @@ -2,7 +2,7 @@ import bpy import pyblish.api -from openpype.api import get_errored_instances_from_context +from openpype.pipeline.publish import 
get_errored_instances_from_context class SelectInvalidAction(pyblish.api.Action): diff --git a/openpype/hosts/blender/api/lib.py b/openpype/hosts/blender/api/lib.py index 20098c0fe8..05912885f7 100644 --- a/openpype/hosts/blender/api/lib.py +++ b/openpype/hosts/blender/api/lib.py @@ -6,7 +6,7 @@ from typing import Dict, List, Union import bpy import addon_utils -from openpype.api import Logger +from openpype.lib import Logger from . import pipeline @@ -234,7 +234,7 @@ def lsattrs(attrs: Dict) -> List: def read(node: bpy.types.bpy_struct_meta_idprop): """Return user-defined attributes from `node`""" - data = dict(node.get(pipeline.AVALON_PROPERTY)) + data = dict(node.get(pipeline.AVALON_PROPERTY, {})) # Ignore hidden/internal data data = { diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py index c1b5add518..e0e09277df 100644 --- a/openpype/hosts/blender/api/ops.py +++ b/openpype/hosts/blender/api/ops.py @@ -26,7 +26,7 @@ PREVIEW_COLLECTIONS: Dict = dict() # This seems like a good value to keep the Qt app responsive and doesn't slow # down Blender. At least on macOS I the interace of Blender gets very laggy if # you make it smaller. -TIMER_INTERVAL: float = 0.01 +TIMER_INTERVAL: float = 0.01 if platform.system() == "Windows" else 0.1 class BlenderApplication(QtWidgets.QApplication): @@ -164,6 +164,12 @@ def _process_app_events() -> Optional[float]: dialog.setDetailedText(detail) dialog.exec_() + # Refresh Manager + if GlobalClass.app: + manager = GlobalClass.app.get_window("WM_OT_avalon_manager") + if manager: + manager.refresh() + if not GlobalClass.is_windows: if OpenFileCacher.opening_file: return TIMER_INTERVAL @@ -192,10 +198,11 @@ class LaunchQtApp(bpy.types.Operator): self._app = BlenderApplication.get_app() GlobalClass.app = self._app - bpy.app.timers.register( - _process_app_events, - persistent=True - ) + if not bpy.app.timers.is_registered(_process_app_events): + bpy.app.timers.register( + _process_app_events, + persistent=True + ) def execute(self, context): """Execute the operator. @@ -220,12 +227,9 @@ class LaunchQtApp(bpy.types.Operator): self._app.store_window(self.bl_idname, window) self._window = window - if not isinstance( - self._window, - (QtWidgets.QMainWindow, QtWidgets.QDialog, ModuleType) - ): + if not isinstance(self._window, (QtWidgets.QWidget, ModuleType)): raise AttributeError( - "`window` should be a `QDialog or module`. Got: {}".format( + "`window` should be a `QWidget or module`. Got: {}".format( str(type(window)) ) ) @@ -249,9 +253,9 @@ class LaunchQtApp(bpy.types.Operator): self._window.setWindowFlags(on_top_flags) self._window.show() - if on_top_flags != origin_flags: - self._window.setWindowFlags(origin_flags) - self._window.show() + # if on_top_flags != origin_flags: + # self._window.setWindowFlags(origin_flags) + # self._window.show() return {'FINISHED'} diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py index 5b81764644..c2aee1e653 100644 --- a/openpype/hosts/blender/api/pipeline.py +++ b/openpype/hosts/blender/api/pipeline.py @@ -10,6 +10,7 @@ from . 
import ops import pyblish.api +from openpype.client import get_asset_by_name from openpype.pipeline import ( schema, legacy_io, @@ -19,8 +20,8 @@ from openpype.pipeline import ( deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) -from openpype.api import Logger from openpype.lib import ( + Logger, register_event_callback, emit_event ) @@ -83,18 +84,16 @@ def uninstall(): def set_start_end_frames(): + project_name = legacy_io.active_project() asset_name = legacy_io.Session["AVALON_ASSET"] - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) scene = bpy.context.scene # Default scene settings frameStart = scene.frame_start frameEnd = scene.frame_end - fps = scene.render.fps + fps = scene.render.fps / scene.render.fps_base resolution_x = scene.render.resolution_x resolution_y = scene.render.resolution_y @@ -117,7 +116,8 @@ def set_start_end_frames(): scene.frame_start = frameStart scene.frame_end = frameEnd - scene.render.fps = fps + scene.render.fps = round(fps) + scene.render.fps_base = round(fps) / fps scene.render.resolution_x = resolution_x scene.render.resolution_y = resolution_y diff --git a/openpype/hosts/blender/api/workio.py b/openpype/hosts/blender/api/workio.py index 5eb9f82999..a8f6193abc 100644 --- a/openpype/hosts/blender/api/workio.py +++ b/openpype/hosts/blender/api/workio.py @@ -5,8 +5,6 @@ from typing import List, Optional import bpy -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS - class OpenFileCacher: """Store information about opening file. @@ -78,7 +76,7 @@ def has_unsaved_changes() -> bool: def file_extensions() -> List[str]: """Return the supported file extensions for Blender scene files.""" - return HOST_WORKFILE_EXTENSIONS["blender"] + return [".blend"] def work_root(session: dict) -> str: diff --git a/openpype/hosts/blender/blender_addon/startup/init.py b/openpype/hosts/blender/blender_addon/startup/init.py index 13a4b8a7a1..8dbff8a91d 100644 --- a/openpype/hosts/blender/blender_addon/startup/init.py +++ b/openpype/hosts/blender/blender_addon/startup/init.py @@ -1,4 +1,10 @@ from openpype.pipeline import install_host from openpype.hosts.blender import api -install_host(api) + +def register(): + install_host(api) + + +def unregister(): + pass diff --git a/openpype/hosts/blender/hooks/pre_pyside_install.py b/openpype/hosts/blender/hooks/pre_pyside_install.py index a37f8f0379..e5f66d2a26 100644 --- a/openpype/hosts/blender/hooks/pre_pyside_install.py +++ b/openpype/hosts/blender/hooks/pre_pyside_install.py @@ -1,6 +1,7 @@ import os import re import subprocess +from platform import system from openpype.lib import PreLaunchHook @@ -13,12 +14,9 @@ class InstallPySideToBlender(PreLaunchHook): For pipeline implementation is required to have Qt binding installed in blender's python packages. - - Prelaunch hook can work only on Windows right now. """ app_groups = ["blender"] - platforms = ["windows"] def execute(self): # Prelaunch hook is not crucial @@ -34,25 +32,28 @@ class InstallPySideToBlender(PreLaunchHook): # Get blender's python directory version_regex = re.compile(r"^[2-3]\.[0-9]+$") + platform = system().lower() executable = self.launch_context.executable.executable_path - if os.path.basename(executable).lower() != "blender.exe": + expected_executable = "blender" + if platform == "windows": + expected_executable += ".exe" + + if os.path.basename(executable).lower() != expected_executable: self.log.info(( - "Executable does not lead to blender.exe file. 
Can't determine" - " blender's python to check/install PySide2." + f"Executable does not lead to {expected_executable} file." + "Can't determine blender's python to check/install PySide2." )) return - executable_dir = os.path.dirname(executable) + versions_dir = os.path.dirname(executable) + if platform == "darwin": + versions_dir = os.path.join( + os.path.dirname(versions_dir), "Resources" + ) version_subfolders = [] - for name in os.listdir(executable_dir): - fullpath = os.path.join(name, executable_dir) - if not os.path.isdir(fullpath): - continue - - if not version_regex.match(name): - continue - - version_subfolders.append(name) + for dir_entry in os.scandir(versions_dir): + if dir_entry.is_dir() and version_regex.match(dir_entry.name): + version_subfolders.append(dir_entry.name) if not version_subfolders: self.log.info( @@ -72,16 +73,21 @@ class InstallPySideToBlender(PreLaunchHook): version_subfolder = version_subfolders[0] - pythond_dir = os.path.join( - os.path.dirname(executable), - version_subfolder, - "python" - ) + python_dir = os.path.join(versions_dir, version_subfolder, "python") + python_lib = os.path.join(python_dir, "lib") + python_version = "python" + + if platform != "windows": + for dir_entry in os.scandir(python_lib): + if dir_entry.is_dir() and dir_entry.name.startswith("python"): + python_lib = dir_entry.path + python_version = dir_entry.name + break # Change PYTHONPATH to contain blender's packages as first python_paths = [ - os.path.join(pythond_dir, "lib"), - os.path.join(pythond_dir, "lib", "site-packages"), + python_lib, + os.path.join(python_lib, "site-packages"), ] python_path = self.launch_context.env.get("PYTHONPATH") or "" for path in python_path.split(os.pathsep): @@ -91,7 +97,15 @@ class InstallPySideToBlender(PreLaunchHook): self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths) # Get blender's python executable - python_executable = os.path.join(pythond_dir, "bin", "python.exe") + python_bin = os.path.join(python_dir, "bin") + if platform == "windows": + python_executable = os.path.join(python_bin, "python.exe") + else: + python_executable = os.path.join(python_bin, python_version) + # Check for python with enabled 'pymalloc' + if not os.path.exists(python_executable): + python_executable += "m" + if not os.path.exists(python_executable): self.log.warning( "Couldn't find python executable for blender. {}".format( @@ -106,7 +120,15 @@ class InstallPySideToBlender(PreLaunchHook): return # Install PySide2 in blender's python - self.install_pyside_windows(python_executable) + if platform == "windows": + result = self.install_pyside_windows(python_executable) + else: + result = self.install_pyside(python_executable) + + if result: + self.log.info("Successfully installed PySide2 module to blender.") + else: + self.log.warning("Failed to install PySide2 module to blender.") def install_pyside_windows(self, python_executable): """Install PySide2 python module to blender's python. @@ -144,19 +166,41 @@ class InstallPySideToBlender(PreLaunchHook): lpDirectory=os.path.dirname(python_executable) ) process_handle = process_info["hProcess"] - obj = win32event.WaitForSingleObject( - process_handle, win32event.INFINITE - ) + win32event.WaitForSingleObject(process_handle, win32event.INFINITE) returncode = win32process.GetExitCodeProcess(process_handle) - if returncode == 0: - self.log.info( - "Successfully installed PySide2 module to blender." 
- ) - return + return returncode == 0 except pywintypes.error: pass - self.log.warning("Failed to install PySide2 module to blender.") + def install_pyside(self, python_executable): + """Install PySide2 python module to blender's python.""" + try: + # Parameters + # - use "-m pip" as module pip to install PySide2 and argument + # "--ignore-installed" is to force install module to blender's + # site-packages and make sure it is binary compatible + args = [ + python_executable, + "-m", + "pip", + "install", + "--ignore-installed", + "PySide2", + ] + process = subprocess.Popen( + args, stdout=subprocess.PIPE, universal_newlines=True + ) + process.communicate() + return process.returncode == 0 + except PermissionError: + self.log.warning( + "Permission denied with command:" + "\"{}\".".format(" ".join(args)) + ) + except OSError as error: + self.log.warning(f"OS error has occurred: \"{error}\".") + except subprocess.SubprocessError: + pass def is_pyside_installed(self, python_executable): """Check if PySide2 module is in blender's pip list. @@ -169,7 +213,7 @@ class InstallPySideToBlender(PreLaunchHook): args = [python_executable, "-m", "pip", "list"] process = subprocess.Popen(args, stdout=subprocess.PIPE) stdout, _ = process.communicate() - lines = stdout.decode().split("\r\n") + lines = stdout.decode().split(os.linesep) # Second line contain dashes that define maximum length of module name. # Second column of dashes define maximum length of module version. package_dashes, *_ = lines[1].split(" ") diff --git a/openpype/hosts/blender/plugins/load/load_layout_blend.py b/openpype/hosts/blender/plugins/load/load_layout_blend.py index cf8e89ed1f..e0124053bf 100644 --- a/openpype/hosts/blender/plugins/load/load_layout_blend.py +++ b/openpype/hosts/blender/plugins/load/load_layout_blend.py @@ -6,12 +6,12 @@ from typing import Dict, List, Optional import bpy -from openpype import lib from openpype.pipeline import ( legacy_create, get_representation_path, AVALON_CONTAINER_ID, ) +from openpype.pipeline.create import get_legacy_creator_by_name from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, @@ -157,7 +157,7 @@ class BlendLayoutLoader(plugin.AssetLoader): t.id = local_obj elif local_obj.type == 'EMPTY': - creator_plugin = lib.get_creator_by_name("CreateAnimation") + creator_plugin = get_legacy_creator_by_name("CreateAnimation") if not creator_plugin: raise ValueError("Creator plugin \"CreateAnimation\" was " "not found.") diff --git a/openpype/hosts/blender/plugins/load/load_layout_json.py b/openpype/hosts/blender/plugins/load/load_layout_json.py index a0580af4a0..eca098627e 100644 --- a/openpype/hosts/blender/plugins/load/load_layout_json.py +++ b/openpype/hosts/blender/plugins/load/load_layout_json.py @@ -118,7 +118,7 @@ class JsonLayoutLoader(plugin.AssetLoader): # Camera creation when loading a layout is not necessary for now, # but the code is worth keeping in case we need it in the future. 
# # Create the camera asset and the camera instance - # creator_plugin = lib.get_creator_by_name("CreateCamera") + # creator_plugin = get_legacy_creator_by_name("CreateCamera") # if not creator_plugin: # raise ValueError("Creator plugin \"CreateCamera\" was " # "not found.") diff --git a/openpype/hosts/blender/plugins/load/load_rig.py b/openpype/hosts/blender/plugins/load/load_rig.py index 4dfa96167f..1d23a70061 100644 --- a/openpype/hosts/blender/plugins/load/load_rig.py +++ b/openpype/hosts/blender/plugins/load/load_rig.py @@ -6,12 +6,12 @@ from typing import Dict, List, Optional import bpy -from openpype import lib from openpype.pipeline import ( legacy_create, get_representation_path, AVALON_CONTAINER_ID, ) +from openpype.pipeline.create import get_legacy_creator_by_name from openpype.hosts.blender.api import ( plugin, get_selection, @@ -244,7 +244,7 @@ class BlendRigLoader(plugin.AssetLoader): objects = self._process(libpath, asset_group, group_name, action) if create_animation: - creator_plugin = lib.get_creator_by_name("CreateAnimation") + creator_plugin = get_legacy_creator_by_name("CreateAnimation") if not creator_plugin: raise ValueError("Creator plugin \"CreateAnimation\" was " "not found.") diff --git a/openpype/hosts/blender/plugins/publish/collect_current_file.py b/openpype/hosts/blender/plugins/publish/collect_current_file.py index 72976c490b..c3097a0694 100644 --- a/openpype/hosts/blender/plugins/publish/collect_current_file.py +++ b/openpype/hosts/blender/plugins/publish/collect_current_file.py @@ -1,6 +1,19 @@ +import os import bpy import pyblish.api +from openpype.pipeline import legacy_io +from openpype.hosts.blender.api import workio + + +class SaveWorkfiledAction(pyblish.api.Action): + """Save Workfile.""" + label = "Save Workfile" + on = "failed" + icon = "save" + + def process(self, context, plugin): + bpy.ops.wm.avalon_workfiles() class CollectBlenderCurrentFile(pyblish.api.ContextPlugin): @@ -8,12 +21,52 @@ class CollectBlenderCurrentFile(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder - 0.5 label = "Blender Current File" - hosts = ['blender'] + hosts = ["blender"] + actions = [SaveWorkfiledAction] def process(self, context): """Inject the current working file""" - current_file = bpy.data.filepath - context.data['currentFile'] = current_file + current_file = workio.current_file() - assert current_file != '', "Current file is empty. " \ - "Save the file before continuing." + context.data["currentFile"] = current_file + + assert current_file, ( + "Current file is empty. Save the file before continuing." 
+ ) + + folder, file = os.path.split(current_file) + filename, ext = os.path.splitext(file) + + task = legacy_io.Session["AVALON_TASK"] + + data = {} + + # create instance + instance = context.create_instance(name=filename) + subset = "workfile" + task.capitalize() + + data.update({ + "subset": subset, + "asset": os.getenv("AVALON_ASSET", None), + "label": subset, + "publish": True, + "family": "workfile", + "families": ["workfile"], + "setMembers": [current_file], + "frameStart": bpy.context.scene.frame_start, + "frameEnd": bpy.context.scene.frame_end, + }) + + data["representations"] = [{ + "name": ext.lstrip("."), + "ext": ext.lstrip("."), + "files": file, + "stagingDir": folder, + }] + + instance.data.update(data) + + self.log.info("Collected instance: {}".format(file)) + self.log.info("Scene path: {}".format(current_file)) + self.log.info("staging Dir: {}".format(folder)) + self.log.info("subset: {}".format(subset)) diff --git a/openpype/hosts/blender/plugins/publish/extract_abc.py b/openpype/hosts/blender/plugins/publish/extract_abc.py index a26a92f7e4..1cab9d225b 100644 --- a/openpype/hosts/blender/plugins/publish/extract_abc.py +++ b/openpype/hosts/blender/plugins/publish/extract_abc.py @@ -2,12 +2,12 @@ import os import bpy -from openpype import api +from openpype.pipeline import publish from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY -class ExtractABC(api.Extractor): +class ExtractABC(publish.Extractor): """Extract as ABC.""" label = "Extract ABC" diff --git a/openpype/hosts/blender/plugins/publish/extract_blend.py b/openpype/hosts/blender/plugins/publish/extract_blend.py index 9add633f05..6a001b6f65 100644 --- a/openpype/hosts/blender/plugins/publish/extract_blend.py +++ b/openpype/hosts/blender/plugins/publish/extract_blend.py @@ -2,10 +2,10 @@ import os import bpy -import openpype.api +from openpype.pipeline import publish -class ExtractBlend(openpype.api.Extractor): +class ExtractBlend(publish.Extractor): """Extract a blend file.""" label = "Extract Blend" diff --git a/openpype/hosts/blender/plugins/publish/extract_blend_animation.py b/openpype/hosts/blender/plugins/publish/extract_blend_animation.py index 4917223331..477411b73d 100644 --- a/openpype/hosts/blender/plugins/publish/extract_blend_animation.py +++ b/openpype/hosts/blender/plugins/publish/extract_blend_animation.py @@ -2,10 +2,10 @@ import os import bpy -import openpype.api +from openpype.pipeline import publish -class ExtractBlendAnimation(openpype.api.Extractor): +class ExtractBlendAnimation(publish.Extractor): """Extract a blend file.""" label = "Extract Blend" diff --git a/openpype/hosts/blender/plugins/publish/extract_camera.py b/openpype/hosts/blender/plugins/publish/extract_camera.py index b2c7611b58..9fd181825c 100644 --- a/openpype/hosts/blender/plugins/publish/extract_camera.py +++ b/openpype/hosts/blender/plugins/publish/extract_camera.py @@ -2,11 +2,11 @@ import os import bpy -from openpype import api +from openpype.pipeline import publish from openpype.hosts.blender.api import plugin -class ExtractCamera(api.Extractor): +class ExtractCamera(publish.Extractor): """Extract as the camera as FBX.""" label = "Extract Camera" diff --git a/openpype/hosts/blender/plugins/publish/extract_fbx.py b/openpype/hosts/blender/plugins/publish/extract_fbx.py index 3ac66f33a4..0ad797c226 100644 --- a/openpype/hosts/blender/plugins/publish/extract_fbx.py +++ b/openpype/hosts/blender/plugins/publish/extract_fbx.py @@ -2,12 +2,12 @@ import os import bpy -from 
openpype import api +from openpype.pipeline import publish from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY -class ExtractFBX(api.Extractor): +class ExtractFBX(publish.Extractor): """Extract as FBX.""" label = "Extract FBX" diff --git a/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py b/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py index 4b4a92932a..062b42e99d 100644 --- a/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py +++ b/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py @@ -5,12 +5,12 @@ import bpy import bpy_extras import bpy_extras.anim_utils -from openpype import api +from openpype.pipeline import publish from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY -class ExtractAnimationFBX(api.Extractor): +class ExtractAnimationFBX(publish.Extractor): """Extract as animation.""" label = "Extract FBX" diff --git a/openpype/hosts/blender/plugins/publish/extract_layout.py b/openpype/hosts/blender/plugins/publish/extract_layout.py index 8ecc78a2c6..f2d04f1178 100644 --- a/openpype/hosts/blender/plugins/publish/extract_layout.py +++ b/openpype/hosts/blender/plugins/publish/extract_layout.py @@ -1,19 +1,17 @@ import os import json -from bson.objectid import ObjectId - import bpy import bpy_extras import bpy_extras.anim_utils -from openpype.pipeline import legacy_io +from openpype.client import get_representation_by_name +from openpype.pipeline import publish from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY -import openpype.api -class ExtractLayout(openpype.api.Extractor): +class ExtractLayout(publish.Extractor): """Extract a layout.""" label = "Extract Layout" @@ -131,43 +129,32 @@ class ExtractLayout(openpype.api.Extractor): fbx_count = 0 + project_name = instance.context.data["projectEntity"]["name"] for asset in asset_group.children: metadata = asset.get(AVALON_PROPERTY) - parent = metadata["parent"] + version_id = metadata["parent"] family = metadata["family"] - self.log.debug("Parent: {}".format(parent)) + self.log.debug("Parent: {}".format(version_id)) # Get blend reference - blend = legacy_io.find_one( - { - "type": "representation", - "parent": ObjectId(parent), - "name": "blend" - }, - projection={"_id": True}) + blend = get_representation_by_name( + project_name, "blend", version_id, fields=["_id"] + ) blend_id = None if blend: blend_id = blend["_id"] # Get fbx reference - fbx = legacy_io.find_one( - { - "type": "representation", - "parent": ObjectId(parent), - "name": "fbx" - }, - projection={"_id": True}) + fbx = get_representation_by_name( + project_name, "fbx", version_id, fields=["_id"] + ) fbx_id = None if fbx: fbx_id = fbx["_id"] # Get abc reference - abc = legacy_io.find_one( - { - "type": "representation", - "parent": ObjectId(parent), - "name": "abc" - }, - projection={"_id": True}) + abc = get_representation_by_name( + project_name, "abc", version_id, fields=["_id"] + ) abc_id = None if abc: abc_id = abc["_id"] @@ -193,7 +180,7 @@ class ExtractLayout(openpype.api.Extractor): "rotation": { "x": asset.rotation_euler.x, "y": asset.rotation_euler.y, - "z": asset.rotation_euler.z, + "z": asset.rotation_euler.z }, "scale": { "x": asset.scale.x, @@ -202,6 +189,18 @@ class ExtractLayout(openpype.api.Extractor): } } + json_element["transform_matrix"] = [] + + for row in list(asset.matrix_world.transposed()): + 
json_element["transform_matrix"].append(list(row)) + + json_element["basis"] = [ + [1, 0, 0, 0], + [0, -1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1] + ] + # Extract the animation as well if family == "rig": f, n = self._export_animation( diff --git a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py b/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py index 39b9b67511..84b9dd1a6e 100644 --- a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py +++ b/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py @@ -1,9 +1,11 @@ from typing import List -import mathutils +import bpy import pyblish.api + import openpype.hosts.blender.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin): @@ -14,21 +16,18 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin): in Unreal and Blender. """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["blender"] families = ["camera"] - category = "geometry" version = (0, 1, 0) label = "Zero Keyframe" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] - _identity = mathutils.Matrix() - - @classmethod - def get_invalid(cls, instance) -> List: + @staticmethod + def get_invalid(instance) -> List: invalid = [] - for obj in [obj for obj in instance]: - if obj.type == "CAMERA": + for obj in instance: + if isinstance(obj, bpy.types.Object) and obj.type == "CAMERA": if obj.animation_data and obj.animation_data.action: action = obj.animation_data.action frames_set = set() @@ -45,4 +44,5 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: raise RuntimeError( - f"Object found in instance is not in Object Mode: {invalid}") + f"Camera must have a keyframe at frame 0: {invalid}" + ) diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py index 1c73476fc8..cee855671d 100644 --- a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py +++ b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py @@ -3,13 +3,15 @@ from typing import List import bpy import pyblish.api + +from openpype.pipeline.publish import ValidateContentsOrder import openpype.hosts.blender.api.action class ValidateMeshHasUvs(pyblish.api.InstancePlugin): """Validate that the current mesh has UV's.""" - order = pyblish.api.ValidatorOrder + order = ValidateContentsOrder hosts = ["blender"] families = ["model"] category = "geometry" @@ -25,7 +27,10 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin): for uv_layer in obj.data.uv_layers: for polygon in obj.data.polygons: for loop_index in polygon.loop_indices: - if not uv_layer.data[loop_index].uv: + if ( + loop_index >= len(uv_layer.data) + or not uv_layer.data[loop_index].uv + ): return False return True @@ -33,20 +38,20 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin): @classmethod def get_invalid(cls, instance) -> List: invalid = [] - # TODO (jasper): only check objects in the collection that will be published? - for obj in [ - obj for obj in instance]: - try: - if obj.type == 'MESH': - # Make sure we are in object mode. 
- bpy.ops.object.mode_set(mode='OBJECT') - if not cls.has_uvs(obj): - invalid.append(obj) - except: - continue + for obj in instance: + if isinstance(obj, bpy.types.Object) and obj.type == 'MESH': + if obj.mode != "OBJECT": + cls.log.warning( + f"Mesh object {obj.name} should be in 'OBJECT' mode" + " to be properly checked." + ) + if not cls.has_uvs(obj): + invalid.append(obj) return invalid def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError(f"Meshes found in instance without valid UV's: {invalid}") + raise RuntimeError( + f"Meshes found in instance without valid UV's: {invalid}" + ) diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py index 00159a2d36..45ac08811d 100644 --- a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py +++ b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py @@ -3,28 +3,28 @@ from typing import List import bpy import pyblish.api + +from openpype.pipeline.publish import ValidateContentsOrder import openpype.hosts.blender.api.action class ValidateMeshNoNegativeScale(pyblish.api.Validator): """Ensure that meshes don't have a negative scale.""" - order = pyblish.api.ValidatorOrder + order = ValidateContentsOrder hosts = ["blender"] families = ["model"] + category = "geometry" label = "Mesh No Negative Scale" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance) -> List: invalid = [] - # TODO (jasper): only check objects in the collection that will be published? - for obj in [ - obj for obj in bpy.data.objects if obj.type == 'MESH' - ]: - if any(v < 0 for v in obj.scale): - invalid.append(obj) - + for obj in instance: + if isinstance(obj, bpy.types.Object) and obj.type == 'MESH': + if any(v < 0 for v in obj.scale): + invalid.append(obj) return invalid def process(self, instance): diff --git a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py index 261ff864d5..f5dc9fdd5c 100644 --- a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py +++ b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py @@ -1,7 +1,11 @@ from typing import List +import bpy + import pyblish.api + import openpype.hosts.blender.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateNoColonsInName(pyblish.api.InstancePlugin): @@ -12,20 +16,20 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["blender"] families = ["model", "rig"] version = (0, 1, 0) label = "No Colons in names" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] - @classmethod - def get_invalid(cls, instance) -> List: + @staticmethod + def get_invalid(instance) -> List: invalid = [] - for obj in [obj for obj in instance]: + for obj in instance: if ':' in obj.name: invalid.append(obj) - if obj.type == 'ARMATURE': + if isinstance(obj, bpy.types.Object) and obj.type == 'ARMATURE': for bone in obj.data.bones: if ':' in bone.name: invalid.append(obj) @@ -36,4 +40,5 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: raise RuntimeError( - f"Objects found with colon in name: {invalid}") + f"Objects found with colon in name: {invalid}" + ) diff --git 
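The Blender validators touched above converge on one shape: a static `get_invalid` that filters instance members by `bpy.types.Object` before inspecting their attributes, and a `process` that raises with the offending objects. A minimal sketch of that shared pattern follows; the plugin name and the `hide_render` check are illustrative only and are not part of this changeset:

```python
from typing import List

import bpy
import pyblish.api


class ValidateExampleObjects(pyblish.api.InstancePlugin):
    """Illustrative validator following the pattern used above."""

    order = pyblish.api.ValidatorOrder
    hosts = ["blender"]
    families = ["model"]
    label = "Example Validator"

    @staticmethod
    def get_invalid(instance) -> List:
        invalid = []
        # Instance members are not guaranteed to be objects (collections can
        # be included too), so check the datablock type before its attributes.
        for obj in instance:
            if isinstance(obj, bpy.types.Object) and obj.hide_render:
                invalid.append(obj)
        return invalid

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError(
                f"Objects failed the example check: {invalid}"
            )
```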
a/openpype/hosts/blender/plugins/publish/validate_object_mode.py b/openpype/hosts/blender/plugins/publish/validate_object_mode.py index 90ef0b7c41..ac60e00f89 100644 --- a/openpype/hosts/blender/plugins/publish/validate_object_mode.py +++ b/openpype/hosts/blender/plugins/publish/validate_object_mode.py @@ -1,5 +1,7 @@ from typing import List +import bpy + import pyblish.api import openpype.hosts.blender.api.action @@ -10,26 +12,21 @@ class ValidateObjectIsInObjectMode(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder - 0.01 hosts = ["blender"] families = ["model", "rig", "layout"] - category = "geometry" label = "Validate Object Mode" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] optional = False - @classmethod - def get_invalid(cls, instance) -> List: + @staticmethod + def get_invalid(instance) -> List: invalid = [] - for obj in [obj for obj in instance]: - try: - if obj.type == 'MESH' or obj.type == 'ARMATURE': - # Check if the object is in object mode. - if not obj.mode == 'OBJECT': - invalid.append(obj) - except Exception: - continue + for obj in instance: + if isinstance(obj, bpy.types.Object) and obj.mode != "OBJECT": + invalid.append(obj) return invalid def process(self, instance): invalid = self.get_invalid(instance) if invalid: raise RuntimeError( - f"Object found in instance is not in Object Mode: {invalid}") + f"Object found in instance is not in Object Mode: {invalid}" + ) diff --git a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py index 7456dbc423..742826d3d9 100644 --- a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py +++ b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py @@ -1,9 +1,12 @@ from typing import List import mathutils +import bpy import pyblish.api + import openpype.hosts.blender.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateTransformZero(pyblish.api.InstancePlugin): @@ -15,10 +18,9 @@ class ValidateTransformZero(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["blender"] families = ["model"] - category = "geometry" version = (0, 1, 0) label = "Transform Zero" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] @@ -28,8 +30,11 @@ class ValidateTransformZero(pyblish.api.InstancePlugin): @classmethod def get_invalid(cls, instance) -> List: invalid = [] - for obj in [obj for obj in instance]: - if obj.matrix_basis != cls._identity: + for obj in instance: + if ( + isinstance(obj, bpy.types.Object) + and obj.matrix_basis != cls._identity + ): invalid.append(obj) return invalid @@ -37,4 +42,6 @@ class ValidateTransformZero(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: raise RuntimeError( - f"Object found in instance is not in Object Mode: {invalid}") + "Object found in instance has not" + f" transform to zero: {invalid}" + ) diff --git a/openpype/hosts/celaction/api/cli.py b/openpype/hosts/celaction/api/cli.py index 8c7b3a2e74..88fc11cafb 100644 --- a/openpype/hosts/celaction/api/cli.py +++ b/openpype/hosts/celaction/api/cli.py @@ -6,15 +6,14 @@ import argparse import pyblish.api import pyblish.util -from openpype.api import Logger -import openpype import openpype.hosts.celaction +from openpype.lib import Logger from openpype.hosts.celaction import api as celaction from openpype.tools.utils import host_tools from openpype.pipeline import install_openpype_plugins 
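The Celaction and Flame changes below replace the legacy `openpype.api` logger import with `openpype.lib.Logger`, whose `get_logger` is called on the class rather than on an instance. A small before/after sketch of the pattern used throughout this PR:

```python
# Old pattern: instantiate the wrapper first.
# from openpype.api import Logger
# log = Logger().get_logger("Celaction_cli_publisher")

# New pattern: call the classmethod directly.
from openpype.lib import Logger

log = Logger.get_logger("Celaction_cli_publisher")
log.info("CelAction publishing started")
```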
-log = Logger().get_logger("Celaction_cli_publisher") +log = Logger.get_logger("Celaction_cli_publisher") publish_host = "celaction" diff --git a/openpype/hosts/celaction/plugins/publish/collect_audio.py b/openpype/hosts/celaction/plugins/publish/collect_audio.py deleted file mode 100644 index 8acda5fc7c..0000000000 --- a/openpype/hosts/celaction/plugins/publish/collect_audio.py +++ /dev/null @@ -1,126 +0,0 @@ -import os -import collections -from pprint import pformat - -import pyblish.api - -from openpype.pipeline import legacy_io - - -class AppendCelactionAudio(pyblish.api.ContextPlugin): - - label = "Colect Audio for publishing" - order = pyblish.api.CollectorOrder + 0.1 - - def process(self, context): - self.log.info('Collecting Audio Data') - asset_doc = context.data["assetEntity"] - - # get all available representations - subsets = self.get_subsets( - asset_doc, - representations=["audio", "wav"] - ) - self.log.info(f"subsets is: {pformat(subsets)}") - - if not subsets.get("audioMain"): - raise AttributeError("`audioMain` subset does not exist") - - reprs = subsets.get("audioMain", {}).get("representations", []) - self.log.info(f"reprs is: {pformat(reprs)}") - - repr = next((r for r in reprs), None) - if not repr: - raise "Missing `audioMain` representation" - self.log.info(f"representation is: {repr}") - - audio_file = repr.get('data', {}).get('path', "") - - if os.path.exists(audio_file): - context.data["audioFile"] = audio_file - self.log.info( - 'audio_file: {}, has been added to context'.format(audio_file)) - else: - self.log.warning("Couldn't find any audio file on Ftrack.") - - def get_subsets(self, asset_doc, representations): - """ - Query subsets with filter on name. - - The method will return all found subsets and its defined version - and subsets. Version could be specified with number. Representation - can be filtered. - - Arguments: - asset_doct (dict): Asset (shot) mongo document - representations (list): list for all representations - - Returns: - dict: subsets with version and representations in keys - """ - - # Query all subsets for asset - subset_docs = legacy_io.find({ - "type": "subset", - "parent": asset_doc["_id"] - }) - # Collect all subset ids - subset_ids = [ - subset_doc["_id"] - for subset_doc in subset_docs - ] - - # Check if we found anything - assert subset_ids, ( - "No subsets found. Check correct filter. 
" - "Try this for start `r'.*'`: asset: `{}`" - ).format(asset_doc["name"]) - - # Last version aggregation - pipeline = [ - # Find all versions of those subsets - {"$match": { - "type": "version", - "parent": {"$in": subset_ids} - }}, - # Sorting versions all together - {"$sort": {"name": 1}}, - # Group them by "parent", but only take the last - {"$group": { - "_id": "$parent", - "_version_id": {"$last": "$_id"}, - "name": {"$last": "$name"} - }} - ] - last_versions_by_subset_id = dict() - for doc in legacy_io.aggregate(pipeline): - doc["parent"] = doc["_id"] - doc["_id"] = doc.pop("_version_id") - last_versions_by_subset_id[doc["parent"]] = doc - - version_docs_by_id = {} - for version_doc in last_versions_by_subset_id.values(): - version_docs_by_id[version_doc["_id"]] = version_doc - - repre_docs = legacy_io.find({ - "type": "representation", - "parent": {"$in": list(version_docs_by_id.keys())}, - "name": {"$in": representations} - }) - repre_docs_by_version_id = collections.defaultdict(list) - for repre_doc in repre_docs: - version_id = repre_doc["parent"] - repre_docs_by_version_id[version_id].append(repre_doc) - - output_dict = {} - for version_id, repre_docs in repre_docs_by_version_id.items(): - version_doc = version_docs_by_id[version_id] - subset_id = version_doc["parent"] - subset_doc = last_versions_by_subset_id[subset_id] - # Store queried docs by subset name - output_dict[subset_doc["name"]] = { - "representations": repre_docs, - "version": version_doc - } - - return output_dict diff --git a/openpype/hosts/flame/__init__.py b/openpype/hosts/flame/__init__.py index f839357147..b45f107747 100644 --- a/openpype/hosts/flame/__init__.py +++ b/openpype/hosts/flame/__init__.py @@ -1,22 +1,10 @@ -import os - -HOST_DIR = os.path.dirname( - os.path.abspath(__file__) +from .addon import ( + HOST_DIR, + FlameAddon, ) -def add_implementation_envs(env, _app): - # Add requirements to DL_PYTHON_HOOK_PATH - pype_root = os.environ["OPENPYPE_REPOS_ROOT"] - - env["DL_PYTHON_HOOK_PATH"] = os.path.join( - pype_root, "openpype", "hosts", "flame", "startup") - env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) - - # Set default values if are not already set via settings - defaults = { - "LOGLEVEL": "DEBUG" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value +__all__ = ( + "HOST_DIR", + "FlameAddon", +) diff --git a/openpype/hosts/flame/addon.py b/openpype/hosts/flame/addon.py new file mode 100644 index 0000000000..d9359fc5bf --- /dev/null +++ b/openpype/hosts/flame/addon.py @@ -0,0 +1,35 @@ +import os +from openpype.modules import OpenPypeModule, IHostAddon + +HOST_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class FlameAddon(OpenPypeModule, IHostAddon): + name = "flame" + host_name = "flame" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Add requirements to DL_PYTHON_HOOK_PATH + env["DL_PYTHON_HOOK_PATH"] = os.path.join(HOST_DIR, "startup") + env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) + + # Set default values if are not already set via settings + defaults = { + "LOGLEVEL": "DEBUG" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(HOST_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".otoc"] diff --git a/openpype/hosts/flame/api/__init__.py b/openpype/hosts/flame/api/__init__.py index 2c461e5f16..c00ee958b6 100644 --- 
a/openpype/hosts/flame/api/__init__.py +++ b/openpype/hosts/flame/api/__init__.py @@ -30,7 +30,8 @@ from .lib import ( maintained_temp_file_path, get_clip_segment, get_batch_group_from_desktop, - MediaInfoFile + MediaInfoFile, + TimeEffectMetadata ) from .utils import ( setup, @@ -50,7 +51,8 @@ from .pipeline import ( ) from .menu import ( FlameMenuProjectConnect, - FlameMenuTimeline + FlameMenuTimeline, + FlameMenuUniversal ) from .plugin import ( Creator, @@ -107,6 +109,7 @@ __all__ = [ "get_clip_segment", "get_batch_group_from_desktop", "MediaInfoFile", + "TimeEffectMetadata", # pipeline "install", @@ -129,6 +132,7 @@ __all__ = [ # menu "FlameMenuProjectConnect", "FlameMenuTimeline", + "FlameMenuUniversal", # plugin "Creator", diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py index c7c444c1fb..6aca5c5ce6 100644 --- a/openpype/hosts/flame/api/lib.py +++ b/openpype/hosts/flame/api/lib.py @@ -3,13 +3,18 @@ import os import re import json import pickle +import clique import tempfile +import traceback import itertools import contextlib import xml.etree.cElementTree as cET -from copy import deepcopy +from copy import deepcopy, copy from xml.etree import ElementTree as ET from pprint import pformat + +from openpype.lib import Logger, run_subprocess + from .constants import ( MARKER_COLOR, MARKER_DURATION, @@ -18,9 +23,7 @@ from .constants import ( MARKER_PUBLISH_DEFAULT ) -import openpype.api as openpype - -log = openpype.Logger.get_logger(__name__) +log = Logger.get_logger(__name__) FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]") @@ -265,7 +268,7 @@ def get_current_sequence(selection): def rescan_hooks(): import flame try: - flame.execute_shortcut('Rescan Python Hooks') + flame.execute_shortcut("Rescan Python Hooks") except Exception: pass @@ -560,7 +563,7 @@ def get_segment_attributes(segment): if not hasattr(segment, attr_name): continue attr = getattr(segment, attr_name) - segment_attrs_data[attr] = str(attr).replace("+", ":") + segment_attrs_data[attr_name] = str(attr).replace("+", ":") if attr_name in ["record_in", "record_out"]: clip_data[attr_name] = attr.relative_frame @@ -762,28 +765,40 @@ class MediaInfoFile(object): _start_frame = None _fps = None _drop_mode = None + _file_pattern = None - def __init__(self, path, **kwargs): + def __init__(self, path, logger=None): # replace log if any - if kwargs.get("logger"): - self.log = kwargs["logger"] + if logger: + self.log = logger # test if `dl_get_media_info` paht exists self._validate_media_script_path() # derivate other feed variables - self.feed_basename = os.path.basename(path) - self.feed_dir = os.path.dirname(path) - self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower() + feed_basename = os.path.basename(path) + feed_dir = os.path.dirname(path) + feed_ext = os.path.splitext(feed_basename)[1][1:].lower() with maintained_temp_file_path(".clip") as tmp_path: self.log.info("Temp File: {}".format(tmp_path)) - self._generate_media_info_file(tmp_path) + self._generate_media_info_file(tmp_path, feed_ext, feed_dir) + + # get collection containing feed_basename from path + self.file_pattern = self._get_collection( + feed_basename, feed_dir, feed_ext) + + if ( + not self.file_pattern + and os.path.exists(os.path.join(feed_dir, feed_basename)) + ): + self.file_pattern = feed_basename # get clip data and make them single if there is multiple # clips data - xml_data = self._make_single_clip_media_info(tmp_path) + xml_data = self._make_single_clip_media_info( + tmp_path, feed_basename, self.file_pattern) 
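`MediaInfoFile` now resolves a frame-sequence pattern such as `plate.[1001-1003].exr` before building the media-info `.clip` file, grouping files that share the same head with the `clique` library. A rough sketch of that idea using clique's public API; the file names below are made up for illustration:

```python
import clique

files = [
    "plate.1001.exr",
    "plate.1002.exr",
    "plate.1003.exr",
    "plate_notes.txt",  # no frame number, ends up in the remainder
]

# assemble() groups numbered files into collections and returns the rest.
collections, remainder = clique.assemble(files)
collection = collections[0]

head = collection.format("{head}")  # "plate."
tail = collection.format("{tail}")  # ".exr"
pattern = "{}[{:04d}-{:04d}]{}".format(
    head, min(collection.indexes), max(collection.indexes), tail
)
print(pattern)    # plate.[1001-1003].exr
print(remainder)  # ['plate_notes.txt']
```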
self.log.debug("xml_data: {}".format(xml_data)) self.log.debug("type: {}".format(type(xml_data))) @@ -794,6 +809,123 @@ class MediaInfoFile(object): self.log.debug("drop frame: {}".format(self.drop_mode)) self.clip_data = xml_data + def _get_collection(self, feed_basename, feed_dir, feed_ext): + """ Get collection string + + Args: + feed_basename (str): file base name + feed_dir (str): file's directory + feed_ext (str): file extension + + Raises: + AttributeError: feed_ext is not matching feed_basename + + Returns: + str: collection basename with range of sequence + """ + partialname = self._separate_file_head(feed_basename, feed_ext) + self.log.debug("__ partialname: {}".format(partialname)) + + # make sure partial input basename is having correct extensoon + if not partialname: + raise AttributeError( + "Wrong input attributes. Basename - {}, Ext - {}".format( + feed_basename, feed_ext + ) + ) + + # get all related files + files = [ + f for f in os.listdir(feed_dir) + if partialname == self._separate_file_head(f, feed_ext) + ] + + # ignore reminders as we dont need them + collections = clique.assemble(files)[0] + + # in case no collection found return None + # it is probably just single file + if not collections: + return + + # we expect only one collection + collection = collections[0] + + self.log.debug("__ collection: {}".format(collection)) + + if collection.is_contiguous(): + return self._format_collection(collection) + + # add `[` in front to make sure it want capture + # shot name with the same number + number_from_path = self._separate_number(feed_basename, feed_ext) + search_number_pattern = "[" + number_from_path + # convert to multiple collections + _continues_colls = collection.separate() + for _coll in _continues_colls: + coll_to_text = self._format_collection( + _coll, len(number_from_path)) + self.log.debug("__ coll_to_text: {}".format(coll_to_text)) + if search_number_pattern in coll_to_text: + return coll_to_text + + @staticmethod + def _format_collection(collection, padding=None): + padding = padding or collection.padding + # if no holes then return collection + head = collection.format("{head}") + tail = collection.format("{tail}") + range_template = "[{{:0{0}d}}-{{:0{0}d}}]".format( + padding) + ranges = range_template.format( + min(collection.indexes), + max(collection.indexes) + ) + # if no holes then return collection + return "{}{}{}".format(head, ranges, tail) + + def _separate_file_head(self, basename, extension): + """ Get only head with out sequence and extension + + Args: + basename (str): file base name + extension (str): file extension + + Returns: + str: file head + """ + # in case sequence file + found = re.findall( + r"(.*)[._][\d]*(?=.{})".format(extension), + basename, + ) + if found: + return found.pop() + + # in case single file + name, ext = os.path.splitext(basename) + + if extension == ext[1:]: + return name + + def _separate_number(self, basename, extension): + """ Get only sequence number as string + + Args: + basename (str): file base name + extension (str): file extension + + Returns: + str: number with padding + """ + # in case sequence file + found = re.findall( + r"[._]([\d]*)(?=.{})".format(extension), + basename, + ) + if found: + return found.pop() + @property def clip_data(self): """Clip's xml clip data @@ -846,28 +978,64 @@ class MediaInfoFile(object): def drop_mode(self, text): self._drop_mode = str(text) + @property + def file_pattern(self): + """Clips file patter + + Returns: + str: file pattern. ex. 
file.[1-2].exr + """ + return self._file_pattern + + @file_pattern.setter + def file_pattern(self, fpattern): + self._file_pattern = fpattern + def _validate_media_script_path(self): if not os.path.isfile(self.MEDIA_SCRIPT_PATH): raise IOError("Media Scirpt does not exist: `{}`".format( self.MEDIA_SCRIPT_PATH)) - def _generate_media_info_file(self, fpath): + def _generate_media_info_file(self, fpath, feed_ext, feed_dir): + """ Generate media info xml .clip file + + Args: + fpath (str): .clip file path + feed_ext (str): file extension to be filtered + feed_dir (str): look up directory + + Raises: + TypeError: Type error if it fails + """ # Create cmd arguments for gettig xml file info file cmd_args = [ self.MEDIA_SCRIPT_PATH, - "-e", self.feed_ext, + "-e", feed_ext, "-o", fpath, - self.feed_dir + feed_dir ] try: # execute creation of clip xml template data - openpype.run_subprocess(cmd_args) + run_subprocess(cmd_args) except TypeError as error: raise TypeError( "Error creating `{}` due: {}".format(fpath, error)) - def _make_single_clip_media_info(self, fpath): + def _make_single_clip_media_info(self, fpath, feed_basename, path_pattern): + """ Separate only relative clip object form .clip file + + Args: + fpath (str): clip file path + feed_basename (str): search basename + path_pattern (str): search file pattern (file.[1-2].exr) + + Raises: + ET.ParseError: if nothing found + + Returns: + ET.Element: xml element data of matching clip + """ with open(fpath) as f: lines = f.readlines() _added_root = itertools.chain( @@ -878,14 +1046,30 @@ class MediaInfoFile(object): xml_clips = new_root.findall("clip") matching_clip = None for xml_clip in xml_clips: - if xml_clip.find("name").text in self.feed_basename: - matching_clip = xml_clip + clip_name = xml_clip.find("name").text + self.log.debug("__ clip_name: `{}`".format(clip_name)) + if clip_name not in feed_basename: + continue + + # test path pattern + for out_track in xml_clip.iter("track"): + for out_feed in out_track.iter("feed"): + for span in out_feed.iter("span"): + # start frame + span_path = span.find("path") + self.log.debug( + "__ span_path.text: {}, path_pattern: {}".format( + span_path.text, path_pattern + ) + ) + if path_pattern in span_path.text: + matching_clip = xml_clip if matching_clip is None: # return warning there is missing clip raise ET.ParseError( "Missing clip in `{}`. 
Available clips {}".format( - self.feed_basename, [ + feed_basename, [ xml_clip.find("name").text for xml_clip in xml_clips ] @@ -894,26 +1078,29 @@ class MediaInfoFile(object): return matching_clip def _get_time_info_from_origin(self, xml_data): + """Set time info to class attributes + + Args: + xml_data (ET.Element): clip data + """ try: - for out_track in xml_data.iter('track'): - for out_feed in out_track.iter('feed'): + for out_track in xml_data.iter("track"): + for out_feed in out_track.iter("feed"): # start frame out_feed_nb_ticks_obj = out_feed.find( - 'startTimecode/nbTicks') + "startTimecode/nbTicks") self.start_frame = out_feed_nb_ticks_obj.text # fps out_feed_fps_obj = out_feed.find( - 'startTimecode/rate') + "startTimecode/rate") self.fps = out_feed_fps_obj.text # drop frame mode out_feed_drop_mode_obj = out_feed.find( - 'startTimecode/dropMode') + "startTimecode/dropMode") self.drop_mode = out_feed_drop_mode_obj.text break - else: - continue except Exception as msg: self.log.warning(msg) @@ -933,8 +1120,153 @@ class MediaInfoFile(object): tree = cET.ElementTree(xml_element_data) tree.write( fpath, xml_declaration=True, - method='xml', encoding='UTF-8' + method="xml", encoding="UTF-8" ) except IOError as error: raise IOError( "Not able to write data to file: {}".format(error)) + + +class TimeEffectMetadata(object): + log = log + _data = {} + _retime_modes = { + 0: "speed", + 1: "timewarp", + 2: "duration" + } + + def __init__(self, segment, logger=None): + if logger: + self.log = logger + + self._data = self._get_metadata(segment) + + @property + def data(self): + """ Returns timewarp effect data + + Returns: + dict: retime data + """ + return self._data + + def _get_metadata(self, segment): + effects = segment.effects or [] + for effect in effects: + if effect.type == "Timewarp": + with maintained_temp_file_path(".timewarp_node") as tmp_path: + self.log.info("Temp File: {}".format(tmp_path)) + effect.save_setup(tmp_path) + return self._get_attributes_from_xml(tmp_path) + + return {} + + def _get_attributes_from_xml(self, tmp_path): + with open(tmp_path, "r") as tw_setup_file: + tw_setup_string = tw_setup_file.read() + tw_setup_file.close() + + tw_setup_xml = ET.fromstring(tw_setup_string) + tw_setup = self._dictify(tw_setup_xml) + # pprint(tw_setup) + try: + tw_setup_state = tw_setup["Setup"]["State"][0] + mode = int( + tw_setup_state["TW_RetimerMode"][0]["_text"] + ) + r_data = { + "type": self._retime_modes[mode], + "effectStart": int( + tw_setup["Setup"]["Base"][0]["Range"][0]["Start"]), + "effectEnd": int( + tw_setup["Setup"]["Base"][0]["Range"][0]["End"]) + } + + if mode == 0: # speed + r_data[self._retime_modes[mode]] = float( + tw_setup_state["TW_Speed"] + [0]["Channel"][0]["Value"][0]["_text"] + ) / 100 + elif mode == 1: # timewarp + print("timing") + r_data[self._retime_modes[mode]] = self._get_anim_keys( + tw_setup_state["TW_Timing"] + ) + elif mode == 2: # duration + r_data[self._retime_modes[mode]] = { + "start": { + "source": int( + tw_setup_state["TW_DurationTiming"][0]["Channel"] + [0]["KFrames"][0]["Key"][0]["Value"][0]["_text"] + ), + "timeline": int( + tw_setup_state["TW_DurationTiming"][0]["Channel"] + [0]["KFrames"][0]["Key"][0]["Frame"][0]["_text"] + ) + }, + "end": { + "source": int( + tw_setup_state["TW_DurationTiming"][0]["Channel"] + [0]["KFrames"][0]["Key"][1]["Value"][0]["_text"] + ), + "timeline": int( + tw_setup_state["TW_DurationTiming"][0]["Channel"] + [0]["KFrames"][0]["Key"][1]["Frame"][0]["_text"] + ) + } + } + except Exception: + lines = 
traceback.format_exception(*sys.exc_info()) + self.log.error("\n".join(lines)) + return + + return r_data + + def _get_anim_keys(self, setup_cat, index=None): + return_data = { + "extrapolation": ( + setup_cat[0]["Channel"][0]["Extrap"][0]["_text"] + ), + "animKeys": [] + } + for key in setup_cat[0]["Channel"][0]["KFrames"][0]["Key"]: + if index and int(key["Index"]) != index: + continue + key_data = { + "source": float(key["Value"][0]["_text"]), + "timeline": float(key["Frame"][0]["_text"]), + "index": int(key["Index"]), + "curveMode": key["CurveMode"][0]["_text"], + "curveOrder": key["CurveOrder"][0]["_text"] + } + if key.get("TangentMode"): + key_data["tangentMode"] = key["TangentMode"][0]["_text"] + + return_data["animKeys"].append(key_data) + + return return_data + + def _dictify(self, xml_, root=True): + """ Convert xml object to dictionary + + Args: + xml_ (xml.etree.ElementTree.Element): xml data + root (bool, optional): is root available. Defaults to True. + + Returns: + dict: dictionarized xml + """ + + if root: + return {xml_.tag: self._dictify(xml_, False)} + + d = copy(xml_.attrib) + if xml_.text: + d["_text"] = xml_.text + + for x in xml_.findall("./*"): + if x.tag not in d: + d[x.tag] = [] + d[x.tag].append(self._dictify(x, False)) + return d diff --git a/openpype/hosts/flame/api/menu.py b/openpype/hosts/flame/api/menu.py index 7f1a6a24e2..319ed7afb6 100644 --- a/openpype/hosts/flame/api/menu.py +++ b/openpype/hosts/flame/api/menu.py @@ -201,3 +201,54 @@ class FlameMenuTimeline(_FlameMenuApp): if self.flame: self.flame.execute_shortcut('Rescan Python Hooks') self.log.info('Rescan Python Hooks') + + +class FlameMenuUniversal(_FlameMenuApp): + + # flameMenuProjectconnect app takes care of the preferences dialog as well + + def __init__(self, framework): + _FlameMenuApp.__init__(self, framework) + + def __getattr__(self, name): + def method(*args, **kwargs): + project = self.dynamic_menu_data.get(name) + if project: + self.link_project(project) + return method + + def build_menu(self): + if not self.flame: + return [] + + menu = deepcopy(self.menu) + + menu['actions'].append({ + "name": "Load...", + "execute": lambda x: callback_selection( + x, self.tools_helper.show_loader) + }) + menu['actions'].append({ + "name": "Manage...", + "execute": lambda x: self.tools_helper.show_scene_inventory() + }) + menu['actions'].append({ + "name": "Library...", + "execute": lambda x: self.tools_helper.show_library_loader() + }) + return menu + + def refresh(self, *args, **kwargs): + self.rescan() + + def rescan(self, *args, **kwargs): + if not self.flame: + try: + import flame + self.flame = flame + except ImportError: + self.flame = None + + if self.flame: + self.flame.execute_shortcut('Rescan Python Hooks') + self.log.info('Rescan Python Hooks') diff --git a/openpype/hosts/flame/api/pipeline.py b/openpype/hosts/flame/api/pipeline.py index da44be1b15..3a23389961 100644 --- a/openpype/hosts/flame/api/pipeline.py +++ b/openpype/hosts/flame/api/pipeline.py @@ -5,7 +5,7 @@ import os import contextlib from pyblish import api as pyblish -from openpype.api import Logger +from openpype.lib import Logger from openpype.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, @@ -90,8 +90,7 @@ def containerise(flame_clip_segment, def ls(): """List available containers. 
""" - # TODO: ls - pass + return [] def parse_container(tl_segment, validate=True): @@ -107,6 +106,7 @@ def update_container(tl_segment, data=None): # TODO: update_container pass + def on_pyblish_instance_toggled(instance, old_value, new_value): """Toggle node passthrough states on instance toggles.""" diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py index 11108ba49f..26129ebaa6 100644 --- a/openpype/hosts/flame/api/plugin.py +++ b/openpype/hosts/flame/api/plugin.py @@ -4,18 +4,19 @@ import shutil from copy import deepcopy from xml.etree import ElementTree as ET +import qargparse from Qt import QtCore, QtWidgets -import openpype.api as openpype -import qargparse from openpype import style +from openpype.lib import Logger from openpype.pipeline import LegacyCreator, LoaderPlugin +from openpype.settings import get_current_project_settings from . import constants from . import lib as flib from . import pipeline as fpipeline -log = openpype.Logger.get_logger(__name__) +log = Logger.get_logger(__name__) class CreatorWidget(QtWidgets.QDialog): @@ -305,7 +306,7 @@ class Creator(LegacyCreator): def __init__(self, *args, **kwargs): super(Creator, self).__init__(*args, **kwargs) - self.presets = openpype.get_current_project_settings()[ + self.presets = get_current_project_settings()[ "flame"]["create"].get(self.__class__.__name__, {}) # adding basic current context flame objects @@ -360,6 +361,9 @@ class PublishableClip: driving_layer_default = "" index_from_segment_default = False use_shot_name_default = False + include_handles_default = False + retimed_handles_default = True + retimed_framerange_default = True def __init__(self, segment, **kwargs): self.rename_index = kwargs["rename_index"] @@ -493,6 +497,16 @@ class PublishableClip: "reviewTrack", {}).get("value") or self.review_track_default self.audio = self.ui_inputs.get( "audio", {}).get("value") or False + self.include_handles = self.ui_inputs.get( + "includeHandles", {}).get("value") or self.include_handles_default + self.retimed_handles = ( + self.ui_inputs.get("retimedHandles", {}).get("value") + or self.retimed_handles_default + ) + self.retimed_framerange = ( + self.ui_inputs.get("retimedFramerange", {}).get("value") + or self.retimed_framerange_default + ) # build subset name from layer name if self.subset_name == "[ track name ]": @@ -665,6 +679,7 @@ class ClipLoader(LoaderPlugin): `update` logic. """ + log = log options = [ qargparse.Boolean( @@ -675,22 +690,74 @@ class ClipLoader(LoaderPlugin): ) ] + _mapping = None + + def get_colorspace(self, context): + """Get colorspace name + + Look either to version data or representation data. + + Args: + context (dict): version context data + + Returns: + str: colorspace name or None + """ + version = context['version'] + version_data = version.get("data", {}) + colorspace = version_data.get( + "colorspace", None + ) + + if ( + not colorspace + or colorspace == "Unknown" + ): + colorspace = context["representation"]["data"].get( + "colorspace", None) + + return colorspace + + @classmethod + def get_native_colorspace(cls, input_colorspace): + """Return native colorspace name. 
+ + Args: + input_colorspace (str | None): colorspace name + + Returns: + str: native colorspace name defined in mapping or None + """ + if not cls._mapping: + settings = get_current_project_settings()["flame"] + mapping = settings["imageio"]["profilesMapping"]["inputs"] + cls._mapping = { + input["ocioName"]: input["flameName"] + for input in mapping + } + + return cls._mapping.get(input_colorspace) + class OpenClipSolver(flib.MediaInfoFile): create_new_clip = False log = log - def __init__(self, openclip_file_path, feed_data): + def __init__(self, openclip_file_path, feed_data, logger=None): self.out_file = openclip_file_path + # replace log if any + if logger: + self.log = logger + # new feed variables: feed_path = feed_data.pop("path") # initialize parent class super(OpenClipSolver, self).__init__( feed_path, - **feed_data + logger=logger ) # get other metadata @@ -738,17 +805,18 @@ class OpenClipSolver(flib.MediaInfoFile): self.log.info("Building new openClip") self.log.debug(">> self.clip_data: {}".format(self.clip_data)) - # clip data comming from MediaInfoFile - tmp_xml_feeds = self.clip_data.find('tracks/track/feeds') - tmp_xml_feeds.set('currentVersion', self.feed_version_name) - for tmp_feed in tmp_xml_feeds: - tmp_feed.set('vuid', self.feed_version_name) + for tmp_xml_track in self.clip_data.iter("track"): + tmp_xml_feeds = tmp_xml_track.find('feeds') + tmp_xml_feeds.set('currentVersion', self.feed_version_name) - # add colorspace if any is set - if self.feed_colorspace: - self._add_colorspace(tmp_feed, self.feed_colorspace) + for tmp_feed in tmp_xml_track.iter("feed"): + tmp_feed.set('vuid', self.feed_version_name) - self._clear_handler(tmp_feed) + # add colorspace if any is set + if self.feed_colorspace: + self._add_colorspace(tmp_feed, self.feed_colorspace) + + self._clear_handler(tmp_feed) tmp_xml_versions_obj = self.clip_data.find('versions') tmp_xml_versions_obj.set('currentVersion', self.feed_version_name) @@ -761,6 +829,17 @@ class OpenClipSolver(flib.MediaInfoFile): self.write_clip_data_to_file(self.out_file, self.clip_data) + def _get_xml_track_obj_by_uid(self, xml_data, uid): + # loop all tracks of input xml data + for xml_track in xml_data.iter("track"): + track_uid = xml_track.get("uid") + self.log.debug( + ">> track_uid:uid: {}:{}".format(track_uid, uid)) + + # get matching uids + if uid == track_uid: + return xml_track + def _update_open_clip(self): self.log.info("Updating openClip ..") @@ -770,52 +849,81 @@ class OpenClipSolver(flib.MediaInfoFile): self.log.debug(">> out_xml: {}".format(out_xml)) self.log.debug(">> self.clip_data: {}".format(self.clip_data)) - # Get new feed from tmp file - tmp_xml_feed = self.clip_data.find('tracks/track/feeds/feed') + # loop tmp tracks + updated_any = False + for tmp_xml_track in self.clip_data.iter("track"): + # get tmp track uid + tmp_track_uid = tmp_xml_track.get("uid") + self.log.debug(">> tmp_track_uid: {}".format(tmp_track_uid)) - self._clear_handler(tmp_xml_feed) + # get out data track by uid + out_track_element = self._get_xml_track_obj_by_uid( + out_xml, tmp_track_uid) + self.log.debug( + ">> out_track_element: {}".format(out_track_element)) - # update fps from MediaInfoFile class - if self.fps: - tmp_feed_fps_obj = tmp_xml_feed.find( - "startTimecode/rate") - tmp_feed_fps_obj.text = str(self.fps) + # loop tmp feeds + for tmp_xml_feed in tmp_xml_track.iter("feed"): + new_path_obj = tmp_xml_feed.find( + "spans/span/path") + new_path = new_path_obj.text - # update start_frame from MediaInfoFile class - if 
self.start_frame: - tmp_feed_nb_ticks_obj = tmp_xml_feed.find( - "startTimecode/nbTicks") - tmp_feed_nb_ticks_obj.text = str(self.start_frame) + # check if feed path already exists in track's feeds + if ( + out_track_element is not None + and self._feed_exists(out_track_element, new_path) + ): + continue - # update drop_mode from MediaInfoFile class - if self.drop_mode: - tmp_feed_drop_mode_obj = tmp_xml_feed.find( - "startTimecode/dropMode") - tmp_feed_drop_mode_obj.text = str(self.drop_mode) + # rename versions on feeds + tmp_xml_feed.set('vuid', self.feed_version_name) + self._clear_handler(tmp_xml_feed) - new_path_obj = tmp_xml_feed.find( - "spans/span/path") - new_path = new_path_obj.text + # update fps from MediaInfoFile class + if self.fps is not None: + tmp_feed_fps_obj = tmp_xml_feed.find( + "startTimecode/rate") + tmp_feed_fps_obj.text = str(self.fps) - feed_added = False - if not self._feed_exists(out_xml, new_path): - tmp_xml_feed.set('vuid', self.feed_version_name) - # Append new temp file feed to .clip source out xml - out_track = out_xml.find("tracks/track") - # add colorspace if any is set - if self.feed_colorspace: - self._add_colorspace(tmp_xml_feed, self.feed_colorspace) + # update start_frame from MediaInfoFile class + if self.start_frame is not None: + tmp_feed_nb_ticks_obj = tmp_xml_feed.find( + "startTimecode/nbTicks") + tmp_feed_nb_ticks_obj.text = str(self.start_frame) - out_feeds = out_track.find('feeds') - out_feeds.set('currentVersion', self.feed_version_name) - out_feeds.append(tmp_xml_feed) + # update drop_mode from MediaInfoFile class + if self.drop_mode is not None: + tmp_feed_drop_mode_obj = tmp_xml_feed.find( + "startTimecode/dropMode") + tmp_feed_drop_mode_obj.text = str(self.drop_mode) - self.log.info( - "Appending new feed: {}".format( - self.feed_version_name)) - feed_added = True + # add colorspace if any is set + if self.feed_colorspace is not None: + self._add_colorspace(tmp_xml_feed, self.feed_colorspace) - if feed_added: + # then append/update feed to correct track in output + if out_track_element: + self.log.debug("updating track element ..") + # update already present track + out_feeds = out_track_element.find('feeds') + out_feeds.set('currentVersion', self.feed_version_name) + out_feeds.append(tmp_xml_feed) + + self.log.info( + "Appending new feed: {}".format( + self.feed_version_name)) + else: + self.log.debug("adding new track element ..") + # create new track as it doesnt exists yet + # set current version to feeds on tmp + tmp_xml_feeds = tmp_xml_track.find('feeds') + tmp_xml_feeds.set('currentVersion', self.feed_version_name) + out_tracks = out_xml.find("tracks") + out_tracks.append(tmp_xml_track) + + updated_any = True + + if updated_any: # Append vUID to versions out_xml_versions_obj = out_xml.find('versions') out_xml_versions_obj.set( diff --git a/openpype/hosts/flame/api/render_utils.py b/openpype/hosts/flame/api/render_utils.py index 473fb2f985..7e50c2b23e 100644 --- a/openpype/hosts/flame/api/render_utils.py +++ b/openpype/hosts/flame/api/render_utils.py @@ -1,5 +1,8 @@ import os from xml.etree import ElementTree as ET +from openpype.lib import Logger + +log = Logger.get_logger(__name__) def export_clip(export_path, clip, preset_path, **kwargs): @@ -143,10 +146,40 @@ def modify_preset_file(xml_path, staging_dir, data): # change xml following data keys with open(xml_path, "r") as datafile: - tree = ET.parse(datafile) + _root = ET.parse(datafile) + for key, value in data.items(): - for element in tree.findall(".//{}".format(key)): - 
element.text = str(value) - tree.write(temp_path) + try: + if "/" in key: + if not key.startswith("./"): + key = ".//" + key + + split_key_path = key.split("/") + element_key = split_key_path[-1] + parent_obj_path = "/".join(split_key_path[:-1]) + + parent_obj = _root.find(parent_obj_path) + element_obj = parent_obj.find(element_key) + if not element_obj: + append_element(parent_obj, element_key, value) + else: + finds = _root.findall(".//{}".format(key)) + if not finds: + raise AttributeError + for element in finds: + element.text = str(value) + except AttributeError: + log.warning( + "Cannot create attribute: {}: {}. Skipping".format( + key, value + )) + _root.write(temp_path) return temp_path + + +def append_element(root_element_obj, key, value): + new_element_obj = ET.Element(key) + log.debug("__ new_element_obj: {}".format(new_element_obj)) + new_element_obj.text = str(value) + root_element_obj.insert(0, new_element_obj) diff --git a/openpype/hosts/flame/api/utils.py b/openpype/hosts/flame/api/utils.py index 2dfdfa8f48..fb8bdee42d 100644 --- a/openpype/hosts/flame/api/utils.py +++ b/openpype/hosts/flame/api/utils.py @@ -4,7 +4,7 @@ Flame utils for syncing scripts import os import shutil -from openpype.api import Logger +from openpype.lib import Logger log = Logger.get_logger(__name__) diff --git a/openpype/hosts/flame/api/workio.py b/openpype/hosts/flame/api/workio.py index 0c96c0752a..e49321c75a 100644 --- a/openpype/hosts/flame/api/workio.py +++ b/openpype/hosts/flame/api/workio.py @@ -1,7 +1,7 @@ """Host API required Work Files tool""" import os -from openpype.api import Logger +from openpype.lib import Logger # from .. import ( # get_project_manager, # get_current_project diff --git a/openpype/hosts/flame/hooks/pre_flame_setup.py b/openpype/hosts/flame/hooks/pre_flame_setup.py index ad2b0dc897..713daf1031 100644 --- a/openpype/hosts/flame/hooks/pre_flame_setup.py +++ b/openpype/hosts/flame/hooks/pre_flame_setup.py @@ -3,16 +3,17 @@ import json import tempfile import contextlib import socket +from pprint import pformat + from openpype.lib import ( PreLaunchHook, - get_openpype_username + get_openpype_username, + run_subprocess, ) from openpype.lib.applications import ( ApplicationLaunchFailed ) from openpype.hosts import flame as opflame -import openpype -from pprint import pformat class FlamePrelaunch(PreLaunchHook): @@ -22,6 +23,7 @@ class FlamePrelaunch(PreLaunchHook): in environment var FLAME_SCRIPT_DIR. """ app_groups = ["flame"] + permissions = 0o777 wtc_script_path = os.path.join( opflame.HOST_DIR, "api", "scripts", "wiretap_com.py") @@ -38,19 +40,12 @@ class FlamePrelaunch(PreLaunchHook): """Hook entry method.""" project_doc = self.data["project_doc"] project_name = project_doc["name"] + volume_name = _env.get("FLAME_WIRETAP_VOLUME") # get image io - project_anatomy = self.data["anatomy"] + project_settings = self.data["project_settings"] - # make sure anatomy settings are having flame key - if not project_anatomy["imageio"].get("flame"): - raise ApplicationLaunchFailed(( - "Anatomy project settings are missing `flame` key. 
" - "Please make sure you remove project overides on " - "Anatomy Image io") - ) - - imageio_flame = project_anatomy["imageio"]["flame"] + imageio_flame = project_settings["flame"]["imageio"] # get user name and host name user_name = get_openpype_username() @@ -81,7 +76,7 @@ class FlamePrelaunch(PreLaunchHook): data_to_script = { # from settings "host_name": _env.get("FLAME_WIRETAP_HOSTNAME") or hostname, - "volume_name": _env.get("FLAME_WIRETAP_VOLUME"), + "volume_name": volume_name, "group_name": _env.get("FLAME_WIRETAP_GROUP"), "color_policy": str(imageio_flame["project"]["colourPolicy"]), @@ -99,8 +94,40 @@ class FlamePrelaunch(PreLaunchHook): app_arguments = self._get_launch_arguments(data_to_script) + # fix project data permission issue + self._fix_permissions(project_name, volume_name) + self.launch_context.launch_args.extend(app_arguments) + def _fix_permissions(self, project_name, volume_name): + """Work around for project data permissions + + Reported issue: when project is created locally on one machine, + it is impossible to migrate it to other machine. Autodesk Flame + is crating some unmanagable files which needs to be opened to 0o777. + + Args: + project_name (str): project name + volume_name (str): studio volume + """ + dirs_to_modify = [ + "/usr/discreet/project/{}".format(project_name), + "/opt/Autodesk/clip/{}/{}.prj".format(volume_name, project_name), + "/usr/discreet/clip/{}/{}.prj".format(volume_name, project_name) + ] + + for dirtm in dirs_to_modify: + for root, dirs, files in os.walk(dirtm): + try: + for name in set(dirs) | set(files): + path = os.path.join(root, name) + st = os.stat(path) + if oct(st.st_mode) != self.permissions: + os.chmod(path, self.permissions) + + except OSError as exc: + self.log.warning("Not able to open files: {}".format(exc)) + def _get_flame_fps(self, fps_num): fps_table = { float(23.976): "23.976 fps", @@ -152,7 +179,7 @@ class FlamePrelaunch(PreLaunchHook): "env": self.launch_context.env } - openpype.api.run_subprocess(args, **process_kwargs) + run_subprocess(args, **process_kwargs) # process returned json file to pass launch args return_json_data = open(tmp_json_path).read() diff --git a/openpype/hosts/flame/otio/flame_export.py b/openpype/hosts/flame/otio/flame_export.py index 4fe05ec1d8..6d6b33d2a1 100644 --- a/openpype/hosts/flame/otio/flame_export.py +++ b/openpype/hosts/flame/otio/flame_export.py @@ -94,83 +94,30 @@ def create_otio_time_range(start_frame, frame_duration, fps): def _get_metadata(item): if hasattr(item, 'metadata'): - if not item.metadata: - return {} - return {key: value for key, value in dict(item.metadata)} + return dict(item.metadata) if item.metadata else {} return {} -def create_time_effects(otio_clip, item): - # todo #2426: add retiming effects to export - # get all subtrack items - # subTrackItems = flatten(track_item.parent().subTrackItems()) - # speed = track_item.playbackSpeed() +def create_time_effects(otio_clip, speed): + otio_effect = None - # otio_effect = None - # # retime on track item - # if speed != 1.: - # # make effect - # otio_effect = otio.schema.LinearTimeWarp() - # otio_effect.name = "Speed" - # otio_effect.time_scalar = speed - # otio_effect.metadata = {} + # retime on track item + if speed != 1.: + # make effect + otio_effect = otio.schema.LinearTimeWarp() + otio_effect.name = "Speed" + otio_effect.time_scalar = speed + otio_effect.metadata = {} - # # freeze frame effect - # if speed == 0.: - # otio_effect = otio.schema.FreezeFrame() - # otio_effect.name = "FreezeFrame" - # 
otio_effect.metadata = {} + # freeze frame effect + if speed == 0.: + otio_effect = otio.schema.FreezeFrame() + otio_effect.name = "FreezeFrame" + otio_effect.metadata = {} - # if otio_effect: - # # add otio effect to clip effects - # otio_clip.effects.append(otio_effect) - - # # loop through and get all Timewarps - # for effect in subTrackItems: - # if ((track_item not in effect.linkedItems()) - # and (len(effect.linkedItems()) > 0)): - # continue - # # avoid all effect which are not TimeWarp and disabled - # if "TimeWarp" not in effect.name(): - # continue - - # if not effect.isEnabled(): - # continue - - # node = effect.node() - # name = node["name"].value() - - # # solve effect class as effect name - # _name = effect.name() - # if "_" in _name: - # effect_name = re.sub(r"(?:_)[_0-9]+", "", _name) # more numbers - # else: - # effect_name = re.sub(r"\d+", "", _name) # one number - - # metadata = {} - # # add knob to metadata - # for knob in ["lookup", "length"]: - # value = node[knob].value() - # animated = node[knob].isAnimated() - # if animated: - # value = [ - # ((node[knob].getValueAt(i)) - i) - # for i in range( - # track_item.timelineIn(), - # track_item.timelineOut() + 1) - # ] - - # metadata[knob] = value - - # # make effect - # otio_effect = otio.schema.TimeEffect() - # otio_effect.name = name - # otio_effect.effect_name = effect_name - # otio_effect.metadata = metadata - - # # add otio effect to clip effects - # otio_clip.effects.append(otio_effect) - pass + if otio_effect: + # add otio effect to clip effects + otio_clip.effects.append(otio_effect) def _get_marker_color(flame_colour): @@ -260,6 +207,7 @@ def create_otio_markers(otio_item, item): def create_otio_reference(clip_data, fps=None): metadata = _get_metadata(clip_data) + duration = int(clip_data["source_duration"]) # get file info for path and start frame frame_start = 0 @@ -273,7 +221,6 @@ def create_otio_reference(clip_data, fps=None): # get padding and other file infos log.debug("_ path: {}".format(path)) - frame_duration = clip_data["source_duration"] otio_ex_ref_item = None is_sequence = frame_number = utils.get_frame_from_filename(file_name) @@ -300,7 +247,7 @@ def create_otio_reference(clip_data, fps=None): rate=fps, available_range=create_otio_time_range( frame_start, - frame_duration, + duration, fps ) ) @@ -316,7 +263,7 @@ def create_otio_reference(clip_data, fps=None): target_url=reformated_path, available_range=create_otio_time_range( frame_start, - frame_duration, + duration, fps ) ) @@ -328,28 +275,84 @@ def create_otio_reference(clip_data, fps=None): def create_otio_clip(clip_data): - from openpype.hosts.flame.api import MediaInfoFile + from openpype.hosts.flame.api import MediaInfoFile, TimeEffectMetadata segment = clip_data["PySegment"] # calculate source in - media_info = MediaInfoFile(clip_data["fpath"]) + media_info = MediaInfoFile(clip_data["fpath"], logger=log) media_timecode_start = media_info.start_frame media_fps = media_info.fps - # create media reference - media_reference = create_otio_reference(clip_data, media_fps) + # Timewarp metadata + tw_data = TimeEffectMetadata(segment, logger=log).data + log.debug("__ tw_data: {}".format(tw_data)) # define first frame - first_frame = media_timecode_start or utils.get_frame_from_filename( - clip_data["fpath"]) or 0 + file_first_frame = utils.get_frame_from_filename( + clip_data["fpath"]) + if file_first_frame: + file_first_frame = int(file_first_frame) - source_in = int(clip_data["source_in"]) - int(first_frame) + first_frame = media_timecode_start or 
file_first_frame or 0 + + _clip_source_in = int(clip_data["source_in"]) + _clip_source_out = int(clip_data["source_out"]) + _clip_record_in = clip_data["record_in"] + _clip_record_out = clip_data["record_out"] + _clip_record_duration = int(clip_data["record_duration"]) + + log.debug("_ file_first_frame: {}".format(file_first_frame)) + log.debug("_ first_frame: {}".format(first_frame)) + log.debug("_ _clip_source_in: {}".format(_clip_source_in)) + log.debug("_ _clip_source_out: {}".format(_clip_source_out)) + log.debug("_ _clip_record_in: {}".format(_clip_record_in)) + log.debug("_ _clip_record_out: {}".format(_clip_record_out)) + + # first solve if the reverse timing + speed = 1 + if clip_data["source_in"] > clip_data["source_out"]: + source_in = _clip_source_out - int(first_frame) + source_out = _clip_source_in - int(first_frame) + speed = -1 + else: + source_in = _clip_source_in - int(first_frame) + source_out = _clip_source_out - int(first_frame) + + log.debug("_ source_in: {}".format(source_in)) + log.debug("_ source_out: {}".format(source_out)) + + if file_first_frame: + log.debug("_ file_source_in: {}".format( + file_first_frame + source_in)) + log.debug("_ file_source_in: {}".format( + file_first_frame + source_out)) + + source_duration = (source_out - source_in + 1) + + # secondly check if any change of speed + if source_duration != _clip_record_duration: + retime_speed = float(source_duration) / float(_clip_record_duration) + log.debug("_ calculated speed: {}".format(retime_speed)) + speed *= retime_speed + + # get speed from metadata if available + if tw_data.get("speed"): + speed = tw_data["speed"] + log.debug("_ metadata speed: {}".format(speed)) + + log.debug("_ speed: {}".format(speed)) + log.debug("_ source_duration: {}".format(source_duration)) + log.debug("_ _clip_record_duration: {}".format(_clip_record_duration)) + + # create media reference + media_reference = create_otio_reference( + clip_data, media_fps) # creatae source range source_range = create_otio_time_range( source_in, - clip_data["record_duration"], + _clip_record_duration, CTX.get_fps() ) @@ -363,6 +366,9 @@ def create_otio_clip(clip_data): if MARKERS_INCLUDE: create_otio_markers(otio_clip, segment) + if speed != 1: + create_time_effects(otio_clip, speed) + return otio_clip diff --git a/openpype/hosts/flame/plugins/create/create_shot_clip.py b/openpype/hosts/flame/plugins/create/create_shot_clip.py index 11c00dab42..4fb041a4b2 100644 --- a/openpype/hosts/flame/plugins/create/create_shot_clip.py +++ b/openpype/hosts/flame/plugins/create/create_shot_clip.py @@ -23,10 +23,11 @@ class CreateShotClip(opfapi.Creator): # nested dictionary (only one level allowed # for sections and dict) for _k, _v in v["value"].items(): - if presets.get(_k): + if presets.get(_k) is not None: gui_inputs[k][ "value"][_k]["value"] = presets[_k] - if presets.get(k): + + if presets.get(k) is not None: gui_inputs[k]["value"] = presets[k] # open widget for plugins inputs @@ -268,6 +269,30 @@ class CreateShotClip(opfapi.Creator): "target": "tag", "toolTip": "Handle at end of clip", # noqa "order": 2 + }, + "includeHandles": { + "value": False, + "type": "QCheckBox", + "label": "Include handles", + "target": "tag", + "toolTip": "By default handles are excluded", # noqa + "order": 3 + }, + "retimedHandles": { + "value": True, + "type": "QCheckBox", + "label": "Retimed handles", + "target": "tag", + "toolTip": "By default handles are retimed.", # noqa + "order": 4 + }, + "retimedFramerange": { + "value": True, + "type": "QCheckBox", + "label": 
"Retimed framerange", + "target": "tag", + "toolTip": "By default framerange is retimed.", # noqa + "order": 5 } } } diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py index e0a7297381..f8cb7b3e11 100644 --- a/openpype/hosts/flame/plugins/load/load_clip.py +++ b/openpype/hosts/flame/plugins/load/load_clip.py @@ -2,6 +2,7 @@ import os import flame from pprint import pformat import openpype.hosts.flame.api as opfapi +from openpype.lib import StringTemplate class LoadClip(opfapi.ClipLoader): @@ -22,7 +23,7 @@ class LoadClip(opfapi.ClipLoader): # settings reel_group_name = "OpenPype_Reels" reel_name = "Loaded" - clip_name_template = "{asset}_{subset}_{output}" + clip_name_template = "{asset}_{subset}<_{output}>" def load(self, context, name, namespace, options): @@ -35,14 +36,15 @@ class LoadClip(opfapi.ClipLoader): version = context['version'] version_data = version.get("data", {}) version_name = version.get("name", None) - colorspace = version_data.get("colorspace", None) - clip_name = self.clip_name_template.format( - **context["representation"]["context"]) + colorspace = self.get_colorspace(context) + + clip_name = StringTemplate(self.clip_name_template).format( + context["representation"]["context"]) - # TODO: settings in imageio # convert colorspace with ocio to flame mapping # in imageio flame section - colorspace = colorspace + colorspace = self.get_native_colorspace(colorspace) + self.log.info("Loading with colorspace: `{}`".format(colorspace)) # create workfile path workfile_dir = os.environ["AVALON_WORKDIR"] @@ -60,8 +62,6 @@ class LoadClip(opfapi.ClipLoader): "path": self.fname.replace("\\", "/"), "colorspace": colorspace, "version": "v{:0>3}".format(version_name), - "logger": self.log - } self.log.debug(pformat( loading_context @@ -69,7 +69,8 @@ class LoadClip(opfapi.ClipLoader): self.log.debug(openclip_path) # make openpype clip file - opfapi.OpenClipSolver(openclip_path, loading_context).make() + opfapi.OpenClipSolver( + openclip_path, loading_context, logger=self.log).make() # prepare Reel group in actual desktop opc = self._get_clip( diff --git a/openpype/hosts/flame/plugins/load/load_clip_batch.py b/openpype/hosts/flame/plugins/load/load_clip_batch.py index 5de3226035..048ac19431 100644 --- a/openpype/hosts/flame/plugins/load/load_clip_batch.py +++ b/openpype/hosts/flame/plugins/load/load_clip_batch.py @@ -1,7 +1,9 @@ +from copy import deepcopy import os import flame from pprint import pformat import openpype.hosts.flame.api as opfapi +from openpype.lib import StringTemplate class LoadClipBatch(opfapi.ClipLoader): @@ -21,7 +23,7 @@ class LoadClipBatch(opfapi.ClipLoader): # settings reel_name = "OP_LoadedReel" - clip_name_template = "{asset}_{subset}_{output}" + clip_name_template = "{batch}_{asset}_{subset}<_{output}>" def load(self, context, name, namespace, options): @@ -33,19 +35,22 @@ class LoadClipBatch(opfapi.ClipLoader): version = context['version'] version_data = version.get("data", {}) version_name = version.get("name", None) - colorspace = version_data.get("colorspace", None) + colorspace = self.get_colorspace(context) # in case output is not in context replace key to representation if not context["representation"]["context"].get("output"): self.clip_name_template.replace("output", "representation") - clip_name = self.clip_name_template.format( - **context["representation"]["context"]) + formating_data = deepcopy(context["representation"]["context"]) + formating_data["batch"] = self.batch.name.get_value() + + 
clip_name = StringTemplate(self.clip_name_template).format( + formating_data) - # TODO: settings in imageio # convert colorspace with ocio to flame mapping # in imageio flame section - colorspace = colorspace + colorspace = self.get_native_colorspace(colorspace) + self.log.info("Loading with colorspace: `{}`".format(colorspace)) # create workfile path workfile_dir = options.get("workdir") or os.environ["AVALON_WORKDIR"] @@ -55,6 +60,7 @@ class LoadClipBatch(opfapi.ClipLoader): openclip_path = os.path.join( openclip_dir, clip_name + ".clip" ) + if not os.path.exists(openclip_dir): os.makedirs(openclip_dir) @@ -63,8 +69,6 @@ class LoadClipBatch(opfapi.ClipLoader): "path": self.fname.replace("\\", "/"), "colorspace": colorspace, "version": "v{:0>3}".format(version_name), - "logger": self.log - } self.log.debug(pformat( loading_context @@ -72,7 +76,8 @@ class LoadClipBatch(opfapi.ClipLoader): self.log.debug(openclip_path) # make openpype clip file - opfapi.OpenClipSolver(openclip_path, loading_context).make() + opfapi.OpenClipSolver( + openclip_path, loading_context, logger=self.log).make() # prepare Reel group in actual desktop opc = self._get_clip( diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py index 5174f9db48..76d48dded2 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -1,8 +1,12 @@ import re +from types import NoneType import pyblish -import openpype import openpype.hosts.flame.api as opfapi from openpype.hosts.flame.otio import flame_export +from openpype.pipeline.editorial import ( + is_overlapping_otio_ranges, + get_media_range_with_retimes +) # # developer reload modules from pprint import pformat @@ -36,6 +40,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): for segment in selected_segments: # get openpype tag data marker_data = opfapi.get_segment_data_marker(segment) + self.log.debug("__ marker_data: {}".format( pformat(marker_data))) @@ -58,24 +63,50 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): clip_name = clip_data["segment_name"] self.log.debug("clip_name: {}".format(clip_name)) + # get otio clip data + otio_data = self._get_otio_clip_instance_data(clip_data) or {} + self.log.debug("__ otio_data: {}".format(pformat(otio_data))) + # get file path file_path = clip_data["fpath"] first_frame = opfapi.get_frame_from_filename(file_path) or 0 - head, tail = self._get_head_tail(clip_data, first_frame) + head, tail = self._get_head_tail( + clip_data, + otio_data["otioClip"], + marker_data["handleStart"], + marker_data["handleEnd"] + ) + + # make sure there is not NoneType rather 0 + if isinstance(head, NoneType): + head = 0 + if isinstance(tail, NoneType): + tail = 0 + + # make sure value is absolute + if head != 0: + head = abs(head) + if tail != 0: + tail = abs(tail) # solve handles length marker_data["handleStart"] = min( - marker_data["handleStart"], abs(head)) + marker_data["handleStart"], head) marker_data["handleEnd"] = min( - marker_data["handleEnd"], abs(tail)) + marker_data["handleEnd"], tail) + + workfile_start = self._set_workfile_start(marker_data) with_audio = bool(marker_data.pop("audio")) # add marker data to instance data inst_data = dict(marker_data.items()) + # add ocio_data to instance data + inst_data.update(otio_data) + asset = marker_data["asset"] subset = marker_data["subset"] @@ -98,20 +129,19 @@ class 
CollectTimelineInstances(pyblish.api.ContextPlugin): "families": families, "publish": marker_data["publish"], "fps": self.fps, + "workfileFrameStart": workfile_start, "sourceFirstFrame": int(first_frame), + "retimedHandles": marker_data.get("retimedHandles"), + "shotDurationFromSource": ( + not marker_data.get("retimedFramerange")), "path": file_path, "flameAddTasks": self.add_tasks, "tasks": { task["name"]: {"type": task["type"]} - for task in self.add_tasks} + for task in self.add_tasks}, + "representations": [], + "newAssetPublishing": True }) - - # get otio clip data - otio_data = self._get_otio_clip_instance_data(clip_data) or {} - self.log.debug("__ otio_data: {}".format(pformat(otio_data))) - - # add to instance data - inst_data.update(otio_data) self.log.debug("__ inst_data: {}".format(pformat(inst_data))) # add resolution @@ -145,6 +175,17 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): if marker_data.get("reviewTrack") is not None: instance.data["reviewAudio"] = True + @staticmethod + def _set_workfile_start(data): + include_handles = data.get("includeHandles") + workfile_start = data["workfileFrameStart"] + handle_start = data["handleStart"] + + if include_handles: + workfile_start += handle_start + + return workfile_start + def _get_comment_attributes(self, segment): comment = segment.comment.get_value() @@ -236,20 +277,24 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): return split_comments - def _get_head_tail(self, clip_data, first_frame): + def _get_head_tail(self, clip_data, otio_clip, handle_start, handle_end): # calculate head and tail with forward compatibility head = clip_data.get("segment_head") tail = clip_data.get("segment_tail") + self.log.debug("__ head: `{}`".format(head)) + self.log.debug("__ tail: `{}`".format(tail)) # HACK: it is here to serve for versions bellow 2021.1 - if not head: - head = int(clip_data["source_in"]) - int(first_frame) - if not tail: - tail = int( - clip_data["source_duration"] - ( - head + clip_data["record_duration"] - ) - ) + if not any([head, tail]): + retimed_attributes = get_media_range_with_retimes( + otio_clip, handle_start, handle_end) + self.log.debug( + ">> retimed_attributes: {}".format(retimed_attributes)) + + # retimed head and tail + head = int(retimed_attributes["handleStart"]) + tail = int(retimed_attributes["handleEnd"]) + return head, tail def _get_resolution_to_data(self, data, context): @@ -340,7 +385,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): continue if otio_clip.name not in segment.name.get_value(): continue - if openpype.lib.is_overlapping_otio_ranges( + if is_overlapping_otio_ranges( parent_range, timeline_range, strict=True): # add pypedata marker to otio_clip metadata diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py index f2ae1f62a9..917041e053 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py @@ -1,9 +1,9 @@ import pyblish.api -import openpype.lib as oplib -from openpype.pipeline import legacy_io import openpype.hosts.flame.api as opfapi from openpype.hosts.flame.otio import flame_export +from openpype.pipeline import legacy_io +from openpype.pipeline.create import get_subset_name class CollecTimelineOTIO(pyblish.api.ContextPlugin): @@ -24,11 +24,14 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin): sequence = opfapi.get_current_sequence(opfapi.CTX.selection) # create subset 
name - subset_name = oplib.get_subset_name_with_asset_doc( + subset_name = get_subset_name( family, variant, task_name, asset_doc, + context.data["projectName"], + context.data["hostName"], + project_settings=context.data["project_settings"] ) # adding otio timeline to context @@ -39,7 +42,8 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin): "name": subset_name, "asset": asset_doc["name"], "subset": subset_name, - "family": "workfile" + "family": "workfile", + "families": [] } # create instance with workfile diff --git a/openpype/hosts/flame/plugins/publish/extract_otio_file.py b/openpype/hosts/flame/plugins/publish/extract_otio_file.py index 7dd75974fc..e5bfa42ce6 100644 --- a/openpype/hosts/flame/plugins/publish/extract_otio_file.py +++ b/openpype/hosts/flame/plugins/publish/extract_otio_file.py @@ -1,10 +1,10 @@ import os import pyblish.api -import openpype.api import opentimelineio as otio +from openpype.pipeline import publish -class ExtractOTIOFile(openpype.api.Extractor): +class ExtractOTIOFile(publish.Extractor): """ Extractor export OTIO file """ diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py index fd0ece2590..d5294d61c2 100644 --- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py +++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py @@ -1,16 +1,21 @@ import os import re -from pprint import pformat +import tempfile from copy import deepcopy import pyblish.api -import openpype.api + +from openpype.pipeline import publish from openpype.hosts.flame import api as opfapi +from openpype.hosts.flame.api import MediaInfoFile +from openpype.pipeline.editorial import ( + get_media_range_with_retimes +) import flame -class ExtractSubsetResources(openpype.api.Extractor): +class ExtractSubsetResources(publish.Extractor): """ Extractor for transcoding files from Flame clip """ @@ -21,6 +26,8 @@ class ExtractSubsetResources(openpype.api.Extractor): hosts = ["flame"] # plugin defaults + keep_original_representation = False + default_presets = { "thumbnail": { "active": True, @@ -33,24 +40,8 @@ class ExtractSubsetResources(openpype.api.Extractor): "representation_add_range": False, "representation_tags": ["thumbnail"], "path_regex": ".*" - }, - "ftrackpreview": { - "active": True, - "ext": "mov", - "xml_preset_file": "Apple iPad (1920x1080).xml", - "xml_preset_dir": "", - "export_type": "Movie", - "parsed_comment_attrs": False, - "colorspace_out": "Output - Rec.709", - "representation_add_range": True, - "representation_tags": [ - "review", - "delete" - ], - "path_regex": ".*" } } - keep_original_representation = False # hide publisher during exporting hide_ui_on_process = True @@ -59,11 +50,8 @@ class ExtractSubsetResources(openpype.api.Extractor): export_presets_mapping = {} def process(self, instance): - if ( - self.keep_original_representation - and "representations" not in instance.data - or not self.keep_original_representation - ): + if not self.keep_original_representation: + # remove previeous representation if not needed instance.data["representations"] = [] # flame objects @@ -81,54 +69,132 @@ class ExtractSubsetResources(openpype.api.Extractor): # get media source first frame source_first_frame = instance.data["sourceFirstFrame"] + self.log.debug("_ frame_start: {}".format(frame_start)) + self.log.debug("_ source_first_frame: {}".format(source_first_frame)) + # get timeline in/out of segment clip_in = instance.data["clipIn"] clip_out = 
instance.data["clipOut"] + # get retimed attributres + retimed_data = self._get_retimed_attributes(instance) + + # get individual keys + retimed_handle_start = retimed_data["handle_start"] + retimed_handle_end = retimed_data["handle_end"] + retimed_source_duration = retimed_data["source_duration"] + retimed_speed = retimed_data["speed"] + # get handles value - take only the max from both handle_start = instance.data["handleStart"] - handle_end = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] handles = max(handle_start, handle_end) + include_handles = instance.data.get("includeHandles") + retimed_handles = instance.data.get("retimedHandles") # get media source range with handles - source_end_handles = instance.data["sourceEndH"] source_start_handles = instance.data["sourceStartH"] source_end_handles = instance.data["sourceEndH"] + # retime if needed + if retimed_speed != 1.0: + if retimed_handles: + # handles are retimed + source_start_handles = ( + instance.data["sourceStart"] - retimed_handle_start) + source_end_handles = ( + source_start_handles + + (retimed_source_duration - 1) + + retimed_handle_start + + retimed_handle_end + ) + + else: + # handles are not retimed + source_end_handles = ( + source_start_handles + + (retimed_source_duration - 1) + + handle_start + + handle_end + ) + + # get frame range with handles for representation range + frame_start_handle = frame_start - handle_start + repre_frame_start = frame_start_handle + if include_handles: + if retimed_speed == 1.0 or not retimed_handles: + frame_start_handle = frame_start + else: + frame_start_handle = ( + frame_start - handle_start) + retimed_handle_start + + self.log.debug("_ frame_start_handle: {}".format( + frame_start_handle)) + self.log.debug("_ repre_frame_start: {}".format( + repre_frame_start)) + + # calculate duration with handles + source_duration_handles = ( + source_end_handles - source_start_handles) + 1 + + self.log.debug("_ source_duration_handles: {}".format( + source_duration_handles)) + # create staging dir path staging_dir = self.staging_dir(instance) # add default preset type for thumbnail and reviewable video # update them with settings and override in case the same # are found in there - export_presets = deepcopy(self.default_presets) + _preset_keys = [k.split('_')[0] for k in self.export_presets_mapping] + export_presets = { + k: v for k, v in deepcopy(self.default_presets).items() + if k not in _preset_keys + } export_presets.update(self.export_presets_mapping) + if not instance.data.get("versionData"): + instance.data["versionData"] = {} + + # set versiondata if any retime + version_data = retimed_data.get("version_data") + self.log.debug("_ version_data: {}".format(version_data)) + + if version_data: + instance.data["versionData"].update(version_data) + + # version data start frame + version_frame_start = frame_start + if include_handles: + version_frame_start = frame_start_handle + if retimed_speed != 1.0: + if retimed_handles: + instance.data["versionData"].update({ + "frameStart": version_frame_start, + "frameEnd": ( + (version_frame_start + source_duration_handles - 1) + - (retimed_handle_start + retimed_handle_end) + ) + }) + else: + instance.data["versionData"].update({ + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": version_frame_start, + "frameEnd": ( + (version_frame_start + source_duration_handles - 1) + - (handle_start + handle_end) + ) + }) + self.log.debug("_ version_data: {}".format( + instance.data["versionData"] + )) + # loop all 
preset names and for unique_name, preset_config in export_presets.items(): modify_xml_data = {} - # get activating attributes - activated_preset = preset_config["active"] - filter_path_regex = preset_config.get("filter_path_regex") - - self.log.info( - "Preset `{}` is active `{}` with filter `{}`".format( - unique_name, activated_preset, filter_path_regex - ) - ) - self.log.debug( - "__ clip_path: `{}`".format(clip_path)) - - # skip if not activated presete - if not activated_preset: - continue - - # exclude by regex filter if any - if ( - filter_path_regex - and not re.search(filter_path_regex, clip_path) - ): + if self._should_skip(preset_config, clip_path, unique_name): continue # get all presets attributes @@ -146,28 +212,10 @@ class ExtractSubsetResources(openpype.api.Extractor): ) ) - # get attribures related loading in integrate_batch_group - load_to_batch_group = preset_config.get( - "load_to_batch_group") - batch_group_loader_name = preset_config.get( - "batch_group_loader_name") - - # convert to None if empty string - if batch_group_loader_name == "": - batch_group_loader_name = None - - # get frame range with handles for representation range - frame_start_handle = frame_start - handle_start - source_duration_handles = ( - source_end_handles - source_start_handles) + 1 - - # define in/out marks - in_mark = (source_start_handles - source_first_frame) + 1 - out_mark = in_mark + source_duration_handles - exporting_clip = None name_patern_xml = "_{}.".format( unique_name) + if export_type == "Sequence Publish": # change export clip to sequence exporting_clip = flame.duplicate(sequence_clip) @@ -180,20 +228,26 @@ class ExtractSubsetResources(openpype.api.Extractor): name_patern_xml = ( "__{}.").format( unique_name) + + # only for h264 with baked retime + in_mark = clip_in + out_mark = clip_out + 1 + modify_xml_data.update({ + "exportHandles": True, + "nbHandles": handles + }) else: + in_mark = (source_start_handles - source_first_frame) + 1 + out_mark = in_mark + source_duration_handles exporting_clip = self.import_clip(clip_path) exporting_clip.name.set_value("{}_{}".format( asset_name, segment_name)) - # change in/out marks to timeline in/out - in_mark = clip_in - out_mark = clip_out - # add xml tags modifications modify_xml_data.update({ - "exportHandles": True, - "nbHandles": handles, - "startFrame": frame_start, + # enum position low start from 0 + "frameIndex": 0, + "startFrame": repre_frame_start, "namePattern": name_patern_xml }) @@ -201,9 +255,8 @@ class ExtractSubsetResources(openpype.api.Extractor): # add any xml overrides collected form segment.comment modify_xml_data.update(instance.data["xml_overrides"]) - self.log.debug("__ modify_xml_data: {}".format(pformat( - modify_xml_data - ))) + self.log.debug("_ in_mark: {}".format(in_mark)) + self.log.debug("_ out_mark: {}".format(out_mark)) export_kwargs = {} # validate xml preset file is filled @@ -231,19 +284,30 @@ class ExtractSubsetResources(openpype.api.Extractor): preset_dir, preset_file )) - preset_path = opfapi.modify_preset_file( - preset_orig_xml_path, staging_dir, modify_xml_data) - # define kwargs based on preset type if "thumbnail" in unique_name: - export_kwargs["thumb_frame_number"] = int(in_mark + ( - source_duration_handles / 2)) + modify_xml_data.update({ + "video/posterFrame": True, + "video/useFrameAsPoster": 1, + "namePattern": "__thumbnail" + }) + thumb_frame_number = int(in_mark + ( + (out_mark - in_mark + 1) / 2)) + + self.log.debug("__ thumb_frame_number: {}".format( + thumb_frame_number + )) + + 
export_kwargs["thumb_frame_number"] = thumb_frame_number else: export_kwargs.update({ "in_mark": in_mark, "out_mark": out_mark }) + preset_path = opfapi.modify_preset_file( + preset_orig_xml_path, staging_dir, modify_xml_data) + # get and make export dir paths export_dir_path = str(os.path.join( staging_dir, unique_name @@ -254,18 +318,29 @@ class ExtractSubsetResources(openpype.api.Extractor): opfapi.export_clip( export_dir_path, exporting_clip, preset_path, **export_kwargs) + repr_name = unique_name + # make sure only first segment is used if underscore in name + # HACK: `ftrackreview_withLUT` will result only in `ftrackreview` + if ( + "thumbnail" in unique_name + or "ftrackreview" in unique_name + ): + repr_name = unique_name.split("_")[0] + # create representation data representation_data = { - "name": unique_name, - "outputName": unique_name, + "name": repr_name, + "outputName": repr_name, "ext": extension, "stagingDir": export_dir_path, "tags": repre_tags, "data": { "colorspace": color_out }, - "load_to_batch_group": load_to_batch_group, - "batch_group_loader_name": batch_group_loader_name + "load_to_batch_group": preset_config.get( + "load_to_batch_group"), + "batch_group_loader_name": preset_config.get( + "batch_group_loader_name") or None } # collect all available content of export dir @@ -289,7 +364,7 @@ class ExtractSubsetResources(openpype.api.Extractor): if os.path.splitext(f)[-1] == ".mov" ] # then try if thumbnail is not in unique name - or unique_name == "thumbnail" + or repr_name == "thumbnail" ): representation_data["files"] = files.pop() else: @@ -298,9 +373,9 @@ class ExtractSubsetResources(openpype.api.Extractor): # add frame range if preset_config["representation_add_range"]: representation_data.update({ - "frameStart": frame_start_handle, + "frameStart": repre_frame_start, "frameEnd": ( - frame_start_handle + source_duration_handles), + repre_frame_start + source_duration_handles) - 1, "fps": instance.data["fps"] }) @@ -317,8 +392,54 @@ class ExtractSubsetResources(openpype.api.Extractor): # at the end remove the duplicated clip flame.delete(exporting_clip) - self.log.debug("All representations: {}".format( - pformat(instance.data["representations"]))) + def _get_retimed_attributes(self, instance): + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + + # get basic variables + otio_clip = instance.data["otioClip"] + + # get available range trimmed with processed retimes + retimed_attributes = get_media_range_with_retimes( + otio_clip, handle_start, handle_end) + self.log.debug( + ">> retimed_attributes: {}".format(retimed_attributes)) + + r_media_in = int(retimed_attributes["mediaIn"]) + r_media_out = int(retimed_attributes["mediaOut"]) + version_data = retimed_attributes.get("versionData") + + return { + "version_data": version_data, + "handle_start": int(retimed_attributes["handleStart"]), + "handle_end": int(retimed_attributes["handleEnd"]), + "source_duration": ( + (r_media_out - r_media_in) + 1 + ), + "speed": float(retimed_attributes["speed"]) + } + + def _should_skip(self, preset_config, clip_path, unique_name): + # get activating attributes + activated_preset = preset_config["active"] + filter_path_regex = preset_config.get("filter_path_regex") + + self.log.info( + "Preset `{}` is active `{}` with filter `{}`".format( + unique_name, activated_preset, filter_path_regex + ) + ) + + # skip if not activated presete + if not activated_preset: + return True + + # exclude by regex filter if any + if ( + filter_path_regex + and 
not re.search(filter_path_regex, clip_path) + ): + return True def _unfolds_nested_folders(self, stage_dir, files_list, ext): """Unfolds nested folders @@ -408,8 +529,17 @@ class ExtractSubsetResources(openpype.api.Extractor): """ Import clip from path """ - clips = flame.import_clips(path) + dir_path = os.path.dirname(path) + media_info = MediaInfoFile(path, logger=self.log) + file_pattern = media_info.file_pattern + self.log.debug("__ file_pattern: {}".format(file_pattern)) + + # rejoin the pattern to dir path + new_path = os.path.join(dir_path, file_pattern) + + clips = flame.import_clips(new_path) self.log.info("Clips [{}] imported from `{}`".format(clips, path)) + if not clips: self.log.warning("Path `{}` is not having any clips".format(path)) return None @@ -418,3 +548,30 @@ class ExtractSubsetResources(openpype.api.Extractor): "Path `{}` is containing more that one clip".format(path) ) return clips[0] + + def staging_dir(self, instance): + """Provide a temporary directory in which to store extracted files + + Upon calling this method the staging directory is stored inside + the instance.data['stagingDir'] + """ + staging_dir = instance.data.get('stagingDir', None) + openpype_temp_dir = os.getenv("OPENPYPE_TEMP_DIR") + + if not staging_dir: + if openpype_temp_dir and os.path.exists(openpype_temp_dir): + staging_dir = os.path.normpath( + tempfile.mkdtemp( + prefix="pyblish_tmp_", + dir=openpype_temp_dir + ) + ) + else: + staging_dir = os.path.normpath( + tempfile.mkdtemp(prefix="pyblish_tmp_") + ) + instance.data['stagingDir'] = staging_dir + + instance.context.data["cleanupFullPaths"].append(staging_dir) + + return staging_dir diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py index da9553cc2a..4d45f67ded 100644 --- a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py +++ b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py @@ -3,9 +3,9 @@ import copy from collections import OrderedDict from pprint import pformat import pyblish -from openpype.lib import get_workdir import openpype.hosts.flame.api as opfapi import openpype.pipeline as op_pipeline +from openpype.pipeline.workfile import get_workdir class IntegrateBatchGroup(pyblish.api.InstancePlugin): @@ -323,6 +323,14 @@ class IntegrateBatchGroup(pyblish.api.InstancePlugin): def _get_shot_task_dir_path(self, instance, task_data): project_doc = instance.data["projectEntity"] asset_entity = instance.data["assetEntity"] + anatomy = instance.context.data["anatomy"] + project_settings = instance.context.data["project_settings"] return get_workdir( - project_doc, asset_entity, task_data["name"], "flame") + project_doc, + asset_entity, + task_data["name"], + "flame", + anatomy, + project_settings=project_settings + ) diff --git a/openpype/hosts/flame/startup/openpype_in_flame.py b/openpype/hosts/flame/startup/openpype_in_flame.py index f2ac23b19e..d07aaa6b7d 100644 --- a/openpype/hosts/flame/startup/openpype_in_flame.py +++ b/openpype/hosts/flame/startup/openpype_in_flame.py @@ -73,6 +73,8 @@ def load_apps(): opfapi.FlameMenuProjectConnect(opfapi.CTX.app_framework)) opfapi.CTX.flame_apps.append( opfapi.FlameMenuTimeline(opfapi.CTX.app_framework)) + opfapi.CTX.flame_apps.append( + opfapi.FlameMenuUniversal(opfapi.CTX.app_framework)) opfapi.CTX.app_framework.log.info("Apps are loaded") @@ -191,3 +193,27 @@ def get_timeline_custom_ui_actions(): openpype_install() return _build_app_menu("FlameMenuTimeline") + + +def 
get_batch_custom_ui_actions(): + """Hook to create submenu in batch + + Returns: + list: menu object + """ + # install openpype and the host + openpype_install() + + return _build_app_menu("FlameMenuUniversal") + + +def get_media_panel_custom_ui_actions(): + """Hook to create submenu in desktop + + Returns: + list: menu object + """ + # install openpype and the host + openpype_install() + + return _build_app_menu("FlameMenuUniversal") diff --git a/openpype/hosts/fusion/__init__.py b/openpype/hosts/fusion/__init__.py index e69de29bb2..ddae01890b 100644 --- a/openpype/hosts/fusion/__init__.py +++ b/openpype/hosts/fusion/__init__.py @@ -0,0 +1,10 @@ +from .addon import ( + FusionAddon, + FUSION_HOST_DIR, +) + + +__all__ = ( + "FusionAddon", + "FUSION_HOST_DIR", +) diff --git a/openpype/hosts/fusion/addon.py b/openpype/hosts/fusion/addon.py new file mode 100644 index 0000000000..d1bd1566b7 --- /dev/null +++ b/openpype/hosts/fusion/addon.py @@ -0,0 +1,31 @@ +import os +from openpype.modules import OpenPypeModule, IHostAddon + +FUSION_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class FusionAddon(OpenPypeModule, IHostAddon): + name = "fusion" + host_name = "fusion" + + def initialize(self, module_settings): + self.enabled = True + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(FUSION_HOST_DIR, "hooks") + ] + + def add_implementation_envs(self, env, _app): + # Set default values if are not already set via settings + defaults = { + "OPENPYPE_LOG_NO_COLORS": "Yes" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_workfile_extensions(self): + return [".comp"] diff --git a/openpype/hosts/fusion/api/__init__.py b/openpype/hosts/fusion/api/__init__.py index 19d1e092fe..ed70dbca50 100644 --- a/openpype/hosts/fusion/api/__init__.py +++ b/openpype/hosts/fusion/api/__init__.py @@ -5,10 +5,7 @@ from .pipeline import ( ls, imprint_container, - parse_container, - - get_current_comp, - comp_lock_and_undo_chunk + parse_container ) from .workio import ( @@ -22,8 +19,10 @@ from .workio import ( from .lib import ( maintained_selection, - get_additional_data, - update_frame_range + update_frame_range, + set_asset_framerange, + get_current_comp, + comp_lock_and_undo_chunk ) from .menu import launch_openpype_menu @@ -38,9 +37,6 @@ __all__ = [ "imprint_container", "parse_container", - "get_current_comp", - "comp_lock_and_undo_chunk", - # workio "open_file", "save_file", @@ -51,8 +47,10 @@ __all__ = [ # lib "maintained_selection", - "get_additional_data", "update_frame_range", + "set_asset_framerange", + "get_current_comp", + "comp_lock_and_undo_chunk", # menu "launch_openpype_menu", diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py index 29f3a3a3eb..a33e5cf289 100644 --- a/openpype/hosts/fusion/api/lib.py +++ b/openpype/hosts/fusion/api/lib.py @@ -3,20 +3,27 @@ import sys import re import contextlib -from bson.objectid import ObjectId -from Qt import QtGui - +from openpype.lib import Logger +from openpype.client import ( + get_asset_by_name, + get_subset_by_name, + get_last_version_by_subset_id, + get_representation_by_id, + get_representation_by_name, + get_representation_parents, +) from openpype.pipeline import ( switch_container, legacy_io, ) -from .pipeline import get_current_comp, comp_lock_and_undo_chunk +from openpype.pipeline.context_tools import get_current_project_asset self = sys.modules[__name__] self._project = None -def update_frame_range(start, 
end, comp=None, set_render_range=True): +def update_frame_range(start, end, comp=None, set_render_range=True, + handle_start=0, handle_end=0): """Set Fusion comp's start and end frame range Args: @@ -25,6 +32,8 @@ def update_frame_range(start, end, comp=None, set_render_range=True): comp (object, Optional): comp object from fusion set_render_range (bool, Optional): When True this will also set the composition's render start and end frame. + handle_start (float, int, Optional): frame handles before start frame + handle_end (float, int, Optional): frame handles after end frame Returns: None @@ -34,11 +43,16 @@ def update_frame_range(start, end, comp=None, set_render_range=True): if not comp: comp = get_current_comp() + # Convert any potential none type to zero + handle_start = handle_start or 0 + handle_end = handle_end or 0 + attrs = { - "COMPN_GlobalStart": start, - "COMPN_GlobalEnd": end + "COMPN_GlobalStart": start - handle_start, + "COMPN_GlobalEnd": end + handle_end } + # set frame range if set_render_range: attrs.update({ "COMPN_RenderStart": start, @@ -49,24 +63,122 @@ def update_frame_range(start, end, comp=None, set_render_range=True): comp.SetAttrs(attrs) -def get_additional_data(container): - """Get Fusion related data for the container +def set_asset_framerange(): + """Set Comp's frame range based on current asset""" + asset_doc = get_current_project_asset() + start = asset_doc["data"]["frameStart"] + end = asset_doc["data"]["frameEnd"] + handle_start = asset_doc["data"]["handleStart"] + handle_end = asset_doc["data"]["handleEnd"] + update_frame_range(start, end, set_render_range=True, + handle_start=handle_start, + handle_end=handle_end) - Args: - container(dict): the container found by the ls() function - Returns: - dict +def set_asset_resolution(): + """Set Comp's resolution width x height default based on current asset""" + asset_doc = get_current_project_asset() + width = asset_doc["data"]["resolutionWidth"] + height = asset_doc["data"]["resolutionHeight"] + comp = get_current_comp() + + print("Setting comp frame format resolution to {}x{}".format(width, + height)) + comp.SetPrefs({ + "Comp.FrameFormat.Width": width, + "Comp.FrameFormat.Height": height, + }) + + +def validate_comp_prefs(comp=None, force_repair=False): + """Validate current comp defaults with asset settings. + + Validates fps, resolutionWidth, resolutionHeight, aspectRatio. + + This does *not* validate frameStart, frameEnd, handleStart and handleEnd. 
""" - tool = container["_tool"] - tile_color = tool.TileColor - if tile_color is None: - return {} + if comp is None: + comp = get_current_comp() - return {"color": QtGui.QColor.fromRgbF(tile_color["R"], - tile_color["G"], - tile_color["B"])} + log = Logger.get_logger("validate_comp_prefs") + + fields = [ + "name", + "data.fps", + "data.resolutionWidth", + "data.resolutionHeight", + "data.pixelAspect" + ] + asset_doc = get_current_project_asset(fields=fields) + asset_data = asset_doc["data"] + + comp_frame_format_prefs = comp.GetPrefs("Comp.FrameFormat") + + # Pixel aspect ratio in Fusion is set as AspectX and AspectY so we convert + # the data to something that is more sensible to Fusion + asset_data["pixelAspectX"] = asset_data.pop("pixelAspect") + asset_data["pixelAspectY"] = 1.0 + + validations = [ + ("fps", "Rate", "FPS"), + ("resolutionWidth", "Width", "Resolution Width"), + ("resolutionHeight", "Height", "Resolution Height"), + ("pixelAspectX", "AspectX", "Pixel Aspect Ratio X"), + ("pixelAspectY", "AspectY", "Pixel Aspect Ratio Y") + ] + + invalid = [] + for key, comp_key, label in validations: + asset_value = asset_data[key] + comp_value = comp_frame_format_prefs.get(comp_key) + if asset_value != comp_value: + invalid_msg = "{} {} should be {}".format(label, + comp_value, + asset_value) + invalid.append(invalid_msg) + + if not force_repair: + # Do not log warning if we force repair anyway + log.warning( + "Comp {pref} {value} does not match asset " + "'{asset_name}' {pref} {asset_value}".format( + pref=label, + value=comp_value, + asset_name=asset_doc["name"], + asset_value=asset_value) + ) + + if invalid: + + def _on_repair(): + attributes = dict() + for key, comp_key, _label in validations: + value = asset_data[key] + comp_key_full = "Comp.FrameFormat.{}".format(comp_key) + attributes[comp_key_full] = value + comp.SetPrefs(attributes) + + if force_repair: + log.info("Applying default Comp preferences..") + _on_repair() + return + + from . import menu + from openpype.widgets import popup + from openpype.style import load_stylesheet + dialog = popup.Popup(parent=menu.menu) + dialog.setWindowTitle("Fusion comp has invalid configuration") + + msg = "Comp preferences mismatches '{}'".format(asset_doc["name"]) + msg += "\n" + "\n".join(invalid) + dialog.setMessage(msg) + dialog.setButtonText("Repair") + dialog.on_clicked.connect(_on_repair) + dialog.show() + dialog.raise_() + dialog.activateWindow() + dialog.setStyleSheet(load_stylesheet()) def switch_item(container, @@ -93,13 +205,16 @@ def switch_item(container, raise ValueError("Must have at least one change provided to switch.") # Collect any of current asset, subset and representation if not provided - # so we can use the original name from those. + # so we can use the original name from those. 
+ project_name = legacy_io.active_project() if any(not x for x in [asset_name, subset_name, representation_name]): - _id = ObjectId(container["representation"]) - representation = legacy_io.find_one({ - "type": "representation", "_id": _id - }) - version, subset, asset, project = legacy_io.parenthood(representation) + repre_id = container["representation"] + representation = get_representation_by_id(project_name, repre_id) + repre_parent_docs = get_representation_parents(representation) + if repre_parent_docs: + version, subset, asset, _ = repre_parent_docs + else: + version = subset = asset = None if asset_name is None: asset_name = asset["name"] @@ -111,39 +226,26 @@ def switch_item(container, representation_name = representation["name"] # Find the new one - asset = legacy_io.find_one({ - "name": asset_name, - "type": "asset" - }) + asset = get_asset_by_name(project_name, asset_name, fields=["_id"]) assert asset, ("Could not find asset in the database with the name " "'%s'" % asset_name) - subset = legacy_io.find_one({ - "name": subset_name, - "type": "subset", - "parent": asset["_id"] - }) + subset = get_subset_by_name( + project_name, subset_name, asset["_id"], fields=["_id"] + ) assert subset, ("Could not find subset in the database with the name " "'%s'" % subset_name) - version = legacy_io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[('name', -1)] + version = get_last_version_by_subset_id( + project_name, subset["_id"], fields=["_id"] ) - assert version, "Could not find a version for {}.{}".format( asset_name, subset_name ) - representation = legacy_io.find_one({ - "name": representation_name, - "type": "representation", - "parent": version["_id"]} + representation = get_representation_by_name( + project_name, representation_name, version["_id"] ) - assert representation, ("Could not find representation in the database " "with the name '%s'" % representation_name) @@ -198,3 +300,21 @@ def get_frame_path(path): padding = 4 # default Fusion padding return filename, padding, ext + + +def get_current_comp(): + """Hack to get current comp in this session""" + fusion = getattr(sys.modules["__main__"], "fusion", None) + return fusion.CurrentComp if fusion else None + + +@contextlib.contextmanager +def comp_lock_and_undo_chunk(comp, undo_queue_name="Script CMD"): + """Lock comp and open an undo chunk during the context""" + try: + comp.Lock() + comp.StartUndo(undo_queue_name) + yield + finally: + comp.Unlock() + comp.EndUndo() diff --git a/openpype/hosts/fusion/api/menu.py b/openpype/hosts/fusion/api/menu.py index 6234322d7f..39126935e6 100644 --- a/openpype/hosts/fusion/api/menu.py +++ b/openpype/hosts/fusion/api/menu.py @@ -1,43 +1,26 @@ -import os import sys -from Qt import QtWidgets, QtCore +from Qt import QtWidgets, QtCore, QtGui -from openpype import style from openpype.tools.utils import host_tools - +from openpype.style import load_stylesheet +from openpype.lib import register_event_callback from openpype.hosts.fusion.scripts import ( set_rendermode, duplicate_with_inputs ) +from openpype.hosts.fusion.api.lib import ( + set_asset_framerange, + set_asset_resolution +) +from openpype.pipeline import legacy_io +from openpype.resources import get_openpype_icon_filepath +from .pipeline import FusionEventHandler +from .pulse import FusionPulse -def load_stylesheet(): - path = os.path.join(os.path.dirname(__file__), "menu_style.qss") - if not os.path.exists(path): - print("Unable to load stylesheet, file not found in resources") - return "" - - with open(path, 
"r") as file_stream: - stylesheet = file_stream.read() - return stylesheet - - -class Spacer(QtWidgets.QWidget): - def __init__(self, height, *args, **kwargs): - super(Spacer, self).__init__(*args, **kwargs) - - self.setFixedHeight(height) - - real_spacer = QtWidgets.QWidget(self) - real_spacer.setObjectName("Spacer") - real_spacer.setFixedHeight(height) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(real_spacer) - - self.setLayout(layout) +self = sys.modules[__name__] +self.menu = None class OpenPypeMenu(QtWidgets.QWidget): @@ -46,15 +29,29 @@ class OpenPypeMenu(QtWidgets.QWidget): self.setObjectName("OpenPypeMenu") + icon_path = get_openpype_icon_filepath() + icon = QtGui.QIcon(icon_path) + self.setWindowIcon(icon) + self.setWindowFlags( QtCore.Qt.Window | QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint + | QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowStaysOnTopHint ) self.render_mode_widget = None self.setWindowTitle("OpenPype") + + asset_label = QtWidgets.QLabel("Context", self) + asset_label.setStyleSheet("""QLabel { + font-size: 14px; + font-weight: 600; + color: #5f9fb8; + }""") + asset_label.setAlignment(QtCore.Qt.AlignHCenter) + workfiles_btn = QtWidgets.QPushButton("Workfiles...", self) create_btn = QtWidgets.QPushButton("Create...", self) publish_btn = QtWidgets.QPushButton("Publish...", self) @@ -62,77 +59,111 @@ class OpenPypeMenu(QtWidgets.QWidget): manager_btn = QtWidgets.QPushButton("Manage...", self) libload_btn = QtWidgets.QPushButton("Library...", self) rendermode_btn = QtWidgets.QPushButton("Set render mode...", self) + set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self) + set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self) duplicate_with_inputs_btn = QtWidgets.QPushButton( "Duplicate with input connections", self ) - reset_resolution_btn = QtWidgets.QPushButton( - "Reset Resolution from project", self - ) layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(10, 20, 10, 20) + layout.addWidget(asset_label) + + layout.addSpacing(20) + layout.addWidget(workfiles_btn) + + layout.addSpacing(20) + layout.addWidget(create_btn) - layout.addWidget(publish_btn) layout.addWidget(load_btn) + layout.addWidget(publish_btn) layout.addWidget(manager_btn) - layout.addWidget(Spacer(15, self)) + layout.addSpacing(20) layout.addWidget(libload_btn) - layout.addWidget(Spacer(15, self)) + layout.addSpacing(20) + layout.addWidget(set_framerange_btn) + layout.addWidget(set_resolution_btn) layout.addWidget(rendermode_btn) - layout.addWidget(Spacer(15, self)) + layout.addSpacing(20) layout.addWidget(duplicate_with_inputs_btn) - layout.addWidget(reset_resolution_btn) self.setLayout(layout) + # Store reference so we can update the label + self.asset_label = asset_label + workfiles_btn.clicked.connect(self.on_workfile_clicked) create_btn.clicked.connect(self.on_create_clicked) publish_btn.clicked.connect(self.on_publish_clicked) load_btn.clicked.connect(self.on_load_clicked) manager_btn.clicked.connect(self.on_manager_clicked) libload_btn.clicked.connect(self.on_libload_clicked) - rendermode_btn.clicked.connect(self.on_rendernode_clicked) + rendermode_btn.clicked.connect(self.on_rendermode_clicked) duplicate_with_inputs_btn.clicked.connect( self.on_duplicate_with_inputs_clicked) - reset_resolution_btn.clicked.connect(self.on_reset_resolution_clicked) + set_resolution_btn.clicked.connect(self.on_set_resolution_clicked) + 
set_framerange_btn.clicked.connect(self.on_set_framerange_clicked) + + self._callbacks = [] + self.register_callback("taskChanged", self.on_task_changed) + self.on_task_changed() + + # Force close current process if Fusion is closed + self._pulse = FusionPulse(parent=self) + self._pulse.start() + + # Detect Fusion events as OpenPype events + self._event_handler = FusionEventHandler(parent=self) + self._event_handler.start() + + def on_task_changed(self): + # Update current context label + label = legacy_io.Session["AVALON_ASSET"] + self.asset_label.setText(label) + + def register_callback(self, name, fn): + + # Create a wrapper callback that we only store + # for as long as we want it to persist as callback + def _callback(*args): + fn() + + self._callbacks.append(_callback) + register_event_callback(name, _callback) + + def deregister_all_callbacks(self): + self._callbacks[:] = [] def on_workfile_clicked(self): - print("Clicked Workfile") host_tools.show_workfiles() def on_create_clicked(self): - print("Clicked Create") host_tools.show_creator() def on_publish_clicked(self): - print("Clicked Publish") host_tools.show_publish() def on_load_clicked(self): - print("Clicked Load") host_tools.show_loader(use_context=True) def on_manager_clicked(self): - print("Clicked Manager") host_tools.show_scene_inventory() def on_libload_clicked(self): - print("Clicked Library") host_tools.show_library_loader() - def on_rendernode_clicked(self): - print("Clicked Set Render Mode") + def on_rendermode_clicked(self): if self.render_mode_widget is None: window = set_rendermode.SetRenderMode() - window.setStyleSheet(style.load_stylesheet()) + window.setStyleSheet(load_stylesheet()) window.show() self.render_mode_widget = window else: @@ -140,15 +171,16 @@ class OpenPypeMenu(QtWidgets.QWidget): def on_duplicate_with_inputs_clicked(self): duplicate_with_inputs.duplicate_with_input_connections() - print("Clicked Set Colorspace") - def on_reset_resolution_clicked(self): - print("Clicked Reset Resolution") + def on_set_resolution_clicked(self): + set_asset_resolution() + + def on_set_framerange_clicked(self): + set_asset_framerange() def launch_openpype_menu(): app = QtWidgets.QApplication(sys.argv) - app.setQuitOnLastWindowClosed(False) pype_menu = OpenPypeMenu() @@ -156,5 +188,8 @@ def launch_openpype_menu(): pype_menu.setStyleSheet(stylesheet) pype_menu.show() + self.menu = pype_menu - sys.exit(app.exec_()) + result = app.exec_() + print("Shutting down..") + sys.exit(result) diff --git a/openpype/hosts/fusion/api/menu_style.qss b/openpype/hosts/fusion/api/menu_style.qss deleted file mode 100644 index 12c474b070..0000000000 --- a/openpype/hosts/fusion/api/menu_style.qss +++ /dev/null @@ -1,29 +0,0 @@ -QWidget { - background-color: #282828; - border-radius: 3; -} - -QPushButton { - border: 1px solid #090909; - background-color: #201f1f; - color: #ffffff; - padding: 5; -} - -QPushButton:focus { - background-color: "#171717"; - color: #d0d0d0; -} - -QPushButton:hover { - background-color: "#171717"; - color: #e64b3d; -} - -#OpenPypeMenu { - border: 1px solid #fef9ef; -} - -#Spacer { - background-color: #282828; -} diff --git a/openpype/hosts/fusion/api/pipeline.py b/openpype/hosts/fusion/api/pipeline.py index 54002f9f51..b6092f7c1b 100644 --- a/openpype/hosts/fusion/api/pipeline.py +++ b/openpype/hosts/fusion/api/pipeline.py @@ -4,11 +4,15 @@ Basic avalon integration import os import sys import logging -import contextlib import pyblish.api +from Qt import QtCore -from openpype.api import Logger +from openpype.lib 
import ( + Logger, + register_event_callback, + emit_event +) from openpype.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, @@ -18,12 +22,19 @@ from openpype.pipeline import ( deregister_inventory_action_path, AVALON_CONTAINER_ID, ) -import openpype.hosts.fusion +from openpype.pipeline.load import any_outdated_containers +from openpype.hosts.fusion import FUSION_HOST_DIR +from openpype.tools.utils import host_tools -log = Logger().get_logger(__name__) +from .lib import ( + get_current_comp, + comp_lock_and_undo_chunk, + validate_comp_prefs +) -HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.fusion.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +log = Logger.get_logger(__name__) + +PLUGINS_DIR = os.path.join(FUSION_HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") @@ -31,16 +42,32 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") -class CompLogHandler(logging.Handler): +class FusionLogHandler(logging.Handler): + # Keep a reference to fusion's Print function (Remote Object) + _print = None + + @property + def print(self): + if self._print is not None: + # Use cached + return self._print + + _print = getattr(sys.modules["__main__"], "fusion").Print + if _print is None: + # Backwards compatibility: Print method on Fusion instance was + # added around Fusion 17.4 and wasn't available on PyRemote Object + # before + _print = get_current_comp().Print + self._print = _print + return _print + def emit(self, record): entry = self.format(record) - comp = get_current_comp() - if comp: - comp.Print(entry) + self.print(entry) def install(): - """Install fusion-specific functionality of avalon-core. + """Install fusion-specific functionality of OpenPype. This is where you install menus and register families, data and loaders into fusion. @@ -52,20 +79,18 @@ def install(): """ # Remove all handlers associated with the root logger object, because - # that one sometimes logs as "warnings" incorrectly. + # that one always logs as "warnings" incorrectly. 
for handler in logging.root.handlers[:]: logging.root.removeHandler(handler) # Attach default logging handler that prints to active comp logger = logging.getLogger() formatter = logging.Formatter(fmt="%(message)s\n") - handler = CompLogHandler() + handler = FusionLogHandler() handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.DEBUG) - log.info("openpype.hosts.fusion installed") - pyblish.api.register_host("fusion") pyblish.api.register_plugin_path(PUBLISH_PATH) log.info("Registering Fusion plug-ins..") @@ -78,6 +103,11 @@ def install(): "instanceToggled", on_pyblish_instance_toggled ) + # Register events + register_event_callback("open", on_after_open) + register_event_callback("save", on_save) + register_event_callback("new", on_new) + def uninstall(): """Uninstall all that was installed @@ -103,7 +133,7 @@ def uninstall(): ) -def on_pyblish_instance_toggled(instance, new_value, old_value): +def on_pyblish_instance_toggled(instance, old_value, new_value): """Toggle saver tool passthrough states on instance toggles.""" comp = instance.context.data.get("currentComp") if not comp: @@ -126,6 +156,48 @@ def on_pyblish_instance_toggled(instance, new_value, old_value): tool.SetAttrs({"TOOLB_PassThrough": passthrough}) +def on_new(event): + comp = event["Rets"]["comp"] + validate_comp_prefs(comp, force_repair=True) + + +def on_save(event): + comp = event["sender"] + validate_comp_prefs(comp) + + +def on_after_open(event): + comp = event["sender"] + validate_comp_prefs(comp) + + if any_outdated_containers(): + log.warning("Scene has outdated content.") + + # Find OpenPype menu to attach to + from . import menu + + def _on_show_scene_inventory(): + # ensure that comp is active + frame = comp.CurrentFrame + if not frame: + print("Comp is closed, skipping show scene inventory") + return + frame.ActivateFrame() # raise comp window + host_tools.show_scene_inventory() + + from openpype.widgets import popup + from openpype.style import load_stylesheet + dialog = popup.Popup(parent=menu.menu) + dialog.setWindowTitle("Fusion comp has outdated content") + dialog.setMessage("There are outdated containers in " + "your Fusion comp.") + dialog.on_clicked.connect(_on_show_scene_inventory) + dialog.show() + dialog.raise_() + dialog.activateWindow() + dialog.setStyleSheet(load_stylesheet()) + + def ls(): """List containers from active Fusion scene @@ -139,7 +211,7 @@ def ls(): """ comp = get_current_comp() - tools = comp.GetToolList(False, "Loader").values() + tools = comp.GetToolList(False).values() for tool in tools: container = parse_container(tool) @@ -211,19 +283,114 @@ def parse_container(tool): return container -def get_current_comp(): - """Hack to get current comp in this session""" - fusion = getattr(sys.modules["__main__"], "fusion", None) - return fusion.CurrentComp if fusion else None +class FusionEventThread(QtCore.QThread): + """QThread which will periodically ping Fusion app for any events. 
+ + The fusion.UIManager must be set up to be notified of events before they'll + be reported by this thread, for example: + fusion.UIManager.AddNotify("Comp_Save", None) + + """ + + on_event = QtCore.Signal(dict) + + def run(self): + + app = getattr(sys.modules["__main__"], "app", None) + if app is None: + # No Fusion app found + return + + # As optimization store the GetEvent method directly because every + # getattr of UIManager.GetEvent tries to resolve the Remote Function + # through the PyRemoteObject + get_event = app.UIManager.GetEvent + delay = int(os.environ.get("OPENPYPE_FUSION_CALLBACK_INTERVAL", 1000)) + while True: + if self.isInterruptionRequested(): + return + + # Process all events that have been queued up until now + while True: + event = get_event(False) + if not event: + break + self.on_event.emit(event) + + # Wait some time before processing events again + # to not keep blocking the UI + self.msleep(delay) -@contextlib.contextmanager -def comp_lock_and_undo_chunk(comp, undo_queue_name="Script CMD"): - """Lock comp and open an undo chunk during the context""" - try: - comp.Lock() - comp.StartUndo(undo_queue_name) - yield - finally: - comp.Unlock() - comp.EndUndo() +class FusionEventHandler(QtCore.QObject): + """Emits OpenPype events based on Fusion events captured in a QThread. + + This will emit the following OpenPype events based on Fusion actions: + save: Comp_Save, Comp_SaveAs + open: Comp_Opened + new: Comp_New + + To use this you can attach it to you Qt UI so it runs in the background. + E.g. + >>> handler = FusionEventHandler(parent=window) + >>> handler.start() + + + """ + ACTION_IDS = [ + "Comp_Save", + "Comp_SaveAs", + "Comp_New", + "Comp_Opened" + ] + + def __init__(self, parent=None): + super(FusionEventHandler, self).__init__(parent=parent) + + # Set up Fusion event callbacks + fusion = getattr(sys.modules["__main__"], "fusion", None) + ui = fusion.UIManager + + # Add notifications for the ones we want to listen to + notifiers = [] + for action_id in self.ACTION_IDS: + notifier = ui.AddNotify(action_id, None) + notifiers.append(notifier) + + # TODO: Not entirely sure whether these must be kept to avoid + # garbage collection + self._notifiers = notifiers + + self._event_thread = FusionEventThread(parent=self) + self._event_thread.on_event.connect(self._on_event) + + def start(self): + self._event_thread.start() + + def stop(self): + self._event_thread.stop() + + def _on_event(self, event): + """Handle Fusion events to emit OpenPype events""" + if not event: + return + + what = event["what"] + + # Comp Save + if what in {"Comp_Save", "Comp_SaveAs"}: + if not event["Rets"].get("success"): + # If the Save action is cancelled it will still emit an + # event but with "success": False so we ignore those cases + return + # Comp was saved + emit_event("save", data=event) + return + + # Comp New + elif what in {"Comp_New"}: + emit_event("new", data=event) + + # Comp Opened + elif what in {"Comp_Opened"}: + emit_event("open", data=event) diff --git a/openpype/hosts/fusion/api/pulse.py b/openpype/hosts/fusion/api/pulse.py new file mode 100644 index 0000000000..eb7ef3785d --- /dev/null +++ b/openpype/hosts/fusion/api/pulse.py @@ -0,0 +1,63 @@ +import os +import sys + +from Qt import QtCore + + +class PulseThread(QtCore.QThread): + no_response = QtCore.Signal() + + def __init__(self, parent=None): + super(PulseThread, self).__init__(parent=parent) + + def run(self): + app = getattr(sys.modules["__main__"], "app", None) + + # Interval in milliseconds + interval = 
os.environ.get("OPENPYPE_FUSION_PULSE_INTERVAL", 1000) + + while True: + if self.isInterruptionRequested(): + return + + # We don't need to call Test because PyRemoteObject of the app + # will actually fail to even resolve the Test function if it has + # gone down. So we can actually already just check by confirming + # the method is still getting resolved. (Optimization) + if app.Test is None: + self.no_response.emit() + + self.msleep(interval) + + +class FusionPulse(QtCore.QObject): + """A Timer that checks whether host app is still alive. + + This checks whether the Fusion process is still active at a certain + interval. This is useful due to how Fusion runs its scripts. Each script + runs in its own environment and process (a `fusionscript` process each). + If Fusion would go down and we have a UI process running at the same time + then it can happen that the `fusionscript.exe` will remain running in the + background in limbo due to e.g. a Qt interface's QApplication that keeps + running infinitely. + + Warning: + When the host is not detected this will automatically exit + the current process. + + """ + + def __init__(self, parent=None): + super(FusionPulse, self).__init__(parent=parent) + self._thread = PulseThread(parent=self) + self._thread.no_response.connect(self.on_no_response) + + def on_no_response(self): + print("Pulse detected no response from Fusion..") + sys.exit(1) + + def start(self): + self._thread.start() + + def stop(self): + self._thread.requestInterruption() diff --git a/openpype/hosts/fusion/api/workio.py b/openpype/hosts/fusion/api/workio.py index a1710c6e3a..939b2ff4be 100644 --- a/openpype/hosts/fusion/api/workio.py +++ b/openpype/hosts/fusion/api/workio.py @@ -2,13 +2,11 @@ import sys import os -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS - -from .pipeline import get_current_comp +from .lib import get_current_comp def file_extensions(): - return HOST_WORKFILE_EXTENSIONS["fusion"] + return [".comp"] def has_unsaved_changes(): diff --git a/openpype/hosts/fusion/deploy/Config/openpype_menu.fu b/openpype/hosts/fusion/deploy/Config/openpype_menu.fu new file mode 100644 index 0000000000..8b8d448259 --- /dev/null +++ b/openpype/hosts/fusion/deploy/Config/openpype_menu.fu @@ -0,0 +1,60 @@ +{ + Action + { + ID = "OpenPype_Menu", + Category = "OpenPype", + Name = "OpenPype Menu", + + Targets = + { + Composition = + { + Execute = _Lua [=[ + local scriptPath = app:MapPath("OpenPype:MenuScripts/openpype_menu.py") + if bmd.fileexists(scriptPath) == false then + print("[OpenPype Error] Can't run file: " .. scriptPath) + else + target:RunScript(scriptPath) + end + ]=], + }, + }, + }, + Action + { + ID = "OpenPype_Install_PySide2", + Category = "OpenPype", + Name = "Install PySide2", + + Targets = + { + Composition = + { + Execute = _Lua [=[ + local scriptPath = app:MapPath("OpenPype:MenuScripts/install_pyside2.py") + if bmd.fileexists(scriptPath) == false then + print("[OpenPype Error] Can't run file: " .. 
scriptPath) + else + target:RunScript(scriptPath) + end + ]=], + }, + }, + }, + Menus + { + Target = "ChildFrame", + + Before "Help" + { + Sub "OpenPype" + { + "OpenPype_Menu{}", + "_", + Sub "Admin" { + "OpenPype_Install_PySide2{}" + } + } + }, + }, +} diff --git a/openpype/hosts/fusion/deploy/MenuScripts/README.md b/openpype/hosts/fusion/deploy/MenuScripts/README.md new file mode 100644 index 0000000000..f87eaea4a2 --- /dev/null +++ b/openpype/hosts/fusion/deploy/MenuScripts/README.md @@ -0,0 +1,6 @@ +### OpenPype deploy MenuScripts + +Note that this `MenuScripts` is not an official Fusion folder. +OpenPype only uses this folder in `{fusion}/deploy/` to trigger the OpenPype menu actions. + +They are used in the actions defined in `.fu` files in `{fusion}/deploy/Config`. \ No newline at end of file diff --git a/openpype/hosts/fusion/deploy/MenuScripts/install_pyside2.py b/openpype/hosts/fusion/deploy/MenuScripts/install_pyside2.py new file mode 100644 index 0000000000..ab9f13ce05 --- /dev/null +++ b/openpype/hosts/fusion/deploy/MenuScripts/install_pyside2.py @@ -0,0 +1,29 @@ +# This is just a quick hack for users running Py3 locally but having no +# Qt library installed +import os +import subprocess +import importlib + + +try: + from Qt import QtWidgets # noqa: F401 + from Qt import __binding__ + print(f"Qt binding: {__binding__}") + mod = importlib.import_module(__binding__) + print(f"Qt path: {mod.__file__}") + print("Qt library found, nothing to do..") + +except ImportError: + print("Assuming no Qt library is installed..") + print('Installing PySide2 for Python 3.6: ' + f'{os.environ["FUSION16_PYTHON36_HOME"]}') + + # Get full path to python executable + exe = "python.exe" if os.name == 'nt' else "python" + python = os.path.join(os.environ["FUSION16_PYTHON36_HOME"], exe) + assert os.path.exists(python), f"Python doesn't exist: {python}" + + # Do python -m pip install PySide2 + args = [python, "-m", "pip", "install", "PySide2"] + print(f"Args: {args}") + subprocess.Popen(args) diff --git a/openpype/hosts/fusion/deploy/MenuScripts/openpype_menu.py b/openpype/hosts/fusion/deploy/MenuScripts/openpype_menu.py new file mode 100644 index 0000000000..2918c552c8 --- /dev/null +++ b/openpype/hosts/fusion/deploy/MenuScripts/openpype_menu.py @@ -0,0 +1,35 @@ +import os +import sys + +from openpype.lib import Logger +from openpype.pipeline import ( + install_host, + registered_host, +) + + +def main(env): + # This script working directory starts in Fusion application folder. + # However the contents of that folder can conflict with Qt library dlls + # so we make sure to move out of it to avoid DLL Load Failed errors. + os.chdir("..") + from openpype.hosts.fusion import api + from openpype.hosts.fusion.api import menu + + # activate resolve from pype + install_host(api) + + log = Logger.get_logger(__name__) + log.info(f"Registered host: {registered_host()}") + + menu.launch_openpype_menu() + + # Initiate a QTimer to check if Fusion is still alive every X interval + # If Fusion is not found - kill itself + # todo(roy): Implement timer that ensures UI doesn't remain when e.g. 
+ # Fusion closes down + + +if __name__ == "__main__": + result = main(os.environ) + sys.exit(not bool(result)) diff --git a/openpype/hosts/fusion/utility_scripts/32bit/backgrounds_selected_to32bit.py b/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/32bit/backgrounds_selected_to32bit.py similarity index 100% rename from openpype/hosts/fusion/utility_scripts/32bit/backgrounds_selected_to32bit.py rename to openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/32bit/backgrounds_selected_to32bit.py diff --git a/openpype/hosts/fusion/utility_scripts/32bit/backgrounds_to32bit.py b/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/32bit/backgrounds_to32bit.py similarity index 100% rename from openpype/hosts/fusion/utility_scripts/32bit/backgrounds_to32bit.py rename to openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/32bit/backgrounds_to32bit.py diff --git a/openpype/hosts/fusion/utility_scripts/32bit/loaders_selected_to32bit.py b/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/32bit/loaders_selected_to32bit.py similarity index 100% rename from openpype/hosts/fusion/utility_scripts/32bit/loaders_selected_to32bit.py rename to openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/32bit/loaders_selected_to32bit.py diff --git a/openpype/hosts/fusion/utility_scripts/32bit/loaders_to32bit.py b/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/32bit/loaders_to32bit.py similarity index 100% rename from openpype/hosts/fusion/utility_scripts/32bit/loaders_to32bit.py rename to openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/32bit/loaders_to32bit.py diff --git a/openpype/hosts/fusion/utility_scripts/switch_ui.py b/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/switch_ui.py similarity index 92% rename from openpype/hosts/fusion/utility_scripts/switch_ui.py rename to openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/switch_ui.py index 70eb3d0a19..93f775b24b 100644 --- a/openpype/hosts/fusion/utility_scripts/switch_ui.py +++ b/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/switch_ui.py @@ -7,13 +7,14 @@ from Qt import QtWidgets, QtCore import qtawesome as qta +from openpype.client import get_assets from openpype import style from openpype.pipeline import ( install_host, legacy_io, ) from openpype.hosts.fusion import api -from openpype.lib.avalon_context import get_workdir_from_session +from openpype.pipeline.context_tools import get_workdir_from_session log = logging.getLogger("Fusion Switch Shot") @@ -142,7 +143,7 @@ class App(QtWidgets.QWidget): # Clear any existing items self._assets.clear() - asset_names = [a["name"] for a in self.collect_assets()] + asset_names = self.collect_asset_names() completer = QtWidgets.QCompleter(asset_names) self._assets.setCompleter(completer) @@ -165,8 +166,14 @@ class App(QtWidgets.QWidget): items = glob.glob("{}/*.comp".format(directory)) return items - def collect_assets(self): - return list(legacy_io.find({"type": "asset"}, {"name": True})) + def collect_asset_names(self): + project_name = legacy_io.active_project() + asset_docs = get_assets(project_name, fields=["name"]) + asset_names = { + asset_doc["name"] + for asset_doc in asset_docs + } + return list(asset_names) def populate_comp_box(self, files): """Ensure we display the filename only but the path is stored as well diff --git a/openpype/hosts/fusion/utility_scripts/update_loader_ranges.py b/openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/update_loader_ranges.py similarity index 100% rename from openpype/hosts/fusion/utility_scripts/update_loader_ranges.py rename to 
openpype/hosts/fusion/deploy/Scripts/Comp/OpenPype/update_loader_ranges.py diff --git a/openpype/hosts/fusion/deploy/fusion_shared.prefs b/openpype/hosts/fusion/deploy/fusion_shared.prefs new file mode 100644 index 0000000000..998c6a6d66 --- /dev/null +++ b/openpype/hosts/fusion/deploy/fusion_shared.prefs @@ -0,0 +1,19 @@ +{ +Locked = true, +Global = { + Paths = { + Map = { + ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy", + ["Reactor:"] = "$(REACTOR)", + + ["Config:"] = "UserPaths:Config;OpenPype:Config", + ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts;OpenPype:Scripts", + ["UserPaths:"] = "UserData:;AllData:;Fusion:;Reactor:Deploy" + }, + }, + Script = { + PythonVersion = 3, + Python3Forced = true + }, + }, +} \ No newline at end of file diff --git a/openpype/hosts/fusion/hooks/pre_fusion_ocio_hook.py b/openpype/hosts/fusion/hooks/pre_fusion_ocio_hook.py new file mode 100644 index 0000000000..d1ae5f64fd --- /dev/null +++ b/openpype/hosts/fusion/hooks/pre_fusion_ocio_hook.py @@ -0,0 +1,34 @@ +import os +import platform + +from openpype.lib import PreLaunchHook, ApplicationLaunchFailed + + +class FusionPreLaunchOCIO(PreLaunchHook): + """Set OCIO environment variable for Fusion""" + app_groups = ["fusion"] + + def execute(self): + """Hook entry method.""" + + # get image io + project_settings = self.data["project_settings"] + + # make sure anatomy settings are having flame key + imageio_fusion = project_settings["fusion"]["imageio"] + + ocio = imageio_fusion.get("ocio") + enabled = ocio.get("enabled", False) + if not enabled: + return + + platform_key = platform.system().lower() + ocio_path = ocio["configFilePath"][platform_key] + if not ocio_path: + raise ApplicationLaunchFailed( + "Fusion OCIO is enabled in project settings but no OCIO config" + f"path is set for your current platform: {platform_key}" + ) + + self.log.info(f"Setting OCIO config path: {ocio_path}") + self.launch_context.env["OCIO"] = os.pathsep.join(ocio_path) diff --git a/openpype/hosts/fusion/hooks/pre_fusion_setup.py b/openpype/hosts/fusion/hooks/pre_fusion_setup.py index e635a0ea74..d043d54322 100644 --- a/openpype/hosts/fusion/hooks/pre_fusion_setup.py +++ b/openpype/hosts/fusion/hooks/pre_fusion_setup.py @@ -1,114 +1,61 @@ import os -import shutil - -import openpype.hosts.fusion from openpype.lib import PreLaunchHook, ApplicationLaunchFailed +from openpype.hosts.fusion import FUSION_HOST_DIR class FusionPrelaunch(PreLaunchHook): - """ - This hook will check if current workfile path has Fusion - project inside. + """Prepares OpenPype Fusion environment + + Requires FUSION_PYTHON3_HOME to be defined in the environment for Fusion + to point at a valid Python 3 build for Fusion. That is Python 3.3-3.10 + for Fusion 18 and Fusion 3.6 for Fusion 16 and 17. + + This also sets FUSION16_MasterPrefs to apply the fusion master prefs + as set in openpype/hosts/fusion/deploy/fusion_shared.prefs to enable + the OpenPype menu and force Python 3 over Python 2. 
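A side note on the `FusionPreLaunchOCIO` hook above: `os.pathsep.join(ocio_path)` only behaves sensibly when the per-platform `configFilePath` setting holds a list of paths, not a single string (joining a plain string would interleave separators between its characters). A minimal sketch of that assumption, using made-up settings values rather than the real project settings schema:

```python
import os

# Hypothetical settings shape; illustrative values only.
imageio_fusion = {
    "ocio": {
        "enabled": True,
        "configFilePath": {
            "windows": ["C:/OCIO/aces_1.2/config.ocio"],
            "linux": ["/mnt/ocio/aces_1.2/config.ocio"],
            "darwin": [],
        },
    }
}

ocio_path = imageio_fusion["ocio"]["configFilePath"]["linux"]

# A list joins into a valid OCIO value; an empty list stays falsy and the
# hook raises ApplicationLaunchFailed before ever reaching the join.
print(os.pathsep.join(ocio_path))  # -> /mnt/ocio/aces_1.2/config.ocio
```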
+ """ app_groups = ["fusion"] def execute(self): - # making sure python 3.6 is installed at provided path - py36_dir = self.launch_context.env.get("PYTHON36") - if not py36_dir: + # making sure python 3 is installed at provided path + # Py 3.3-3.10 for Fusion 18+ or Py 3.6 for Fu 16-17 + py3_var = "FUSION_PYTHON3_HOME" + fusion_python3_home = self.launch_context.env.get(py3_var, "") + + self.log.info(f"Looking for Python 3 in: {fusion_python3_home}") + for path in fusion_python3_home.split(os.pathsep): + # Allow defining multiple paths to allow "fallback" to other + # path. But make to set only a single path as final variable. + py3_dir = os.path.normpath(path) + if os.path.isdir(py3_dir): + break + else: raise ApplicationLaunchFailed( - "Required environment variable \"PYTHON36\" is not set." - "\n\nFusion implementation requires to have" - " installed Python 3.6" + "Python 3 is not installed at the provided path.\n" + "Make sure the environment in fusion settings has " + "'FUSION_PYTHON3_HOME' set correctly and make sure " + "Python 3 is installed in the given path." + f"\n\nPYTHON36: {fusion_python3_home}" ) - py36_dir = os.path.normpath(py36_dir) - if not os.path.isdir(py36_dir): - raise ApplicationLaunchFailed( - "Python 3.6 is not installed at the provided path.\n" - "Either make sure the environments in fusion settings has" - " 'PYTHON36' set corectly or make sure Python 3.6 is installed" - f" in the given path.\n\nPYTHON36: {py36_dir}" - ) - self.log.info(f"Path to Fusion Python folder: '{py36_dir}'...") - self.launch_context.env["PYTHON36"] = py36_dir + self.log.info(f"Setting {py3_var}: '{py3_dir}'...") + self.launch_context.env[py3_var] = py3_dir - utility_dir = self.launch_context.env.get("FUSION_UTILITY_SCRIPTS_DIR") - if not utility_dir: - raise ApplicationLaunchFailed( - "Required Fusion utility script dir environment variable" - " \"FUSION_UTILITY_SCRIPTS_DIR\" is not set." - ) + # Fusion 18+ requires FUSION_PYTHON3_HOME to also be on PATH + self.launch_context.env["PATH"] += ";" + py3_dir - # setting utility scripts dir for scripts syncing - utility_dir = os.path.normpath(utility_dir) - if not os.path.isdir(utility_dir): - raise ApplicationLaunchFailed( - "Fusion utility script dir does not exist. Either make sure " - "the environments in fusion settings has" - " 'FUSION_UTILITY_SCRIPTS_DIR' set correctly or reinstall " - f"Fusion.\n\nFUSION_UTILITY_SCRIPTS_DIR: '{utility_dir}'" - ) + # Fusion 16 and 17 use FUSION16_PYTHON36_HOME instead of + # FUSION_PYTHON3_HOME and will only work with a Python 3.6 version + # TODO: Detect Fusion version to only set for specific Fusion build + self.launch_context.env["FUSION16_PYTHON36_HOME"] = py3_dir - self._sync_utility_scripts(self.launch_context.env) - self.log.info("Fusion Pype wrapper has been installed") + # Add our Fusion Master Prefs which is the only way to customize + # Fusion to define where it can read custom scripts and tools from + self.log.info(f"Setting OPENPYPE_FUSION: {FUSION_HOST_DIR}") + self.launch_context.env["OPENPYPE_FUSION"] = FUSION_HOST_DIR - def _sync_utility_scripts(self, env): - """ Synchronizing basic utlility scripts for resolve. - - To be able to run scripts from inside `Fusion/Workspace/Scripts` menu - all scripts has to be accessible from defined folder. 
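The `FUSION_PYTHON3_HOME` handling above boils down to a `for`/`else` idiom: try each entry of the os.pathsep separated value and keep the first existing directory, otherwise fail the launch. The same pattern restated as a standalone helper (the function name and sample paths are illustrative only):

```python
import os

def first_existing_dir(pathsep_value):
    """Return the first existing directory in an os.pathsep separated
    string, or None when no entry exists on disk."""
    for path in pathsep_value.split(os.pathsep):
        candidate = os.path.normpath(path)
        if os.path.isdir(candidate):
            return candidate
    return None

# Several fallback locations can live in a single environment variable.
value = os.pathsep.join(["/opt/python3.6", os.path.expanduser("~")])
print(first_existing_dir(value))  # first entry that is a directory, else None
```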
- """ - if not env: - env = {k: v for k, v in os.environ.items()} - - # initiate inputs - scripts = {} - us_env = env.get("FUSION_UTILITY_SCRIPTS_SOURCE_DIR") - us_dir = env.get("FUSION_UTILITY_SCRIPTS_DIR", "") - us_paths = [os.path.join( - os.path.dirname(os.path.abspath(openpype.hosts.fusion.__file__)), - "utility_scripts" - )] - - # collect script dirs - if us_env: - self.log.info(f"Utility Scripts Env: `{us_env}`") - us_paths = us_env.split( - os.pathsep) + us_paths - - # collect scripts from dirs - for path in us_paths: - scripts.update({path: os.listdir(path)}) - - self.log.info(f"Utility Scripts Dir: `{us_paths}`") - self.log.info(f"Utility Scripts: `{scripts}`") - - # make sure no script file is in folder - if next((s for s in os.listdir(us_dir)), None): - for s in os.listdir(us_dir): - path = os.path.normpath( - os.path.join(us_dir, s)) - self.log.info(f"Removing `{path}`...") - - # remove file or directory if not in our folders - if not os.path.isdir(path): - os.remove(path) - else: - shutil.rmtree(path) - - # copy scripts into Resolve's utility scripts dir - for d, sl in scripts.items(): - # directory and scripts list - for s in sl: - # script in script list - src = os.path.normpath(os.path.join(d, s)) - dst = os.path.normpath(os.path.join(us_dir, s)) - - self.log.info(f"Copying `{src}` to `{dst}`...") - - # copy file or directory from our folders to fusion's folder - if not os.path.isdir(src): - shutil.copy2(src, dst) - else: - shutil.copytree(src, dst) + pref_var = "FUSION16_MasterPrefs" # used by Fusion 16, 17 and 18 + prefs = os.path.join(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs") + self.log.info(f"Setting {pref_var}: {prefs}") + self.launch_context.env[pref_var] = prefs diff --git a/openpype/hosts/fusion/plugins/create/create_exr_saver.py b/openpype/hosts/fusion/plugins/create/create_exr_saver.py index 8bab5ee9b1..6d93fe710a 100644 --- a/openpype/hosts/fusion/plugins/create/create_exr_saver.py +++ b/openpype/hosts/fusion/plugins/create/create_exr_saver.py @@ -1,6 +1,9 @@ import os -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + legacy_io +) from openpype.hosts.fusion.api import ( get_current_comp, comp_lock_and_undo_chunk @@ -21,12 +24,9 @@ class CreateOpenEXRSaver(LegacyCreator): comp = get_current_comp() - # todo: improve method of getting current environment - # todo: pref avalon.Session over os.environ + workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"]) - workdir = os.path.normpath(os.environ["AVALON_WORKDIR"]) - - filename = "{}..tiff".format(self.name) + filename = "{}..exr".format(self.name) filepath = os.path.join(workdir, "render", filename) with comp_lock_and_undo_chunk(comp): @@ -39,10 +39,10 @@ class CreateOpenEXRSaver(LegacyCreator): saver["Clip"] = filepath saver["OutputFormat"] = file_format - # # # Set standard TIFF settings + # Check file format settings are available if saver[file_format] is None: - raise RuntimeError("File format is not set to TiffFormat, " - "this is a bug") + raise RuntimeError("File format is not set to {}, " + "this is a bug".format(file_format)) # Set file format attributes saver[file_format]["Depth"] = 1 # int8 | int16 | float32 | other diff --git a/openpype/hosts/fusion/plugins/load/load_alembic.py b/openpype/hosts/fusion/plugins/load/load_alembic.py new file mode 100644 index 0000000000..f8b8c2cb0a --- /dev/null +++ b/openpype/hosts/fusion/plugins/load/load_alembic.py @@ -0,0 +1,70 @@ +from openpype.pipeline import ( + load, + get_representation_path, +) +from 
openpype.hosts.fusion.api import ( + imprint_container, + get_current_comp, + comp_lock_and_undo_chunk +) + + +class FusionLoadAlembicMesh(load.LoaderPlugin): + """Load Alembic mesh into Fusion""" + + families = ["pointcache", "model"] + representations = ["abc"] + + label = "Load alembic mesh" + order = -10 + icon = "code-fork" + color = "orange" + + tool_type = "SurfaceAlembicMesh" + + def load(self, context, name, namespace, data): + # Fallback to asset name when namespace is None + if namespace is None: + namespace = context['asset']['name'] + + # Create the Loader with the filename path set + comp = get_current_comp() + with comp_lock_and_undo_chunk(comp, "Create tool"): + + path = self.fname + + args = (-32768, -32768) + tool = comp.AddTool(self.tool_type, *args) + tool["Filename"] = path + + imprint_container(tool, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """Update Alembic path""" + + tool = container["_tool"] + assert tool.ID == self.tool_type, f"Must be {self.tool_type}" + comp = tool.Comp() + + path = get_representation_path(representation) + + with comp_lock_and_undo_chunk(comp, "Update tool"): + tool["Filename"] = path + + # Update the imprinted representation + tool.SetData("avalon.representation", str(representation["_id"])) + + def remove(self, container): + tool = container["_tool"] + assert tool.ID == self.tool_type, f"Must be {self.tool_type}" + comp = tool.Comp() + + with comp_lock_and_undo_chunk(comp, "Remove tool"): + tool.Delete() diff --git a/openpype/hosts/fusion/plugins/load/load_fbx.py b/openpype/hosts/fusion/plugins/load/load_fbx.py new file mode 100644 index 0000000000..70fe82ffef --- /dev/null +++ b/openpype/hosts/fusion/plugins/load/load_fbx.py @@ -0,0 +1,71 @@ + +from openpype.pipeline import ( + load, + get_representation_path, +) +from openpype.hosts.fusion.api import ( + imprint_container, + get_current_comp, + comp_lock_and_undo_chunk +) + + +class FusionLoadFBXMesh(load.LoaderPlugin): + """Load FBX mesh into Fusion""" + + families = ["*"] + representations = ["fbx"] + + label = "Load FBX mesh" + order = -10 + icon = "code-fork" + color = "orange" + + tool_type = "SurfaceFBXMesh" + + def load(self, context, name, namespace, data): + # Fallback to asset name when namespace is None + if namespace is None: + namespace = context['asset']['name'] + + # Create the Loader with the filename path set + comp = get_current_comp() + with comp_lock_and_undo_chunk(comp, "Create tool"): + + path = self.fname + + args = (-32768, -32768) + tool = comp.AddTool(self.tool_type, *args) + tool["ImportFile"] = path + + imprint_container(tool, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """Update path""" + + tool = container["_tool"] + assert tool.ID == self.tool_type, f"Must be {self.tool_type}" + comp = tool.Comp() + + path = get_representation_path(representation) + + with comp_lock_and_undo_chunk(comp, "Update tool"): + tool["ImportFile"] = path + + # Update the imprinted representation + tool.SetData("avalon.representation", str(representation["_id"])) + + def remove(self, container): + tool = container["_tool"] + assert tool.ID == self.tool_type, f"Must be {self.tool_type}" + comp = tool.Comp() + + with 
comp_lock_and_undo_chunk(comp, "Remove tool"): + tool.Delete() diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py index b860abd88b..6f44c61d1b 100644 --- a/openpype/hosts/fusion/plugins/load/load_sequence.py +++ b/openpype/hosts/fusion/plugins/load/load_sequence.py @@ -1,6 +1,7 @@ import os import contextlib +from openpype.client import get_version_by_id from openpype.pipeline import ( load, legacy_io, @@ -100,6 +101,9 @@ def loader_shift(loader, frame, relative=True): else: shift = frame - old_in + if not shift: + return 0 + # Shifting global in will try to automatically compensate for the change # in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those # input values to "just shift" the clip @@ -123,7 +127,7 @@ def loader_shift(loader, frame, relative=True): class FusionLoadSequence(load.LoaderPlugin): """Load image sequence into Fusion""" - families = ["imagesequence", "review", "render"] + families = ["imagesequence", "review", "render", "plate"] representations = ["*"] label = "Load sequence" @@ -148,9 +152,8 @@ class FusionLoadSequence(load.LoaderPlugin): tool["Clip"] = path # Set global in point to start frame (if in version.data) - start = context["version"]["data"].get("frameStart", None) - if start is not None: - loader_shift(tool, start, relative=False) + start = self._get_start(context["version"], tool) + loader_shift(tool, start, relative=False) imprint_container(tool, name=name, @@ -211,16 +214,9 @@ class FusionLoadSequence(load.LoaderPlugin): path = self._get_first_image(root) # Get start frame from version data - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) - start = version["data"].get("frameStart") - if start is None: - self.log.warning("Missing start frame for updated version" - "assuming starts at frame 0 for: " - "{} ({})".format(tool.Name, representation)) - start = 0 + project_name = legacy_io.active_project() + version = get_version_by_id(project_name, representation["parent"]) + start = self._get_start(version, tool) with comp_lock_and_undo_chunk(comp, "Update Loader"): @@ -257,3 +253,27 @@ class FusionLoadSequence(load.LoaderPlugin): """Get first file in representation root""" files = sorted(os.listdir(root)) return os.path.join(root, files[0]) + + def _get_start(self, version_doc, tool): + """Return real start frame of published files (incl. handles)""" + data = version_doc["data"] + + # Get start frame directly with handle if it's in data + start = data.get("frameStartHandle") + if start is not None: + return start + + # Get frame start without handles + start = data.get("frameStart") + if start is None: + self.log.warning("Missing start frame for version " + "assuming starts at frame 0 for: " + "{}".format(tool.Name)) + return 0 + + # Use `handleStart` if the data is available + handle_start = data.get("handleStart") + if handle_start: + start -= handle_start + + return start diff --git a/openpype/hosts/fusion/plugins/publish/collect_inputs.py b/openpype/hosts/fusion/plugins/publish/collect_inputs.py new file mode 100644 index 0000000000..8f9857b02f --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/collect_inputs.py @@ -0,0 +1,114 @@ +from bson.objectid import ObjectId + +import pyblish.api + +from openpype.pipeline import registered_host + + +def collect_input_containers(tools): + """Collect containers that contain any of the node in `nodes`. 
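To restate the `_get_start` helper added to the sequence loader above: the loader prefers `frameStartHandle`, falls back to `frameStart` minus `handleStart`, and finally to frame 0. A small sketch with hypothetical version data:

```python
def resolve_start_frame(version_data):
    """Illustrative mirror of the loader's start-frame priority."""
    start = version_data.get("frameStartHandle")
    if start is not None:
        return start

    start = version_data.get("frameStart")
    if start is None:
        return 0  # the real plug-in also logs a warning here

    return start - (version_data.get("handleStart") or 0)

print(resolve_start_frame({"frameStartHandle": 991}))                # 991
print(resolve_start_frame({"frameStart": 1001, "handleStart": 10}))  # 991
print(resolve_start_frame({"frameStart": 1001}))                     # 1001
print(resolve_start_frame({}))                                       # 0
```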
+ + This will return any loaded Avalon container that contains at least one of + the nodes. As such, the Avalon container is an input for it. Or in short, + there are member nodes of that container. + + Returns: + list: Input avalon containers + + """ + + # Lookup by node ids + lookup = frozenset([tool.Name for tool in tools]) + + containers = [] + host = registered_host() + for container in host.ls(): + + name = container["_tool"].Name + + # We currently assume no "groups" as containers but just single tools + # like a single "Loader" operator. As such we just check whether the + # Loader is part of the processing queue. + if name in lookup: + containers.append(container) + + return containers + + +def iter_upstream(tool): + """Yields all upstream inputs for the current tool. + + Yields: + tool: The input tools. + + """ + + def get_connected_input_tools(tool): + """Helper function that returns connected input tools for a tool.""" + inputs = [] + + # Filter only to actual types that will have sensible upstream + # connections. So we ignore just "Number" inputs as they can be + # many to iterate, slowing things down quite a bit - and in practice + # they don't have upstream connections. + VALID_INPUT_TYPES = ['Image', 'Particles', 'Mask', 'DataType3D'] + for type_ in VALID_INPUT_TYPES: + for input_ in tool.GetInputList(type_).values(): + output = input_.GetConnectedOutput() + if output: + input_tool = output.GetTool() + inputs.append(input_tool) + + return inputs + + # Initialize process queue with the node's inputs itself + queue = get_connected_input_tools(tool) + + # We keep track of which node names we have processed so far, to ensure we + # don't process the same hierarchy again. We are not pushing the tool + # itself into the set as that doesn't correctly recognize the same tool. + # Since tool names are unique in a comp in Fusion we rely on that. + collected = set(tool.Name for tool in queue) + + # Traverse upstream references for all nodes and yield them as we + # process the queue. + while queue: + upstream_tool = queue.pop() + yield upstream_tool + + # Find upstream tools that are not collected yet. + upstream_inputs = get_connected_input_tools(upstream_tool) + upstream_inputs = [t for t in upstream_inputs if + t.Name not in collected] + + queue.extend(upstream_inputs) + collected.update(tool.Name for tool in upstream_inputs) + + +class CollectUpstreamInputs(pyblish.api.InstancePlugin): + """Collect source input containers used for this publish. + + This will include `inputs` data of which loaded publishes were used in the + generation of this publish. This leaves an upstream trace to what was used + as input. 
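The traversal in `iter_upstream` above deliberately tracks visited tools by name rather than by object, since the remote tool wrappers do not compare reliably while names are unique within a comp. The same queue plus visited-set pattern on a toy graph, with plain strings standing in for Fusion tools (purely a sketch):

```python
# Toy upstream connections: tool name -> names of its connected inputs.
graph = {
    "Saver1": ["Merge1"],
    "Merge1": ["Loader1", "Background1"],
    "Background1": ["Loader1"],
    "Loader1": [],
}

def iter_upstream_names(graph, name):
    queue = list(graph[name])
    collected = set(queue)
    while queue:
        upstream = queue.pop()
        yield upstream
        new_inputs = [n for n in graph[upstream] if n not in collected]
        queue.extend(new_inputs)
        collected.update(new_inputs)

print(sorted(iter_upstream_names(graph, "Saver1")))
# ['Background1', 'Loader1', 'Merge1'], each yielded exactly once
```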
+ + """ + + label = "Collect Inputs" + order = pyblish.api.CollectorOrder + 0.2 + hosts = ["fusion"] + + def process(self, instance): + + # Get all upstream and include itself + tool = instance[0] + nodes = list(iter_upstream(tool)) + nodes.append(tool) + + # Collect containers for the given set of nodes + containers = collect_input_containers(nodes) + + inputs = [ObjectId(c["representation"]) for c in containers] + instance.data["inputRepresentations"] = inputs + + self.log.info("Collected inputs: %s" % inputs) diff --git a/openpype/hosts/fusion/plugins/publish/collect_instances.py b/openpype/hosts/fusion/plugins/publish/collect_instances.py index b2192d1dd9..fe60b83827 100644 --- a/openpype/hosts/fusion/plugins/publish/collect_instances.py +++ b/openpype/hosts/fusion/plugins/publish/collect_instances.py @@ -4,19 +4,21 @@ import pyblish.api def get_comp_render_range(comp): - """Return comp's start and end render range.""" + """Return comp's start-end render range and global start-end range.""" comp_attrs = comp.GetAttrs() start = comp_attrs["COMPN_RenderStart"] end = comp_attrs["COMPN_RenderEnd"] + global_start = comp_attrs["COMPN_GlobalStart"] + global_end = comp_attrs["COMPN_GlobalEnd"] # Whenever render ranges are undefined fall back # to the comp's global start and end if start == -1000000000: - start = comp_attrs["COMPN_GlobalEnd"] + start = global_start if end == -1000000000: - end = comp_attrs["COMPN_GlobalStart"] + end = global_end - return start, end + return start, end, global_start, global_end class CollectInstances(pyblish.api.ContextPlugin): @@ -42,9 +44,11 @@ class CollectInstances(pyblish.api.ContextPlugin): tools = comp.GetToolList(False).values() savers = [tool for tool in tools if tool.ID == "Saver"] - start, end = get_comp_render_range(comp) + start, end, global_start, global_end = get_comp_render_range(comp) context.data["frameStart"] = int(start) context.data["frameEnd"] = int(end) + context.data["frameStartHandle"] = int(global_start) + context.data["frameEndHandle"] = int(global_end) for tool in savers: path = tool["Clip"][comp.TIME_UNDEFINED] @@ -78,8 +82,10 @@ class CollectInstances(pyblish.api.ContextPlugin): "label": label, "frameStart": context.data["frameStart"], "frameEnd": context.data["frameEnd"], + "frameStartHandle": context.data["frameStartHandle"], + "frameEndHandle": context.data["frameStartHandle"], "fps": context.data["fps"], - "families": ["render", "review", "ftrack"], + "families": ["render", "review"], "family": "render", "active": active, "publish": active # backwards compatibility diff --git a/openpype/hosts/fusion/plugins/publish/increment_current_file_deadline.py b/openpype/hosts/fusion/plugins/publish/increment_current_file_deadline.py index 6483454d96..5c595638e9 100644 --- a/openpype/hosts/fusion/plugins/publish/increment_current_file_deadline.py +++ b/openpype/hosts/fusion/plugins/publish/increment_current_file_deadline.py @@ -17,9 +17,9 @@ class FusionIncrementCurrentFile(pyblish.api.ContextPlugin): def process(self, context): from openpype.lib import version_up - from openpype.action import get_errored_plugins_from_data + from openpype.pipeline.publish import get_errored_plugins_from_context - errored_plugins = get_errored_plugins_from_data(context) + errored_plugins = get_errored_plugins_from_context(context) if any(plugin.__name__ == "FusionSubmitDeadline" for plugin in errored_plugins): raise RuntimeError("Skipping incrementing current file because " diff --git a/openpype/hosts/fusion/plugins/publish/render_local.py 
b/openpype/hosts/fusion/plugins/publish/render_local.py index 601c2ffccf..79e458b40a 100644 --- a/openpype/hosts/fusion/plugins/publish/render_local.py +++ b/openpype/hosts/fusion/plugins/publish/render_local.py @@ -20,6 +20,8 @@ class Fusionlocal(pyblish.api.InstancePlugin): def process(self, instance): + # This plug-in runs only once and thus assumes all instances + # currently will render the same frame range context = instance.context key = "__hasRun{}".format(self.__class__.__name__) if context.data.get(key, False): @@ -28,8 +30,8 @@ class Fusionlocal(pyblish.api.InstancePlugin): context.data[key] = True current_comp = context.data["currentComp"] - frame_start = current_comp.GetAttrs("COMPN_RenderStart") - frame_end = current_comp.GetAttrs("COMPN_RenderEnd") + frame_start = context.data["frameStartHandle"] + frame_end = context.data["frameEndHandle"] path = instance.data["path"] output_dir = instance.data["outputDir"] @@ -40,7 +42,11 @@ class Fusionlocal(pyblish.api.InstancePlugin): self.log.info("End frame: {}".format(frame_end)) with comp_lock_and_undo_chunk(current_comp): - result = current_comp.Render() + result = current_comp.Render({ + "Start": frame_start, + "End": frame_end, + "Wait": True + }) if "representations" not in instance.data: instance.data["representations"] = [] diff --git a/openpype/hosts/fusion/plugins/publish/validate_background_depth.py b/openpype/hosts/fusion/plugins/publish/validate_background_depth.py index a0734d8278..4268fab528 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_background_depth.py +++ b/openpype/hosts/fusion/plugins/publish/validate_background_depth.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype import action +from openpype.pipeline.publish import RepairAction class ValidateBackgroundDepth(pyblish.api.InstancePlugin): @@ -8,7 +8,7 @@ class ValidateBackgroundDepth(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder label = "Validate Background Depth 32 bit" - actions = [action.RepairAction] + actions = [RepairAction] hosts = ["fusion"] families = ["render"] optional = True diff --git a/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py b/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py index 45ed53f65c..f6beefefc1 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py +++ b/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype import action +from openpype.pipeline.publish import RepairAction class ValidateCreateFolderChecked(pyblish.api.InstancePlugin): @@ -11,7 +11,7 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin): """ order = pyblish.api.ValidatorOrder - actions = [action.RepairAction] + actions = [RepairAction] label = "Validate Create Folder Checked" families = ["render"] hosts = ["fusion"] diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py deleted file mode 100644 index 704f420796..0000000000 --- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py +++ /dev/null @@ -1,286 +0,0 @@ -import os -import re -import sys -import logging - -# Pipeline imports -from openpype.pipeline import ( - legacy_io, - install_host, - registered_host, -) -from openpype.lib import version_up -from openpype.hosts.fusion import api -from openpype.hosts.fusion.api import lib -from openpype.lib.avalon_context import get_workdir_from_session - -log = logging.getLogger("Update Slap Comp") - -self = 
sys.modules[__name__] -self._project = None - - -def _format_version_folder(folder): - """Format a version folder based on the filepath - - Assumption here is made that, if the path does not exists the folder - will be "v001" - - Args: - folder: file path to a folder - - Returns: - str: new version folder name - """ - - new_version = 1 - if os.path.isdir(folder): - re_version = re.compile(r"v\d+$") - versions = [i for i in os.listdir(folder) if os.path.isdir(i) - and re_version.match(i)] - if versions: - # ensure the "v" is not included - new_version = int(max(versions)[1:]) + 1 - - version_folder = "v{:03d}".format(new_version) - - return version_folder - - -def _get_fusion_instance(): - fusion = getattr(sys.modules["__main__"], "fusion", None) - if fusion is None: - try: - # Support for FuScript.exe, BlackmagicFusion module for py2 only - import BlackmagicFusion as bmf - fusion = bmf.scriptapp("Fusion") - except ImportError: - raise RuntimeError("Could not find a Fusion instance") - return fusion - - -def _format_filepath(session): - - project = session["AVALON_PROJECT"] - asset = session["AVALON_ASSET"] - - # Save updated slap comp - work_path = get_workdir_from_session(session) - walk_to_dir = os.path.join(work_path, "scenes", "slapcomp") - slapcomp_dir = os.path.abspath(walk_to_dir) - - # Ensure destination exists - if not os.path.isdir(slapcomp_dir): - log.warning("Folder did not exist, creating folder structure") - os.makedirs(slapcomp_dir) - - # Compute output path - new_filename = "{}_{}_slapcomp_v001.comp".format(project, asset) - new_filepath = os.path.join(slapcomp_dir, new_filename) - - # Create new unique filepath - if os.path.exists(new_filepath): - new_filepath = version_up(new_filepath) - - return new_filepath - - -def _update_savers(comp, session): - """Update all savers of the current comp to ensure the output is correct - - This will refactor the Saver file outputs to the renders of the new session - that is provided. - - In the case the original saver path had a path set relative to a /fusion/ - folder then that relative path will be matched with the exception of all - "version" (e.g. v010) references will be reset to v001. Otherwise only a - version folder will be computed in the new session's work "render" folder - to dump the files in and keeping the original filenames. - - Args: - comp (object): current comp instance - session (dict): the current Avalon session - - Returns: - None - """ - - new_work = get_workdir_from_session(session) - renders = os.path.join(new_work, "renders") - version_folder = _format_version_folder(renders) - renders_version = os.path.join(renders, version_folder) - - comp.Print("New renders to: %s\n" % renders) - - with api.comp_lock_and_undo_chunk(comp): - savers = comp.GetToolList(False, "Saver").values() - for saver in savers: - filepath = saver.GetAttrs("TOOLST_Clip_Name")[1.0] - - # Get old relative path to the "fusion" app folder so we can apply - # the same relative path afterwards. If not found fall back to - # using just a version folder with the filename in it. - # todo: can we make this less magical? 
- relpath = filepath.replace("\\", "/").rsplit("/fusion/", 1)[-1] - - if os.path.isabs(relpath): - # If not relative to a "/fusion/" folder then just use filename - filename = os.path.basename(filepath) - log.warning("Can't parse relative path, refactoring to only" - "filename in a version folder: %s" % filename) - new_path = os.path.join(renders_version, filename) - - else: - # Else reuse the relative path - # Reset version in folder and filename in the relative path - # to v001. The version should be is only detected when prefixed - # with either `_v` (underscore) or `/v` (folder) - version_pattern = r"(/|_)v[0-9]+" - if re.search(version_pattern, relpath): - new_relpath = re.sub(version_pattern, - r"\1v001", - relpath) - log.info("Resetting version folders to v001: " - "%s -> %s" % (relpath, new_relpath)) - relpath = new_relpath - - new_path = os.path.join(new_work, relpath) - - saver["Clip"] = new_path - - -def update_frame_range(comp, representations): - """Update the frame range of the comp and render length - - The start and end frame are based on the lowest start frame and the highest - end frame - - Args: - comp (object): current focused comp - representations (list) collection of dicts - - Returns: - None - - """ - - version_ids = [r["parent"] for r in representations] - versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}}) - versions = list(versions) - - versions = [v for v in versions - if v["data"].get("frameStart", None) is not None] - - if not versions: - log.warning("No versions loaded to match frame range to.\n") - return - - start = min(v["data"]["frameStart"] for v in versions) - end = max(v["data"]["frameEnd"] for v in versions) - - lib.update_frame_range(start, end, comp=comp) - - -def switch(asset_name, filepath=None, new=True): - """Switch the current containers of the file to the other asset (shot) - - Args: - filepath (str): file path of the comp file - asset_name (str): name of the asset (shot) - new (bool): Save updated comp under a different name - - Returns: - comp path (str): new filepath of the updated comp - - """ - - # If filepath provided, ensure it is valid absolute path - if filepath is not None: - if not os.path.isabs(filepath): - filepath = os.path.abspath(filepath) - - assert os.path.exists(filepath), "%s must exist " % filepath - - # Assert asset name exists - # It is better to do this here then to wait till switch_shot does it - asset = legacy_io.find_one({"type": "asset", "name": asset_name}) - assert asset, "Could not find '%s' in the database" % asset_name - - # Get current project - self._project = legacy_io.find_one({"type": "project"}) - - # Go to comp - if not filepath: - current_comp = api.get_current_comp() - assert current_comp is not None, "Could not find current comp" - else: - fusion = _get_fusion_instance() - current_comp = fusion.LoadComp(filepath, quiet=True) - assert current_comp is not None, ( - "Fusion could not load '{}'").format(filepath) - - host = registered_host() - containers = list(host.ls()) - assert containers, "Nothing to update" - - representations = [] - for container in containers: - try: - representation = lib.switch_item( - container, - asset_name=asset_name) - representations.append(representation) - except Exception as e: - current_comp.Print("Error in switching! 
%s\n" % e.message) - - message = "Switched %i Loaders of the %i\n" % (len(representations), - len(containers)) - current_comp.Print(message) - - # Build the session to switch to - switch_to_session = legacy_io.Session.copy() - switch_to_session["AVALON_ASSET"] = asset['name'] - - if new: - comp_path = _format_filepath(switch_to_session) - - # Update savers output based on new session - _update_savers(current_comp, switch_to_session) - else: - comp_path = version_up(filepath) - - current_comp.Print(comp_path) - - current_comp.Print("\nUpdating frame range") - update_frame_range(current_comp, representations) - - current_comp.Save(comp_path) - - return comp_path - - -if __name__ == '__main__': - - # QUESTION: can we convert this to gui rather then standalone script? - # TODO: convert to gui tool - import argparse - - parser = argparse.ArgumentParser(description="Switch to a shot within an" - "existing comp file") - - parser.add_argument("--file_path", - type=str, - default=True, - help="File path of the comp to use") - - parser.add_argument("--asset_name", - type=str, - default=True, - help="Name of the asset (shot) to switch") - - args, unknown = parser.parse_args() - - install_host(api) - switch(args.asset_name, args.file_path) - - sys.exit(0) diff --git a/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py b/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py deleted file mode 100644 index de8fc4b3b4..0000000000 --- a/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import sys - -from openpype.api import Logger -from openpype.pipeline import ( - install_host, - registered_host, -) - -log = Logger().get_logger(__name__) - - -def main(env): - from openpype.hosts.fusion import api - from openpype.hosts.fusion.api import menu - - # activate resolve from pype - install_host(api) - - log.info(f"Registered host: {registered_host()}") - - menu.launch_openpype_menu() - - -if __name__ == "__main__": - result = main(os.environ) - sys.exit(not bool(result)) diff --git a/openpype/hosts/harmony/__init__.py b/openpype/hosts/harmony/__init__.py index d2f710d83d..9177eaa285 100644 --- a/openpype/hosts/harmony/__init__.py +++ b/openpype/hosts/harmony/__init__.py @@ -1,11 +1,10 @@ -import os +from .addon import ( + HARMONY_HOST_DIR, + HarmonyAddon, +) -def add_implementation_envs(env, _app): - """Modify environments to contain all required for implementation.""" - openharmony_path = os.path.join( - os.environ["OPENPYPE_REPOS_ROOT"], "openpype", "hosts", - "harmony", "vendor", "OpenHarmony" - ) - # TODO check if is already set? What to do if is already set? - env["LIB_OPENHARMONY_PATH"] = openharmony_path +__all__ = ( + "HARMONY_HOST_DIR", + "HarmonyAddon", +) diff --git a/openpype/hosts/harmony/addon.py b/openpype/hosts/harmony/addon.py new file mode 100644 index 0000000000..efef40ab92 --- /dev/null +++ b/openpype/hosts/harmony/addon.py @@ -0,0 +1,23 @@ +import os +from openpype.modules import OpenPypeModule, IHostAddon + +HARMONY_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class HarmonyAddon(OpenPypeModule, IHostAddon): + name = "harmony" + host_name = "harmony" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + """Modify environments to contain all required for implementation.""" + openharmony_path = os.path.join( + HARMONY_HOST_DIR, "vendor", "OpenHarmony" + ) + # TODO check if is already set? What to do if is already set? 
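One possible answer to the TODO above, purely as a sketch and not what the addon currently does, is to respect a pre-existing `LIB_OPENHARMONY_PATH` and only fill in the bundled default when nothing is set:

```python
def ensure_openharmony_path(env, openharmony_path):
    """Only set the default when the variable is empty or missing.

    Illustrative alternative; the addon above always overwrites the value,
    which is exactly what its TODO comment questions.
    """
    if not env.get("LIB_OPENHARMONY_PATH"):
        env["LIB_OPENHARMONY_PATH"] = openharmony_path

env = {"LIB_OPENHARMONY_PATH": "/studio/custom/OpenHarmony"}
ensure_openharmony_path(env, "/openpype/hosts/harmony/vendor/OpenHarmony")
print(env["LIB_OPENHARMONY_PATH"])  # keeps the studio override
```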
+ env["LIB_OPENHARMONY_PATH"] = openharmony_path + + def get_workfile_extensions(self): + return [".zip"] diff --git a/openpype/hosts/harmony/api/README.md b/openpype/hosts/harmony/api/README.md index dd45eb14dd..b39f900886 100644 --- a/openpype/hosts/harmony/api/README.md +++ b/openpype/hosts/harmony/api/README.md @@ -610,7 +610,8 @@ class ImageSequenceLoader(load.LoaderPlugin): def update(self, container, representation): node = container.pop("node") - version = legacy_io.find_one({"_id": representation["parent"]}) + project_name = legacy_io.active_project() + version = get_version_by_id(project_name, representation["parent"]) files = [] for f in version["data"]["files"]: files.append( diff --git a/openpype/hosts/harmony/api/TB_sceneOpened.js b/openpype/hosts/harmony/api/TB_sceneOpened.js index 6a403fa65e..e7cd555332 100644 --- a/openpype/hosts/harmony/api/TB_sceneOpened.js +++ b/openpype/hosts/harmony/api/TB_sceneOpened.js @@ -35,7 +35,11 @@ function Client() { self.pack = function(num) { var ascii=''; for (var i = 3; i >= 0; i--) { - ascii += String.fromCharCode((num >> (8 * i)) & 255); + var hex = ((num >> (8 * i)) & 255).toString(16); + if (hex.length < 2){ + ascii += "0"; + } + ascii += hex; } return ascii; }; @@ -279,19 +283,22 @@ function Client() { }; self._send = function(message) { - var data = new QByteArray(); - var outstr = new QDataStream(data, QIODevice.WriteOnly); - outstr.writeInt(0); - data.append('UTF-8'); - outstr.device().seek(0); - outstr.writeInt(data.size() - 4); - var codec = QTextCodec.codecForUtfText(data); - var msg = codec.fromUnicode(message); - var l = msg.size(); - var coded = new QByteArray('AH').append(self.pack(l)); - coded = coded.append(msg); - self.socket.write(new QByteArray(coded)); - self.logDebug('Sent.'); + /** Harmony 21.1 doesn't have QDataStream anymore. + + This means we aren't able to write bytes into QByteArray so we had + modify how content lenght is sent do the server. 
+ Content lenght is sent as string of 8 char convertible into integer + (instead of 0x00000001[4 bytes] > "000000001"[8 bytes]) */ + var codec_name = new QByteArray().append("UTF-8"); + + var codec = QTextCodec.codecForName(codec_name); + var msg = codec.fromUnicode(message); + var l = msg.size(); + var header = new QByteArray().append('AH').append(self.pack(l)); + var coded = msg.prepend(header); + + self.socket.write(coded); + self.logDebug('Sent.'); }; self.waitForLock = function() { @@ -351,7 +358,14 @@ function start() { app.avalonClient = new Client(); app.avalonClient.socket.connectToHost(host, port); } - var menuBar = QApplication.activeWindow().menuBar(); + var mainWindow = null; + var widgets = QApplication.topLevelWidgets(); + for (var i = 0 ; i < widgets.length; i++) { + if (widgets[i] instanceof QMainWindow){ + mainWindow = widgets[i]; + } + } + var menuBar = mainWindow.menuBar(); var actions = menuBar.actions(); app.avalonMenu = null; diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index b953d0e984..4b9849c190 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -2,27 +2,26 @@ import os from pathlib import Path import logging -from bson.objectid import ObjectId import pyblish.api -from openpype import lib from openpype.lib import register_event_callback from openpype.pipeline import ( - legacy_io, register_loader_plugin_path, register_creator_plugin_path, deregister_loader_plugin_path, deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) -import openpype.hosts.harmony +from openpype.pipeline.load import get_outdated_containers +from openpype.pipeline.context_tools import get_current_project_asset + +from openpype.hosts.harmony import HARMONY_HOST_DIR import openpype.hosts.harmony.api as harmony log = logging.getLogger("openpype.hosts.harmony") -HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.harmony.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PLUGINS_DIR = os.path.join(HARMONY_HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") @@ -50,7 +49,9 @@ def get_asset_settings(): dict: Scene data. """ - asset_data = lib.get_asset()["data"] + + asset_doc = get_current_project_asset() + asset_data = asset_doc["data"] fps = asset_data.get("fps") frame_start = asset_data.get("frameStart") frame_end = asset_data.get("frameEnd") @@ -104,21 +105,10 @@ def check_inventory(): If it does it will colorize outdated nodes and display warning message in Harmony. """ - if not lib.any_outdated(): - return - outdated_containers = [] - for container in ls(): - representation = container['representation'] - representation_doc = legacy_io.find_one( - { - "_id": ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} - ) - if representation_doc and not lib.is_latest(representation_doc): - outdated_containers.append(container) + outdated_containers = get_outdated_containers() + if not outdated_containers: + return # Colour nodes. 
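For reference, the message framing shared by the `_send` change above and the server-side `recv(10)` change further below is: a literal `AH` prefix, then the payload length encoded as eight hexadecimal characters, then the UTF-8 payload. A round-trip sketch of that framing (the helper names and sample payload are illustrative):

```python
def frame_message(payload: bytes) -> bytes:
    """Prefix the payload with b'AH' plus its length as 8 hex digits."""
    return b"AH" + format(len(payload), "08x").encode("ascii") + payload

def parse_header(header: bytes) -> int:
    """Read the payload length back out of the 10-byte header."""
    assert header[:2] == b"AH", "invalid header"
    return int(header[2:].decode("ascii"), 16)

payload = '{"message": "hello"}'.encode("utf-8")
framed = frame_message(payload)
print(framed[:10])                # b'AH00000014'
print(parse_header(framed[:10]))  # 20 == len(payload)
```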
outdated_nodes = [] diff --git a/openpype/hosts/harmony/api/server.py b/openpype/hosts/harmony/api/server.py index 88cfe54521..0de359ec61 100644 --- a/openpype/hosts/harmony/api/server.py +++ b/openpype/hosts/harmony/api/server.py @@ -88,21 +88,25 @@ class Server(threading.Thread): """ current_time = time.time() while True: - + self.log.info("wait ttt") # Receive the data in small chunks and retransmit it request = None - header = self.connection.recv(6) + header = self.connection.recv(10) if len(header) == 0: # null data received, socket is closing. self.log.info(f"[{self.timestamp()}] Connection closing.") break + if header[0:2] != b"AH": self.log.error("INVALID HEADER") - length = struct.unpack(">I", header[2:])[0] + content_length_str = header[2:].decode() + + length = int(content_length_str, 16) data = self.connection.recv(length) while (len(data) < length): # we didn't received everything in first try, lets wait for # all data. + self.log.info("loop") time.sleep(0.1) if self.connection is None: self.log.error(f"[{self.timestamp()}] " @@ -113,7 +117,7 @@ class Server(threading.Thread): break data += self.connection.recv(length - len(data)) - + self.log.debug("data:: {} {}".format(data, type(data))) self.received += data.decode("utf-8") pretty = self._pretty(self.received) self.log.debug( diff --git a/openpype/hosts/harmony/api/workio.py b/openpype/hosts/harmony/api/workio.py index ab1cb9b1a9..8df5ede917 100644 --- a/openpype/hosts/harmony/api/workio.py +++ b/openpype/hosts/harmony/api/workio.py @@ -2,8 +2,6 @@ import os import shutil -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS - from .lib import ( ProcessContext, get_local_harmony_path, @@ -16,7 +14,7 @@ save_disabled = False def file_extensions(): - return HOST_WORKFILE_EXTENSIONS["harmony"] + return [".zip"] def has_unsaved_changes(): diff --git a/openpype/hosts/harmony/plugins/load/load_background.py b/openpype/hosts/harmony/plugins/load/load_background.py index 9c01fe3cd8..c28a87791e 100644 --- a/openpype/hosts/harmony/plugins/load/load_background.py +++ b/openpype/hosts/harmony/plugins/load/load_background.py @@ -5,8 +5,8 @@ from openpype.pipeline import ( load, get_representation_path, ) +from openpype.pipeline.context_tools import is_representation_from_latest import openpype.hosts.harmony.api as harmony -import openpype.lib copy_files = """function copyFile(srcFilename, dstFilename) @@ -280,9 +280,7 @@ class BackgroundLoader(load.LoaderPlugin): ) def update(self, container, representation): - path = get_representation_path(representation) - with open(path) as json_file: data = json.load(json_file) @@ -300,10 +298,9 @@ class BackgroundLoader(load.LoaderPlugin): bg_folder = os.path.dirname(path) - path = get_representation_path(representation) - print(container) + is_latest = is_representation_from_latest(representation) for layer in sorted(layers): file_to_import = [ os.path.join(bg_folder, layer).replace("\\", "/") @@ -347,7 +344,7 @@ class BackgroundLoader(load.LoaderPlugin): } %s """ % (sig, sig) - if openpype.lib.is_latest(representation): + if is_latest: harmony.send({"function": func, "args": [node, "green"]}) else: harmony.send({"function": func, "args": [node, "red"]}) diff --git a/openpype/hosts/harmony/plugins/load/load_imagesequence.py b/openpype/hosts/harmony/plugins/load/load_imagesequence.py index 18695438d5..1b64aff595 100644 --- a/openpype/hosts/harmony/plugins/load/load_imagesequence.py +++ b/openpype/hosts/harmony/plugins/load/load_imagesequence.py @@ -10,8 +10,8 @@ from openpype.pipeline import ( 
load, get_representation_path, ) +from openpype.pipeline.context_tools import is_representation_from_latest import openpype.hosts.harmony.api as harmony -import openpype.lib class ImageSequenceLoader(load.LoaderPlugin): @@ -109,7 +109,7 @@ class ImageSequenceLoader(load.LoaderPlugin): ) # Colour node. - if openpype.lib.is_latest(representation): + if is_representation_from_latest(representation): harmony.send( { "function": "PypeHarmony.setColor", diff --git a/openpype/hosts/harmony/plugins/load/load_template.py b/openpype/hosts/harmony/plugins/load/load_template.py index c6dc9d913b..f3c69a9104 100644 --- a/openpype/hosts/harmony/plugins/load/load_template.py +++ b/openpype/hosts/harmony/plugins/load/load_template.py @@ -10,8 +10,8 @@ from openpype.pipeline import ( load, get_representation_path, ) +from openpype.pipeline.context_tools import is_representation_from_latest import openpype.hosts.harmony.api as harmony -import openpype.lib class TemplateLoader(load.LoaderPlugin): @@ -83,7 +83,7 @@ class TemplateLoader(load.LoaderPlugin): self_name = self.__class__.__name__ update_and_replace = False - if openpype.lib.is_latest(representation): + if is_representation_from_latest(representation): self._set_green(node) else: self._set_red(node) diff --git a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py index 3e9e680efd..f6b26eb3e8 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py @@ -4,11 +4,10 @@ from pathlib import Path import attr -import openpype.lib -import openpype.lib.abstract_collect_render -from openpype.lib.abstract_collect_render import RenderInstance from openpype.lib import get_formatted_current_time from openpype.pipeline import legacy_io +from openpype.pipeline import publish +from openpype.pipeline.publish import RenderInstance import openpype.hosts.harmony.api as harmony @@ -20,8 +19,7 @@ class HarmonyRenderInstance(RenderInstance): leadingZeros = attr.ib(default=3) -class CollectFarmRender(openpype.lib.abstract_collect_render. 
- AbstractCollectRender): +class CollectFarmRender(publish.AbstractCollectRender): """Gather all publishable renders.""" # https://docs.toonboom.com/help/harmony-17/premium/reference/node/output/write-node-image-formats.html diff --git a/openpype/hosts/harmony/plugins/publish/collect_workfile.py b/openpype/hosts/harmony/plugins/publish/collect_workfile.py index c0493315a4..3624147435 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_workfile.py +++ b/openpype/hosts/harmony/plugins/publish/collect_workfile.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- """Collect current workfile from Harmony.""" -import pyblish.api import os +import pyblish.api -from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline.create import get_subset_name class CollectWorkfile(pyblish.api.ContextPlugin): @@ -17,13 +17,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin): """Plugin entry point.""" family = "workfile" basename = os.path.basename(context.data["currentFile"]) - subset = get_subset_name_with_asset_doc( + subset = get_subset_name( family, "", context.data["anatomyData"]["task"]["name"], context.data["assetEntity"], context.data["anatomyData"]["project"]["name"], - host_name=context.data["hostName"] + host_name=context.data["hostName"], + project_settings=context.data["project_settings"] ) # Create instance diff --git a/openpype/hosts/harmony/plugins/publish/extract_palette.py b/openpype/hosts/harmony/plugins/publish/extract_palette.py index fae778f6b0..69c6e098ff 100644 --- a/openpype/hosts/harmony/plugins/publish/extract_palette.py +++ b/openpype/hosts/harmony/plugins/publish/extract_palette.py @@ -6,10 +6,10 @@ import csv from PIL import Image, ImageDraw, ImageFont import openpype.hosts.harmony.api as harmony -import openpype.api +from openpype.pipeline import publish -class ExtractPalette(openpype.api.Extractor): +class ExtractPalette(publish.Extractor): """Extract palette.""" label = "Extract Palette" diff --git a/openpype/hosts/harmony/plugins/publish/extract_template.py b/openpype/hosts/harmony/plugins/publish/extract_template.py index d25b07bba3..458bf25a3c 100644 --- a/openpype/hosts/harmony/plugins/publish/extract_template.py +++ b/openpype/hosts/harmony/plugins/publish/extract_template.py @@ -3,12 +3,11 @@ import os import shutil -import openpype.api +from openpype.pipeline import publish import openpype.hosts.harmony.api as harmony -import openpype.hosts.harmony -class ExtractTemplate(openpype.api.Extractor): +class ExtractTemplate(publish.Extractor): """Extract the connected nodes to the composite instance.""" label = "Extract Template" @@ -50,7 +49,7 @@ class ExtractTemplate(openpype.api.Extractor): dependencies.remove(instance.data["setMembers"][0]) # Export template. 
- openpype.hosts.harmony.api.export_template( + harmony.export_template( unique_backdrops, dependencies, filepath ) diff --git a/openpype/hosts/harmony/plugins/publish/extract_workfile.py b/openpype/hosts/harmony/plugins/publish/extract_workfile.py index 7f25ec8150..9bb3090558 100644 --- a/openpype/hosts/harmony/plugins/publish/extract_workfile.py +++ b/openpype/hosts/harmony/plugins/publish/extract_workfile.py @@ -4,10 +4,10 @@ import os import shutil from zipfile import ZipFile -import openpype.api +from openpype.pipeline import publish -class ExtractWorkfile(openpype.api.Extractor): +class ExtractWorkfile(publish.Extractor): """Extract and zip complete workfile folder into zip.""" label = "Extract Workfile" diff --git a/openpype/hosts/harmony/plugins/publish/increment_workfile.py b/openpype/hosts/harmony/plugins/publish/increment_workfile.py index 417377fff8..1caf581567 100644 --- a/openpype/hosts/harmony/plugins/publish/increment_workfile.py +++ b/openpype/hosts/harmony/plugins/publish/increment_workfile.py @@ -1,7 +1,7 @@ import os import pyblish.api -from openpype.action import get_errored_plugins_from_data +from openpype.pipeline.publish import get_errored_plugins_from_context from openpype.lib import version_up import openpype.hosts.harmony.api as harmony @@ -19,7 +19,7 @@ class IncrementWorkfile(pyblish.api.InstancePlugin): optional = True def process(self, instance): - errored_plugins = get_errored_plugins_from_data(instance.context) + errored_plugins = get_errored_plugins_from_context(instance.context) if errored_plugins: raise RuntimeError( "Skipping incrementing current file because publishing failed." diff --git a/openpype/hosts/harmony/plugins/publish/validate_audio.py b/openpype/hosts/harmony/plugins/publish/validate_audio.py index cb6b2307cd..e9b8609803 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_audio.py +++ b/openpype/hosts/harmony/plugins/publish/validate_audio.py @@ -47,6 +47,6 @@ class ValidateAudio(pyblish.api.InstancePlugin): formatting_data = { "audio_url": audio_path } - if os.path.isfile(audio_path): + if not os.path.isfile(audio_path): raise PublishXmlValidationError(self, msg, formatting_data=formatting_data) diff --git a/openpype/hosts/harmony/plugins/publish/validate_instances.py b/openpype/hosts/harmony/plugins/publish/validate_instances.py index 373ef94cc3..ac367082ef 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_instances.py +++ b/openpype/hosts/harmony/plugins/publish/validate_instances.py @@ -1,9 +1,12 @@ import os import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError + import openpype.hosts.harmony.api as harmony +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateInstanceRepair(pyblish.api.Action): @@ -37,7 +40,7 @@ class ValidateInstance(pyblish.api.InstancePlugin): label = "Validate Instance" hosts = ["harmony"] actions = [ValidateInstanceRepair] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder def process(self, instance): instance_asset = instance.data["asset"] diff --git a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py index 4c3a6c4465..936533abd6 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py @@ -55,6 +55,10 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): def process(self, instance): 
"""Plugin entry point.""" + + # TODO 'get_asset_settings' could expect asset document as argument + # which is available on 'context.data["assetEntity"]' + # - the same approach can be used in 'ValidateSceneSettingsRepair' expected_settings = harmony.get_asset_settings() self.log.info("scene settings from DB:".format(expected_settings)) diff --git a/openpype/hosts/hiero/__init__.py b/openpype/hosts/hiero/__init__.py index d2ac82391b..e6744d5aec 100644 --- a/openpype/hosts/hiero/__init__.py +++ b/openpype/hosts/hiero/__init__.py @@ -1,41 +1,10 @@ -import os -import platform +from .addon import ( + HIERO_ROOT_DIR, + HieroAddon, +) -def add_implementation_envs(env, _app): - # Add requirements to HIERO_PLUGIN_PATH - pype_root = os.environ["OPENPYPE_REPOS_ROOT"] - new_hiero_paths = [ - os.path.join(pype_root, "openpype", "hosts", "hiero", "api", "startup") - ] - old_hiero_path = env.get("HIERO_PLUGIN_PATH") or "" - for path in old_hiero_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if norm_path not in new_hiero_paths: - new_hiero_paths.append(norm_path) - - env["HIERO_PLUGIN_PATH"] = os.pathsep.join(new_hiero_paths) - env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) - - # Try to add QuickTime to PATH - quick_time_path = "C:/Program Files (x86)/QuickTime/QTSystem" - if platform.system() == "windows" and os.path.exists(quick_time_path): - path_value = env.get("PATH") or "" - path_paths = [ - path - for path in path_value.split(os.pathsep) - if path - ] - path_paths.append(quick_time_path) - env["PATH"] = os.pathsep.join(path_paths) - - # Set default values if are not already set via settings - defaults = { - "LOGLEVEL": "DEBUG" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value +__all__ = ( + "HIERO_ROOT_DIR", + "HieroAddon", +) diff --git a/openpype/hosts/hiero/addon.py b/openpype/hosts/hiero/addon.py new file mode 100644 index 0000000000..f5bb94dbaa --- /dev/null +++ b/openpype/hosts/hiero/addon.py @@ -0,0 +1,62 @@ +import os +import platform +from openpype.modules import OpenPypeModule, IHostAddon + +HIERO_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class HieroAddon(OpenPypeModule, IHostAddon): + name = "hiero" + host_name = "hiero" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Add requirements to HIERO_PLUGIN_PATH + new_hiero_paths = [ + os.path.join(HIERO_ROOT_DIR, "api", "startup") + ] + old_hiero_path = env.get("HIERO_PLUGIN_PATH") or "" + for path in old_hiero_path.split(os.pathsep): + if not path: + continue + + norm_path = os.path.normpath(path) + if norm_path not in new_hiero_paths: + new_hiero_paths.append(norm_path) + + env["HIERO_PLUGIN_PATH"] = os.pathsep.join(new_hiero_paths) + env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) + + # Add vendor to PYTHONPATH + python_path = env["PYTHONPATH"] + python_path_parts = [] + if python_path: + python_path_parts = python_path.split(os.pathsep) + vendor_path = os.path.join(HIERO_ROOT_DIR, "vendor") + python_path_parts.insert(0, vendor_path) + env["PYTHONPATH"] = os.pathsep.join(python_path_parts) + + # Set default values if are not already set via settings + defaults = { + "LOGLEVEL": "DEBUG" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + # Try to add QuickTime to PATH + quick_time_path = "C:/Program Files (x86)/QuickTime/QTSystem" + if platform.system() == "windows" and os.path.exists(quick_time_path): + path_value = env.get("PATH") or "" + 
path_paths = [ + path + for path in path_value.split(os.pathsep) + if path + ] + path_paths.append(quick_time_path) + env["PATH"] = os.pathsep.join(path_paths) + + def get_workfile_extensions(self): + return [".hrox"] diff --git a/openpype/hosts/hiero/api/__init__.py b/openpype/hosts/hiero/api/__init__.py index f3c32b268c..1fa40c9f74 100644 --- a/openpype/hosts/hiero/api/__init__.py +++ b/openpype/hosts/hiero/api/__init__.py @@ -27,10 +27,18 @@ from .lib import ( get_track_items, get_current_project, get_current_sequence, + get_timeline_selection, get_current_track, + get_track_item_tags, + get_track_openpype_tag, + set_track_openpype_tag, + get_track_openpype_data, get_track_item_pype_tag, set_track_item_pype_tag, get_track_item_pype_data, + get_trackitem_openpype_tag, + set_trackitem_openpype_tag, + get_trackitem_openpype_data, set_publish_attribute, get_publish_attribute, imprint, @@ -80,10 +88,15 @@ __all__ = [ "get_track_items", "get_current_project", "get_current_sequence", + "get_timeline_selection", "get_current_track", - "get_track_item_pype_tag", - "set_track_item_pype_tag", - "get_track_item_pype_data", + "get_track_item_tags", + "get_track_openpype_tag", + "set_track_openpype_tag", + "get_track_openpype_data", + "get_trackitem_openpype_tag", + "set_trackitem_openpype_tag", + "get_trackitem_openpype_data", "set_publish_attribute", "get_publish_attribute", "imprint", @@ -95,6 +108,10 @@ __all__ = [ "apply_colorspace_project", "apply_colorspace_clips", "get_sequence_pattern_and_padding", + # depricated + "get_track_item_pype_tag", + "set_track_item_pype_tag", + "get_track_item_pype_data", # plugins "CreatorWidget", diff --git a/openpype/hosts/hiero/api/events.py b/openpype/hosts/hiero/api/events.py index 7fab3edfc8..862a2607c1 100644 --- a/openpype/hosts/hiero/api/events.py +++ b/openpype/hosts/hiero/api/events.py @@ -1,7 +1,6 @@ import os import hiero.core.events -from openpype.api import Logger -from openpype.lib import register_event_callback +from openpype.lib import Logger, register_event_callback from .lib import ( sync_avalon_data_to_workfile, launch_workfiles_app, @@ -11,7 +10,7 @@ from .lib import ( from .tags import add_tags_to_workfile from .menu import update_menu_task_label -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) def startupCompleted(event): @@ -109,8 +108,9 @@ def register_hiero_events(): # hiero.core.events.registerInterest("kShutdown", shutDown) # hiero.core.events.registerInterest("kStartup", startupCompleted) - hiero.core.events.registerInterest( - ("kSelectionChanged", "kTimeline"), selection_changed_timeline) + # INFO: was disabled because it was slowing down timeline operations + # hiero.core.events.registerInterest( + # ("kSelectionChanged", "kTimeline"), selection_changed_timeline) # workfiles try: diff --git a/openpype/hosts/hiero/api/launchforhiero.py b/openpype/hosts/hiero/api/launchforhiero.py new file mode 100644 index 0000000000..5f7dbe23c9 --- /dev/null +++ b/openpype/hosts/hiero/api/launchforhiero.py @@ -0,0 +1,85 @@ +import logging + +from scriptsmenu import scriptsmenu +from Qt import QtWidgets + + +log = logging.getLogger(__name__) + + +def _hiero_main_window(): + """Return Hiero's main window""" + for obj in QtWidgets.QApplication.topLevelWidgets(): + if (obj.inherits('QMainWindow') and + obj.metaObject().className() == 'Foundry::UI::DockMainWindow'): + return obj + raise RuntimeError('Could not find HieroWindow instance') + + +def _hiero_main_menubar(): + """Retrieve the main menubar of the Hiero window""" + 
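The HieroAddon.add_implementation_envs method above keeps the pattern the removed hosts/hiero/__init__.py used: merge a required directory into an os.pathsep-separated environment variable without duplicating entries, then prepend a vendor directory to PYTHONPATH. A small self-contained sketch of the merge step (variable names are illustrative):

import os


def prepend_unique_path(env, var_name, new_path):
    """Put new_path first in an os.pathsep separated variable, dropping duplicates."""
    paths = [os.path.normpath(new_path)]
    for path in (env.get(var_name) or "").split(os.pathsep):
        if not path:
            continue
        norm_path = os.path.normpath(path)
        if norm_path not in paths:
            paths.append(norm_path)
    env[var_name] = os.pathsep.join(paths)


env = {"HIERO_PLUGIN_PATH": os.pathsep.join(["/already/there", "/startup"])}
prepend_unique_path(env, "HIERO_PLUGIN_PATH", "/startup")
print(env["HIERO_PLUGIN_PATH"])  # "/startup:/already/there" on POSIX

One detail worth noting: the QuickTime branch compares platform.system() to the lowercase string "windows", while platform.system() reports "Windows" on that platform, so that branch appears never to run; the same comparison existed in the removed module.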
hiero_window = _hiero_main_window() + menubar = [i for i in hiero_window.children() if isinstance( + i, + QtWidgets.QMenuBar + )] + + assert len(menubar) == 1, "Error, could not find menu bar!" + return menubar[0] + + +def find_scripts_menu(title, parent): + """ + Check if the menu exists with the given title in the parent + + Args: + title (str): the title name of the scripts menu + + parent (QtWidgets.QMenuBar): the menubar to check + + Returns: + QtWidgets.QMenu or None + + """ + + menu = None + search = [i for i in parent.children() if + isinstance(i, scriptsmenu.ScriptsMenu) + and i.title() == title] + if search: + assert len(search) < 2, ("Multiple instances of menu '{}' " + "in menu bar".format(title)) + menu = search[0] + + return menu + + +def main(title="Scripts", parent=None, objectName=None): + """Build the main scripts menu in Hiero + + Args: + title (str): name of the menu in the application + + parent (QtWidgets.QtMenuBar): the parent object for the menu + + objectName (str): custom objectName for scripts menu + + Returns: + scriptsmenu.ScriptsMenu instance + + """ + hieromainbar = parent or _hiero_main_menubar() + try: + # check menu already exists + menu = find_scripts_menu(title, hieromainbar) + if not menu: + log.info("Attempting to build menu ...") + object_name = objectName or title.lower() + menu = scriptsmenu.ScriptsMenu(title=title, + parent=hieromainbar, + objectName=object_name) + except Exception as e: + log.error(e) + return + + return menu diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index 2a4cd03b76..7f0cf8149a 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -1,32 +1,76 @@ """ Host specific functions where host api is connected """ + +from copy import deepcopy import os import re import sys import platform +import functools +import warnings +import json import ast +import secrets import shutil import hiero -from Qt import QtWidgets -from bson.objectid import ObjectId +from Qt import QtWidgets, QtCore, QtXml -from openpype.pipeline import legacy_io -from openpype.api import (Logger, Anatomy, get_anatomy_settings) +from openpype.client import get_project +from openpype.settings import get_project_settings +from openpype.pipeline import legacy_io, Anatomy +from openpype.pipeline.load import filter_containers +from openpype.lib import Logger from . import tags -try: - from PySide.QtCore import QFile, QTextStream - from PySide.QtXml import QDomDocument -except ImportError: - from PySide2.QtCore import QFile, QTextStream - from PySide2.QtXml import QDomDocument -# from opentimelineio import opentime -# from pprint import pformat +class DeprecatedWarning(DeprecationWarning): + pass -log = Logger().get_logger(__name__) + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." 
+ ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", DeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=DeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + +log = Logger.get_logger(__name__) self = sys.modules[__name__] self._has_been_setup = False @@ -89,13 +133,19 @@ def get_current_sequence(name=None, new=False): if not sequence: # if nothing found create new with input name sequence = get_current_sequence(name, True) - elif not name and not new: + else: # if name is none and new is False then return current open sequence sequence = hiero.ui.activeSequence() return sequence +def get_timeline_selection(): + active_sequence = hiero.ui.activeSequence() + timeline_editor = hiero.ui.getTimelineEditor(active_sequence) + return list(timeline_editor.selection()) + + def get_current_track(sequence, name, audio=False): """ Get current track in context of active project. @@ -118,7 +168,7 @@ def get_current_track(sequence, name, audio=False): # get track by name track = None for _track in tracks: - if _track.name() in name: + if _track.name() == name: track = _track if not track: @@ -126,13 +176,14 @@ def get_current_track(sequence, name, audio=False): track = hiero.core.VideoTrack(name) else: track = hiero.core.AudioTrack(name) + sequence.addTrack(track) return track def get_track_items( - selected=False, + selection=False, sequence_name=None, track_item_name=None, track_name=None, @@ -143,7 +194,7 @@ def get_track_items( """Get all available current timeline track items. 
Attribute: - selected (bool)[optional]: return only selected items on timeline + selection (list)[optional]: list of selected track items sequence_name (str)[optional]: return only clips from input sequence track_item_name (str)[optional]: return only item with input name track_name (str)[optional]: return only items from track name @@ -155,32 +206,34 @@ def get_track_items( Return: list or hiero.core.TrackItem: list of track items or single track item """ - return_list = list() - track_items = list() + track_type = track_type or "video" + selection = selection or [] + return_list = [] # get selected track items or all in active sequence - if selected: + if selection: try: - selected_items = list(hiero.selection) - for item in selected_items: - if track_name and track_name in item.parent().name(): - # filter only items fitting input track name - track_items.append(item) - elif not track_name: - # or add all if no track_name was defined - track_items.append(item) + for track_item in selection: + log.info("___ track_item: {}".format(track_item)) + # make sure only trackitems are selected + if not isinstance(track_item, hiero.core.TrackItem): + continue + + if _validate_all_atrributes( + track_item, + track_item_name, + track_name, + track_type, + check_enabled, + check_tagged + ): + log.info("___ valid trackitem: {}".format(track_item)) + return_list.append(track_item) except AttributeError: pass - # check if any collected track items are - # `core.Hiero.Python.TrackItem` instance - if track_items: - any_track_item = track_items[0] - if not isinstance(any_track_item, hiero.core.TrackItem): - selected_items = [] - # collect all available active sequence track items - if not track_items: + if not return_list: sequence = get_current_sequence(name=sequence_name) # get all available tracks from sequence tracks = list(sequence.audioTracks()) + list(sequence.videoTracks()) @@ -191,45 +244,221 @@ def get_track_items( if check_enabled and not track.isEnabled(): continue # and all items in track - for item in track.items(): - if check_tagged and not item.tags(): + for track_item in track.items(): + # make sure no subtrackitem is also track items + if not isinstance(track_item, hiero.core.TrackItem): continue - # check if track item is enabled - if check_enabled: - if not item.isEnabled(): - continue - if track_item_name: - if track_item_name in item.name(): - return item - # make sure only track items with correct track names are added - if track_name and track_name in track.name(): - # filter out only defined track_name items - track_items.append(item) - elif not track_name: - # or add all if no track_name is defined - track_items.append(item) + if _validate_all_atrributes( + track_item, + track_item_name, + track_name, + track_type, + check_enabled, + check_tagged + ): + return_list.append(track_item) - # filter out only track items with defined track_type - for track_item in track_items: - if track_type and track_type == "video" and isinstance( + return return_list + + +def _validate_all_atrributes( + track_item, + track_item_name, + track_name, + track_type, + check_enabled, + check_tagged +): + def _validate_correct_name_track_item(): + if track_item_name and track_item_name in track_item.name(): + return True + elif not track_item_name: + return True + + def _validate_tagged_track_item(): + if check_tagged and track_item.tags(): + return True + elif not check_tagged: + return True + + def _validate_enabled_track_item(): + if check_enabled and track_item.isEnabled(): + return True + elif not 
check_enabled: + return True + + def _validate_parent_track_item(): + if track_name and track_name in track_item.parent().name(): + # filter only items fitting input track name + return True + elif not track_name: + # or add all if no track_name was defined + return True + + def _validate_type_track_item(): + if track_type == "video" and isinstance( track_item.parent(), hiero.core.VideoTrack): # only video track items are allowed - return_list.append(track_item) - elif track_type and track_type == "audio" and isinstance( + return True + elif track_type == "audio" and isinstance( track_item.parent(), hiero.core.AudioTrack): # only audio track items are allowed - return_list.append(track_item) - elif not track_type: - # add all if no track_type is defined - return_list.append(track_item) + return True - # return output list but make sure all items are TrackItems - return [_i for _i in return_list - if type(_i) == hiero.core.TrackItem] + # check if track item is enabled + return all([ + _validate_enabled_track_item(), + _validate_type_track_item(), + _validate_tagged_track_item(), + _validate_parent_track_item(), + _validate_correct_name_track_item() + ]) +def get_track_item_tags(track_item): + """ + Get track item tags excluded openpype tag + + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + hiero.core.Tag: hierarchy, orig clip attributes + """ + returning_tag_data = [] + # get all tags from track item + _tags = track_item.tags() + if not _tags: + return [] + + # collect all tags which are not openpype tag + returning_tag_data.extend( + tag for tag in _tags + if tag.name() != self.pype_tag_name + ) + + return returning_tag_data + + +def _get_tag_unique_hash(): + # sourcery skip: avoid-builtin-shadow + return secrets.token_hex(nbytes=4) + + +def set_track_openpype_tag(track, data=None): + """ + Set openpype track tag to input track object. + + Attributes: + track (hiero.core.VideoTrack): hiero object + + Returns: + hiero.core.Tag + """ + data = data or {} + + # basic Tag's attribute + tag_data = { + "editable": "0", + "note": "OpenPype data container", + "icon": "openpype_icon.png", + "metadata": dict(data.items()) + } + # get available pype tag if any + _tag = get_track_openpype_tag(track) + + if _tag: + # it not tag then create one + tag = tags.update_tag(_tag, tag_data) + else: + # if pype tag available then update with input data + tag = tags.create_tag( + "{}_{}".format( + self.pype_tag_name, + _get_tag_unique_hash() + ), + tag_data + ) + # add it to the input track item + track.addTag(tag) + + return tag + + +def get_track_openpype_tag(track): + """ + Get pype track item tag created by creator or loader plugin. + + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + hiero.core.Tag: hierarchy, orig clip attributes + """ + # get all tags from track item + _tags = track.tags() + if not _tags: + return None + for tag in _tags: + # return only correct tag defined by global name + if self.pype_tag_name in tag.name(): + return tag + + +def get_track_openpype_data(track, container_name=None): + """ + Get track's openpype tag data. 
+ + Attributes: + trackItem (hiero.core.VideoTrack): hiero object + + Returns: + dict: data found on pype tag + """ + return_data = {} + # get pype data tag from track item + tag = get_track_openpype_tag(track) + + if not tag: + return None + + # get tag metadata attribute + tag_data = deepcopy(dict(tag.metadata())) + + for obj_name, obj_data in tag_data.items(): + obj_name = obj_name.replace("tag.", "") + + if obj_name in ["applieswhole", "note", "label"]: + continue + return_data[obj_name] = json.loads(obj_data) + + return ( + return_data[container_name] + if container_name + else return_data + ) + + +@deprecated("openpype.hosts.hiero.api.lib.get_trackitem_openpype_tag") def get_track_item_pype_tag(track_item): + # backward compatibility alias + return get_trackitem_openpype_tag(track_item) + + +@deprecated("openpype.hosts.hiero.api.lib.set_trackitem_openpype_tag") +def set_track_item_pype_tag(track_item, data=None): + # backward compatibility alias + return set_trackitem_openpype_tag(track_item, data) + + +@deprecated("openpype.hosts.hiero.api.lib.get_trackitem_openpype_data") +def get_track_item_pype_data(track_item): + # backward compatibility alias + return get_trackitem_openpype_data(track_item) + + +def get_trackitem_openpype_tag(track_item): """ Get pype track item tag created by creator or loader plugin. @@ -245,16 +474,16 @@ def get_track_item_pype_tag(track_item): return None for tag in _tags: # return only correct tag defined by global name - if tag.name() in self.pype_tag_name: + if self.pype_tag_name in tag.name(): return tag -def set_track_item_pype_tag(track_item, data=None): +def set_trackitem_openpype_tag(track_item, data=None): """ - Set pype track item tag to input track_item. + Set openpype track tag to input track object. Attributes: - trackItem (hiero.core.TrackItem): hiero object + track (hiero.core.VideoTrack): hiero object Returns: hiero.core.Tag @@ -266,24 +495,29 @@ def set_track_item_pype_tag(track_item, data=None): "editable": "0", "note": "OpenPype data container", "icon": "openpype_icon.png", - "metadata": {k: v for k, v in data.items()} + "metadata": dict(data.items()) } # get available pype tag if any - _tag = get_track_item_pype_tag(track_item) - + _tag = get_trackitem_openpype_tag(track_item) if _tag: # it not tag then create one tag = tags.update_tag(_tag, tag_data) else: # if pype tag available then update with input data - tag = tags.create_tag(self.pype_tag_name, tag_data) + tag = tags.create_tag( + "{}_{}".format( + self.pype_tag_name, + _get_tag_unique_hash() + ), + tag_data + ) # add it to the input track item track_item.addTag(tag) return tag -def get_track_item_pype_data(track_item): +def get_trackitem_openpype_data(track_item): """ Get track item's pype tag data. 
@@ -295,15 +529,15 @@ def get_track_item_pype_data(track_item): """ data = {} # get pype data tag from track item - tag = get_track_item_pype_tag(track_item) + tag = get_trackitem_openpype_tag(track_item) if not tag: return None # get tag metadata attribute - tag_data = tag.metadata() + tag_data = deepcopy(dict(tag.metadata())) # convert tag metadata to normal keys names and values to correct types - for k, v in dict(tag_data).items(): + for k, v in tag_data.items(): key = k.replace("tag.", "") try: @@ -324,7 +558,7 @@ def get_track_item_pype_data(track_item): log.warning(msg) value = v - data.update({key: value}) + data[key] = value return data @@ -348,7 +582,7 @@ def imprint(track_item, data=None): """ data = data or {} - tag = set_track_item_pype_tag(track_item, data) + tag = set_trackitem_openpype_tag(track_item, data) # add publish attribute set_publish_attribute(tag, True) @@ -407,7 +641,7 @@ def sync_avalon_data_to_workfile(): project.setProjectRoot(active_project_root) # get project data from avalon db - project_doc = legacy_io.find_one({"type": "project"}) + project_doc = get_project(project_name) project_data = project_doc["data"] log.debug("project_data: {}".format(project_data)) @@ -497,7 +731,7 @@ class PyblishSubmission(hiero.exporters.FnSubmission.Submission): from . import publish # Add submission to Hiero module for retrieval in plugins. hiero.submission = self - publish() + publish(hiero.ui.mainWindow()) def add_submission(): @@ -527,7 +761,7 @@ class PublishAction(QtWidgets.QAction): # from getting picked up when not using the "Export" dialog. if hasattr(hiero, "submission"): del hiero.submission - publish() + publish(hiero.ui.mainWindow()) def eventHandler(self, event): # Add the Menu to the right-click menu @@ -760,22 +994,22 @@ def set_selected_track_items(track_items_list, sequence=None): def _read_doc_from_path(path): - # reading QDomDocument from HROX path - hrox_file = QFile(path) - if not hrox_file.open(QFile.ReadOnly): + # reading QtXml.QDomDocument from HROX path + hrox_file = QtCore.QFile(path) + if not hrox_file.open(QtCore.QFile.ReadOnly): raise RuntimeError("Failed to open file for reading") - doc = QDomDocument() + doc = QtXml.QDomDocument() doc.setContent(hrox_file) hrox_file.close() return doc def _write_doc_to_path(doc, path): - # write QDomDocument to path as HROX - hrox_file = QFile(path) - if not hrox_file.open(QFile.WriteOnly): + # write QtXml.QDomDocument to path as HROX + hrox_file = QtCore.QFile(path) + if not hrox_file.open(QtCore.QFile.WriteOnly): raise RuntimeError("Failed to open file for writing") - stream = QTextStream(hrox_file) + stream = QtCore.QTextStream(hrox_file) doc.save(stream, 1) hrox_file.close() @@ -806,8 +1040,7 @@ def apply_colorspace_project(): project.close() # get presets for hiero - imageio = get_anatomy_settings( - project_name)["imageio"].get("hiero", None) + imageio = get_project_settings(project_name)["hiero"]["imageio"] presets = imageio.get("workfile") # save the workfile as subversion "comment:_colorspaceChange" @@ -860,8 +1093,7 @@ def apply_colorspace_clips(): clips = project.clips() # get presets for hiero - imageio = get_anatomy_settings( - project_name)["imageio"].get("hiero", None) + imageio = get_project_settings(project_name)["hiero"]["imageio"] from pprint import pprint presets = imageio.get("regexInputs", {}).get("inputs", {}) @@ -893,32 +1125,33 @@ def apply_colorspace_clips(): def is_overlapping(ti_test, ti_original, strict=False): - covering_exp = bool( + covering_exp = ( (ti_test.timelineIn() <= 
ti_original.timelineIn()) and (ti_test.timelineOut() >= ti_original.timelineOut()) ) - inside_exp = bool( + + if strict: + return covering_exp + + inside_exp = ( (ti_test.timelineIn() >= ti_original.timelineIn()) and (ti_test.timelineOut() <= ti_original.timelineOut()) ) - overlaying_right_exp = bool( + overlaying_right_exp = ( (ti_test.timelineIn() < ti_original.timelineOut()) and (ti_test.timelineOut() >= ti_original.timelineOut()) ) - overlaying_left_exp = bool( + overlaying_left_exp = ( (ti_test.timelineOut() > ti_original.timelineIn()) and (ti_test.timelineIn() <= ti_original.timelineIn()) ) - if not strict: - return any(( - covering_exp, - inside_exp, - overlaying_right_exp, - overlaying_left_exp - )) - else: - return covering_exp + return any(( + covering_exp, + inside_exp, + overlaying_right_exp, + overlaying_left_exp + )) def get_sequence_pattern_and_padding(file): @@ -936,17 +1169,13 @@ def get_sequence_pattern_and_padding(file): """ foundall = re.findall( r"(#+)|(%\d+d)|(?<=[^a-zA-Z0-9])(\d+)(?=\.\w+$)", file) - if foundall: - found = sorted(list(set(foundall[0])))[-1] - - if "%" in found: - padding = int(re.findall(r"\d+", found)[-1]) - else: - padding = len(found) - - return found, padding - else: + if not foundall: return None, None + found = sorted(list(set(foundall[0])))[-1] + + padding = int( + re.findall(r"\d+", found)[-1]) if "%" in found else len(found) + return found, padding def sync_clip_name_to_data_asset(track_items_list): @@ -963,7 +1192,7 @@ def sync_clip_name_to_data_asset(track_items_list): # get name and data ti_name = track_item.name() - data = get_track_item_pype_data(track_item) + data = get_trackitem_openpype_data(track_item) # ignore if no data on the clip or not publish instance if not data: @@ -975,14 +1204,18 @@ def sync_clip_name_to_data_asset(track_items_list): if data["asset"] != ti_name: data["asset"] = ti_name # remove the original tag - tag = get_track_item_pype_tag(track_item) + tag = get_trackitem_openpype_tag(track_item) track_item.removeTag(tag) # create new tag with updated data - set_track_item_pype_tag(track_item, data) + set_trackitem_openpype_tag(track_item, data) print("asset was changed in clip: {}".format(ti_name)) -def check_inventory_versions(): +def set_track_color(track_item, color): + track_item.source().binItem().setColor(color) + + +def check_inventory_versions(track_items=None): """ Actual version color idetifier of Loaded containers @@ -993,40 +1226,29 @@ def check_inventory_versions(): """ from . 
import parse_container + track_items = track_items or get_track_items() # presets clip_color_last = "green" clip_color = "red" - # get all track items from current timeline - for track_item in get_track_items(): + containers = [] + # Find all containers and collect it's node and representation ids + for track_item in track_items: container = parse_container(track_item) - if container: - # get representation from io - representation = legacy_io.find_one({ - "type": "representation", - "_id": ObjectId(container["representation"]) - }) + containers.append(container) - # Get start frame from version data - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + # Skip if nothing was found + if not containers: + return - # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') + project_name = legacy_io.active_project() + filter_result = filter_containers(containers, project_name) + for container in filter_result.latest: + set_track_color(container["_item"], clip_color) - max_version = max(versions) - - # set clip colour - if version.get("name") == max_version: - track_item.source().binItem().setColor(clip_color_last) - else: - track_item.source().binItem().setColor(clip_color) + for container in filter_result.outdated: + set_track_color(container["_item"], clip_color_last) def selection_changed_timeline(event): @@ -1038,29 +1260,31 @@ def selection_changed_timeline(event): timeline_editor = event.sender selection = timeline_editor.selection() - selection = [ti for ti in selection - if isinstance(ti, hiero.core.TrackItem)] + track_items = get_track_items( + selection=selection, + track_type="video", + check_enabled=True, + check_locked=True, + check_tagged=True + ) # run checking function - sync_clip_name_to_data_asset(selection) - - # also mark old versions of loaded containers - check_inventory_versions() + sync_clip_name_to_data_asset(track_items) def before_project_save(event): track_items = get_track_items( - selected=False, track_type="video", check_enabled=True, check_locked=True, - check_tagged=True) + check_tagged=True + ) # run checking function sync_clip_name_to_data_asset(track_items) # also mark old versions of loaded containers - check_inventory_versions() + check_inventory_versions(track_items) def get_main_window(): diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py index e262abec00..2a7560c6ba 100644 --- a/openpype/hosts/hiero/api/menu.py +++ b/openpype/hosts/hiero/api/menu.py @@ -4,11 +4,12 @@ import sys import hiero.core from hiero.ui import findMenuAction -from openpype.api import Logger +from openpype.lib import Logger from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools from . import tags +from openpype.settings import get_project_settings log = Logger.get_logger(__name__) @@ -41,6 +42,7 @@ def menu_install(): Installing menu into Hiero """ + from Qt import QtGui from . import ( publish, launch_workfiles_app, reload_config, @@ -138,3 +140,30 @@ def menu_install(): exeprimental_action.triggered.connect( lambda: host_tools.show_experimental_tools_dialog(parent=main_window) ) + + +def add_scripts_menu(): + try: + from . import launchforhiero + except ImportError: + + log.warning( + "Skipping studio.menu install, because " + "'scriptsmenu' module seems unavailable." 
+ ) + return + + # load configuration of custom menu + project_settings = get_project_settings(os.getenv("AVALON_PROJECT")) + config = project_settings["hiero"]["scriptsmenu"]["definition"] + _menu = project_settings["hiero"]["scriptsmenu"]["name"] + + if not config: + log.warning("Skipping studio menu, no definition found.") + return + + # run the launcher for Hiero menu + studio_menu = launchforhiero.main(title=_menu.title()) + + # apply configuration + studio_menu.build_from_configuration(studio_menu, config) diff --git a/openpype/hosts/hiero/api/otio/hiero_export.py b/openpype/hosts/hiero/api/otio/hiero_export.py index 1e4088d9c0..81cb43fa12 100644 --- a/openpype/hosts/hiero/api/otio/hiero_export.py +++ b/openpype/hosts/hiero/api/otio/hiero_export.py @@ -132,7 +132,7 @@ def create_time_effects(otio_clip, track_item): otio_effect = otio.schema.TimeEffect() otio_effect.name = name otio_effect.effect_name = effect_name - otio_effect.metadata = metadata + otio_effect.metadata.update(metadata) # add otio effect to clip effects otio_clip.effects.append(otio_effect) diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index 8025ebff05..4ab73e7d19 100644 --- a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -1,12 +1,13 @@ """ Basic avalon integration """ +from copy import deepcopy import os import contextlib from collections import OrderedDict from pyblish import api as pyblish -from openpype.api import Logger +from openpype.lib import Logger from openpype.pipeline import ( schema, register_creator_plugin_path, @@ -17,8 +18,9 @@ from openpype.pipeline import ( ) from openpype.tools.utils import host_tools from . import lib, menu, events +import hiero -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) # plugin paths API_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -48,6 +50,7 @@ def install(): # install menu menu.menu_install() + menu.add_scripts_menu() # register hiero events events.register_hiero_events() @@ -105,7 +108,7 @@ def containerise(track_item, data_imprint.update({k: v}) log.debug("_ data_imprint: {}".format(data_imprint)) - lib.set_track_item_pype_tag(track_item, data_imprint) + lib.set_trackitem_openpype_tag(track_item, data_imprint) return track_item @@ -122,74 +125,131 @@ def ls(): """ # get all track items from current timeline - all_track_items = lib.get_track_items() + all_items = lib.get_track_items() - for track_item in all_track_items: - container = parse_container(track_item) - if container: - yield container + # append all video tracks + for track in lib.get_current_sequence(): + if type(track) != hiero.core.VideoTrack: + continue + all_items.append(track) + + for item in all_items: + container_data = parse_container(item) + + if isinstance(container_data, list): + for _c in container_data: + yield _c + elif container_data: + yield container_data -def parse_container(track_item, validate=True): +def parse_container(item, validate=True): """Return container data from track_item's pype tag. Args: - track_item (hiero.core.TrackItem): A containerised track item. + item (hiero.core.TrackItem or hiero.core.VideoTrack): + A containerised track item. validate (bool)[optional]: validating with avalon scheme Returns: dict: The container schema data for input containerized track item. 
""" + def data_to_container(item, data): + if ( + not data + or data.get("id") != "pyblish.avalon.container" + ): + return + + if validate and data and data.get("schema"): + schema.validate(data) + + if not isinstance(data, dict): + return + + # If not all required data return the empty container + required = ['schema', 'id', 'name', + 'namespace', 'loader', 'representation'] + + if any(key not in data for key in required): + return + + container = {key: data[key] for key in required} + + container["objectName"] = item.name() + + # Store reference to the node object + container["_item"] = item + + return container + # convert tag metadata to normal keys names - data = lib.get_track_item_pype_data(track_item) + if type(item) == hiero.core.VideoTrack: + return_list = [] + _data = lib.get_track_openpype_data(item) - if validate and data and data.get("schema"): - schema.validate(data) + if not _data: + return + # convert the data to list and validate them + for _, obj_data in _data.items(): + cotnainer = data_to_container(item, obj_data) + return_list.append(cotnainer) + return return_list + else: + _data = lib.get_trackitem_openpype_data(item) + return data_to_container(item, _data) - if not isinstance(data, dict): - return - - # If not all required data return the empty container - required = ['schema', 'id', 'name', - 'namespace', 'loader', 'representation'] - - if not all(key in data for key in required): - return - - container = {key: data[key] for key in required} - - container["objectName"] = track_item.name() - - # Store reference to the node object - container["_track_item"] = track_item +def _update_container_data(container, data): + for key in container: + try: + container[key] = data[key] + except KeyError: + pass return container -def update_container(track_item, data=None): - """Update container data to input track_item's pype tag. +def update_container(item, data=None): + """Update container data to input track_item or track's + openpype tag. Args: - track_item (hiero.core.TrackItem): A containerised track item. + item (hiero.core.TrackItem or hiero.core.VideoTrack): + A containerised track item. 
data (dict)[optional]: dictionery with data to be updated Returns: bool: True if container was updated correctly """ - data = data or dict() - container = lib.get_track_item_pype_data(track_item) + data = data or {} + data = deepcopy(data) - for _key, _value in container.items(): - try: - container[_key] = data[_key] - except KeyError: - pass + if type(item) == hiero.core.VideoTrack: + # form object data for test + object_name = data["objectName"] - log.info("Updating container: `{}`".format(track_item.name())) - return bool(lib.set_track_item_pype_tag(track_item, container)) + # get all available containers + containers = lib.get_track_openpype_data(item) + container = lib.get_track_openpype_data(item, object_name) + + containers = deepcopy(containers) + container = deepcopy(container) + + # update data in container + updated_container = _update_container_data(container, data) + # merge updated container back to containers + containers.update({object_name: updated_container}) + + return bool(lib.set_track_openpype_tag(item, containers)) + else: + container = lib.get_trackitem_openpype_data(item) + updated_container = _update_container_data(container, data) + + log.info("Updating container: `{}`".format(item.name())) + return bool(lib.set_trackitem_openpype_tag(item, updated_container)) def launch_workfiles_app(*args): @@ -245,7 +305,6 @@ def reload_config(): import importlib for module in ( - "openpype.api", "openpype.hosts.hiero.lib", "openpype.hosts.hiero.menu", "openpype.hosts.hiero.tags" @@ -267,11 +326,11 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): instance, old_value, new_value)) from openpype.hosts.hiero.api import ( - get_track_item_pype_tag, + get_trackitem_openpype_tag, set_publish_attribute ) # Whether instances should be passthrough based on new value track_item = instance.data["item"] - tag = get_track_item_pype_tag(track_item) + tag = get_trackitem_openpype_tag(track_item) set_publish_attribute(tag, new_value) diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index 54e66bf99a..5ec1c78aaa 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -1,4 +1,5 @@ import os +from pprint import pformat import re from copy import deepcopy @@ -7,11 +8,13 @@ import hiero from Qt import QtWidgets, QtCore import qargparse -import openpype.api as openpype +from openpype.settings import get_current_project_settings +from openpype.lib import Logger from openpype.pipeline import LoaderPlugin, LegacyCreator +from openpype.pipeline.context_tools import get_current_project_asset from . 
import lib -log = openpype.Logger().get_logger(__name__) +log = Logger.get_logger(__name__) def load_stylesheet(): @@ -167,7 +170,10 @@ class CreatorWidget(QtWidgets.QDialog): for func, val in kwargs.items(): if getattr(item, func): func_attr = getattr(item, func) - func_attr(val) + if isinstance(val, tuple): + func_attr(*val) + else: + func_attr(val) # add to layout layout.addRow(label, item) @@ -270,8 +276,8 @@ class CreatorWidget(QtWidgets.QDialog): elif v["type"] == "QSpinBox": data[k]["value"] = self.create_row( content_layout, "QSpinBox", v["label"], - setValue=v["value"], setMinimum=0, - setMaximum=100000, setToolTip=tool_tip) + setRange=(1, 9999999), setValue=v["value"], + setToolTip=tool_tip) return data @@ -400,7 +406,8 @@ class ClipLoader: # inject asset data to representation dict self._get_asset_data() - log.debug("__init__ self.data: `{}`".format(self.data)) + log.info("__init__ self.data: `{}`".format(pformat(self.data))) + log.info("__init__ options: `{}`".format(pformat(options))) # add active components to class if self.new_sequence: @@ -482,7 +489,9 @@ class ClipLoader: """ asset_name = self.context["representation"]["context"]["asset"] - self.data["assetData"] = openpype.get_asset(asset_name)["data"] + asset_doc = get_current_project_asset(asset_name) + log.debug("__ asset_doc: {}".format(pformat(asset_doc))) + self.data["assetData"] = asset_doc["data"] def _make_track_item(self, source_bin_item, audio=False): """ Create track item with """ @@ -500,7 +509,7 @@ class ClipLoader: track_item.setSource(clip) track_item.setSourceIn(self.handle_start) track_item.setTimelineIn(self.timeline_in) - track_item.setSourceOut(self.media_duration - self.handle_end) + track_item.setSourceOut((self.media_duration) - self.handle_end) track_item.setTimelineOut(self.timeline_out) track_item.setPlaybackSpeed(1) self.active_track.addTrackItem(track_item) @@ -520,14 +529,18 @@ class ClipLoader: self.handle_start = self.data["versionData"].get("handleStart") self.handle_end = self.data["versionData"].get("handleEnd") if self.handle_start is None: - self.handle_start = int(self.data["assetData"]["handleStart"]) + self.handle_start = self.data["assetData"]["handleStart"] if self.handle_end is None: - self.handle_end = int(self.data["assetData"]["handleEnd"]) + self.handle_end = self.data["assetData"]["handleEnd"] + + self.handle_start = int(self.handle_start) + self.handle_end = int(self.handle_end) if self.sequencial_load: last_track_item = lib.get_track_items( sequence_name=self.active_sequence.name(), - track_name=self.active_track.name()) + track_name=self.active_track.name() + ) if len(last_track_item) == 0: last_timeline_out = 0 else: @@ -541,17 +554,12 @@ class ClipLoader: self.timeline_in = int(self.data["assetData"]["clipIn"]) self.timeline_out = int(self.data["assetData"]["clipOut"]) + log.debug("__ self.timeline_in: {}".format(self.timeline_in)) + log.debug("__ self.timeline_out: {}".format(self.timeline_out)) + # check if slate is included - # either in version data families or by calculating frame diff - slate_on = next( - # check iterate if slate is in families - (f for f in self.context["version"]["data"]["families"] - if "slate" in f), - # if nothing was found then use default None - # so other bool could be used - None) or bool(int( - (self.timeline_out - self.timeline_in + 1) - + self.handle_start + self.handle_end) < self.media_duration) + slate_on = "slate" in self.context["version"]["data"]["families"] + log.debug("__ slate_on: {}".format(slate_on)) # if slate is on then 
remove the slate frame from beginning if slate_on: @@ -572,7 +580,7 @@ class ClipLoader: # there were some cases were hiero was not creating it source_bin_item = None for item in self.active_bin.items(): - if self.data["clip_name"] in item.name(): + if self.data["clip_name"] == item.name(): source_bin_item = item if not source_bin_item: log.warning("Problem with created Source clip: `{}`".format( @@ -599,9 +607,9 @@ class Creator(LegacyCreator): rename_index = None def __init__(self, *args, **kwargs): - import openpype.hosts.hiero.api as phiero super(Creator, self).__init__(*args, **kwargs) - self.presets = openpype.get_current_project_settings()[ + import openpype.hosts.hiero.api as phiero + self.presets = get_current_project_settings()[ "hiero"]["create"].get(self.__class__.__name__, {}) # adding basic current context resolve objects @@ -609,7 +617,10 @@ class Creator(LegacyCreator): self.sequence = phiero.get_current_sequence() if (self.options or {}).get("useSelection"): - self.selected = phiero.get_track_items(selected=True) + timeline_selection = phiero.get_timeline_selection() + self.selected = phiero.get_track_items( + selection=timeline_selection + ) else: self.selected = phiero.get_track_items() @@ -716,6 +727,10 @@ class PublishClip: else: self.tag_data.update({"reviewTrack": None}) + log.debug("___ self.tag_data: {}".format( + pformat(self.tag_data) + )) + # create pype tag on track_item and add data lib.imprint(self.track_item, self.tag_data) diff --git a/openpype/hosts/hiero/api/tags.py b/openpype/hosts/hiero/api/tags.py index 8877b92b9d..cb7bc14edb 100644 --- a/openpype/hosts/hiero/api/tags.py +++ b/openpype/hosts/hiero/api/tags.py @@ -1,8 +1,10 @@ +import json import re import os import hiero -from openpype.api import Logger +from openpype.client import get_project, get_assets +from openpype.lib import Logger from openpype.pipeline import legacy_io log = Logger.get_logger(__name__) @@ -84,17 +86,16 @@ def update_tag(tag, data): # get metadata key from data data_mtd = data.get("metadata", {}) - # due to hiero bug we have to make sure keys which are not existent in - # data are cleared of value by `None` - for _mk in mtd.keys(): - if _mk.replace("tag.", "") not in data_mtd.keys(): - mtd.setValue(_mk, str(None)) - # set all data metadata to tag metadata - for k, v in data_mtd.items(): + for _k, _v in data_mtd.items(): + value = str(_v) + if type(_v) == dict: + value = json.dumps(_v) + + # set the value mtd.setValue( - "tag.{}".format(str(k)), - str(v) + "tag.{}".format(str(_k)), + value ) # set note description of tag @@ -141,7 +142,9 @@ def add_tags_to_workfile(): nks_pres_tags = tag_data() # Get project task types. - tasks = legacy_io.find_one({"type": "project"})["config"]["tasks"] + project_name = legacy_io.active_project() + project_doc = get_project(project_name) + tasks = project_doc["config"]["tasks"] nks_pres_tags["[Tasks]"] = {} log.debug("__ tasks: {}".format(tasks)) for task_type in tasks.keys(): @@ -159,7 +162,9 @@ def add_tags_to_workfile(): # asset builds and shots. 
if int(os.getenv("TAG_ASSETBUILD_STARTUP", 0)) == 1: nks_pres_tags["[AssetBuilds]"] = {} - for asset in legacy_io.find({"type": "asset"}): + for asset in get_assets( + project_name, fields=["name", "data.entityType"] + ): if asset["data"]["entityType"] == "AssetBuild": nks_pres_tags["[AssetBuilds]"][asset["name"]] = { "editable": "1", diff --git a/openpype/hosts/hiero/api/workio.py b/openpype/hosts/hiero/api/workio.py index 394cb5e2ab..040fd1435a 100644 --- a/openpype/hosts/hiero/api/workio.py +++ b/openpype/hosts/hiero/api/workio.py @@ -1,14 +1,13 @@ import os import hiero -from openpype.api import Logger -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS +from openpype.lib import Logger log = Logger.get_logger(__name__) def file_extensions(): - return HOST_WORKFILE_EXTENSIONS["hiero"] + return [".hrox"] def has_unsaved_changes(): diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py index da4326c8c1..2a7d1af41e 100644 --- a/openpype/hosts/hiero/plugins/load/load_clip.py +++ b/openpype/hosts/hiero/plugins/load/load_clip.py @@ -1,12 +1,12 @@ +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id +) from openpype.pipeline import ( legacy_io, get_representation_path, ) import openpype.hosts.hiero.api as phiero -# from openpype.hosts.hiero.api import plugin, lib -# reload(lib) -# reload(plugin) -# reload(phiero) class LoadClip(phiero.SequenceLoader): @@ -106,13 +106,13 @@ class LoadClip(phiero.SequenceLoader): name = container['name'] namespace = container['namespace'] track_item = phiero.get_track_items( - track_item_name=namespace) - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) - version_data = version.get("data", {}) - version_name = version.get("name", None) + track_item_name=namespace).pop() + + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + + version_data = version_doc.get("data", {}) + version_name = version_doc.get("name", None) colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) file = get_representation_path(representation).replace("\\", "/") @@ -147,7 +147,7 @@ class LoadClip(phiero.SequenceLoader): }) # update color of clip regarding the version order - self.set_item_color(track_item, version) + self.set_item_color(track_item, version_doc) return phiero.update_container(track_item, data_imprint) @@ -157,7 +157,7 @@ class LoadClip(phiero.SequenceLoader): # load clip to timeline and get main variables namespace = container['namespace'] track_item = phiero.get_track_items( - track_item_name=namespace) + track_item_name=namespace).pop() track = track_item.parent() # remove track item from track @@ -170,21 +170,14 @@ class LoadClip(phiero.SequenceLoader): cls.sequence = cls.track.parent() @classmethod - def set_item_color(cls, track_item, version): - + def set_item_color(cls, track_item, version_doc): + project_name = legacy_io.active_project() + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) clip = track_item.source() - # define version name - version_name = version.get("name", None) - # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) - # set clip colour - if version_name == max_version: + if version_doc["_id"] == last_version_doc["_id"]: 
clip.binItem().setColor(cls.clip_color_last) else: clip.binItem().setColor(cls.clip_color) diff --git a/openpype/hosts/hiero/plugins/load/load_effects.py b/openpype/hosts/hiero/plugins/load/load_effects.py new file mode 100644 index 0000000000..a3fcd63b5b --- /dev/null +++ b/openpype/hosts/hiero/plugins/load/load_effects.py @@ -0,0 +1,308 @@ +import json +from collections import OrderedDict +import six + +from openpype.client import ( + get_version_by_id +) + +from openpype.pipeline import ( + AVALON_CONTAINER_ID, + load, + legacy_io, + get_representation_path +) +from openpype.hosts.hiero import api as phiero +from openpype.lib import Logger + + +class LoadEffects(load.LoaderPlugin): + """Loading colorspace soft effect exported from nukestudio""" + + representations = ["effectJson"] + families = ["effect"] + + label = "Load Effects" + order = 0 + icon = "cc" + color = "white" + + log = Logger.get_logger(__name__) + + def load(self, context, name, namespace, data): + """ + Loading function to get the soft effects to particular read node + + Arguments: + context (dict): context of version + name (str): name of the version + namespace (str): asset name + data (dict): compulsory attribute > not used + + Returns: + nuke node: containerised nuke node object + """ + active_sequence = phiero.get_current_sequence() + active_track = phiero.get_current_track( + active_sequence, "Loaded_{}".format(name)) + + # get main variables + namespace = namespace or context["asset"]["name"] + object_name = "{}_{}".format(name, namespace) + clip_in = context["asset"]["data"]["clipIn"] + clip_out = context["asset"]["data"]["clipOut"] + + data_imprint = { + "objectName": object_name, + "children_names": [] + } + + # getting file path + file = self.fname.replace("\\", "/") + + if self._shared_loading( + file, + active_track, + clip_in, + clip_out, + data_imprint + ): + self.containerise( + active_track, + name=name, + namespace=namespace, + object_name=object_name, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def _shared_loading( + self, + file, + active_track, + clip_in, + clip_out, + data_imprint, + update=False + ): + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).items()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f) + + used_subtracks = { + stitem.name(): stitem + for stitem in phiero.flatten(active_track.subTrackItems()) + } + + loaded = False + for index_order, (ef_name, ef_val) in enumerate(nodes_order.items()): + new_name = "{}_loaded".format(ef_name) + if new_name not in used_subtracks: + effect_track_item = active_track.createEffect( + effectType=ef_val["class"], + timelineIn=clip_in, + timelineOut=clip_out, + subTrackIndex=index_order + + ) + effect_track_item.setName(new_name) + else: + effect_track_item = used_subtracks[new_name] + + node = effect_track_item.node() + for knob_name, knob_value in ef_val["node"].items(): + if ( + not knob_value + or knob_name == "name" + ): + continue + + try: + # assume list means animation + # except 4 values could be RGBA or vector + if isinstance(knob_value, list) and len(knob_value) > 4: + node[knob_name].setAnimated() + for i, value in enumerate(knob_value): + if isinstance(value, list): + # list can have vector animation + for ci, cv in enumerate(value): + node[knob_name].setValueAt( + cv, + (clip_in + i), + ci + ) + else: + # list is single values 
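The knob-restoring loop in LoadEffects here distinguishes three shapes of serialized knob data: a plain scalar, a list of per-frame values (animation), and a list of per-frame lists (animation on a multi-channel knob); lists of four or fewer entries are treated as direct RGBA/vector values. A compact standalone sketch of that dispatch, with a simple recorder object instead of a real soft-effect knob:

class FakeKnob:
    """Records calls the way the loader drives a soft-effect knob."""

    def __init__(self):
        self.calls = []

    def setAnimated(self):
        self.calls.append("animated")

    def setValue(self, value):
        self.calls.append(("set", value))

    def setValueAt(self, value, frame, channel=None):
        self.calls.append(("key", value, frame, channel))


def apply_knob_value(knob, value, clip_in):
    # lists longer than 4 entries are treated as per-frame animation
    if isinstance(value, list) and len(value) > 4:
        knob.setAnimated()
        for offset, frame_value in enumerate(value):
            if isinstance(frame_value, list):
                # per-channel animation, e.g. an animated color knob
                for channel, channel_value in enumerate(frame_value):
                    knob.setValueAt(channel_value, clip_in + offset, channel)
            else:
                knob.setValueAt(frame_value, clip_in + offset)
    else:
        # scalars and short lists (RGBA, vectors) are set directly
        knob.setValue(value)


knob = FakeKnob()
apply_knob_value(knob, [0.0, 0.5, 1.0, 1.5, 2.0], clip_in=1001)
print(knob.calls[:2])  # ['animated', ('key', 0.0, 1001, None)]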
+ node[knob_name].setValueAt( + value, + (clip_in + i) + ) + else: + node[knob_name].setValue(knob_value) + except NameError: + self.log.warning("Knob: {} cannot be set".format( + knob_name)) + + # register all loaded children + data_imprint["children_names"].append(new_name) + + # make sure containerisation will happen + loaded = True + + return loaded + + def update(self, container, representation): + """ Updating previously loaded effects + """ + active_track = container["_item"] + file = get_representation_path(representation).replace("\\", "/") + + # get main variables + name = container['name'] + namespace = container['namespace'] + + # get timeline in out data + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + version_data = version_doc["data"] + clip_in = version_data["clipIn"] + clip_out = version_data["clipOut"] + + object_name = "{}_{}".format(name, namespace) + + # Disable previously created nodes + used_subtracks = { + stitem.name(): stitem + for stitem in phiero.flatten(active_track.subTrackItems()) + } + container = phiero.get_track_openpype_data( + active_track, object_name + ) + + loaded_subtrack_items = container["children_names"] + for loaded_stitem in loaded_subtrack_items: + if loaded_stitem not in used_subtracks: + continue + item_to_remove = used_subtracks.pop(loaded_stitem) + # TODO: find a way to erase nodes + self.log.debug( + "This node needs to be removed: {}".format(item_to_remove)) + + data_imprint = { + "objectName": object_name, + "name": name, + "representation": str(representation["_id"]), + "children_names": [] + } + + if self._shared_loading( + file, + active_track, + clip_in, + clip_out, + data_imprint, + update=True + ): + return phiero.update_container(active_track, data_imprint) + + def reorder_nodes(self, data): + new_order = OrderedDict() + trackNums = [v["trackIndex"] for k, v in data.items() + if isinstance(v, dict)] + subTrackNums = [v["subTrackIndex"] for k, v in data.items() + if isinstance(v, dict)] + + for trackIndex in range( + min(trackNums), max(trackNums) + 1): + for subTrackIndex in range( + min(subTrackNums), max(subTrackNums) + 1): + item = self.get_item(data, trackIndex, subTrackIndex) + if item is not {}: + new_order.update(item) + return new_order + + def get_item(self, data, trackIndex, subTrackIndex): + return {key: val for key, val in data.items() + if isinstance(val, dict) + if subTrackIndex == val["subTrackIndex"] + if trackIndex == val["trackIndex"]} + + def byteify(self, input): + """ + Converts unicode strings to strings + It goes through all dictionary + + Arguments: + input (dict/str): input + + Returns: + dict: with fixed values and keys + + """ + + if isinstance(input, dict): + return {self.byteify(key): self.byteify(value) + for key, value in input.items()} + elif isinstance(input, list): + return [self.byteify(element) for element in input] + elif isinstance(input, six.text_type): + return str(input) + else: + return input + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + pass + + def containerise( + self, + track, + name, + namespace, + object_name, + context, + loader=None, + data=None + ): + """Bundle Hiero's object into an assembly and imprint it with metadata + + Containerisation enables a tracking of version, author and origin + for loaded assets. 
+ + Arguments: + track (hiero.core.VideoTrack): object to imprint as container + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + object_name (str): name of container + context (dict): Asset information + loader (str, optional): Name of node used to produce this + container. + + Returns: + track_item (hiero.core.TrackItem): containerised object + + """ + + data_imprint = { + object_name: { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": str(name), + "namespace": str(namespace), + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + } + } + + if data: + for k, v in data.items(): + data_imprint[object_name].update({k: v}) + + self.log.debug("_ data_imprint: {}".format(data_imprint)) + phiero.set_track_openpype_tag(track, data_imprint) diff --git a/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py b/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py index 8d2ed9a9c2..9489b1c4fb 100644 --- a/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py +++ b/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py @@ -16,6 +16,9 @@ class CollectClipEffects(pyblish.api.InstancePlugin): review_track_index = instance.context.data.get("reviewTrackIndex") item = instance.data["item"] + if "audio" in instance.data["family"]: + return + # frame range self.handle_start = instance.data["handleStart"] self.handle_end = instance.data["handleEnd"] diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_tasks.py b/openpype/hosts/hiero/plugins/publish/collect_tag_tasks.py similarity index 91% rename from openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_tasks.py rename to openpype/hosts/hiero/plugins/publish/collect_tag_tasks.py index 70891d5b45..27968060e1 100644 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_tasks.py +++ b/openpype/hosts/hiero/plugins/publish/collect_tag_tasks.py @@ -4,16 +4,16 @@ from pyblish import api class CollectClipTagTasks(api.InstancePlugin): """Collect Tags from selected track items.""" - order = api.CollectorOrder + order = api.CollectorOrder - 0.077 label = "Collect Tag Tasks" hosts = ["hiero"] - families = ['clip'] + families = ["shot"] def process(self, instance): # gets tags tags = instance.data["tags"] - tasks = dict() + tasks = {} for tag in tags: t_metadata = dict(tag.metadata()) t_family = t_metadata.get("tag.family", "") diff --git a/openpype/hosts/hiero/plugins/publish/extract_clip_effects.py b/openpype/hosts/hiero/plugins/publish/extract_clip_effects.py index 5b0aa270a7..7fb381ff7e 100644 --- a/openpype/hosts/hiero/plugins/publish/extract_clip_effects.py +++ b/openpype/hosts/hiero/plugins/publish/extract_clip_effects.py @@ -2,10 +2,11 @@ import os import json import pyblish.api -import openpype + +from openpype.pipeline import publish -class ExtractClipEffects(openpype.api.Extractor): +class ExtractClipEffects(publish.Extractor): """Extract clip effects instances.""" order = pyblish.api.ExtractorOrder diff --git a/openpype/hosts/hiero/plugins/publish/extract_frames.py b/openpype/hosts/hiero/plugins/publish/extract_frames.py index aa3eda2e9f..f865d2fb39 100644 --- a/openpype/hosts/hiero/plugins/publish/extract_frames.py +++ b/openpype/hosts/hiero/plugins/publish/extract_frames.py @@ -1,9 +1,14 @@ import os import pyblish.api -import openpype + +from openpype.lib import ( + get_oiio_tools_path, + run_subprocess, +) +from openpype.pipeline import publish -class 
ExtractFrames(openpype.api.Extractor): +class ExtractFrames(publish.Extractor): """Extracts frames""" order = pyblish.api.ExtractorOrder @@ -13,7 +18,7 @@ class ExtractFrames(openpype.api.Extractor): movie_extensions = ["mov", "mp4"] def process(self, instance): - oiio_tool_path = openpype.lib.get_oiio_tools_path() + oiio_tool_path = get_oiio_tools_path() staging_dir = self.staging_dir(instance) output_template = os.path.join(staging_dir, instance.data["name"]) sequence = instance.context.data["activeTimeline"] @@ -43,7 +48,7 @@ class ExtractFrames(openpype.api.Extractor): args.extend(["--powc", "0.45,0.45,0.45,1.0"]) args.extend([input_path, "-o", output_path]) - output = openpype.api.run_subprocess(args) + output = run_subprocess(args) failed_output = "oiiotool produced no output." if failed_output in output: diff --git a/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py b/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py index d12e7665bf..e64aa89b26 100644 --- a/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py @@ -1,9 +1,10 @@ import os import pyblish.api -import openpype.api + +from openpype.pipeline import publish -class ExtractThumnail(openpype.api.Extractor): +class ExtractThumnail(publish.Extractor): """ Extractor for track item's tumnails """ diff --git a/openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py b/openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py index 934e7112fa..6ccbe955f2 100644 --- a/openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py +++ b/openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py @@ -1,5 +1,6 @@ from pyblish import api -import openpype.api as pype + +from openpype.lib import version_up class IntegrateVersionUpWorkfile(api.ContextPlugin): @@ -15,7 +16,7 @@ class IntegrateVersionUpWorkfile(api.ContextPlugin): def process(self, context): project = context.data["activeProject"] path = context.data.get("currentFile") - new_path = pype.version_up(path) + new_path = version_up(path) if project: project.saveAs(new_path) diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py index 4eac6a008a..bb02919b35 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py @@ -1,5 +1,5 @@ import pyblish -import openpype +from openpype.pipeline.editorial import is_overlapping_otio_ranges from openpype.hosts.hiero import api as phiero from openpype.hosts.hiero.api.otio import hiero_export import hiero @@ -19,9 +19,12 @@ class PrecollectInstances(pyblish.api.ContextPlugin): def process(self, context): self.otio_timeline = context.data["otioTimeline"] - + timeline_selection = phiero.get_timeline_selection() selected_timeline_items = phiero.get_track_items( - selected=True, check_tagged=True, check_enabled=True) + selection=timeline_selection, + check_tagged=True, + check_enabled=True + ) # only return enabled track items if not selected_timeline_items: @@ -45,7 +48,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin): self.log.debug("clip_name: {}".format(clip_name)) # get openpype tag data - tag_data = phiero.get_track_item_pype_data(track_item) + tag_data = phiero.get_trackitem_openpype_data(track_item) self.log.debug("__ tag_data: {}".format(pformat(tag_data))) if not tag_data: @@ -103,7 +106,11 @@ class 
PrecollectInstances(pyblish.api.ContextPlugin): # clip's effect "clipEffectItems": subtracks, - "clipAnnotations": annotations + "clipAnnotations": annotations, + + # add all additional tags + "tags": phiero.get_track_item_tags(track_item), + "newAssetPublishing": True }) # otio clip data @@ -269,7 +276,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin): parent_range = otio_audio.range_in_parent() # if any overaling clip found then return True - if openpype.lib.is_overlapping_otio_ranges( + if is_overlapping_otio_ranges( parent_range, timeline_range, strict=False): return True @@ -292,11 +299,13 @@ class PrecollectInstances(pyblish.api.ContextPlugin): for otio_clip in self.otio_timeline.each_clip(): track_name = otio_clip.parent().name parent_range = otio_clip.range_in_parent() - if ti_track_name not in track_name: + if ti_track_name != track_name: continue - if otio_clip.name not in track_item.name(): + if otio_clip.name != track_item.name(): continue - if openpype.lib.is_overlapping_otio_ranges( + self.log.debug("__ parent_range: {}".format(parent_range)) + self.log.debug("__ timeline_range: {}".format(timeline_range)) + if is_overlapping_otio_ranges( parent_range, timeline_range, strict=True): # add pypedata marker to otio_clip metadata @@ -309,17 +318,15 @@ class PrecollectInstances(pyblish.api.ContextPlugin): @staticmethod def create_otio_time_range_from_timeline_item_data(track_item): - speed = track_item.playbackSpeed() timeline = phiero.get_current_sequence() frame_start = int(track_item.timelineIn()) - frame_duration = int(track_item.sourceDuration() / speed) + frame_duration = int(track_item.duration()) fps = timeline.framerate().toFloat() return hiero_export.create_otio_time_range( frame_start, frame_duration, fps) - @staticmethod - def collect_sub_track_items(tracks): + def collect_sub_track_items(self, tracks): """ Returns dictionary with track index as key and list of subtracks """ @@ -328,8 +335,10 @@ class PrecollectInstances(pyblish.api.ContextPlugin): for track in tracks: items = track.items() + effet_items = track.subTrackItems() + # skip if no clips on track > need track with effect only - if items: + if not effet_items: continue # skip all disabled tracks @@ -337,10 +346,11 @@ class PrecollectInstances(pyblish.api.ContextPlugin): continue track_index = track.trackIndex() - _sub_track_items = phiero.flatten(track.subTrackItems()) + _sub_track_items = phiero.flatten(effet_items) + _sub_track_items = list(_sub_track_items) # continue only if any subtrack items are collected - if not list(_sub_track_items): + if not _sub_track_items: continue enabled_sti = [] diff --git a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py index b9f58c15f6..c9bfb86810 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py @@ -16,7 +16,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): """Inject the current working file into context""" label = "Precollect Workfile" - order = pyblish.api.CollectorOrder - 0.5 + order = pyblish.api.CollectorOrder - 0.491 def process(self, context): @@ -84,6 +84,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): "colorspace": self.get_colorspace(project), "fps": fps } + self.log.debug("__ context_data: {}".format(pformat(context_data))) context.data.update(context_data) self.log.info("Creating instance: {}".format(instance)) diff --git 
a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py index 10baf25803..5f96533052 100644 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py @@ -1,4 +1,5 @@ from pyblish import api +from openpype.client import get_assets from openpype.pipeline import legacy_io @@ -17,8 +18,9 @@ class CollectAssetBuilds(api.ContextPlugin): hosts = ["hiero"] def process(self, context): + project_name = legacy_io.active_project() asset_builds = {} - for asset in legacy_io.find({"type": "asset"}): + for asset in get_assets(project_name): if asset["data"]["entityType"] == "AssetBuild": self.log.debug("Found \"{}\" in database.".format(asset)) asset_builds[asset["name"]] = asset diff --git a/openpype/hosts/hiero/vendor/google/protobuf/__init__.py b/openpype/hosts/hiero/vendor/google/protobuf/__init__.py new file mode 100644 index 0000000000..03f3b29ee7 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/__init__.py @@ -0,0 +1,33 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Copyright 2007 Google Inc. All Rights Reserved. + +__version__ = '3.20.1' diff --git a/openpype/hosts/hiero/vendor/google/protobuf/any_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/any_pb2.py new file mode 100644 index 0000000000..9121193d11 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/any_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/any.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"&\n\x03\x41ny\x12\x10\n\x08type_url\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x42v\n\x13\x63om.google.protobufB\x08\x41nyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.any_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\010AnyProtoP\001Z,google.golang.org/protobuf/types/known/anypb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _ANY._serialized_start=46 + _ANY._serialized_end=84 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/api_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/api_pb2.py new file mode 100644 index 0000000000..1721b10a75 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/api_pb2.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/api.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2 +from google.protobuf import type_pb2 as google_dot_protobuf_dot_type__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/api.proto\x12\x0fgoogle.protobuf\x1a$google/protobuf/source_context.proto\x1a\x1agoogle/protobuf/type.proto\"\x81\x02\n\x03\x41pi\x12\x0c\n\x04name\x18\x01 \x01(\t\x12(\n\x07methods\x18\x02 \x03(\x0b\x32\x17.google.protobuf.Method\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12&\n\x06mixins\x18\x06 \x03(\x0b\x32\x16.google.protobuf.Mixin\x12\'\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x01\n\x06Method\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10request_type_url\x18\x02 \x01(\t\x12\x19\n\x11request_streaming\x18\x03 \x01(\x08\x12\x19\n\x11response_type_url\x18\x04 \x01(\t\x12\x1a\n\x12response_streaming\x18\x05 \x01(\x08\x12(\n\x07options\x18\x06 \x03(\x0b\x32\x17.google.protobuf.Option\x12\'\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.Syntax\"#\n\x05Mixin\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04root\x18\x02 \x01(\tBv\n\x13\x63om.google.protobufB\x08\x41piProtoP\x01Z,google.golang.org/protobuf/types/known/apipb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) 
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.api_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\010ApiProtoP\001Z,google.golang.org/protobuf/types/known/apipb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _API._serialized_start=113 + _API._serialized_end=370 + _METHOD._serialized_start=373 + _METHOD._serialized_end=586 + _MIXIN._serialized_start=588 + _MIXIN._serialized_end=623 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/compiler/__init__.py b/openpype/hosts/hiero/vendor/google/protobuf/compiler/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/hiero/vendor/google/protobuf/compiler/plugin_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/compiler/plugin_pb2.py new file mode 100644 index 0000000000..715a891370 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/compiler/plugin_pb2.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/compiler/plugin.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%google/protobuf/compiler/plugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"F\n\x07Version\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\x0e\n\x06suffix\x18\x04 \x01(\t\"\xba\x01\n\x14\x43odeGeneratorRequest\x12\x18\n\x10\x66ile_to_generate\x18\x01 \x03(\t\x12\x11\n\tparameter\x18\x02 \x01(\t\x12\x38\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\x12;\n\x10\x63ompiler_version\x18\x03 \x01(\x0b\x32!.google.protobuf.compiler.Version\"\xc1\x02\n\x15\x43odeGeneratorResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x1a\n\x12supported_features\x18\x02 \x01(\x04\x12\x42\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.File\x1a\x7f\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0finsertion_point\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x0f \x01(\t\x12?\n\x13generated_code_info\x18\x10 \x01(\x0b\x32\".google.protobuf.GeneratedCodeInfo\"8\n\x07\x46\x65\x61ture\x12\x10\n\x0c\x46\x45\x41TURE_NONE\x10\x00\x12\x1b\n\x17\x46\x45\x41TURE_PROTO3_OPTIONAL\x10\x01\x42W\n\x1c\x63om.google.protobuf.compilerB\x0cPluginProtosZ)google.golang.org/protobuf/types/pluginpb') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.compiler.plugin_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\034com.google.protobuf.compilerB\014PluginProtosZ)google.golang.org/protobuf/types/pluginpb' + _VERSION._serialized_start=101 + _VERSION._serialized_end=171 + _CODEGENERATORREQUEST._serialized_start=174 + _CODEGENERATORREQUEST._serialized_end=360 + _CODEGENERATORRESPONSE._serialized_start=363 + 
_CODEGENERATORRESPONSE._serialized_end=684 + _CODEGENERATORRESPONSE_FILE._serialized_start=499 + _CODEGENERATORRESPONSE_FILE._serialized_end=626 + _CODEGENERATORRESPONSE_FEATURE._serialized_start=628 + _CODEGENERATORRESPONSE_FEATURE._serialized_end=684 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/descriptor.py b/openpype/hosts/hiero/vendor/google/protobuf/descriptor.py new file mode 100644 index 0000000000..ad70be9a11 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/descriptor.py @@ -0,0 +1,1224 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Descriptors essentially contain exactly the information found in a .proto +file, in types that make this information accessible in Python. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import threading +import warnings + +from google.protobuf.internal import api_implementation + +_USE_C_DESCRIPTORS = False +if api_implementation.Type() == 'cpp': + # Used by MakeDescriptor in cpp mode + import binascii + import os + from google.protobuf.pyext import _message + _USE_C_DESCRIPTORS = True + + +class Error(Exception): + """Base error for this module.""" + + +class TypeTransformationError(Error): + """Error transforming between python proto type and corresponding C++ type.""" + + +if _USE_C_DESCRIPTORS: + # This metaclass allows to override the behavior of code like + # isinstance(my_descriptor, FieldDescriptor) + # and make it return True when the descriptor is an instance of the extension + # type written in C++. + class DescriptorMetaclass(type): + def __instancecheck__(cls, obj): + if super(DescriptorMetaclass, cls).__instancecheck__(obj): + return True + if isinstance(obj, cls._C_DESCRIPTOR_CLASS): + return True + return False +else: + # The standard metaclass; nothing changes. 
+ DescriptorMetaclass = type + + +class _Lock(object): + """Wrapper class of threading.Lock(), which is allowed by 'with'.""" + + def __new__(cls): + self = object.__new__(cls) + self._lock = threading.Lock() # pylint: disable=protected-access + return self + + def __enter__(self): + self._lock.acquire() + + def __exit__(self, exc_type, exc_value, exc_tb): + self._lock.release() + + +_lock = threading.Lock() + + +def _Deprecated(name): + if _Deprecated.count > 0: + _Deprecated.count -= 1 + warnings.warn( + 'Call to deprecated create function %s(). Note: Create unlinked ' + 'descriptors is going to go away. Please use get/find descriptors from ' + 'generated code or query the descriptor_pool.' + % name, + category=DeprecationWarning, stacklevel=3) + + +# Deprecated warnings will print 100 times at most which should be enough for +# users to notice and do not cause timeout. +_Deprecated.count = 100 + + +_internal_create_key = object() + + +class DescriptorBase(metaclass=DescriptorMetaclass): + + """Descriptors base class. + + This class is the base of all descriptor classes. It provides common options + related functionality. + + Attributes: + has_options: True if the descriptor has non-default options. Usually it + is not necessary to read this -- just call GetOptions() which will + happily return the default instance. However, it's sometimes useful + for efficiency, and also useful inside the protobuf implementation to + avoid some bootstrapping issues. + """ + + if _USE_C_DESCRIPTORS: + # The class, or tuple of classes, that are considered as "virtual + # subclasses" of this descriptor class. + _C_DESCRIPTOR_CLASS = () + + def __init__(self, options, serialized_options, options_class_name): + """Initialize the descriptor given its options message and the name of the + class of the options message. The name of the class is required in case + the options message is None and has to be created. + """ + self._options = options + self._options_class_name = options_class_name + self._serialized_options = serialized_options + + # Does this descriptor have non-default options? + self.has_options = (options is not None) or (serialized_options is not None) + + def _SetOptions(self, options, options_class_name): + """Sets the descriptor's options + + This function is used in generated proto2 files to update descriptor + options. It must not be used outside proto2. + """ + self._options = options + self._options_class_name = options_class_name + + # Does this descriptor have non-default options? + self.has_options = options is not None + + def GetOptions(self): + """Retrieves descriptor options. + + This method returns the options set or creates the default options for the + descriptor. + """ + if self._options: + return self._options + + from google.protobuf import descriptor_pb2 + try: + options_class = getattr(descriptor_pb2, + self._options_class_name) + except AttributeError: + raise RuntimeError('Unknown options class name %s!' % + (self._options_class_name)) + + with _lock: + if self._serialized_options is None: + self._options = options_class() + else: + self._options = _ParseOptions(options_class(), + self._serialized_options) + + return self._options + + +class _NestedDescriptorBase(DescriptorBase): + """Common class for descriptors that can be nested.""" + + def __init__(self, options, options_class_name, name, full_name, + file, containing_type, serialized_start=None, + serialized_end=None, serialized_options=None): + """Constructor. 
+ + Args: + options: Protocol message options or None + to use default message options. + options_class_name (str): The class name of the above options. + name (str): Name of this protocol message type. + full_name (str): Fully-qualified name of this protocol message type, + which will include protocol "package" name and the name of any + enclosing types. + file (FileDescriptor): Reference to file info. + containing_type: if provided, this is a nested descriptor, with this + descriptor as parent, otherwise None. + serialized_start: The start index (inclusive) in block in the + file.serialized_pb that describes this descriptor. + serialized_end: The end index (exclusive) in block in the + file.serialized_pb that describes this descriptor. + serialized_options: Protocol message serialized options or None. + """ + super(_NestedDescriptorBase, self).__init__( + options, serialized_options, options_class_name) + + self.name = name + # TODO(falk): Add function to calculate full_name instead of having it in + # memory? + self.full_name = full_name + self.file = file + self.containing_type = containing_type + + self._serialized_start = serialized_start + self._serialized_end = serialized_end + + def CopyToProto(self, proto): + """Copies this to the matching proto in descriptor_pb2. + + Args: + proto: An empty proto instance from descriptor_pb2. + + Raises: + Error: If self couldn't be serialized, due to to few constructor + arguments. + """ + if (self.file is not None and + self._serialized_start is not None and + self._serialized_end is not None): + proto.ParseFromString(self.file.serialized_pb[ + self._serialized_start:self._serialized_end]) + else: + raise Error('Descriptor does not contain serialization.') + + +class Descriptor(_NestedDescriptorBase): + + """Descriptor for a protocol message type. + + Attributes: + name (str): Name of this protocol message type. + full_name (str): Fully-qualified name of this protocol message type, + which will include protocol "package" name and the name of any + enclosing types. + containing_type (Descriptor): Reference to the descriptor of the type + containing us, or None if this is top-level. + fields (list[FieldDescriptor]): Field descriptors for all fields in + this type. + fields_by_number (dict(int, FieldDescriptor)): Same + :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed + by "number" attribute in each FieldDescriptor. + fields_by_name (dict(str, FieldDescriptor)): Same + :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed by + "name" attribute in each :class:`FieldDescriptor`. + nested_types (list[Descriptor]): Descriptor references + for all protocol message types nested within this one. + nested_types_by_name (dict(str, Descriptor)): Same Descriptor + objects as in :attr:`nested_types`, but indexed by "name" attribute + in each Descriptor. + enum_types (list[EnumDescriptor]): :class:`EnumDescriptor` references + for all enums contained within this type. + enum_types_by_name (dict(str, EnumDescriptor)): Same + :class:`EnumDescriptor` objects as in :attr:`enum_types`, but + indexed by "name" attribute in each EnumDescriptor. + enum_values_by_name (dict(str, EnumValueDescriptor)): Dict mapping + from enum value name to :class:`EnumValueDescriptor` for that value. + extensions (list[FieldDescriptor]): All extensions defined directly + within this message type (NOT within a nested type). 
+ extensions_by_name (dict(str, FieldDescriptor)): Same FieldDescriptor + objects as :attr:`extensions`, but indexed by "name" attribute of each + FieldDescriptor. + is_extendable (bool): Does this type define any extension ranges? + oneofs (list[OneofDescriptor]): The list of descriptors for oneof fields + in this message. + oneofs_by_name (dict(str, OneofDescriptor)): Same objects as in + :attr:`oneofs`, but indexed by "name" attribute. + file (FileDescriptor): Reference to file descriptor. + + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.Descriptor + + def __new__( + cls, + name=None, + full_name=None, + filename=None, + containing_type=None, + fields=None, + nested_types=None, + enum_types=None, + extensions=None, + options=None, + serialized_options=None, + is_extendable=True, + extension_ranges=None, + oneofs=None, + file=None, # pylint: disable=redefined-builtin + serialized_start=None, + serialized_end=None, + syntax=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindMessageTypeByName(full_name) + + # NOTE(tmarek): The file argument redefining a builtin is nothing we can + # fix right now since we don't know how many clients already rely on the + # name of the argument. + def __init__(self, name, full_name, filename, containing_type, fields, + nested_types, enum_types, extensions, options=None, + serialized_options=None, + is_extendable=True, extension_ranges=None, oneofs=None, + file=None, serialized_start=None, serialized_end=None, # pylint: disable=redefined-builtin + syntax=None, create_key=None): + """Arguments to __init__() are as described in the description + of Descriptor fields above. + + Note that filename is an obsolete argument, that is not used anymore. + Please use file.name to access this as an attribute. + """ + if create_key is not _internal_create_key: + _Deprecated('Descriptor') + + super(Descriptor, self).__init__( + options, 'MessageOptions', name, full_name, file, + containing_type, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + + # We have fields in addition to fields_by_name and fields_by_number, + # so that: + # 1. Clients can index fields by "order in which they're listed." + # 2. Clients can easily iterate over all fields with the terse + # syntax: for f in descriptor.fields: ... 
+ self.fields = fields + for field in self.fields: + field.containing_type = self + self.fields_by_number = dict((f.number, f) for f in fields) + self.fields_by_name = dict((f.name, f) for f in fields) + self._fields_by_camelcase_name = None + + self.nested_types = nested_types + for nested_type in nested_types: + nested_type.containing_type = self + self.nested_types_by_name = dict((t.name, t) for t in nested_types) + + self.enum_types = enum_types + for enum_type in self.enum_types: + enum_type.containing_type = self + self.enum_types_by_name = dict((t.name, t) for t in enum_types) + self.enum_values_by_name = dict( + (v.name, v) for t in enum_types for v in t.values) + + self.extensions = extensions + for extension in self.extensions: + extension.extension_scope = self + self.extensions_by_name = dict((f.name, f) for f in extensions) + self.is_extendable = is_extendable + self.extension_ranges = extension_ranges + self.oneofs = oneofs if oneofs is not None else [] + self.oneofs_by_name = dict((o.name, o) for o in self.oneofs) + for oneof in self.oneofs: + oneof.containing_type = self + self.syntax = syntax or "proto2" + + @property + def fields_by_camelcase_name(self): + """Same FieldDescriptor objects as in :attr:`fields`, but indexed by + :attr:`FieldDescriptor.camelcase_name`. + """ + if self._fields_by_camelcase_name is None: + self._fields_by_camelcase_name = dict( + (f.camelcase_name, f) for f in self.fields) + return self._fields_by_camelcase_name + + def EnumValueName(self, enum, value): + """Returns the string name of an enum value. + + This is just a small helper method to simplify a common operation. + + Args: + enum: string name of the Enum. + value: int, value of the enum. + + Returns: + string name of the enum value. + + Raises: + KeyError if either the Enum doesn't exist or the value is not a valid + value for the enum. + """ + return self.enum_types_by_name[enum].values_by_number[value].name + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.DescriptorProto. + + Args: + proto: An empty descriptor_pb2.DescriptorProto. + """ + # This function is overridden to give a better doc comment. + super(Descriptor, self).CopyToProto(proto) + + +# TODO(robinson): We should have aggressive checking here, +# for example: +# * If you specify a repeated field, you should not be allowed +# to specify a default value. +# * [Other examples here as needed]. +# +# TODO(robinson): for this and other *Descriptor classes, we +# might also want to lock things down aggressively (e.g., +# prevent clients from setting the attributes). Having +# stronger invariants here in general will reduce the number +# of runtime checks we must do in reflection.py... +class FieldDescriptor(DescriptorBase): + + """Descriptor for a single field in a .proto file. + + Attributes: + name (str): Name of this field, exactly as it appears in .proto. + full_name (str): Name of this field, including containing scope. This is + particularly relevant for extensions. + index (int): Dense, 0-indexed index giving the order that this + field textually appears within its message in the .proto file. + number (int): Tag number declared for this field in the .proto file. + + type (int): (One of the TYPE_* constants below) Declared type. + cpp_type (int): (One of the CPPTYPE_* constants below) C++ type used to + represent this field. + + label (int): (One of the LABEL_* constants below) Tells whether this + field is optional, required, or repeated. 
+ has_default_value (bool): True if this field has a default value defined, + otherwise false. + default_value (Varies): Default value of this field. Only + meaningful for non-repeated scalar fields. Repeated fields + should always set this to [], and non-repeated composite + fields should always set this to None. + + containing_type (Descriptor): Descriptor of the protocol message + type that contains this field. Set by the Descriptor constructor + if we're passed into one. + Somewhat confusingly, for extension fields, this is the + descriptor of the EXTENDED message, not the descriptor + of the message containing this field. (See is_extension and + extension_scope below). + message_type (Descriptor): If a composite field, a descriptor + of the message type contained in this field. Otherwise, this is None. + enum_type (EnumDescriptor): If this field contains an enum, a + descriptor of that enum. Otherwise, this is None. + + is_extension: True iff this describes an extension field. + extension_scope (Descriptor): Only meaningful if is_extension is True. + Gives the message that immediately contains this extension field. + Will be None iff we're a top-level (file-level) extension field. + + options (descriptor_pb2.FieldOptions): Protocol message field options or + None to use default field options. + + containing_oneof (OneofDescriptor): If the field is a member of a oneof + union, contains its descriptor. Otherwise, None. + + file (FileDescriptor): Reference to file descriptor. + """ + + # Must be consistent with C++ FieldDescriptor::Type enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. + TYPE_DOUBLE = 1 + TYPE_FLOAT = 2 + TYPE_INT64 = 3 + TYPE_UINT64 = 4 + TYPE_INT32 = 5 + TYPE_FIXED64 = 6 + TYPE_FIXED32 = 7 + TYPE_BOOL = 8 + TYPE_STRING = 9 + TYPE_GROUP = 10 + TYPE_MESSAGE = 11 + TYPE_BYTES = 12 + TYPE_UINT32 = 13 + TYPE_ENUM = 14 + TYPE_SFIXED32 = 15 + TYPE_SFIXED64 = 16 + TYPE_SINT32 = 17 + TYPE_SINT64 = 18 + MAX_TYPE = 18 + + # Must be consistent with C++ FieldDescriptor::CppType enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. + CPPTYPE_INT32 = 1 + CPPTYPE_INT64 = 2 + CPPTYPE_UINT32 = 3 + CPPTYPE_UINT64 = 4 + CPPTYPE_DOUBLE = 5 + CPPTYPE_FLOAT = 6 + CPPTYPE_BOOL = 7 + CPPTYPE_ENUM = 8 + CPPTYPE_STRING = 9 + CPPTYPE_MESSAGE = 10 + MAX_CPPTYPE = 10 + + _PYTHON_TO_CPP_PROTO_TYPE_MAP = { + TYPE_DOUBLE: CPPTYPE_DOUBLE, + TYPE_FLOAT: CPPTYPE_FLOAT, + TYPE_ENUM: CPPTYPE_ENUM, + TYPE_INT64: CPPTYPE_INT64, + TYPE_SINT64: CPPTYPE_INT64, + TYPE_SFIXED64: CPPTYPE_INT64, + TYPE_UINT64: CPPTYPE_UINT64, + TYPE_FIXED64: CPPTYPE_UINT64, + TYPE_INT32: CPPTYPE_INT32, + TYPE_SFIXED32: CPPTYPE_INT32, + TYPE_SINT32: CPPTYPE_INT32, + TYPE_UINT32: CPPTYPE_UINT32, + TYPE_FIXED32: CPPTYPE_UINT32, + TYPE_BYTES: CPPTYPE_STRING, + TYPE_STRING: CPPTYPE_STRING, + TYPE_BOOL: CPPTYPE_BOOL, + TYPE_MESSAGE: CPPTYPE_MESSAGE, + TYPE_GROUP: CPPTYPE_MESSAGE + } + + # Must be consistent with C++ FieldDescriptor::Label enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. 
+ LABEL_OPTIONAL = 1 + LABEL_REQUIRED = 2 + LABEL_REPEATED = 3 + MAX_LABEL = 3 + + # Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber, + # and kLastReservedNumber in descriptor.h + MAX_FIELD_NUMBER = (1 << 29) - 1 + FIRST_RESERVED_FIELD_NUMBER = 19000 + LAST_RESERVED_FIELD_NUMBER = 19999 + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.FieldDescriptor + + def __new__(cls, name, full_name, index, number, type, cpp_type, label, + default_value, message_type, enum_type, containing_type, + is_extension, extension_scope, options=None, + serialized_options=None, + has_default_value=True, containing_oneof=None, json_name=None, + file=None, create_key=None): # pylint: disable=redefined-builtin + _message.Message._CheckCalledFromGeneratedFile() + if is_extension: + return _message.default_pool.FindExtensionByName(full_name) + else: + return _message.default_pool.FindFieldByName(full_name) + + def __init__(self, name, full_name, index, number, type, cpp_type, label, + default_value, message_type, enum_type, containing_type, + is_extension, extension_scope, options=None, + serialized_options=None, + has_default_value=True, containing_oneof=None, json_name=None, + file=None, create_key=None): # pylint: disable=redefined-builtin + """The arguments are as described in the description of FieldDescriptor + attributes above. + + Note that containing_type may be None, and may be set later if necessary + (to deal with circular references between message types, for example). + Likewise for extension_scope. + """ + if create_key is not _internal_create_key: + _Deprecated('FieldDescriptor') + + super(FieldDescriptor, self).__init__( + options, serialized_options, 'FieldOptions') + self.name = name + self.full_name = full_name + self.file = file + self._camelcase_name = None + if json_name is None: + self.json_name = _ToJsonName(name) + else: + self.json_name = json_name + self.index = index + self.number = number + self.type = type + self.cpp_type = cpp_type + self.label = label + self.has_default_value = has_default_value + self.default_value = default_value + self.containing_type = containing_type + self.message_type = message_type + self.enum_type = enum_type + self.is_extension = is_extension + self.extension_scope = extension_scope + self.containing_oneof = containing_oneof + if api_implementation.Type() == 'cpp': + if is_extension: + self._cdescriptor = _message.default_pool.FindExtensionByName(full_name) + else: + self._cdescriptor = _message.default_pool.FindFieldByName(full_name) + else: + self._cdescriptor = None + + @property + def camelcase_name(self): + """Camelcase name of this field. + + Returns: + str: the name in CamelCase. + """ + if self._camelcase_name is None: + self._camelcase_name = _ToCamelCase(self.name) + return self._camelcase_name + + @property + def has_presence(self): + """Whether the field distinguishes between unpopulated and default values. + + Raises: + RuntimeError: singular field that is not linked with message nor file. 
+ """ + if self.label == FieldDescriptor.LABEL_REPEATED: + return False + if (self.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE or + self.containing_oneof): + return True + if hasattr(self.file, 'syntax'): + return self.file.syntax == 'proto2' + if hasattr(self.message_type, 'syntax'): + return self.message_type.syntax == 'proto2' + raise RuntimeError( + 'has_presence is not ready to use because field %s is not' + ' linked with message type nor file' % self.full_name) + + @staticmethod + def ProtoTypeToCppProtoType(proto_type): + """Converts from a Python proto type to a C++ Proto Type. + + The Python ProtocolBuffer classes specify both the 'Python' datatype and the + 'C++' datatype - and they're not the same. This helper method should + translate from one to another. + + Args: + proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*) + Returns: + int: descriptor.FieldDescriptor.CPPTYPE_*, the C++ type. + Raises: + TypeTransformationError: when the Python proto type isn't known. + """ + try: + return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type] + except KeyError: + raise TypeTransformationError('Unknown proto_type: %s' % proto_type) + + +class EnumDescriptor(_NestedDescriptorBase): + + """Descriptor for an enum defined in a .proto file. + + Attributes: + name (str): Name of the enum type. + full_name (str): Full name of the type, including package name + and any enclosing type(s). + + values (list[EnumValueDescriptor]): List of the values + in this enum. + values_by_name (dict(str, EnumValueDescriptor)): Same as :attr:`values`, + but indexed by the "name" field of each EnumValueDescriptor. + values_by_number (dict(int, EnumValueDescriptor)): Same as :attr:`values`, + but indexed by the "number" field of each EnumValueDescriptor. + containing_type (Descriptor): Descriptor of the immediate containing + type of this enum, or None if this is an enum defined at the + top level in a .proto file. Set by Descriptor's constructor + if we're passed into one. + file (FileDescriptor): Reference to file descriptor. + options (descriptor_pb2.EnumOptions): Enum options message or + None to use default enum options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.EnumDescriptor + + def __new__(cls, name, full_name, filename, values, + containing_type=None, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindEnumTypeByName(full_name) + + def __init__(self, name, full_name, filename, values, + containing_type=None, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + """Arguments are as described in the attribute description above. + + Note that filename is an obsolete argument, that is not used anymore. + Please use file.name to access this as an attribute. + """ + if create_key is not _internal_create_key: + _Deprecated('EnumDescriptor') + + super(EnumDescriptor, self).__init__( + options, 'EnumOptions', name, full_name, file, + containing_type, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + + self.values = values + for value in self.values: + value.type = self + self.values_by_name = dict((v.name, v) for v in values) + # Values are reversed to ensure that the first alias is retained. 
+ self.values_by_number = dict((v.number, v) for v in reversed(values)) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.EnumDescriptorProto. + + Args: + proto (descriptor_pb2.EnumDescriptorProto): An empty descriptor proto. + """ + # This function is overridden to give a better doc comment. + super(EnumDescriptor, self).CopyToProto(proto) + + +class EnumValueDescriptor(DescriptorBase): + + """Descriptor for a single value within an enum. + + Attributes: + name (str): Name of this value. + index (int): Dense, 0-indexed index giving the order that this + value appears textually within its enum in the .proto file. + number (int): Actual number assigned to this enum value. + type (EnumDescriptor): :class:`EnumDescriptor` to which this value + belongs. Set by :class:`EnumDescriptor`'s constructor if we're + passed into one. + options (descriptor_pb2.EnumValueOptions): Enum value options message or + None to use default enum value options options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.EnumValueDescriptor + + def __new__(cls, name, index, number, + type=None, # pylint: disable=redefined-builtin + options=None, serialized_options=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + # There is no way we can build a complete EnumValueDescriptor with the + # given parameters (the name of the Enum is not known, for example). + # Fortunately generated files just pass it to the EnumDescriptor() + # constructor, which will ignore it, so returning None is good enough. + return None + + def __init__(self, name, index, number, + type=None, # pylint: disable=redefined-builtin + options=None, serialized_options=None, create_key=None): + """Arguments are as described in the attribute description above.""" + if create_key is not _internal_create_key: + _Deprecated('EnumValueDescriptor') + + super(EnumValueDescriptor, self).__init__( + options, serialized_options, 'EnumValueOptions') + self.name = name + self.index = index + self.number = number + self.type = type + + +class OneofDescriptor(DescriptorBase): + """Descriptor for a oneof field. + + Attributes: + name (str): Name of the oneof field. + full_name (str): Full name of the oneof field, including package name. + index (int): 0-based index giving the order of the oneof field inside + its containing type. + containing_type (Descriptor): :class:`Descriptor` of the protocol message + type that contains this field. Set by the :class:`Descriptor` constructor + if we're passed into one. + fields (list[FieldDescriptor]): The list of field descriptors this + oneof can contain. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.OneofDescriptor + + def __new__( + cls, name, full_name, index, containing_type, fields, options=None, + serialized_options=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindOneofByName(full_name) + + def __init__( + self, name, full_name, index, containing_type, fields, options=None, + serialized_options=None, create_key=None): + """Arguments are as described in the attribute description above.""" + if create_key is not _internal_create_key: + _Deprecated('OneofDescriptor') + + super(OneofDescriptor, self).__init__( + options, serialized_options, 'OneofOptions') + self.name = name + self.full_name = full_name + self.index = index + self.containing_type = containing_type + self.fields = fields + + +class ServiceDescriptor(_NestedDescriptorBase): + + """Descriptor for a service. 
+ + Attributes: + name (str): Name of the service. + full_name (str): Full name of the service, including package name. + index (int): 0-indexed index giving the order that this services + definition appears within the .proto file. + methods (list[MethodDescriptor]): List of methods provided by this + service. + methods_by_name (dict(str, MethodDescriptor)): Same + :class:`MethodDescriptor` objects as in :attr:`methods_by_name`, but + indexed by "name" attribute in each :class:`MethodDescriptor`. + options (descriptor_pb2.ServiceOptions): Service options message or + None to use default service options. + file (FileDescriptor): Reference to file info. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.ServiceDescriptor + + def __new__( + cls, + name=None, + full_name=None, + index=None, + methods=None, + options=None, + serialized_options=None, + file=None, # pylint: disable=redefined-builtin + serialized_start=None, + serialized_end=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access + return _message.default_pool.FindServiceByName(full_name) + + def __init__(self, name, full_name, index, methods, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + if create_key is not _internal_create_key: + _Deprecated('ServiceDescriptor') + + super(ServiceDescriptor, self).__init__( + options, 'ServiceOptions', name, full_name, file, + None, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + self.index = index + self.methods = methods + self.methods_by_name = dict((m.name, m) for m in methods) + # Set the containing service for each method in this service. + for method in self.methods: + method.containing_service = self + + def FindMethodByName(self, name): + """Searches for the specified method, and returns its descriptor. + + Args: + name (str): Name of the method. + Returns: + MethodDescriptor or None: the descriptor for the requested method, if + found. + """ + return self.methods_by_name.get(name, None) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.ServiceDescriptorProto. + + Args: + proto (descriptor_pb2.ServiceDescriptorProto): An empty descriptor proto. + """ + # This function is overridden to give a better doc comment. + super(ServiceDescriptor, self).CopyToProto(proto) + + +class MethodDescriptor(DescriptorBase): + + """Descriptor for a method in a service. + + Attributes: + name (str): Name of the method within the service. + full_name (str): Full name of method. + index (int): 0-indexed index of the method inside the service. + containing_service (ServiceDescriptor): The service that contains this + method. + input_type (Descriptor): The descriptor of the message that this method + accepts. + output_type (Descriptor): The descriptor of the message that this method + returns. + client_streaming (bool): Whether this method uses client streaming. + server_streaming (bool): Whether this method uses server streaming. + options (descriptor_pb2.MethodOptions or None): Method options message, or + None to use default method options. 
+ """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.MethodDescriptor + + def __new__(cls, + name, + full_name, + index, + containing_service, + input_type, + output_type, + client_streaming=False, + server_streaming=False, + options=None, + serialized_options=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access + return _message.default_pool.FindMethodByName(full_name) + + def __init__(self, + name, + full_name, + index, + containing_service, + input_type, + output_type, + client_streaming=False, + server_streaming=False, + options=None, + serialized_options=None, + create_key=None): + """The arguments are as described in the description of MethodDescriptor + attributes above. + + Note that containing_service may be None, and may be set later if necessary. + """ + if create_key is not _internal_create_key: + _Deprecated('MethodDescriptor') + + super(MethodDescriptor, self).__init__( + options, serialized_options, 'MethodOptions') + self.name = name + self.full_name = full_name + self.index = index + self.containing_service = containing_service + self.input_type = input_type + self.output_type = output_type + self.client_streaming = client_streaming + self.server_streaming = server_streaming + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.MethodDescriptorProto. + + Args: + proto (descriptor_pb2.MethodDescriptorProto): An empty descriptor proto. + + Raises: + Error: If self couldn't be serialized, due to too few constructor + arguments. + """ + if self.containing_service is not None: + from google.protobuf import descriptor_pb2 + service_proto = descriptor_pb2.ServiceDescriptorProto() + self.containing_service.CopyToProto(service_proto) + proto.CopyFrom(service_proto.method[self.index]) + else: + raise Error('Descriptor does not contain a service.') + + +class FileDescriptor(DescriptorBase): + """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto. + + Note that :attr:`enum_types_by_name`, :attr:`extensions_by_name`, and + :attr:`dependencies` fields are only set by the + :py:mod:`google.protobuf.message_factory` module, and not by the generated + proto code. + + Attributes: + name (str): Name of file, relative to root of source tree. + package (str): Name of the package + syntax (str): string indicating syntax of the file (can be "proto2" or + "proto3") + serialized_pb (bytes): Byte string of serialized + :class:`descriptor_pb2.FileDescriptorProto`. + dependencies (list[FileDescriptor]): List of other :class:`FileDescriptor` + objects this :class:`FileDescriptor` depends on. + public_dependencies (list[FileDescriptor]): A subset of + :attr:`dependencies`, which were declared as "public". + message_types_by_name (dict(str, Descriptor)): Mapping from message names + to their :class:`Descriptor`. + enum_types_by_name (dict(str, EnumDescriptor)): Mapping from enum names to + their :class:`EnumDescriptor`. + extensions_by_name (dict(str, FieldDescriptor)): Mapping from extension + names declared at file scope to their :class:`FieldDescriptor`. + services_by_name (dict(str, ServiceDescriptor)): Mapping from services' + names to their :class:`ServiceDescriptor`. + pool (DescriptorPool): The pool this descriptor belongs to. When not + passed to the constructor, the global default pool is used. 
+ """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.FileDescriptor + + def __new__(cls, name, package, options=None, + serialized_options=None, serialized_pb=None, + dependencies=None, public_dependencies=None, + syntax=None, pool=None, create_key=None): + # FileDescriptor() is called from various places, not only from generated + # files, to register dynamic proto files and messages. + # pylint: disable=g-explicit-bool-comparison + if serialized_pb == b'': + # Cpp generated code must be linked in if serialized_pb is '' + try: + return _message.default_pool.FindFileByName(name) + except KeyError: + raise RuntimeError('Please link in cpp generated lib for %s' % (name)) + elif serialized_pb: + return _message.default_pool.AddSerializedFile(serialized_pb) + else: + return super(FileDescriptor, cls).__new__(cls) + + def __init__(self, name, package, options=None, + serialized_options=None, serialized_pb=None, + dependencies=None, public_dependencies=None, + syntax=None, pool=None, create_key=None): + """Constructor.""" + if create_key is not _internal_create_key: + _Deprecated('FileDescriptor') + + super(FileDescriptor, self).__init__( + options, serialized_options, 'FileOptions') + + if pool is None: + from google.protobuf import descriptor_pool + pool = descriptor_pool.Default() + self.pool = pool + self.message_types_by_name = {} + self.name = name + self.package = package + self.syntax = syntax or "proto2" + self.serialized_pb = serialized_pb + + self.enum_types_by_name = {} + self.extensions_by_name = {} + self.services_by_name = {} + self.dependencies = (dependencies or []) + self.public_dependencies = (public_dependencies or []) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.FileDescriptorProto. + + Args: + proto: An empty descriptor_pb2.FileDescriptorProto. + """ + proto.ParseFromString(self.serialized_pb) + + +def _ParseOptions(message, string): + """Parses serialized options. + + This helper function is used to parse serialized options in generated + proto2 files. It must not be used outside proto2. + """ + message.ParseFromString(string) + return message + + +def _ToCamelCase(name): + """Converts name to camel-case and returns it.""" + capitalize_next = False + result = [] + + for c in name: + if c == '_': + if result: + capitalize_next = True + elif capitalize_next: + result.append(c.upper()) + capitalize_next = False + else: + result += c + + # Lower-case the first letter. + if result and result[0].isupper(): + result[0] = result[0].lower() + return ''.join(result) + + +def _OptionsOrNone(descriptor_proto): + """Returns the value of the field `options`, or None if it is not set.""" + if descriptor_proto.HasField('options'): + return descriptor_proto.options + else: + return None + + +def _ToJsonName(name): + """Converts name to Json name and returns it.""" + capitalize_next = False + result = [] + + for c in name: + if c == '_': + capitalize_next = True + elif capitalize_next: + result.append(c.upper()) + capitalize_next = False + else: + result += c + + return ''.join(result) + + +def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True, + syntax=None): + """Make a protobuf Descriptor given a DescriptorProto protobuf. + + Handles nested descriptors. Note that this is limited to the scope of defining + a message inside of another message. Composite fields can currently only be + resolved if the message is defined in the same scope as the field. + + Args: + desc_proto: The descriptor_pb2.DescriptorProto protobuf message. 
+ package: Optional package name for the new message Descriptor (string). + build_file_if_cpp: Update the C++ descriptor pool if api matches. + Set to False on recursion, so no duplicates are created. + syntax: The syntax/semantics that should be used. Set to "proto3" to get + proto3 field presence semantics. + Returns: + A Descriptor for protobuf messages. + """ + if api_implementation.Type() == 'cpp' and build_file_if_cpp: + # The C++ implementation requires all descriptors to be backed by the same + # definition in the C++ descriptor pool. To do this, we build a + # FileDescriptorProto with the same definition as this descriptor and build + # it into the pool. + from google.protobuf import descriptor_pb2 + file_descriptor_proto = descriptor_pb2.FileDescriptorProto() + file_descriptor_proto.message_type.add().MergeFrom(desc_proto) + + # Generate a random name for this proto file to prevent conflicts with any + # imported ones. We need to specify a file name so the descriptor pool + # accepts our FileDescriptorProto, but it is not important what that file + # name is actually set to. + proto_name = binascii.hexlify(os.urandom(16)).decode('ascii') + + if package: + file_descriptor_proto.name = os.path.join(package.replace('.', '/'), + proto_name + '.proto') + file_descriptor_proto.package = package + else: + file_descriptor_proto.name = proto_name + '.proto' + + _message.default_pool.Add(file_descriptor_proto) + result = _message.default_pool.FindFileByName(file_descriptor_proto.name) + + if _USE_C_DESCRIPTORS: + return result.message_types_by_name[desc_proto.name] + + full_message_name = [desc_proto.name] + if package: full_message_name.insert(0, package) + + # Create Descriptors for enum types + enum_types = {} + for enum_proto in desc_proto.enum_type: + full_name = '.'.join(full_message_name + [enum_proto.name]) + enum_desc = EnumDescriptor( + enum_proto.name, full_name, None, [ + EnumValueDescriptor(enum_val.name, ii, enum_val.number, + create_key=_internal_create_key) + for ii, enum_val in enumerate(enum_proto.value)], + create_key=_internal_create_key) + enum_types[full_name] = enum_desc + + # Create Descriptors for nested types + nested_types = {} + for nested_proto in desc_proto.nested_type: + full_name = '.'.join(full_message_name + [nested_proto.name]) + # Nested types are just those defined inside of the message, not all types + # used by fields in the message, so no loops are possible here. 
+ nested_desc = MakeDescriptor(nested_proto, + package='.'.join(full_message_name), + build_file_if_cpp=False, + syntax=syntax) + nested_types[full_name] = nested_desc + + fields = [] + for field_proto in desc_proto.field: + full_name = '.'.join(full_message_name + [field_proto.name]) + enum_desc = None + nested_desc = None + if field_proto.json_name: + json_name = field_proto.json_name + else: + json_name = None + if field_proto.HasField('type_name'): + type_name = field_proto.type_name + full_type_name = '.'.join(full_message_name + + [type_name[type_name.rfind('.')+1:]]) + if full_type_name in nested_types: + nested_desc = nested_types[full_type_name] + elif full_type_name in enum_types: + enum_desc = enum_types[full_type_name] + # Else type_name references a non-local type, which isn't implemented + field = FieldDescriptor( + field_proto.name, full_name, field_proto.number - 1, + field_proto.number, field_proto.type, + FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type), + field_proto.label, None, nested_desc, enum_desc, None, False, None, + options=_OptionsOrNone(field_proto), has_default_value=False, + json_name=json_name, create_key=_internal_create_key) + fields.append(field) + + desc_name = '.'.join(full_message_name) + return Descriptor(desc_proto.name, desc_name, None, None, fields, + list(nested_types.values()), list(enum_types.values()), [], + options=_OptionsOrNone(desc_proto), + create_key=_internal_create_key) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/descriptor_database.py b/openpype/hosts/hiero/vendor/google/protobuf/descriptor_database.py new file mode 100644 index 0000000000..073eddc711 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/descriptor_database.py @@ -0,0 +1,177 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Provides a container for DescriptorProtos.""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +import warnings + + +class Error(Exception): + pass + + +class DescriptorDatabaseConflictingDefinitionError(Error): + """Raised when a proto is added with the same name & different descriptor.""" + + +class DescriptorDatabase(object): + """A container accepting FileDescriptorProtos and maps DescriptorProtos.""" + + def __init__(self): + self._file_desc_protos_by_file = {} + self._file_desc_protos_by_symbol = {} + + def Add(self, file_desc_proto): + """Adds the FileDescriptorProto and its types to this database. + + Args: + file_desc_proto: The FileDescriptorProto to add. + Raises: + DescriptorDatabaseConflictingDefinitionError: if an attempt is made to + add a proto with the same name but different definition than an + existing proto in the database. + """ + proto_name = file_desc_proto.name + if proto_name not in self._file_desc_protos_by_file: + self._file_desc_protos_by_file[proto_name] = file_desc_proto + elif self._file_desc_protos_by_file[proto_name] != file_desc_proto: + raise DescriptorDatabaseConflictingDefinitionError( + '%s already added, but with different descriptor.' % proto_name) + else: + return + + # Add all the top-level descriptors to the index. + package = file_desc_proto.package + for message in file_desc_proto.message_type: + for name in _ExtractSymbols(message, package): + self._AddSymbol(name, file_desc_proto) + for enum in file_desc_proto.enum_type: + self._AddSymbol(('.'.join((package, enum.name))), file_desc_proto) + for enum_value in enum.value: + self._file_desc_protos_by_symbol[ + '.'.join((package, enum_value.name))] = file_desc_proto + for extension in file_desc_proto.extension: + self._AddSymbol(('.'.join((package, extension.name))), file_desc_proto) + for service in file_desc_proto.service: + self._AddSymbol(('.'.join((package, service.name))), file_desc_proto) + + def FindFileByName(self, name): + """Finds the file descriptor proto by file name. + + Typically the file name is a relative path ending to a .proto file. The + proto with the given name will have to have been added to this database + using the Add method or else an error will be raised. + + Args: + name: The file name to find. + + Returns: + The file descriptor proto matching the name. + + Raises: + KeyError if no file by the given name was added. + """ + + return self._file_desc_protos_by_file[name] + + def FindFileContainingSymbol(self, symbol): + """Finds the file descriptor proto containing the specified symbol. + + The symbol should be a fully qualified name including the file descriptor's + package and any containing messages. Some examples: + + 'some.package.name.Message' + 'some.package.name.Message.NestedEnum' + 'some.package.name.Message.some_field' + + The file descriptor proto containing the specified symbol must be added to + this database using the Add method or else an error will be raised. + + Args: + symbol: The fully qualified symbol name. + + Returns: + The file descriptor proto containing the symbol. + + Raises: + KeyError if no file contains the specified symbol. + """ + try: + return self._file_desc_protos_by_symbol[symbol] + except KeyError: + # Fields, enum values, and nested extensions are not in + # _file_desc_protos_by_symbol. Try to find the top level + # descriptor. Non-existent nested symbol under a valid top level + # descriptor can also be found. The behavior is the same with + # protobuf C++. 
+ top_level, _, _ = symbol.rpartition('.') + try: + return self._file_desc_protos_by_symbol[top_level] + except KeyError: + # Raise the original symbol as a KeyError for better diagnostics. + raise KeyError(symbol) + + def FindFileContainingExtension(self, extendee_name, extension_number): + # TODO(jieluo): implement this API. + return None + + def FindAllExtensionNumbers(self, extendee_name): + # TODO(jieluo): implement this API. + return [] + + def _AddSymbol(self, name, file_desc_proto): + if name in self._file_desc_protos_by_symbol: + warn_msg = ('Conflict register for file "' + file_desc_proto.name + + '": ' + name + + ' is already defined in file "' + + self._file_desc_protos_by_symbol[name].name + '"') + warnings.warn(warn_msg, RuntimeWarning) + self._file_desc_protos_by_symbol[name] = file_desc_proto + + +def _ExtractSymbols(desc_proto, package): + """Pulls out all the symbols from a descriptor proto. + + Args: + desc_proto: The proto to extract symbols from. + package: The package containing the descriptor type. + + Yields: + The fully qualified name found in the descriptor. + """ + message_name = package + '.' + desc_proto.name if package else desc_proto.name + yield message_name + for nested_type in desc_proto.nested_type: + for symbol in _ExtractSymbols(nested_type, message_name): + yield symbol + for enum_type in desc_proto.enum_type: + yield '.'.join((message_name, enum_type.name)) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/descriptor_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/descriptor_pb2.py new file mode 100644 index 0000000000..f570386432 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/descriptor_pb2.py @@ -0,0 +1,1925 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/descriptor.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR = _descriptor.FileDescriptor( + name='google/protobuf/descriptor.proto', + package='google.protobuf', + syntax='proto2', + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdb\x03\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x19\n\x11public_dependency\x18\n \x03(\x05\x12\x17\n\x0fweak_dependency\x18\x0b \x03(\x05\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x39\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfo\x12\x0e\n\x06syntax\x18\x0c \x01(\t\"\xa9\x05\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x39\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProto\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x12\x46\n\x0ereserved_range\x18\t \x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRange\x12\x15\n\rreserved_name\x18\n \x03(\t\x1a\x65\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\x12\x37\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptions\x1a+\n\rReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"g\n\x15\x45xtensionRangeOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd5\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12\x13\n\x0boneof_index\x18\t \x01(\x05\x12\x11\n\tjson_name\x18\n \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\x12\x17\n\x0fproto3_optional\x18\x11 
\x01(\x08\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"T\n\x14OneofDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptions\"\xa4\x02\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\x12N\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRange\x12\x15\n\rreserved_name\x18\x05 \x03(\t\x1a/\n\x11\x45numReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\xc1\x01\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\x12\x1f\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lse\"\xa5\x06\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12)\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01\x12%\n\x16java_string_check_utf8\x18\x1b \x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12\x12\n\ngo_package\x18\x0b \x01(\t\x12\"\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lse\x12$\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lse\x12#\n\x14php_generic_services\x18* \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04true\x12\x19\n\x11objc_class_prefix\x18$ \x01(\t\x12\x18\n\x10\x63sharp_namespace\x18% \x01(\t\x12\x14\n\x0cswift_prefix\x18\' \x01(\t\x12\x18\n\x10php_class_prefix\x18( \x01(\t\x12\x15\n\rphp_namespace\x18) \x01(\t\x12\x1e\n\x16php_metadata_namespace\x18, \x01(\t\x12\x14\n\x0cruby_package\x18- \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 
\x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\x84\x02\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x11\n\tmap_entry\x18\x07 \x01(\x08\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\xbe\x03\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12?\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMAL\x12\x13\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05\"^\n\x0cOneofOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x93\x01\n\x0b\x45numOptions\x12\x13\n\x0b\x61llow_alias\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"}\n\x10\x45numValueOptions\x12\x19\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"{\n\x0eServiceOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xad\x02\n\rMethodOptions\x12\x19\n\ndeprecated\x18! 
\x01(\x08:\x05\x66\x61lse\x12_\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWN\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9e\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x12\x17\n\x0f\x61ggregate_value\x18\x08 \x01(\t\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\"\xd5\x01\n\x0eSourceCodeInfo\x12:\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.Location\x1a\x86\x01\n\x08Location\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x10\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01\x12\x18\n\x10leading_comments\x18\x03 \x01(\t\x12\x19\n\x11trailing_comments\x18\x04 \x01(\t\x12!\n\x19leading_detached_comments\x18\x06 \x03(\t\"\xa7\x01\n\x11GeneratedCodeInfo\x12\x41\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.Annotation\x1aO\n\nAnnotation\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x03 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x05\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection' + ) +else: + DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdb\x03\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x19\n\x11public_dependency\x18\n \x03(\x05\x12\x17\n\x0fweak_dependency\x18\x0b \x03(\x05\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x39\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfo\x12\x0e\n\x06syntax\x18\x0c \x01(\t\"\xa9\x05\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x39\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProto\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x12\x46\n\x0ereserved_range\x18\t 
\x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRange\x12\x15\n\rreserved_name\x18\n \x03(\t\x1a\x65\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\x12\x37\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptions\x1a+\n\rReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"g\n\x15\x45xtensionRangeOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd5\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12\x13\n\x0boneof_index\x18\t \x01(\x05\x12\x11\n\tjson_name\x18\n \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\x12\x17\n\x0fproto3_optional\x18\x11 \x01(\x08\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"T\n\x14OneofDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptions\"\xa4\x02\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\x12N\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRange\x12\x15\n\rreserved_name\x18\x05 \x03(\t\x1a/\n\x11\x45numReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\xc1\x01\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\x12\x1f\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lse\"\xa5\x06\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12)\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01\x12%\n\x16java_string_check_utf8\x18\x1b 
\x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12\x12\n\ngo_package\x18\x0b \x01(\t\x12\"\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lse\x12$\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lse\x12#\n\x14php_generic_services\x18* \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04true\x12\x19\n\x11objc_class_prefix\x18$ \x01(\t\x12\x18\n\x10\x63sharp_namespace\x18% \x01(\t\x12\x14\n\x0cswift_prefix\x18\' \x01(\t\x12\x18\n\x10php_class_prefix\x18( \x01(\t\x12\x15\n\rphp_namespace\x18) \x01(\t\x12\x1e\n\x16php_metadata_namespace\x18, \x01(\t\x12\x14\n\x0cruby_package\x18- \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\x84\x02\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x11\n\tmap_entry\x18\x07 \x01(\x08\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\xbe\x03\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12?\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMAL\x12\x13\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05\"^\n\x0cOneofOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x93\x01\n\x0b\x45numOptions\x12\x13\n\x0b\x61llow_alias\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"}\n\x10\x45numValueOptions\x12\x19\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"{\n\x0eServiceOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xad\x02\n\rMethodOptions\x12\x19\n\ndeprecated\x18! 
\x01(\x08:\x05\x66\x61lse\x12_\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWN\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9e\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x12\x17\n\x0f\x61ggregate_value\x18\x08 \x01(\t\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\"\xd5\x01\n\x0eSourceCodeInfo\x12:\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.Location\x1a\x86\x01\n\x08Location\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x10\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01\x12\x18\n\x10leading_comments\x18\x03 \x01(\t\x12\x19\n\x11trailing_comments\x18\x04 \x01(\t\x12!\n\x19leading_detached_comments\x18\x06 \x03(\t\"\xa7\x01\n\x11GeneratedCodeInfo\x12\x41\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.Annotation\x1aO\n\nAnnotation\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x03 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x05\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection') + +if _descriptor._USE_C_DESCRIPTORS == False: + _FIELDDESCRIPTORPROTO_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='google.protobuf.FieldDescriptorProto.Type', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TYPE_DOUBLE', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FLOAT', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_INT64', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_UINT64', index=3, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_INT32', index=4, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FIXED64', index=5, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FIXED32', index=6, number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_BOOL', index=7, number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_STRING', index=8, number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + 
_descriptor.EnumValueDescriptor( + name='TYPE_GROUP', index=9, number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_MESSAGE', index=10, number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_BYTES', index=11, number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_UINT32', index=12, number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_ENUM', index=13, number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SFIXED32', index=14, number=15, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SFIXED64', index=15, number=16, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SINT32', index=16, number=17, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SINT64', index=17, number=18, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_TYPE) + + _FIELDDESCRIPTORPROTO_LABEL = _descriptor.EnumDescriptor( + name='Label', + full_name='google.protobuf.FieldDescriptorProto.Label', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='LABEL_OPTIONAL', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LABEL_REQUIRED', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LABEL_REPEATED', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_LABEL) + + _FILEOPTIONS_OPTIMIZEMODE = _descriptor.EnumDescriptor( + name='OptimizeMode', + full_name='google.protobuf.FileOptions.OptimizeMode', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='SPEED', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CODE_SIZE', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LITE_RUNTIME', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FILEOPTIONS_OPTIMIZEMODE) + + _FIELDOPTIONS_CTYPE = _descriptor.EnumDescriptor( + name='CType', + full_name='google.protobuf.FieldOptions.CType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + 
name='STRING', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CORD', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='STRING_PIECE', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_CTYPE) + + _FIELDOPTIONS_JSTYPE = _descriptor.EnumDescriptor( + name='JSType', + full_name='google.protobuf.FieldOptions.JSType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='JS_NORMAL', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='JS_STRING', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='JS_NUMBER', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_JSTYPE) + + _METHODOPTIONS_IDEMPOTENCYLEVEL = _descriptor.EnumDescriptor( + name='IdempotencyLevel', + full_name='google.protobuf.MethodOptions.IdempotencyLevel', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='IDEMPOTENCY_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='NO_SIDE_EFFECTS', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='IDEMPOTENT', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_METHODOPTIONS_IDEMPOTENCYLEVEL) + + + _FILEDESCRIPTORSET = _descriptor.Descriptor( + name='FileDescriptorSet', + full_name='google.protobuf.FileDescriptorSet', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _FILEDESCRIPTORPROTO = _descriptor.Descriptor( + name='FileDescriptorProto', + full_name='google.protobuf.FileDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='public_dependency', full_name='google.protobuf.FileDescriptorProto.public_dependency', index=3, + number=10, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='weak_dependency', full_name='google.protobuf.FileDescriptorProto.weak_dependency', index=4, + number=11, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=5, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=6, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='service', full_name='google.protobuf.FileDescriptorProto.service', index=7, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=8, + number=7, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.FileDescriptorProto.options', index=9, + number=8, type=11, 
cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='source_code_info', full_name='google.protobuf.FileDescriptorProto.source_code_info', index=10, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='syntax', full_name='google.protobuf.FileDescriptorProto.syntax', index=11, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _DESCRIPTORPROTO_EXTENSIONRANGE = _descriptor.Descriptor( + name='ExtensionRange', + full_name='google.protobuf.DescriptorProto.ExtensionRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.DescriptorProto.ExtensionRange.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _DESCRIPTORPROTO_RESERVEDRANGE = _descriptor.Descriptor( + name='ReservedRange', + full_name='google.protobuf.DescriptorProto.ReservedRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.DescriptorProto.ReservedRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.DescriptorProto.ReservedRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _DESCRIPTORPROTO = _descriptor.Descriptor( + name='DescriptorProto', + full_name='google.protobuf.DescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.DescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='field', full_name='google.protobuf.DescriptorProto.field', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='oneof_decl', full_name='google.protobuf.DescriptorProto.oneof_decl', index=6, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.DescriptorProto.options', index=7, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_range', full_name='google.protobuf.DescriptorProto.reserved_range', index=8, + number=9, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_name', full_name='google.protobuf.DescriptorProto.reserved_name', index=9, + number=10, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, _DESCRIPTORPROTO_RESERVEDRANGE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _EXTENSIONRANGEOPTIONS = _descriptor.Descriptor( + name='ExtensionRangeOptions', + full_name='google.protobuf.ExtensionRangeOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.ExtensionRangeOptions.uninterpreted_option', index=0, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _FIELDDESCRIPTORPROTO = _descriptor.Descriptor( + name='FieldDescriptorProto', + full_name='google.protobuf.FieldDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='label', 
full_name='google.protobuf.FieldDescriptorProto.label', index=2, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='oneof_index', full_name='google.protobuf.FieldDescriptorProto.oneof_index', index=7, + number=9, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='json_name', full_name='google.protobuf.FieldDescriptorProto.json_name', index=8, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=9, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='proto3_optional', full_name='google.protobuf.FieldDescriptorProto.proto3_optional', index=10, + number=17, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FIELDDESCRIPTORPROTO_TYPE, + _FIELDDESCRIPTORPROTO_LABEL, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ONEOFDESCRIPTORPROTO = _descriptor.Descriptor( + name='OneofDescriptorProto', + full_name='google.protobuf.OneofDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.OneofDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.OneofDescriptorProto.options', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE = _descriptor.Descriptor( + name='EnumReservedRange', + full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _ENUMDESCRIPTORPROTO = _descriptor.Descriptor( + name='EnumDescriptorProto', + full_name='google.protobuf.EnumDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', 
full_name='google.protobuf.EnumDescriptorProto.value', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_range', full_name='google.protobuf.EnumDescriptorProto.reserved_range', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_name', full_name='google.protobuf.EnumDescriptorProto.reserved_name', index=4, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ENUMVALUEDESCRIPTORPROTO = _descriptor.Descriptor( + name='EnumValueDescriptorProto', + full_name='google.protobuf.EnumValueDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.EnumValueDescriptorProto.number', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _SERVICEDESCRIPTORPROTO = _descriptor.Descriptor( + name='ServiceDescriptorProto', + 
full_name='google.protobuf.ServiceDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _METHODDESCRIPTORPROTO = _descriptor.Descriptor( + name='MethodDescriptorProto', + full_name='google.protobuf.MethodDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='client_streaming', 
full_name='google.protobuf.MethodDescriptorProto.client_streaming', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='server_streaming', full_name='google.protobuf.MethodDescriptorProto.server_streaming', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _FILEOPTIONS = _descriptor.Descriptor( + name='FileOptions', + full_name='google.protobuf.FileOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='java_package', full_name='google.protobuf.FileOptions.java_package', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_generate_equals_and_hash', full_name='google.protobuf.FileOptions.java_generate_equals_and_hash', index=3, + number=20, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_string_check_utf8', full_name='google.protobuf.FileOptions.java_string_check_utf8', index=4, + number=27, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=5, + number=9, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='go_package', full_name='google.protobuf.FileOptions.go_package', index=6, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=7, + number=16, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=8, + number=17, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='py_generic_services', full_name='google.protobuf.FileOptions.py_generic_services', index=9, + number=18, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_generic_services', full_name='google.protobuf.FileOptions.php_generic_services', index=10, + number=42, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.FileOptions.deprecated', index=11, + number=23, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cc_enable_arenas', full_name='google.protobuf.FileOptions.cc_enable_arenas', index=12, + number=31, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='objc_class_prefix', full_name='google.protobuf.FileOptions.objc_class_prefix', index=13, + number=36, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='csharp_namespace', full_name='google.protobuf.FileOptions.csharp_namespace', index=14, 
+ number=37, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='swift_prefix', full_name='google.protobuf.FileOptions.swift_prefix', index=15, + number=39, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_class_prefix', full_name='google.protobuf.FileOptions.php_class_prefix', index=16, + number=40, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_namespace', full_name='google.protobuf.FileOptions.php_namespace', index=17, + number=41, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_metadata_namespace', full_name='google.protobuf.FileOptions.php_metadata_namespace', index=18, + number=44, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ruby_package', full_name='google.protobuf.FileOptions.ruby_package', index=19, + number=45, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=20, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FILEOPTIONS_OPTIMIZEMODE, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _MESSAGEOPTIONS = _descriptor.Descriptor( + name='MessageOptions', + full_name='google.protobuf.MessageOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.MessageOptions.deprecated', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='map_entry', full_name='google.protobuf.MessageOptions.map_entry', index=3, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=4, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _FIELDOPTIONS = _descriptor.Descriptor( + name='FieldOptions', + full_name='google.protobuf.FieldOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='packed', full_name='google.protobuf.FieldOptions.packed', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='jstype', full_name='google.protobuf.FieldOptions.jstype', index=2, + number=6, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='lazy', full_name='google.protobuf.FieldOptions.lazy', index=3, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, 
default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unverified_lazy', full_name='google.protobuf.FieldOptions.unverified_lazy', index=4, + number=15, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=5, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='weak', full_name='google.protobuf.FieldOptions.weak', index=6, + number=10, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=7, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FIELDOPTIONS_CTYPE, + _FIELDOPTIONS_JSTYPE, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ONEOFOPTIONS = _descriptor.Descriptor( + name='OneofOptions', + full_name='google.protobuf.OneofOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.OneofOptions.uninterpreted_option', index=0, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ENUMOPTIONS = _descriptor.Descriptor( + name='EnumOptions', + full_name='google.protobuf.EnumOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='allow_alias', full_name='google.protobuf.EnumOptions.allow_alias', index=0, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( 
+ name='deprecated', full_name='google.protobuf.EnumOptions.deprecated', index=1, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=2, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ENUMVALUEOPTIONS = _descriptor.Descriptor( + name='EnumValueOptions', + full_name='google.protobuf.EnumValueOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.EnumValueOptions.deprecated', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=1, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _SERVICEOPTIONS = _descriptor.Descriptor( + name='ServiceOptions', + full_name='google.protobuf.ServiceOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.ServiceOptions.deprecated', index=0, + number=33, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=1, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _METHODOPTIONS = _descriptor.Descriptor( + name='MethodOptions', + 
full_name='google.protobuf.MethodOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.MethodOptions.deprecated', index=0, + number=33, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='idempotency_level', full_name='google.protobuf.MethodOptions.idempotency_level', index=1, + number=34, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=2, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _METHODOPTIONS_IDEMPOTENCYLEVEL, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _UNINTERPRETEDOPTION_NAMEPART = _descriptor.Descriptor( + name='NamePart', + full_name='google.protobuf.UninterpretedOption.NamePart', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0, + number=1, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1, + number=2, type=8, cpp_type=7, label=2, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _UNINTERPRETEDOPTION = _descriptor.Descriptor( + name='UninterpretedOption', + full_name='google.protobuf.UninterpretedOption', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.UninterpretedOption.name', index=0, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + 
_descriptor.FieldDescriptor( + name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2, + number=4, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3, + number=5, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4, + number=6, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5, + number=7, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='aggregate_value', full_name='google.protobuf.UninterpretedOption.aggregate_value', index=6, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _SOURCECODEINFO_LOCATION = _descriptor.Descriptor( + name='Location', + full_name='google.protobuf.SourceCodeInfo.Location', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='path', full_name='google.protobuf.SourceCodeInfo.Location.path', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='span', full_name='google.protobuf.SourceCodeInfo.Location.span', index=1, + number=2, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='leading_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_comments', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='trailing_comments', full_name='google.protobuf.SourceCodeInfo.Location.trailing_comments', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='leading_detached_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_detached_comments', index=4, + number=6, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _SOURCECODEINFO = _descriptor.Descriptor( + name='SourceCodeInfo', + full_name='google.protobuf.SourceCodeInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='location', full_name='google.protobuf.SourceCodeInfo.location', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SOURCECODEINFO_LOCATION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _GENERATEDCODEINFO_ANNOTATION = _descriptor.Descriptor( + name='Annotation', + full_name='google.protobuf.GeneratedCodeInfo.Annotation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='path', full_name='google.protobuf.GeneratedCodeInfo.Annotation.path', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='source_file', full_name='google.protobuf.GeneratedCodeInfo.Annotation.source_file', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='begin', full_name='google.protobuf.GeneratedCodeInfo.Annotation.begin', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.GeneratedCodeInfo.Annotation.end', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _GENERATEDCODEINFO = _descriptor.Descriptor( + name='GeneratedCodeInfo', + full_name='google.protobuf.GeneratedCodeInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='annotation', full_name='google.protobuf.GeneratedCodeInfo.annotation', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_GENERATEDCODEINFO_ANNOTATION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS + _FILEDESCRIPTORPROTO.fields_by_name['source_code_info'].message_type = _SOURCECODEINFO + _DESCRIPTORPROTO_EXTENSIONRANGE.fields_by_name['options'].message_type = _EXTENSIONRANGEOPTIONS + _DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO_RESERVEDRANGE.containing_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE + _DESCRIPTORPROTO.fields_by_name['oneof_decl'].message_type = _ONEOFDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS + _DESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _DESCRIPTORPROTO_RESERVEDRANGE + _EXTENSIONRANGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = 
_FIELDDESCRIPTORPROTO_LABEL + _FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE + _FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS + _FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO + _FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO + _ONEOFDESCRIPTORPROTO.fields_by_name['options'].message_type = _ONEOFOPTIONS + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE.containing_type = _ENUMDESCRIPTORPROTO + _ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO + _ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS + _ENUMDESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE + _ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS + _SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO + _SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS + _METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS + _FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE + _FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS + _MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE + _FIELDOPTIONS.fields_by_name['jstype'].enum_type = _FIELDOPTIONS_JSTYPE + _FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS + _FIELDOPTIONS_JSTYPE.containing_type = _FIELDOPTIONS + _ONEOFOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _METHODOPTIONS.fields_by_name['idempotency_level'].enum_type = _METHODOPTIONS_IDEMPOTENCYLEVEL + _METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _METHODOPTIONS_IDEMPOTENCYLEVEL.containing_type = _METHODOPTIONS + _UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION + _UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART + _SOURCECODEINFO_LOCATION.containing_type = _SOURCECODEINFO + _SOURCECODEINFO.fields_by_name['location'].message_type = _SOURCECODEINFO_LOCATION + _GENERATEDCODEINFO_ANNOTATION.containing_type = _GENERATEDCODEINFO + _GENERATEDCODEINFO.fields_by_name['annotation'].message_type = _GENERATEDCODEINFO_ANNOTATION + DESCRIPTOR.message_types_by_name['FileDescriptorSet'] = _FILEDESCRIPTORSET + DESCRIPTOR.message_types_by_name['FileDescriptorProto'] = _FILEDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['DescriptorProto'] = _DESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['ExtensionRangeOptions'] = _EXTENSIONRANGEOPTIONS + DESCRIPTOR.message_types_by_name['FieldDescriptorProto'] = _FIELDDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['OneofDescriptorProto'] = _ONEOFDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['EnumDescriptorProto'] = _ENUMDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['EnumValueDescriptorProto'] = _ENUMVALUEDESCRIPTORPROTO + 
DESCRIPTOR.message_types_by_name['ServiceDescriptorProto'] = _SERVICEDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['MethodDescriptorProto'] = _METHODDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['FileOptions'] = _FILEOPTIONS + DESCRIPTOR.message_types_by_name['MessageOptions'] = _MESSAGEOPTIONS + DESCRIPTOR.message_types_by_name['FieldOptions'] = _FIELDOPTIONS + DESCRIPTOR.message_types_by_name['OneofOptions'] = _ONEOFOPTIONS + DESCRIPTOR.message_types_by_name['EnumOptions'] = _ENUMOPTIONS + DESCRIPTOR.message_types_by_name['EnumValueOptions'] = _ENUMVALUEOPTIONS + DESCRIPTOR.message_types_by_name['ServiceOptions'] = _SERVICEOPTIONS + DESCRIPTOR.message_types_by_name['MethodOptions'] = _METHODOPTIONS + DESCRIPTOR.message_types_by_name['UninterpretedOption'] = _UNINTERPRETEDOPTION + DESCRIPTOR.message_types_by_name['SourceCodeInfo'] = _SOURCECODEINFO + DESCRIPTOR.message_types_by_name['GeneratedCodeInfo'] = _GENERATEDCODEINFO + _sym_db.RegisterFileDescriptor(DESCRIPTOR) + +else: + _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.descriptor_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _FILEDESCRIPTORSET._serialized_start=53 + _FILEDESCRIPTORSET._serialized_end=124 + _FILEDESCRIPTORPROTO._serialized_start=127 + _FILEDESCRIPTORPROTO._serialized_end=602 + _DESCRIPTORPROTO._serialized_start=605 + _DESCRIPTORPROTO._serialized_end=1286 + _DESCRIPTORPROTO_EXTENSIONRANGE._serialized_start=1140 + _DESCRIPTORPROTO_EXTENSIONRANGE._serialized_end=1241 + _DESCRIPTORPROTO_RESERVEDRANGE._serialized_start=1243 + _DESCRIPTORPROTO_RESERVEDRANGE._serialized_end=1286 + _EXTENSIONRANGEOPTIONS._serialized_start=1288 + _EXTENSIONRANGEOPTIONS._serialized_end=1391 + _FIELDDESCRIPTORPROTO._serialized_start=1394 + _FIELDDESCRIPTORPROTO._serialized_end=2119 + _FIELDDESCRIPTORPROTO_TYPE._serialized_start=1740 + _FIELDDESCRIPTORPROTO_TYPE._serialized_end=2050 + _FIELDDESCRIPTORPROTO_LABEL._serialized_start=2052 + _FIELDDESCRIPTORPROTO_LABEL._serialized_end=2119 + _ONEOFDESCRIPTORPROTO._serialized_start=2121 + _ONEOFDESCRIPTORPROTO._serialized_end=2205 + _ENUMDESCRIPTORPROTO._serialized_start=2208 + _ENUMDESCRIPTORPROTO._serialized_end=2500 + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE._serialized_start=2453 + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE._serialized_end=2500 + _ENUMVALUEDESCRIPTORPROTO._serialized_start=2502 + _ENUMVALUEDESCRIPTORPROTO._serialized_end=2610 + _SERVICEDESCRIPTORPROTO._serialized_start=2613 + _SERVICEDESCRIPTORPROTO._serialized_end=2757 + _METHODDESCRIPTORPROTO._serialized_start=2760 + _METHODDESCRIPTORPROTO._serialized_end=2953 + _FILEOPTIONS._serialized_start=2956 + _FILEOPTIONS._serialized_end=3761 + _FILEOPTIONS_OPTIMIZEMODE._serialized_start=3686 + _FILEOPTIONS_OPTIMIZEMODE._serialized_end=3744 + _MESSAGEOPTIONS._serialized_start=3764 + _MESSAGEOPTIONS._serialized_end=4024 + _FIELDOPTIONS._serialized_start=4027 + _FIELDOPTIONS._serialized_end=4473 + _FIELDOPTIONS_CTYPE._serialized_start=4354 + _FIELDOPTIONS_CTYPE._serialized_end=4401 + _FIELDOPTIONS_JSTYPE._serialized_start=4403 + _FIELDOPTIONS_JSTYPE._serialized_end=4456 + _ONEOFOPTIONS._serialized_start=4475 + _ONEOFOPTIONS._serialized_end=4569 + _ENUMOPTIONS._serialized_start=4572 + _ENUMOPTIONS._serialized_end=4719 + _ENUMVALUEOPTIONS._serialized_start=4721 + _ENUMVALUEOPTIONS._serialized_end=4846 + _SERVICEOPTIONS._serialized_start=4848 + _SERVICEOPTIONS._serialized_end=4971 + 
_METHODOPTIONS._serialized_start=4974 + _METHODOPTIONS._serialized_end=5275 + _METHODOPTIONS_IDEMPOTENCYLEVEL._serialized_start=5184 + _METHODOPTIONS_IDEMPOTENCYLEVEL._serialized_end=5264 + _UNINTERPRETEDOPTION._serialized_start=5278 + _UNINTERPRETEDOPTION._serialized_end=5564 + _UNINTERPRETEDOPTION_NAMEPART._serialized_start=5513 + _UNINTERPRETEDOPTION_NAMEPART._serialized_end=5564 + _SOURCECODEINFO._serialized_start=5567 + _SOURCECODEINFO._serialized_end=5780 + _SOURCECODEINFO_LOCATION._serialized_start=5646 + _SOURCECODEINFO_LOCATION._serialized_end=5780 + _GENERATEDCODEINFO._serialized_start=5783 + _GENERATEDCODEINFO._serialized_end=5950 + _GENERATEDCODEINFO_ANNOTATION._serialized_start=5871 + _GENERATEDCODEINFO_ANNOTATION._serialized_end=5950 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/descriptor_pool.py b/openpype/hosts/hiero/vendor/google/protobuf/descriptor_pool.py new file mode 100644 index 0000000000..911372a8b0 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/descriptor_pool.py @@ -0,0 +1,1295 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides DescriptorPool to use as a container for proto2 descriptors. + +The DescriptorPool is used in conjunction with a DescriptorDatabase to maintain +a collection of protocol buffer descriptors for use when dynamically creating +message types at runtime. + +For most applications protocol buffers should be used via modules generated by +the protocol buffer compiler tool. This should only be used when the type of +protocol buffers used in an application or library cannot be predetermined. + +Below is a straightforward example of how to use this class:: + + pool = DescriptorPool() + file_descriptor_protos = [ ...
] + for file_descriptor_proto in file_descriptor_protos: + pool.Add(file_descriptor_proto) + my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType') + +The message descriptor can be used in conjunction with the message_factory +module in order to create a protocol buffer class that can be encoded and +decoded. + +If you want to get a Python class for the specified proto, use the +helper functions inside google.protobuf.message_factory +directly instead of this class. +""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +import collections +import warnings + +from google.protobuf import descriptor +from google.protobuf import descriptor_database +from google.protobuf import text_encoding + + +_USE_C_DESCRIPTORS = descriptor._USE_C_DESCRIPTORS # pylint: disable=protected-access + + +def _Deprecated(func): + """Mark functions as deprecated.""" + + def NewFunc(*args, **kwargs): + warnings.warn( + 'Call to deprecated function %s(). Note: Adding unlinked descriptors ' + 'to descriptor_pool is wrong. Use Add() or AddSerializedFile() ' + 'instead.' % func.__name__, + category=DeprecationWarning) + return func(*args, **kwargs) + NewFunc.__name__ = func.__name__ + NewFunc.__doc__ = func.__doc__ + NewFunc.__dict__.update(func.__dict__) + return NewFunc + + +def _NormalizeFullyQualifiedName(name): + """Remove leading period from fully-qualified type name. + + Due to b/13860351 in descriptor_database.py, types in the root namespace are + generated with a leading period. This function removes that prefix. + + Args: + name (str): The fully-qualified symbol name. + + Returns: + str: The normalized fully-qualified symbol name. + """ + return name.lstrip('.') + + +def _OptionsOrNone(descriptor_proto): + """Returns the value of the field `options`, or None if it is not set.""" + if descriptor_proto.HasField('options'): + return descriptor_proto.options + else: + return None + + +def _IsMessageSetExtension(field): + return (field.is_extension and + field.containing_type.has_options and + field.containing_type.GetOptions().message_set_wire_format and + field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL) + + +class DescriptorPool(object): + """A collection of protobufs dynamically constructed by descriptor protos.""" + + if _USE_C_DESCRIPTORS: + + def __new__(cls, descriptor_db=None): + # pylint: disable=protected-access + return descriptor._message.DescriptorPool(descriptor_db) + + def __init__(self, descriptor_db=None): + """Initializes a Pool of protocol buffers. + + The descriptor_db argument to the constructor is provided to allow + specialized file descriptor proto lookup code to be triggered on demand. An + example would be an implementation which will read and compile a file + specified in a call to FindFileByName() and not require the call to Add() + at all. Results from this database will be cached internally here as well. + + Args: + descriptor_db: A secondary source of file descriptors. + """ + + self._internal_db = descriptor_database.DescriptorDatabase() + self._descriptor_db = descriptor_db + self._descriptors = {} + self._enum_descriptors = {} + self._service_descriptors = {} + self._file_descriptors = {} + self._toplevel_extensions = {} + # TODO(jieluo): Remove _file_desc_by_toplevel_extension after + # maybe year 2020 for compatibility issue (with 3.4.1 only).
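+ # Illustrative sketch (not part of the upstream file): the end-to-end usage
+ # pattern described in the module docstring above, with the pool combined
+ # with message_factory. The proto file name, package and message/field names
+ # below are hypothetical placeholders.
+ #
+ #   from google.protobuf import descriptor_pb2, descriptor_pool, message_factory
+ #
+ #   pool = descriptor_pool.DescriptorPool()
+ #   file_proto = descriptor_pb2.FileDescriptorProto()
+ #   file_proto.name = 'example.proto'
+ #   file_proto.package = 'some.package'
+ #   msg_proto = file_proto.message_type.add(name='MessageType')
+ #   msg_proto.field.add(
+ #       name='id', number=1,
+ #       type=descriptor_pb2.FieldDescriptorProto.TYPE_INT32,
+ #       label=descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL)
+ #   pool.Add(file_proto)
+ #   my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType')
+ #   # message_factory turns the descriptor into a concrete message class;
+ #   # newer protobuf releases may expose GetMessageClass() for the same purpose.
+ #   MessageClass = message_factory.MessageFactory(pool).GetPrototype(my_message_descriptor)
+ #   payload = MessageClass(id=42).SerializeToString()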
+ self._file_desc_by_toplevel_extension = {} + self._top_enum_values = {} + # We store extensions in two two-level mappings: The first key is the + # descriptor of the message being extended, the second key is the extension + # full name or its tag number. + self._extensions_by_name = collections.defaultdict(dict) + self._extensions_by_number = collections.defaultdict(dict) + + def _CheckConflictRegister(self, desc, desc_name, file_name): + """Check if the descriptor name conflicts with another of the same name. + + Args: + desc: Descriptor of a message, enum, service, extension or enum value. + desc_name (str): the full name of desc. + file_name (str): The file name of descriptor. + """ + for register, descriptor_type in [ + (self._descriptors, descriptor.Descriptor), + (self._enum_descriptors, descriptor.EnumDescriptor), + (self._service_descriptors, descriptor.ServiceDescriptor), + (self._toplevel_extensions, descriptor.FieldDescriptor), + (self._top_enum_values, descriptor.EnumValueDescriptor)]: + if desc_name in register: + old_desc = register[desc_name] + if isinstance(old_desc, descriptor.EnumValueDescriptor): + old_file = old_desc.type.file.name + else: + old_file = old_desc.file.name + + if not isinstance(desc, descriptor_type) or ( + old_file != file_name): + error_msg = ('Conflict register for file "' + file_name + + '": ' + desc_name + + ' is already defined in file "' + + old_file + '". Please fix the conflict by adding ' + 'package name on the proto file, or use different ' + 'name for the duplication.') + if isinstance(desc, descriptor.EnumValueDescriptor): + error_msg += ('\nNote: enum values appear as ' + 'siblings of the enum type instead of ' + 'children of it.') + + raise TypeError(error_msg) + + return + + def Add(self, file_desc_proto): + """Adds the FileDescriptorProto and its types to this pool. + + Args: + file_desc_proto (FileDescriptorProto): The file descriptor to add. + """ + + self._internal_db.Add(file_desc_proto) + + def AddSerializedFile(self, serialized_file_desc_proto): + """Adds the FileDescriptorProto and its types to this pool. + + Args: + serialized_file_desc_proto (bytes): A bytes string, serialization of the + :class:`FileDescriptorProto` to add. + + Returns: + FileDescriptor: Descriptor for the added file. + """ + + # pylint: disable=g-import-not-at-top + from google.protobuf import descriptor_pb2 + file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString( + serialized_file_desc_proto) + file_desc = self._ConvertFileProtoToFileDescriptor(file_desc_proto) + file_desc.serialized_pb = serialized_file_desc_proto + return file_desc + + # Add Descriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddDescriptor(self, desc): + self._AddDescriptor(desc) + + # Never call this method. It is for internal usage only. + def _AddDescriptor(self, desc): + """Adds a Descriptor to the pool, non-recursively. + + If the Descriptor contains nested messages or enums, the caller must + explicitly register them. This method also registers the FileDescriptor + associated with the message. + + Args: + desc: A Descriptor. + """ + if not isinstance(desc, descriptor.Descriptor): + raise TypeError('Expected instance of descriptor.Descriptor.') + + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + + self._descriptors[desc.full_name] = desc + self._AddFileDescriptor(desc.file) + + # Add EnumDescriptor to descriptor pool is dreprecated. 
Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddEnumDescriptor(self, enum_desc): + self._AddEnumDescriptor(enum_desc) + + # Never call this method. It is for internal usage only. + def _AddEnumDescriptor(self, enum_desc): + """Adds an EnumDescriptor to the pool. + + This method also registers the FileDescriptor associated with the enum. + + Args: + enum_desc: An EnumDescriptor. + """ + + if not isinstance(enum_desc, descriptor.EnumDescriptor): + raise TypeError('Expected instance of descriptor.EnumDescriptor.') + + file_name = enum_desc.file.name + self._CheckConflictRegister(enum_desc, enum_desc.full_name, file_name) + self._enum_descriptors[enum_desc.full_name] = enum_desc + + # Top enum values need to be indexed. + # Count the number of dots to see whether the enum is toplevel or nested + # in a message. We cannot use enum_desc.containing_type at this stage. + if enum_desc.file.package: + top_level = (enum_desc.full_name.count('.') + - enum_desc.file.package.count('.') == 1) + else: + top_level = enum_desc.full_name.count('.') == 0 + if top_level: + file_name = enum_desc.file.name + package = enum_desc.file.package + for enum_value in enum_desc.values: + full_name = _NormalizeFullyQualifiedName( + '.'.join((package, enum_value.name))) + self._CheckConflictRegister(enum_value, full_name, file_name) + self._top_enum_values[full_name] = enum_value + self._AddFileDescriptor(enum_desc.file) + + # Add ServiceDescriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddServiceDescriptor(self, service_desc): + self._AddServiceDescriptor(service_desc) + + # Never call this method. It is for internal usage only. + def _AddServiceDescriptor(self, service_desc): + """Adds a ServiceDescriptor to the pool. + + Args: + service_desc: A ServiceDescriptor. + """ + + if not isinstance(service_desc, descriptor.ServiceDescriptor): + raise TypeError('Expected instance of descriptor.ServiceDescriptor.') + + self._CheckConflictRegister(service_desc, service_desc.full_name, + service_desc.file.name) + self._service_descriptors[service_desc.full_name] = service_desc + + # Add ExtensionDescriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddExtensionDescriptor(self, extension): + self._AddExtensionDescriptor(extension) + + # Never call this method. It is for internal usage only. + def _AddExtensionDescriptor(self, extension): + """Adds a FieldDescriptor describing an extension to the pool. + + Args: + extension: A FieldDescriptor. + + Raises: + AssertionError: when another extension with the same number extends the + same message. + TypeError: when the specified extension is not a + descriptor.FieldDescriptor. + """ + if not (isinstance(extension, descriptor.FieldDescriptor) and + extension.is_extension): + raise TypeError('Expected an extension descriptor.') + + if extension.extension_scope is None: + self._toplevel_extensions[extension.full_name] = extension + + try: + existing_desc = self._extensions_by_number[ + extension.containing_type][extension.number] + except KeyError: + pass + else: + if extension is not existing_desc: + raise AssertionError( + 'Extensions "%s" and "%s" both try to extend message type "%s" ' + 'with field number %d.' 
% + (extension.full_name, existing_desc.full_name, + extension.containing_type.full_name, extension.number)) + + self._extensions_by_number[extension.containing_type][ + extension.number] = extension + self._extensions_by_name[extension.containing_type][ + extension.full_name] = extension + + # Also register MessageSet extensions with the type name. + if _IsMessageSetExtension(extension): + self._extensions_by_name[extension.containing_type][ + extension.message_type.full_name] = extension + + @_Deprecated + def AddFileDescriptor(self, file_desc): + self._InternalAddFileDescriptor(file_desc) + + # Never call this method. It is for internal usage only. + def _InternalAddFileDescriptor(self, file_desc): + """Adds a FileDescriptor to the pool, non-recursively. + + If the FileDescriptor contains messages or enums, the caller must explicitly + register them. + + Args: + file_desc: A FileDescriptor. + """ + + self._AddFileDescriptor(file_desc) + # TODO(jieluo): This is a temporary solution for FieldDescriptor.file. + # FieldDescriptor.file is added in code gen. Remove this solution after + # maybe 2020 for compatibility reason (with 3.4.1 only). + for extension in file_desc.extensions_by_name.values(): + self._file_desc_by_toplevel_extension[ + extension.full_name] = file_desc + + def _AddFileDescriptor(self, file_desc): + """Adds a FileDescriptor to the pool, non-recursively. + + If the FileDescriptor contains messages or enums, the caller must explicitly + register them. + + Args: + file_desc: A FileDescriptor. + """ + + if not isinstance(file_desc, descriptor.FileDescriptor): + raise TypeError('Expected instance of descriptor.FileDescriptor.') + self._file_descriptors[file_desc.name] = file_desc + + def FindFileByName(self, file_name): + """Gets a FileDescriptor by file name. + + Args: + file_name (str): The path to the file to get a descriptor for. + + Returns: + FileDescriptor: The descriptor for the named file. + + Raises: + KeyError: if the file cannot be found in the pool. + """ + + try: + return self._file_descriptors[file_name] + except KeyError: + pass + + try: + file_proto = self._internal_db.FindFileByName(file_name) + except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileByName(file_name) + else: + raise error + if not file_proto: + raise KeyError('Cannot find a file named %s' % file_name) + return self._ConvertFileProtoToFileDescriptor(file_proto) + + def FindFileContainingSymbol(self, symbol): + """Gets the FileDescriptor for the file containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: Descriptor for the file that contains the specified + symbol. + + Raises: + KeyError: if the file cannot be found in the pool. + """ + + symbol = _NormalizeFullyQualifiedName(symbol) + try: + return self._InternalFindFileContainingSymbol(symbol) + except KeyError: + pass + + try: + # Try fallback database. Build and find again if possible. + self._FindFileContainingSymbolInDb(symbol) + return self._InternalFindFileContainingSymbol(symbol) + except KeyError: + raise KeyError('Cannot find a file containing %s' % symbol) + + def _InternalFindFileContainingSymbol(self, symbol): + """Gets the already built FileDescriptor containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: Descriptor for the file that contains the specified + symbol. + + Raises: + KeyError: if the file cannot be found in the pool. 
+ """ + try: + return self._descriptors[symbol].file + except KeyError: + pass + + try: + return self._enum_descriptors[symbol].file + except KeyError: + pass + + try: + return self._service_descriptors[symbol].file + except KeyError: + pass + + try: + return self._top_enum_values[symbol].type.file + except KeyError: + pass + + try: + return self._file_desc_by_toplevel_extension[symbol] + except KeyError: + pass + + # Try fields, enum values and nested extensions inside a message. + top_name, _, sub_name = symbol.rpartition('.') + try: + message = self.FindMessageTypeByName(top_name) + assert (sub_name in message.extensions_by_name or + sub_name in message.fields_by_name or + sub_name in message.enum_values_by_name) + return message.file + except (KeyError, AssertionError): + raise KeyError('Cannot find a file containing %s' % symbol) + + def FindMessageTypeByName(self, full_name): + """Loads the named descriptor from the pool. + + Args: + full_name (str): The full name of the descriptor to load. + + Returns: + Descriptor: The descriptor for the named type. + + Raises: + KeyError: if the message cannot be found in the pool. + """ + + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._descriptors[full_name] + + def FindEnumTypeByName(self, full_name): + """Loads the named enum descriptor from the pool. + + Args: + full_name (str): The full name of the enum descriptor to load. + + Returns: + EnumDescriptor: The enum descriptor for the named type. + + Raises: + KeyError: if the enum cannot be found in the pool. + """ + + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._enum_descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._enum_descriptors[full_name] + + def FindFieldByName(self, full_name): + """Loads the named field descriptor from the pool. + + Args: + full_name (str): The full name of the field descriptor to load. + + Returns: + FieldDescriptor: The field descriptor for the named field. + + Raises: + KeyError: if the field cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + message_name, _, field_name = full_name.rpartition('.') + message_descriptor = self.FindMessageTypeByName(message_name) + return message_descriptor.fields_by_name[field_name] + + def FindOneofByName(self, full_name): + """Loads the named oneof descriptor from the pool. + + Args: + full_name (str): The full name of the oneof descriptor to load. + + Returns: + OneofDescriptor: The oneof descriptor for the named oneof. + + Raises: + KeyError: if the oneof cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + message_name, _, oneof_name = full_name.rpartition('.') + message_descriptor = self.FindMessageTypeByName(message_name) + return message_descriptor.oneofs_by_name[oneof_name] + + def FindExtensionByName(self, full_name): + """Loads the named extension descriptor from the pool. + + Args: + full_name (str): The full name of the extension descriptor to load. + + Returns: + FieldDescriptor: The field descriptor for the named extension. + + Raises: + KeyError: if the extension cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + try: + # The proto compiler does not give any link between the FileDescriptor + # and top-level extensions unless the FileDescriptorProto is added to + # the DescriptorDatabase, but this can impact memory usage. 
+ # So we registered these extensions by name explicitly. + return self._toplevel_extensions[full_name] + except KeyError: + pass + message_name, _, extension_name = full_name.rpartition('.') + try: + # Most extensions are nested inside a message. + scope = self.FindMessageTypeByName(message_name) + except KeyError: + # Some extensions are defined at file scope. + scope = self._FindFileContainingSymbolInDb(full_name) + return scope.extensions_by_name[extension_name] + + def FindExtensionByNumber(self, message_descriptor, number): + """Gets the extension of the specified message with the specified number. + + Extensions have to be registered to this pool by calling :func:`Add` or + :func:`AddExtensionDescriptor`. + + Args: + message_descriptor (Descriptor): descriptor of the extended message. + number (int): Number of the extension field. + + Returns: + FieldDescriptor: The descriptor for the extension. + + Raises: + KeyError: when no extension with the given number is known for the + specified message. + """ + try: + return self._extensions_by_number[message_descriptor][number] + except KeyError: + self._TryLoadExtensionFromDB(message_descriptor, number) + return self._extensions_by_number[message_descriptor][number] + + def FindAllExtensions(self, message_descriptor): + """Gets all the known extensions of a given message. + + Extensions have to be registered to this pool by build related + :func:`Add` or :func:`AddExtensionDescriptor`. + + Args: + message_descriptor (Descriptor): Descriptor of the extended message. + + Returns: + list[FieldDescriptor]: Field descriptors describing the extensions. + """ + # Fallback to descriptor db if FindAllExtensionNumbers is provided. + if self._descriptor_db and hasattr( + self._descriptor_db, 'FindAllExtensionNumbers'): + full_name = message_descriptor.full_name + all_numbers = self._descriptor_db.FindAllExtensionNumbers(full_name) + for number in all_numbers: + if number in self._extensions_by_number[message_descriptor]: + continue + self._TryLoadExtensionFromDB(message_descriptor, number) + + return list(self._extensions_by_number[message_descriptor].values()) + + def _TryLoadExtensionFromDB(self, message_descriptor, number): + """Try to Load extensions from descriptor db. + + Args: + message_descriptor: descriptor of the extended message. + number: the extension number that needs to be loaded. + """ + if not self._descriptor_db: + return + # Only supported when FindFileContainingExtension is provided. + if not hasattr( + self._descriptor_db, 'FindFileContainingExtension'): + return + + full_name = message_descriptor.full_name + file_proto = self._descriptor_db.FindFileContainingExtension( + full_name, number) + + if file_proto is None: + return + + try: + self._ConvertFileProtoToFileDescriptor(file_proto) + except: + warn_msg = ('Unable to load proto file %s for extension number %d.' % + (file_proto.name, number)) + warnings.warn(warn_msg, RuntimeWarning) + + def FindServiceByName(self, full_name): + """Loads the named service descriptor from the pool. + + Args: + full_name (str): The full name of the service descriptor to load. + + Returns: + ServiceDescriptor: The service descriptor for the named service. + + Raises: + KeyError: if the service cannot be found in the pool. 
+ """ + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._service_descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._service_descriptors[full_name] + + def FindMethodByName(self, full_name): + """Loads the named service method descriptor from the pool. + + Args: + full_name (str): The full name of the method descriptor to load. + + Returns: + MethodDescriptor: The method descriptor for the service method. + + Raises: + KeyError: if the method cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + service_name, _, method_name = full_name.rpartition('.') + service_descriptor = self.FindServiceByName(service_name) + return service_descriptor.methods_by_name[method_name] + + def _FindFileContainingSymbolInDb(self, symbol): + """Finds the file in descriptor DB containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: The file that contains the specified symbol. + + Raises: + KeyError: if the file cannot be found in the descriptor database. + """ + try: + file_proto = self._internal_db.FindFileContainingSymbol(symbol) + except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileContainingSymbol(symbol) + else: + raise error + if not file_proto: + raise KeyError('Cannot find a file containing %s' % symbol) + return self._ConvertFileProtoToFileDescriptor(file_proto) + + def _ConvertFileProtoToFileDescriptor(self, file_proto): + """Creates a FileDescriptor from a proto or returns a cached copy. + + This method also has the side effect of loading all the symbols found in + the file into the appropriate dictionaries in the pool. + + Args: + file_proto: The proto to convert. + + Returns: + A FileDescriptor matching the passed in proto. + """ + if file_proto.name not in self._file_descriptors: + built_deps = list(self._GetDeps(file_proto.dependency)) + direct_deps = [self.FindFileByName(n) for n in file_proto.dependency] + public_deps = [direct_deps[i] for i in file_proto.public_dependency] + + file_descriptor = descriptor.FileDescriptor( + pool=self, + name=file_proto.name, + package=file_proto.package, + syntax=file_proto.syntax, + options=_OptionsOrNone(file_proto), + serialized_pb=file_proto.SerializeToString(), + dependencies=direct_deps, + public_dependencies=public_deps, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + scope = {} + + # This loop extracts all the message and enum types from all the + # dependencies of the file_proto. This is necessary to create the + # scope of available message types when defining the passed in + # file proto. 
+ for dependency in built_deps: + scope.update(self._ExtractSymbols( + dependency.message_types_by_name.values())) + scope.update((_PrefixWithDot(enum.full_name), enum) + for enum in dependency.enum_types_by_name.values()) + + for message_type in file_proto.message_type: + message_desc = self._ConvertMessageDescriptor( + message_type, file_proto.package, file_descriptor, scope, + file_proto.syntax) + file_descriptor.message_types_by_name[message_desc.name] = ( + message_desc) + + for enum_type in file_proto.enum_type: + file_descriptor.enum_types_by_name[enum_type.name] = ( + self._ConvertEnumDescriptor(enum_type, file_proto.package, + file_descriptor, None, scope, True)) + + for index, extension_proto in enumerate(file_proto.extension): + extension_desc = self._MakeFieldDescriptor( + extension_proto, file_proto.package, index, file_descriptor, + is_extension=True) + extension_desc.containing_type = self._GetTypeFromScope( + file_descriptor.package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, + file_descriptor.package, scope) + file_descriptor.extensions_by_name[extension_desc.name] = ( + extension_desc) + self._file_desc_by_toplevel_extension[extension_desc.full_name] = ( + file_descriptor) + + for desc_proto in file_proto.message_type: + self._SetAllFieldTypes(file_proto.package, desc_proto, scope) + + if file_proto.package: + desc_proto_prefix = _PrefixWithDot(file_proto.package) + else: + desc_proto_prefix = '' + + for desc_proto in file_proto.message_type: + desc = self._GetTypeFromScope( + desc_proto_prefix, desc_proto.name, scope) + file_descriptor.message_types_by_name[desc_proto.name] = desc + + for index, service_proto in enumerate(file_proto.service): + file_descriptor.services_by_name[service_proto.name] = ( + self._MakeServiceDescriptor(service_proto, index, scope, + file_proto.package, file_descriptor)) + + self._file_descriptors[file_proto.name] = file_descriptor + + # Add extensions to the pool + file_desc = self._file_descriptors[file_proto.name] + for extension in file_desc.extensions_by_name.values(): + self._AddExtensionDescriptor(extension) + for message_type in file_desc.message_types_by_name.values(): + for extension in message_type.extensions: + self._AddExtensionDescriptor(extension) + + return file_desc + + def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, + scope=None, syntax=None): + """Adds the proto to the pool in the specified package. + + Args: + desc_proto: The descriptor_pb2.DescriptorProto protobuf message. + package: The package the proto should be located in. + file_desc: The file containing this message. + scope: Dict mapping short and full symbols to message and enum types. + syntax: string indicating syntax of the file ("proto2" or "proto3") + + Returns: + The added descriptor. 
+ """ + + if package: + desc_name = '.'.join((package, desc_proto.name)) + else: + desc_name = desc_proto.name + + if file_desc is None: + file_name = None + else: + file_name = file_desc.name + + if scope is None: + scope = {} + + nested = [ + self._ConvertMessageDescriptor( + nested, desc_name, file_desc, scope, syntax) + for nested in desc_proto.nested_type] + enums = [ + self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, + scope, False) + for enum in desc_proto.enum_type] + fields = [self._MakeFieldDescriptor(field, desc_name, index, file_desc) + for index, field in enumerate(desc_proto.field)] + extensions = [ + self._MakeFieldDescriptor(extension, desc_name, index, file_desc, + is_extension=True) + for index, extension in enumerate(desc_proto.extension)] + oneofs = [ + # pylint: disable=g-complex-comprehension + descriptor.OneofDescriptor( + desc.name, + '.'.join((desc_name, desc.name)), + index, + None, + [], + _OptionsOrNone(desc), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + for index, desc in enumerate(desc_proto.oneof_decl) + ] + extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range] + if extension_ranges: + is_extendable = True + else: + is_extendable = False + desc = descriptor.Descriptor( + name=desc_proto.name, + full_name=desc_name, + filename=file_name, + containing_type=None, + fields=fields, + oneofs=oneofs, + nested_types=nested, + enum_types=enums, + extensions=extensions, + options=_OptionsOrNone(desc_proto), + is_extendable=is_extendable, + extension_ranges=extension_ranges, + file=file_desc, + serialized_start=None, + serialized_end=None, + syntax=syntax, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + for nested in desc.nested_types: + nested.containing_type = desc + for enum in desc.enum_types: + enum.containing_type = desc + for field_index, field_desc in enumerate(desc_proto.field): + if field_desc.HasField('oneof_index'): + oneof_index = field_desc.oneof_index + oneofs[oneof_index].fields.append(fields[field_index]) + fields[field_index].containing_oneof = oneofs[oneof_index] + + scope[_PrefixWithDot(desc_name)] = desc + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._descriptors[desc_name] = desc + return desc + + def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None, + containing_type=None, scope=None, top_level=False): + """Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf. + + Args: + enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message. + package: Optional package name for the new message EnumDescriptor. + file_desc: The file containing the enum descriptor. + containing_type: The type containing this enum. + scope: Scope containing available types. + top_level: If True, the enum is a top level symbol. If False, the enum + is defined inside a message. 
+ + Returns: + The added descriptor + """ + + if package: + enum_name = '.'.join((package, enum_proto.name)) + else: + enum_name = enum_proto.name + + if file_desc is None: + file_name = None + else: + file_name = file_desc.name + + values = [self._MakeEnumValueDescriptor(value, index) + for index, value in enumerate(enum_proto.value)] + desc = descriptor.EnumDescriptor(name=enum_proto.name, + full_name=enum_name, + filename=file_name, + file=file_desc, + values=values, + containing_type=containing_type, + options=_OptionsOrNone(enum_proto), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + scope['.%s' % enum_name] = desc + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._enum_descriptors[enum_name] = desc + + # Add top level enum values. + if top_level: + for value in values: + full_name = _NormalizeFullyQualifiedName( + '.'.join((package, value.name))) + self._CheckConflictRegister(value, full_name, file_name) + self._top_enum_values[full_name] = value + + return desc + + def _MakeFieldDescriptor(self, field_proto, message_name, index, + file_desc, is_extension=False): + """Creates a field descriptor from a FieldDescriptorProto. + + For message and enum type fields, this method will do a look up + in the pool for the appropriate descriptor for that type. If it + is unavailable, it will fall back to the _source function to + create it. If this type is still unavailable, construction will + fail. + + Args: + field_proto: The proto describing the field. + message_name: The name of the containing message. + index: Index of the field + file_desc: The file containing the field descriptor. + is_extension: Indication that this field is for an extension. + + Returns: + An initialized FieldDescriptor object + """ + + if message_name: + full_name = '.'.join((message_name, field_proto.name)) + else: + full_name = field_proto.name + + if field_proto.json_name: + json_name = field_proto.json_name + else: + json_name = None + + return descriptor.FieldDescriptor( + name=field_proto.name, + full_name=full_name, + index=index, + number=field_proto.number, + type=field_proto.type, + cpp_type=None, + message_type=None, + enum_type=None, + containing_type=None, + label=field_proto.label, + has_default_value=False, + default_value=None, + is_extension=is_extension, + extension_scope=None, + options=_OptionsOrNone(field_proto), + json_name=json_name, + file=file_desc, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _SetAllFieldTypes(self, package, desc_proto, scope): + """Sets all the descriptor's fields's types. + + This method also sets the containing types on any extensions. + + Args: + package: The current package of desc_proto. + desc_proto: The message descriptor to update. + scope: Enclosing scope of available types. 
+ """ + + package = _PrefixWithDot(package) + + main_desc = self._GetTypeFromScope(package, desc_proto.name, scope) + + if package == '.': + nested_package = _PrefixWithDot(desc_proto.name) + else: + nested_package = '.'.join([package, desc_proto.name]) + + for field_proto, field_desc in zip(desc_proto.field, main_desc.fields): + self._SetFieldType(field_proto, field_desc, nested_package, scope) + + for extension_proto, extension_desc in ( + zip(desc_proto.extension, main_desc.extensions)): + extension_desc.containing_type = self._GetTypeFromScope( + nested_package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, nested_package, scope) + + for nested_type in desc_proto.nested_type: + self._SetAllFieldTypes(nested_package, nested_type, scope) + + def _SetFieldType(self, field_proto, field_desc, package, scope): + """Sets the field's type, cpp_type, message_type and enum_type. + + Args: + field_proto: Data about the field in proto format. + field_desc: The descriptor to modify. + package: The package the field's container is in. + scope: Enclosing scope of available types. + """ + if field_proto.type_name: + desc = self._GetTypeFromScope(package, field_proto.type_name, scope) + else: + desc = None + + if not field_proto.HasField('type'): + if isinstance(desc, descriptor.Descriptor): + field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE + else: + field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM + + field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType( + field_proto.type) + + if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE + or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP): + field_desc.message_type = desc + + if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.enum_type = desc + + if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED: + field_desc.has_default_value = False + field_desc.default_value = [] + elif field_proto.HasField('default_value'): + field_desc.has_default_value = True + if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = float(field_proto.default_value) + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: + field_desc.default_value = field_proto.default_value + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = field_proto.default_value.lower() == 'true' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.default_value = field_desc.enum_type.values_by_name[ + field_proto.default_value].number + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: + field_desc.default_value = text_encoding.CUnescape( + field_proto.default_value) + elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: + field_desc.default_value = None + else: + # All other types are of the "int" type. 
+ field_desc.default_value = int(field_proto.default_value) + else: + field_desc.has_default_value = False + if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = 0.0 + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: + field_desc.default_value = u'' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = False + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.default_value = field_desc.enum_type.values[0].number + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: + field_desc.default_value = b'' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: + field_desc.default_value = None + elif field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP: + field_desc.default_value = None + else: + # All other types are of the "int" type. + field_desc.default_value = 0 + + field_desc.type = field_proto.type + + def _MakeEnumValueDescriptor(self, value_proto, index): + """Creates a enum value descriptor object from a enum value proto. + + Args: + value_proto: The proto describing the enum value. + index: The index of the enum value. + + Returns: + An initialized EnumValueDescriptor object. + """ + + return descriptor.EnumValueDescriptor( + name=value_proto.name, + index=index, + number=value_proto.number, + options=_OptionsOrNone(value_proto), + type=None, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _MakeServiceDescriptor(self, service_proto, service_index, scope, + package, file_desc): + """Make a protobuf ServiceDescriptor given a ServiceDescriptorProto. + + Args: + service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message. + service_index: The index of the service in the File. + scope: Dict mapping short and full symbols to message and enum types. + package: Optional package name for the new message EnumDescriptor. + file_desc: The file containing the service descriptor. + + Returns: + The added descriptor. + """ + + if package: + service_name = '.'.join((package, service_proto.name)) + else: + service_name = service_proto.name + + methods = [self._MakeMethodDescriptor(method_proto, service_name, package, + scope, index) + for index, method_proto in enumerate(service_proto.method)] + desc = descriptor.ServiceDescriptor( + name=service_proto.name, + full_name=service_name, + index=service_index, + methods=methods, + options=_OptionsOrNone(service_proto), + file=file_desc, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._service_descriptors[service_name] = desc + return desc + + def _MakeMethodDescriptor(self, method_proto, service_name, package, scope, + index): + """Creates a method descriptor from a MethodDescriptorProto. + + Args: + method_proto: The proto describing the method. + service_name: The name of the containing service. + package: Optional package name to look up for types. + scope: Scope containing available types. + index: Index of the method in the service. + + Returns: + An initialized MethodDescriptor object. 
+ """ + full_name = '.'.join((service_name, method_proto.name)) + input_type = self._GetTypeFromScope( + package, method_proto.input_type, scope) + output_type = self._GetTypeFromScope( + package, method_proto.output_type, scope) + return descriptor.MethodDescriptor( + name=method_proto.name, + full_name=full_name, + index=index, + containing_service=None, + input_type=input_type, + output_type=output_type, + client_streaming=method_proto.client_streaming, + server_streaming=method_proto.server_streaming, + options=_OptionsOrNone(method_proto), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _ExtractSymbols(self, descriptors): + """Pulls out all the symbols from descriptor protos. + + Args: + descriptors: The messages to extract descriptors from. + Yields: + A two element tuple of the type name and descriptor object. + """ + + for desc in descriptors: + yield (_PrefixWithDot(desc.full_name), desc) + for symbol in self._ExtractSymbols(desc.nested_types): + yield symbol + for enum in desc.enum_types: + yield (_PrefixWithDot(enum.full_name), enum) + + def _GetDeps(self, dependencies, visited=None): + """Recursively finds dependencies for file protos. + + Args: + dependencies: The names of the files being depended on. + visited: The names of files already found. + + Yields: + Each direct and indirect dependency. + """ + + visited = visited or set() + for dependency in dependencies: + if dependency not in visited: + visited.add(dependency) + dep_desc = self.FindFileByName(dependency) + yield dep_desc + public_files = [d.name for d in dep_desc.public_dependencies] + yield from self._GetDeps(public_files, visited) + + def _GetTypeFromScope(self, package, type_name, scope): + """Finds a given type name in the current scope. + + Args: + package: The package the proto should be located in. + type_name: The name of the type to be found in the scope. + scope: Dict mapping short and full symbols to message and enum types. + + Returns: + The descriptor for the requested type. + """ + if type_name not in scope: + components = _PrefixWithDot(package).split('.') + while components: + possible_match = '.'.join(components + [type_name]) + if possible_match in scope: + type_name = possible_match + break + else: + components.pop(-1) + return scope[type_name] + + +def _PrefixWithDot(name): + return name if name.startswith('.') else '.%s' % name + + +if _USE_C_DESCRIPTORS: + # TODO(amauryfa): This pool could be constructed from Python code, when we + # support a flag like 'use_cpp_generated_pool=True'. + # pylint: disable=protected-access + _DEFAULT = descriptor._message.default_pool +else: + _DEFAULT = DescriptorPool() + + +def Default(): + return _DEFAULT diff --git a/openpype/hosts/hiero/vendor/google/protobuf/duration_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/duration_pb2.py new file mode 100644 index 0000000000..a8ecc07bdf --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/duration_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/duration.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x83\x01\n\x13\x63om.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.duration_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\rDurationProtoP\001Z1google.golang.org/protobuf/types/known/durationpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _DURATION._serialized_start=51 + _DURATION._serialized_end=93 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/empty_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/empty_pb2.py new file mode 100644 index 0000000000..0b4d554db3 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/empty_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/empty.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05\x45mptyB}\n\x13\x63om.google.protobufB\nEmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.empty_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\nEmptyProtoP\001Z.google.golang.org/protobuf/types/known/emptypb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _EMPTY._serialized_start=48 + _EMPTY._serialized_end=55 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/field_mask_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/field_mask_pb2.py new file mode 100644 index 0000000000..80a4e96e59 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/field_mask_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/field_mask.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"\x1a\n\tFieldMask\x12\r\n\x05paths\x18\x01 \x03(\tB\x85\x01\n\x13\x63om.google.protobufB\x0e\x46ieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.field_mask_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\016FieldMaskProtoP\001Z2google.golang.org/protobuf/types/known/fieldmaskpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _FIELDMASK._serialized_start=53 + _FIELDMASK._serialized_end=79 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/__init__.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/_parameterized.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/_parameterized.py new file mode 100644 index 0000000000..afdbb78c36 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/_parameterized.py @@ -0,0 +1,443 @@ +#! /usr/bin/env python +# +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Adds support for parameterized tests to Python's unittest TestCase class. + +A parameterized test is a method in a test case that is invoked with different +argument tuples. + +A simple example: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + (1, 2, 3), + (4, 5, 9), + (1, 1, 3)) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + + +Each invocation is a separate test case and properly isolated just +like a normal test method, with its own setUp/tearDown cycle. In the +example above, there are three separate testcases, one of which will +fail due to an assertion error (1 + 1 != 3). + +Parameters for individual test cases can be tuples (with positional parameters) +or dictionaries (with named parameters): + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + {'op1': 1, 'op2': 2, 'result': 3}, + {'op1': 4, 'op2': 5, 'result': 9}, + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + +If a parameterized test fails, the error message will show the +original test name (which is modified internally) and the arguments +for the specific invocation, which are part of the string returned by +the shortDescription() method on test cases. + +The id method of the test, used internally by the unittest framework, +is also modified to show the arguments. To make sure that test names +stay the same across several invocations, object representations like + + >>> class Foo(object): + ... pass + >>> repr(Foo()) + '<__main__.Foo object at 0x23d8610>' + +are turned into '<__main__.Foo>'. For even more descriptive names, +especially in test logs, you can use the named_parameters decorator. In +this case, only tuples are supported, and the first parameters has to +be a string (or an object that returns an apt name when converted via +str()): + + class NamedExample(parameterized.TestCase): + @parameterized.named_parameters( + ('Normal', 'aa', 'aaa', True), + ('EmptyPrefix', '', 'abc', True), + ('BothEmpty', '', '', True)) + def testStartsWith(self, prefix, string, result): + self.assertEqual(result, strings.startswith(prefix)) + +Named tests also have the benefit that they can be run individually +from the command line: + + $ testmodule.py NamedExample.testStartsWithNormal + . + -------------------------------------------------------------------- + Ran 1 test in 0.000s + + OK + +Parameterized Classes +===================== +If invocation arguments are shared across test methods in a single +TestCase class, instead of decorating all test methods +individually, the class itself can be decorated: + + @parameterized.parameters( + (1, 2, 3) + (4, 5, 9)) + class ArithmeticTest(parameterized.TestCase): + def testAdd(self, arg1, arg2, result): + self.assertEqual(arg1 + arg2, result) + + def testSubtract(self, arg2, arg2, result): + self.assertEqual(result - arg1, arg2) + +Inputs from Iterables +===================== +If parameters should be shared across several test cases, or are dynamically +created from other sources, a single non-tuple iterable can be passed into +the decorator. 
This iterable will be used to obtain the test cases: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + c.op1, c.op2, c.result for c in testcases + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + + +Single-Argument Test Methods +============================ +If a test method takes only one argument, the single argument does not need to +be wrapped into a tuple: + + class NegativeNumberExample(parameterized.TestCase): + @parameterized.parameters( + -1, -3, -4, -5 + ) + def testIsNegative(self, arg): + self.assertTrue(IsNegative(arg)) +""" + +__author__ = 'tmarek@google.com (Torsten Marek)' + +import functools +import re +import types +import unittest +import uuid + +try: + # Since python 3 + import collections.abc as collections_abc +except ImportError: + # Won't work after python 3.8 + import collections as collections_abc + +ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>') +_SEPARATOR = uuid.uuid1().hex +_FIRST_ARG = object() +_ARGUMENT_REPR = object() + + +def _CleanRepr(obj): + return ADDR_RE.sub(r'<\1>', repr(obj)) + + +# Helper function formerly from the unittest module, removed from it in +# Python 2.7. +def _StrClass(cls): + return '%s.%s' % (cls.__module__, cls.__name__) + + +def _NonStringIterable(obj): + return (isinstance(obj, collections_abc.Iterable) and + not isinstance(obj, str)) + + +def _FormatParameterList(testcase_params): + if isinstance(testcase_params, collections_abc.Mapping): + return ', '.join('%s=%s' % (argname, _CleanRepr(value)) + for argname, value in testcase_params.items()) + elif _NonStringIterable(testcase_params): + return ', '.join(map(_CleanRepr, testcase_params)) + else: + return _FormatParameterList((testcase_params,)) + + +class _ParameterizedTestIter(object): + """Callable and iterable class for producing new test cases.""" + + def __init__(self, test_method, testcases, naming_type): + """Returns concrete test functions for a test and a list of parameters. + + The naming_type is used to determine the name of the concrete + functions as reported by the unittest framework. If naming_type is + _FIRST_ARG, the testcases must be tuples, and the first element must + have a string representation that is a valid Python identifier. + + Args: + test_method: The decorated test method. + testcases: (list of tuple/dict) A list of parameter + tuples/dicts for individual test invocations. + naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR. + """ + self._test_method = test_method + self.testcases = testcases + self._naming_type = naming_type + + def __call__(self, *args, **kwargs): + raise RuntimeError('You appear to be running a parameterized test case ' + 'without having inherited from parameterized.' + 'TestCase. This is bad because none of ' + 'your test cases are actually being run.') + + def __iter__(self): + test_method = self._test_method + naming_type = self._naming_type + + def MakeBoundParamTest(testcase_params): + @functools.wraps(test_method) + def BoundParamTest(self): + if isinstance(testcase_params, collections_abc.Mapping): + test_method(self, **testcase_params) + elif _NonStringIterable(testcase_params): + test_method(self, *testcase_params) + else: + test_method(self, testcase_params) + + if naming_type is _FIRST_ARG: + # Signal the metaclass that the name of the test function is unique + # and descriptive. 
+ BoundParamTest.__x_use_name__ = True + BoundParamTest.__name__ += str(testcase_params[0]) + testcase_params = testcase_params[1:] + elif naming_type is _ARGUMENT_REPR: + # __x_extra_id__ is used to pass naming information to the __new__ + # method of TestGeneratorMetaclass. + # The metaclass will make sure to create a unique, but nondescriptive + # name for this test. + BoundParamTest.__x_extra_id__ = '(%s)' % ( + _FormatParameterList(testcase_params),) + else: + raise RuntimeError('%s is not a valid naming type.' % (naming_type,)) + + BoundParamTest.__doc__ = '%s(%s)' % ( + BoundParamTest.__name__, _FormatParameterList(testcase_params)) + if test_method.__doc__: + BoundParamTest.__doc__ += '\n%s' % (test_method.__doc__,) + return BoundParamTest + return (MakeBoundParamTest(c) for c in self.testcases) + + +def _IsSingletonList(testcases): + """True iff testcases contains only a single non-tuple element.""" + return len(testcases) == 1 and not isinstance(testcases[0], tuple) + + +def _ModifyClass(class_object, testcases, naming_type): + assert not getattr(class_object, '_id_suffix', None), ( + 'Cannot add parameters to %s,' + ' which already has parameterized methods.' % (class_object,)) + class_object._id_suffix = id_suffix = {} + # We change the size of __dict__ while we iterate over it, + # which Python 3.x will complain about, so use copy(). + for name, obj in class_object.__dict__.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) + and isinstance(obj, types.FunctionType)): + delattr(class_object, name) + methods = {} + _UpdateClassDictForParamTestCase( + methods, id_suffix, name, + _ParameterizedTestIter(obj, testcases, naming_type)) + for name, meth in methods.items(): + setattr(class_object, name, meth) + + +def _ParameterDecorator(naming_type, testcases): + """Implementation of the parameterization decorators. + + Args: + naming_type: The naming type. + testcases: Testcase parameters. + + Returns: + A function for modifying the decorated object. + """ + def _Apply(obj): + if isinstance(obj, type): + _ModifyClass( + obj, + list(testcases) if not isinstance(testcases, collections_abc.Sequence) + else testcases, + naming_type) + return obj + else: + return _ParameterizedTestIter(obj, testcases, naming_type) + + if _IsSingletonList(testcases): + assert _NonStringIterable(testcases[0]), ( + 'Single parameter argument must be a non-string iterable') + testcases = testcases[0] + + return _Apply + + +def parameters(*testcases): # pylint: disable=invalid-name + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. + Args: + *testcases: Parameters for the decorated method, either a single + iterable, or a list of tuples/dicts/objects (for tests + with only one argument). + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _ParameterDecorator(_ARGUMENT_REPR, testcases) + + +def named_parameters(*testcases): # pylint: disable=invalid-name + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. The first element of + each parameter tuple should be a string and will be appended to the + name of the test method. + + Args: + *testcases: Parameters for the decorated method, either a single + iterable, or a list of tuples. + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _ParameterDecorator(_FIRST_ARG, testcases) + + +class TestGeneratorMetaclass(type): + """Metaclass for test cases with test generators. 
+ + A test generator is an iterable in a testcase that produces callables. These + callables must be single-argument methods. These methods are injected into + the class namespace and the original iterable is removed. If the name of the + iterable conforms to the test pattern, the injected methods will be picked + up as tests by the unittest framework. + + In general, it is supposed to be used in conjunction with the + parameters decorator. + """ + + def __new__(mcs, class_name, bases, dct): + dct['_id_suffix'] = id_suffix = {} + for name, obj in dct.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) and + _NonStringIterable(obj)): + iterator = iter(obj) + dct.pop(name) + _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator) + + return type.__new__(mcs, class_name, bases, dct) + + +def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator): + """Adds individual test cases to a dictionary. + + Args: + dct: The target dictionary. + id_suffix: The dictionary for mapping names to test IDs. + name: The original name of the test case. + iterator: The iterator generating the individual test cases. + """ + for idx, func in enumerate(iterator): + assert callable(func), 'Test generators must yield callables, got %r' % ( + func,) + if getattr(func, '__x_use_name__', False): + new_name = func.__name__ + else: + new_name = '%s%s%d' % (name, _SEPARATOR, idx) + assert new_name not in dct, ( + 'Name of parameterized test case "%s" not unique' % (new_name,)) + dct[new_name] = func + id_suffix[new_name] = getattr(func, '__x_extra_id__', '') + + +class TestCase(unittest.TestCase, metaclass=TestGeneratorMetaclass): + """Base class for test cases using the parameters decorator.""" + + def _OriginalName(self): + return self._testMethodName.split(_SEPARATOR)[0] + + def __str__(self): + return '%s (%s)' % (self._OriginalName(), _StrClass(self.__class__)) + + def id(self): # pylint: disable=invalid-name + """Returns the descriptive ID of the test. + + This is used internally by the unittesting framework to get a name + for the test to be used in reports. + + Returns: + The test id. + """ + return '%s.%s%s' % (_StrClass(self.__class__), + self._OriginalName(), + self._id_suffix.get(self._testMethodName, '')) + + +def CoopTestCase(other_base_class): + """Returns a new base class with a cooperative metaclass base. + + This enables the TestCase to be used in combination + with other base classes that have custom metaclasses, such as + mox.MoxTestBase. + + Only works with metaclasses that do not override type.__new__. + + Example: + + import google3 + import mox + + from google3.testing.pybase import parameterized + + class ExampleTest(parameterized.CoopTestCase(mox.MoxTestBase)): + ... + + Args: + other_base_class: (class) A test case base class. + + Returns: + A new class object. + """ + metaclass = type( + 'CoopMetaclass', + (other_base_class.__metaclass__, + TestGeneratorMetaclass), {}) + return metaclass( + 'CoopTestCase', + (other_base_class, TestCase), {}) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/api_implementation.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/api_implementation.py new file mode 100644 index 0000000000..7fef237670 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/api_implementation.py @@ -0,0 +1,112 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Determine which implementation of the protobuf API is used in this process. +""" + +import os +import sys +import warnings + +try: + # pylint: disable=g-import-not-at-top + from google.protobuf.internal import _api_implementation + # The compile-time constants in the _api_implementation module can be used to + # switch to a certain implementation of the Python API at build time. + _api_version = _api_implementation.api_version +except ImportError: + _api_version = -1 # Unspecified by compiler flags. + +if _api_version == 1: + raise ValueError('api_version=1 is no longer supported.') + + +_default_implementation_type = ('cpp' if _api_version > 0 else 'python') + + +# This environment variable can be used to switch to a certain implementation +# of the Python API, overriding the compile-time constants in the +# _api_implementation module. Right now only 'python' and 'cpp' are valid +# values. Any other value will be ignored. +_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', + _default_implementation_type) + +if _implementation_type != 'python': + _implementation_type = 'cpp' + +if 'PyPy' in sys.version and _implementation_type == 'cpp': + warnings.warn('PyPy does not work yet with cpp protocol buffers. ' + 'Falling back to the python implementation.') + _implementation_type = 'python' + + +# Detect if serialization should be deterministic by default +try: + # The presence of this module in a build allows the proto implementation to + # be upgraded merely via build deps. + # + # NOTE: Merely importing this automatically enables deterministic proto + # serialization for C++ code, but we still need to export it as a boolean so + # that we can do the same for `_implementation_type == 'python'`. + # + # NOTE2: It is possible for C++ code to enable deterministic serialization by + # default _without_ affecting Python code, if the C++ implementation is not in + # use by this module. 
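The selection logic above can be exercised from client code; a small sketch (only the environment variable name and Type() come from this module, the rest is illustrative):

import os

# Must be set before any protobuf module is imported in the process.
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'

from google.protobuf.internal import api_implementation

print(api_implementation.Type())  # 'python' or 'cpp'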
That is intended behavior, so we don't actually expose + # this boolean outside of this module. + # + # pylint: disable=g-import-not-at-top,unused-import + from google.protobuf import enable_deterministic_proto_serialization + _python_deterministic_proto_serialization = True +except ImportError: + _python_deterministic_proto_serialization = False + + +# Usage of this function is discouraged. Clients shouldn't care which +# implementation of the API is in use. Note that there is no guarantee +# that differences between APIs will be maintained. +# Please don't use this function if possible. +def Type(): + return _implementation_type + + +def _SetType(implementation_type): + """Never use! Only for protobuf benchmark.""" + global _implementation_type + _implementation_type = implementation_type + + +# See comment on 'Type' above. +def Version(): + return 2 + + +# For internal use only +def IsPythonDefaultSerializationDeterministic(): + return _python_deterministic_proto_serialization diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/builder.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/builder.py new file mode 100644 index 0000000000..64353ee4af --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/builder.py @@ -0,0 +1,130 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Builds descriptors, message classes and services for generated _pb2.py. + +This file is only called in python generated _pb2.py files. It builds +descriptors, message classes and services that users can directly use +in generated code. 
+""" + +__author__ = 'jieluo@google.com (Jie Luo)' + +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +_sym_db = _symbol_database.Default() + + +def BuildMessageAndEnumDescriptors(file_des, module): + """Builds message and enum descriptors. + + Args: + file_des: FileDescriptor of the .proto file + module: Generated _pb2 module + """ + + def BuildNestedDescriptors(msg_des, prefix): + for (name, nested_msg) in msg_des.nested_types_by_name.items(): + module_name = prefix + name.upper() + module[module_name] = nested_msg + BuildNestedDescriptors(nested_msg, module_name + '_') + for enum_des in msg_des.enum_types: + module[prefix + enum_des.name.upper()] = enum_des + + for (name, msg_des) in file_des.message_types_by_name.items(): + module_name = '_' + name.upper() + module[module_name] = msg_des + BuildNestedDescriptors(msg_des, module_name + '_') + + +def BuildTopDescriptorsAndMessages(file_des, module_name, module): + """Builds top level descriptors and message classes. + + Args: + file_des: FileDescriptor of the .proto file + module_name: str, the name of generated _pb2 module + module: Generated _pb2 module + """ + + def BuildMessage(msg_des): + create_dict = {} + for (name, nested_msg) in msg_des.nested_types_by_name.items(): + create_dict[name] = BuildMessage(nested_msg) + create_dict['DESCRIPTOR'] = msg_des + create_dict['__module__'] = module_name + message_class = _reflection.GeneratedProtocolMessageType( + msg_des.name, (_message.Message,), create_dict) + _sym_db.RegisterMessage(message_class) + return message_class + + # top level enums + for (name, enum_des) in file_des.enum_types_by_name.items(): + module['_' + name.upper()] = enum_des + module[name] = enum_type_wrapper.EnumTypeWrapper(enum_des) + for enum_value in enum_des.values: + module[enum_value.name] = enum_value.number + + # top level extensions + for (name, extension_des) in file_des.extensions_by_name.items(): + module[name.upper() + '_FIELD_NUMBER'] = extension_des.number + module[name] = extension_des + + # services + for (name, service) in file_des.services_by_name.items(): + module['_' + name.upper()] = service + + # Build messages. + for (name, msg_des) in file_des.message_types_by_name.items(): + module[name] = BuildMessage(msg_des) + + +def BuildServices(file_des, module_name, module): + """Builds services classes and services stub class. 
+ + Args: + file_des: FileDescriptor of the .proto file + module_name: str, the name of generated _pb2 module + module: Generated _pb2 module + """ + # pylint: disable=g-import-not-at-top + from google.protobuf import service as _service + from google.protobuf import service_reflection + # pylint: enable=g-import-not-at-top + for (name, service) in file_des.services_by_name.items(): + module[name] = service_reflection.GeneratedServiceType( + name, (_service.Service,), + dict(DESCRIPTOR=service, __module__=module_name)) + stub_name = name + '_Stub' + module[stub_name] = service_reflection.GeneratedServiceStubType( + stub_name, (module[name],), + dict(DESCRIPTOR=service, __module__=module_name)) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/containers.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/containers.py new file mode 100644 index 0000000000..29fbb53d2f --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/containers.py @@ -0,0 +1,710 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains container classes to represent different protocol buffer types. + +This file defines container classes which represent categories of protocol +buffer field types which need extra maintenance. Currently these categories +are: + +- Repeated scalar fields - These are all repeated fields which aren't + composite (e.g. they are of simple types like int32, string, etc). +- Repeated composite fields - Repeated fields which are composite. This + includes groups and nested messages. 
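In user code these containers surface through ordinary field access; a small sketch assuming a hypothetical generated message `example_pb2.Sample` with a repeated int32 field `values` and a repeated message field `items`:

msg = example_pb2.Sample()   # placeholder generated message

# Repeated scalar field: type-checked, list-like.
msg.values.append(3)
msg.values.extend([4, 5])

# Repeated composite field: elements are created in place with add().
item = msg.items.add(name='first')
item.count = 1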
+""" + +import collections.abc +import copy +import pickle +from typing import ( + Any, + Iterable, + Iterator, + List, + MutableMapping, + MutableSequence, + NoReturn, + Optional, + Sequence, + TypeVar, + Union, + overload, +) + + +_T = TypeVar('_T') +_K = TypeVar('_K') +_V = TypeVar('_V') + + +class BaseContainer(Sequence[_T]): + """Base container class.""" + + # Minimizes memory usage and disallows assignment to other attributes. + __slots__ = ['_message_listener', '_values'] + + def __init__(self, message_listener: Any) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The RepeatedScalarFieldContainer will call this object's + Modified() method when it is modified. + """ + self._message_listener = message_listener + self._values = [] + + @overload + def __getitem__(self, key: int) -> _T: + ... + + @overload + def __getitem__(self, key: slice) -> List[_T]: + ... + + def __getitem__(self, key): + """Retrieves item by the specified key.""" + return self._values[key] + + def __len__(self) -> int: + """Returns the number of elements in the container.""" + return len(self._values) + + def __ne__(self, other: Any) -> bool: + """Checks if another instance isn't equal to this one.""" + # The concrete classes should define __eq__. + return not self == other + + __hash__ = None + + def __repr__(self) -> str: + return repr(self._values) + + def sort(self, *args, **kwargs) -> None: + # Continue to support the old sort_function keyword argument. + # This is expected to be a rare occurrence, so use LBYL to avoid + # the overhead of actually catching KeyError. + if 'sort_function' in kwargs: + kwargs['cmp'] = kwargs.pop('sort_function') + self._values.sort(*args, **kwargs) + + def reverse(self) -> None: + self._values.reverse() + + +# TODO(slebedev): Remove this. BaseContainer does *not* conform to +# MutableSequence, only its subclasses do. +collections.abc.MutableSequence.register(BaseContainer) + + +class RepeatedScalarFieldContainer(BaseContainer[_T], MutableSequence[_T]): + """Simple, type-checked, list-like container for holding repeated scalars.""" + + # Disallows assignment to other attributes. + __slots__ = ['_type_checker'] + + def __init__( + self, + message_listener: Any, + type_checker: Any, + ) -> None: + """Args: + + message_listener: A MessageListener implementation. The + RepeatedScalarFieldContainer will call this object's Modified() method + when it is modified. + type_checker: A type_checkers.ValueChecker instance to run on elements + inserted into this container. + """ + super().__init__(message_listener) + self._type_checker = type_checker + + def append(self, value: _T) -> None: + """Appends an item to the list. Similar to list.append().""" + self._values.append(self._type_checker.CheckValue(value)) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def insert(self, key: int, value: _T) -> None: + """Inserts the item at the specified position. Similar to list.insert().""" + self._values.insert(key, self._type_checker.CheckValue(value)) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def extend(self, elem_seq: Iterable[_T]) -> None: + """Extends by appending the given iterable. Similar to list.extend().""" + if elem_seq is None: + return + try: + elem_seq_iter = iter(elem_seq) + except TypeError: + if not elem_seq: + # silently ignore falsy inputs :-/. + # TODO(ptucker): Deprecate this behavior. 
b/18413862 + return + raise + + new_values = [self._type_checker.CheckValue(elem) for elem in elem_seq_iter] + if new_values: + self._values.extend(new_values) + self._message_listener.Modified() + + def MergeFrom( + self, + other: Union['RepeatedScalarFieldContainer[_T]', Iterable[_T]], + ) -> None: + """Appends the contents of another repeated field of the same type to this + one. We do not check the types of the individual fields. + """ + self._values.extend(other) + self._message_listener.Modified() + + def remove(self, elem: _T): + """Removes an item from the list. Similar to list.remove().""" + self._values.remove(elem) + self._message_listener.Modified() + + def pop(self, key: Optional[int] = -1) -> _T: + """Removes and returns an item at a given index. Similar to list.pop().""" + value = self._values[key] + self.__delitem__(key) + return value + + @overload + def __setitem__(self, key: int, value: _T) -> None: + ... + + @overload + def __setitem__(self, key: slice, value: Iterable[_T]) -> None: + ... + + def __setitem__(self, key, value) -> None: + """Sets the item on the specified position.""" + if isinstance(key, slice): + if key.step is not None: + raise ValueError('Extended slices not supported') + self._values[key] = map(self._type_checker.CheckValue, value) + self._message_listener.Modified() + else: + self._values[key] = self._type_checker.CheckValue(value) + self._message_listener.Modified() + + def __delitem__(self, key: Union[int, slice]) -> None: + """Deletes the item at the specified position.""" + del self._values[key] + self._message_listener.Modified() + + def __eq__(self, other: Any) -> bool: + """Compares the current instance with another one.""" + if self is other: + return True + # Special case for the same type which should be common and fast. + if isinstance(other, self.__class__): + return other._values == self._values + # We are presumably comparing against some other sequence type. + return other == self._values + + def __deepcopy__( + self, + unused_memo: Any = None, + ) -> 'RepeatedScalarFieldContainer[_T]': + clone = RepeatedScalarFieldContainer( + copy.deepcopy(self._message_listener), self._type_checker) + clone.MergeFrom(self) + return clone + + def __reduce__(self, **kwargs) -> NoReturn: + raise pickle.PickleError( + "Can't pickle repeated scalar fields, convert to list first") + + +# TODO(slebedev): Constrain T to be a subtype of Message. +class RepeatedCompositeFieldContainer(BaseContainer[_T], MutableSequence[_T]): + """Simple, list-like container for holding repeated composite fields.""" + + # Disallows assignment to other attributes. + __slots__ = ['_message_descriptor'] + + def __init__(self, message_listener: Any, message_descriptor: Any) -> None: + """ + Note that we pass in a descriptor instead of the generated directly, + since at the time we construct a _RepeatedCompositeFieldContainer we + haven't yet necessarily initialized the type that will be contained in the + container. + + Args: + message_listener: A MessageListener implementation. + The RepeatedCompositeFieldContainer will call this object's + Modified() method when it is modified. + message_descriptor: A Descriptor instance describing the protocol type + that should be present in this container. We'll use the + _concrete_class field of this descriptor when the client calls add(). + """ + super().__init__(message_listener) + self._message_descriptor = message_descriptor + + def add(self, **kwargs: Any) -> _T: + """Adds a new element at the end of the list and returns it. 
Keyword + arguments may be used to initialize the element. + """ + new_element = self._message_descriptor._concrete_class(**kwargs) + new_element._SetListener(self._message_listener) + self._values.append(new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + return new_element + + def append(self, value: _T) -> None: + """Appends one element by copying the message.""" + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + new_element.CopyFrom(value) + self._values.append(new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def insert(self, key: int, value: _T) -> None: + """Inserts the item at the specified position by copying.""" + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + new_element.CopyFrom(value) + self._values.insert(key, new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def extend(self, elem_seq: Iterable[_T]) -> None: + """Extends by appending the given sequence of elements of the same type + + as this one, copying each individual message. + """ + message_class = self._message_descriptor._concrete_class + listener = self._message_listener + values = self._values + for message in elem_seq: + new_element = message_class() + new_element._SetListener(listener) + new_element.MergeFrom(message) + values.append(new_element) + listener.Modified() + + def MergeFrom( + self, + other: Union['RepeatedCompositeFieldContainer[_T]', Iterable[_T]], + ) -> None: + """Appends the contents of another repeated field of the same type to this + one, copying each individual message. + """ + self.extend(other) + + def remove(self, elem: _T) -> None: + """Removes an item from the list. Similar to list.remove().""" + self._values.remove(elem) + self._message_listener.Modified() + + def pop(self, key: Optional[int] = -1) -> _T: + """Removes and returns an item at a given index. Similar to list.pop().""" + value = self._values[key] + self.__delitem__(key) + return value + + @overload + def __setitem__(self, key: int, value: _T) -> None: + ... + + @overload + def __setitem__(self, key: slice, value: Iterable[_T]) -> None: + ... + + def __setitem__(self, key, value): + # This method is implemented to make RepeatedCompositeFieldContainer + # structurally compatible with typing.MutableSequence. It is + # otherwise unsupported and will always raise an error. + raise TypeError( + f'{self.__class__.__name__} object does not support item assignment') + + def __delitem__(self, key: Union[int, slice]) -> None: + """Deletes the item at the specified position.""" + del self._values[key] + self._message_listener.Modified() + + def __eq__(self, other: Any) -> bool: + """Compares the current instance with another one.""" + if self is other: + return True + if not isinstance(other, self.__class__): + raise TypeError('Can only compare repeated composite fields against ' + 'other repeated composite fields.') + return self._values == other._values + + +class ScalarMap(MutableMapping[_K, _V]): + """Simple, type-checked, dict-like container for holding repeated scalars.""" + + # Disallows assignment to other attributes. 
+ __slots__ = ['_key_checker', '_value_checker', '_values', '_message_listener', + '_entry_descriptor'] + + def __init__( + self, + message_listener: Any, + key_checker: Any, + value_checker: Any, + entry_descriptor: Any, + ) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The ScalarMap will call this object's Modified() method when it + is modified. + key_checker: A type_checkers.ValueChecker instance to run on keys + inserted into this container. + value_checker: A type_checkers.ValueChecker instance to run on values + inserted into this container. + entry_descriptor: The MessageDescriptor of a map entry: key and value. + """ + self._message_listener = message_listener + self._key_checker = key_checker + self._value_checker = value_checker + self._entry_descriptor = entry_descriptor + self._values = {} + + def __getitem__(self, key: _K) -> _V: + try: + return self._values[key] + except KeyError: + key = self._key_checker.CheckValue(key) + val = self._value_checker.DefaultValue() + self._values[key] = val + return val + + def __contains__(self, item: _K) -> bool: + # We check the key's type to match the strong-typing flavor of the API. + # Also this makes it easier to match the behavior of the C++ implementation. + self._key_checker.CheckValue(item) + return item in self._values + + @overload + def get(self, key: _K) -> Optional[_V]: + ... + + @overload + def get(self, key: _K, default: _T) -> Union[_V, _T]: + ... + + # We need to override this explicitly, because our defaultdict-like behavior + # will make the default implementation (from our base class) always insert + # the key. + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def __setitem__(self, key: _K, value: _V) -> _T: + checked_key = self._key_checker.CheckValue(key) + checked_value = self._value_checker.CheckValue(value) + self._values[checked_key] = checked_value + self._message_listener.Modified() + + def __delitem__(self, key: _K) -> None: + del self._values[key] + self._message_listener.Modified() + + def __len__(self) -> int: + return len(self._values) + + def __iter__(self) -> Iterator[_K]: + return iter(self._values) + + def __repr__(self) -> str: + return repr(self._values) + + def MergeFrom(self, other: 'ScalarMap[_K, _V]') -> None: + self._values.update(other._values) + self._message_listener.Modified() + + def InvalidateIterators(self) -> None: + # It appears that the only way to reliably invalidate iterators to + # self._values is to ensure that its size changes. + original = self._values + self._values = original.copy() + original[None] = None + + # This is defined in the abstract base, but we can do it much more cheaply. + def clear(self) -> None: + self._values.clear() + self._message_listener.Modified() + + def GetEntryClass(self) -> Any: + return self._entry_descriptor._concrete_class + + +class MessageMap(MutableMapping[_K, _V]): + """Simple, type-checked, dict-like container for with submessage values.""" + + # Disallows assignment to other attributes. + __slots__ = ['_key_checker', '_values', '_message_listener', + '_message_descriptor', '_entry_descriptor'] + + def __init__( + self, + message_listener: Any, + message_descriptor: Any, + key_checker: Any, + entry_descriptor: Any, + ) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The ScalarMap will call this object's Modified() method when it + is modified. 
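The __getitem__/get pair above is what gives map fields their defaultdict-like feel; sketched against a hypothetical message with a map<string, int32> field named `counts`:

msg = example_pb2.Sample()        # placeholder generated message

value = msg.counts['missing']     # inserts and returns the default value (0)
assert 'missing' in msg.counts

assert msg.counts.get('other') is None   # get() only peeks, never inserts
assert 'other' not in msg.counts

msg.counts['hits'] = 3            # key and value are type-checked on assignment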
+ key_checker: A type_checkers.ValueChecker instance to run on keys + inserted into this container. + value_checker: A type_checkers.ValueChecker instance to run on values + inserted into this container. + entry_descriptor: The MessageDescriptor of a map entry: key and value. + """ + self._message_listener = message_listener + self._message_descriptor = message_descriptor + self._key_checker = key_checker + self._entry_descriptor = entry_descriptor + self._values = {} + + def __getitem__(self, key: _K) -> _V: + key = self._key_checker.CheckValue(key) + try: + return self._values[key] + except KeyError: + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + self._values[key] = new_element + self._message_listener.Modified() + return new_element + + def get_or_create(self, key: _K) -> _V: + """get_or_create() is an alias for getitem (ie. map[key]). + + Args: + key: The key to get or create in the map. + + This is useful in cases where you want to be explicit that the call is + mutating the map. This can avoid lint errors for statements like this + that otherwise would appear to be pointless statements: + + msg.my_map[key] + """ + return self[key] + + @overload + def get(self, key: _K) -> Optional[_V]: + ... + + @overload + def get(self, key: _K, default: _T) -> Union[_V, _T]: + ... + + # We need to override this explicitly, because our defaultdict-like behavior + # will make the default implementation (from our base class) always insert + # the key. + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def __contains__(self, item: _K) -> bool: + item = self._key_checker.CheckValue(item) + return item in self._values + + def __setitem__(self, key: _K, value: _V) -> NoReturn: + raise ValueError('May not set values directly, call my_map[key].foo = 5') + + def __delitem__(self, key: _K) -> None: + key = self._key_checker.CheckValue(key) + del self._values[key] + self._message_listener.Modified() + + def __len__(self) -> int: + return len(self._values) + + def __iter__(self) -> Iterator[_K]: + return iter(self._values) + + def __repr__(self) -> str: + return repr(self._values) + + def MergeFrom(self, other: 'MessageMap[_K, _V]') -> None: + # pylint: disable=protected-access + for key in other._values: + # According to documentation: "When parsing from the wire or when merging, + # if there are duplicate map keys the last key seen is used". + if key in self: + del self[key] + self[key].CopyFrom(other[key]) + # self._message_listener.Modified() not required here, because + # mutations to submessages already propagate. + + def InvalidateIterators(self) -> None: + # It appears that the only way to reliably invalidate iterators to + # self._values is to ensure that its size changes. + original = self._values + self._values = original.copy() + original[None] = None + + # This is defined in the abstract base, but we can do it much more cheaply. + def clear(self) -> None: + self._values.clear() + self._message_listener.Modified() + + def GetEntryClass(self) -> Any: + return self._entry_descriptor._concrete_class + + +class _UnknownField: + """A parsed unknown field.""" + + # Disallows assignment to other attributes. 
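Message-valued maps follow different mutation rules than ScalarMap: entries can only be mutated in place, never assigned wholesale. A sketch with a hypothetical map<string, Item> field `items_by_name`:

msg = example_pb2.Sample()                 # placeholder generated message

msg.items_by_name['a'].count = 1           # a missing key creates an empty entry
entry = msg.items_by_name.get_or_create('b')
entry.count = 2

try:
  msg.items_by_name['c'] = example_pb2.Item(count=3)
except ValueError:
  pass  # MessageMap.__setitem__ always rejects direct assignment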
+ __slots__ = ['_field_number', '_wire_type', '_data'] + + def __init__(self, field_number, wire_type, data): + self._field_number = field_number + self._wire_type = wire_type + self._data = data + return + + def __lt__(self, other): + # pylint: disable=protected-access + return self._field_number < other._field_number + + def __eq__(self, other): + if self is other: + return True + # pylint: disable=protected-access + return (self._field_number == other._field_number and + self._wire_type == other._wire_type and + self._data == other._data) + + +class UnknownFieldRef: # pylint: disable=missing-class-docstring + + def __init__(self, parent, index): + self._parent = parent + self._index = index + + def _check_valid(self): + if not self._parent: + raise ValueError('UnknownField does not exist. ' + 'The parent message might be cleared.') + if self._index >= len(self._parent): + raise ValueError('UnknownField does not exist. ' + 'The parent message might be cleared.') + + @property + def field_number(self): + self._check_valid() + # pylint: disable=protected-access + return self._parent._internal_get(self._index)._field_number + + @property + def wire_type(self): + self._check_valid() + # pylint: disable=protected-access + return self._parent._internal_get(self._index)._wire_type + + @property + def data(self): + self._check_valid() + # pylint: disable=protected-access + return self._parent._internal_get(self._index)._data + + +class UnknownFieldSet: + """UnknownField container""" + + # Disallows assignment to other attributes. + __slots__ = ['_values'] + + def __init__(self): + self._values = [] + + def __getitem__(self, index): + if self._values is None: + raise ValueError('UnknownFields does not exist. ' + 'The parent message might be cleared.') + size = len(self._values) + if index < 0: + index += size + if index < 0 or index >= size: + raise IndexError('index %d out of range'.index) + + return UnknownFieldRef(self, index) + + def _internal_get(self, index): + return self._values[index] + + def __len__(self): + if self._values is None: + raise ValueError('UnknownFields does not exist. ' + 'The parent message might be cleared.') + return len(self._values) + + def _add(self, field_number, wire_type, data): + unknown_field = _UnknownField(field_number, wire_type, data) + self._values.append(unknown_field) + return unknown_field + + def __iter__(self): + for i in range(len(self)): + yield UnknownFieldRef(self, i) + + def _extend(self, other): + if other is None: + return + # pylint: disable=protected-access + self._values.extend(other._values) + + def __eq__(self, other): + if self is other: + return True + # Sort unknown fields because their order shouldn't + # affect equality test. + values = list(self._values) + if other is None: + return not values + values.sort() + # pylint: disable=protected-access + other_values = sorted(other._values) + return values == other_values + + def _clear(self): + for value in self._values: + # pylint: disable=protected-access + if isinstance(value._data, UnknownFieldSet): + value._data._clear() # pylint: disable=protected-access + self._values = None diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/decoder.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/decoder.py new file mode 100644 index 0000000000..bc1b7b785c --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/decoder.py @@ -0,0 +1,1029 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Code for decoding protocol buffer primitives. + +This code is very similar to encoder.py -- read the docs for that module first. + +A "decoder" is a function with the signature: + Decode(buffer, pos, end, message, field_dict) +The arguments are: + buffer: The string containing the encoded message. + pos: The current position in the string. + end: The position in the string where the current message ends. May be + less than len(buffer) if we're reading a sub-message. + message: The message object into which we're parsing. + field_dict: message._fields (avoids a hashtable lookup). +The decoder reads the field and stores it into field_dict, returning the new +buffer position. A decoder for a repeated field may proactively decode all of +the elements of that field, if they appear consecutively. + +Note that decoders may throw any of the following: + IndexError: Indicates a truncated message. + struct.error: Unpacking of a fixed-width field failed. + message.DecodeError: Other errors. + +Decoders are expected to raise an exception if they are called with pos > end. +This allows callers to be lax about bounds checking: it's fineto read past +"end" as long as you are sure that someone else will notice and throw an +exception later on. + +Something up the call stack is expected to catch IndexError and struct.error +and convert them to message.DecodeError. + +Decoders are constructed using decoder constructors with the signature: + MakeDecoder(field_number, is_repeated, is_packed, key, new_default) +The arguments are: + field_number: The field number of the field we want to decode. + is_repeated: Is the field a repeated field? (bool) + is_packed: Is the field a packed field? (bool) + key: The key to use when looking up the field within field_dict. + (This is actually the FieldDescriptor but nothing in this + file should depend on that.) + new_default: A function which takes a message object as a parameter and + returns a new instance of the default value for this field. 
+ (This is called for repeated fields and sub-messages, when an + instance does not already exist.) + +As with encoders, we define a decoder constructor for every type of field. +Then, for every field of every message class we construct an actual decoder. +That decoder goes into a dict indexed by tag, so when we decode a message +we repeatedly read a tag, look up the corresponding decoder, and invoke it. +""" + +__author__ = 'kenton@google.com (Kenton Varda)' + +import math +import struct + +from google.protobuf.internal import containers +from google.protobuf.internal import encoder +from google.protobuf.internal import wire_format +from google.protobuf import message + + +# This is not for optimization, but rather to avoid conflicts with local +# variables named "message". +_DecodeError = message.DecodeError + + +def _VarintDecoder(mask, result_type): + """Return an encoder for a basic varint value (does not include tag). + + Decoded values will be bitwise-anded with the given mask before being + returned, e.g. to limit them to 32 bits. The returned decoder does not + take the usual "end" parameter -- the caller is expected to do bounds checking + after the fact (often the caller can defer such checking until later). The + decoder returns a (value, new_pos) pair. + """ + + def DecodeVarint(buffer, pos): + result = 0 + shift = 0 + while 1: + b = buffer[pos] + result |= ((b & 0x7f) << shift) + pos += 1 + if not (b & 0x80): + result &= mask + result = result_type(result) + return (result, pos) + shift += 7 + if shift >= 64: + raise _DecodeError('Too many bytes when decoding varint.') + return DecodeVarint + + +def _SignedVarintDecoder(bits, result_type): + """Like _VarintDecoder() but decodes signed values.""" + + signbit = 1 << (bits - 1) + mask = (1 << bits) - 1 + + def DecodeVarint(buffer, pos): + result = 0 + shift = 0 + while 1: + b = buffer[pos] + result |= ((b & 0x7f) << shift) + pos += 1 + if not (b & 0x80): + result &= mask + result = (result ^ signbit) - signbit + result = result_type(result) + return (result, pos) + shift += 7 + if shift >= 64: + raise _DecodeError('Too many bytes when decoding varint.') + return DecodeVarint + +# All 32-bit and 64-bit values are represented as int. +_DecodeVarint = _VarintDecoder((1 << 64) - 1, int) +_DecodeSignedVarint = _SignedVarintDecoder(64, int) + +# Use these versions for values which must be limited to 32 bits. +_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int) +_DecodeSignedVarint32 = _SignedVarintDecoder(32, int) + + +def ReadTag(buffer, pos): + """Read a tag from the memoryview, and return a (tag_bytes, new_pos) tuple. + + We return the raw bytes of the tag rather than decoding them. The raw + bytes can then be used to look up the proper decoder. This effectively allows + us to trade some work that would be done in pure-python (decoding a varint) + for work that is done in C (searching for a byte string in a hash table). + In a low-level language it would be much cheaper to decode the varint and + use that, but not in Python. + + Args: + buffer: memoryview object of the encoded bytes + pos: int of the current position to start from + + Returns: + Tuple[bytes, int] of the tag data and new position. + """ + start = pos + while buffer[pos] & 0x80: + pos += 1 + pos += 1 + + tag_bytes = buffer[start:pos].tobytes() + return tag_bytes, pos + + +# -------------------------------------------------------------------- + + +def _SimpleDecoder(wire_type, decode_value): + """Return a constructor for a decoder for fields of a particular type. 
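The primitive helpers above can be exercised directly on a short byte string; a sketch of reading one varint field by hand (field number 1, wire type 0, value 150 -- the classic b'\x08\x96\x01' example):

data = memoryview(b'\x08\x96\x01')

tag_bytes, pos = ReadTag(data, 0)      # raw tag bytes and new position
value, pos = _DecodeVarint(data, pos)
assert value == 150 and pos == 3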
+ + Args: + wire_type: The field's wire type. + decode_value: A function which decodes an individual value, e.g. + _DecodeVarint() + """ + + def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + if is_packed: + local_DecodeVarint = _DecodeVarint + def DecodePackedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + (endpoint, pos) = local_DecodeVarint(buffer, pos) + endpoint += pos + if endpoint > end: + raise _DecodeError('Truncated message.') + while pos < endpoint: + (element, pos) = decode_value(buffer, pos) + value.append(element) + if pos > endpoint: + del value[-1] # Discard corrupt value. + raise _DecodeError('Packed element was truncated.') + return pos + return DecodePackedField + elif is_repeated: + tag_bytes = encoder.TagBytes(field_number, wire_type) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (element, new_pos) = decode_value(buffer, pos) + value.append(element) + # Predict that the next tag is another copy of the same repeated + # field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos >= end: + # Prediction failed. Return. + if new_pos > end: + raise _DecodeError('Truncated message.') + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (new_value, pos) = decode_value(buffer, pos) + if pos > end: + raise _DecodeError('Truncated message.') + if clear_if_default and not new_value: + field_dict.pop(key, None) + else: + field_dict[key] = new_value + return pos + return DecodeField + + return SpecificDecoder + + +def _ModifiedDecoder(wire_type, decode_value, modify_value): + """Like SimpleDecoder but additionally invokes modify_value on every value + before storing it. Usually modify_value is ZigZagDecode. + """ + + # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but + # not enough to make a significant difference. + + def InnerDecode(buffer, pos): + (result, new_pos) = decode_value(buffer, pos) + return (modify_value(result), new_pos) + return _SimpleDecoder(wire_type, InnerDecode) + + +def _StructPackDecoder(wire_type, format): + """Return a constructor for a decoder for a fixed-width field. + + Args: + wire_type: The field's wire type. + format: The format string to pass to struct.unpack(). + """ + + value_size = struct.calcsize(format) + local_unpack = struct.unpack + + # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but + # not enough to make a significant difference. + + # Note that we expect someone up-stack to catch struct.error and convert + # it to _DecodeError -- this way we don't have to set up exception- + # handling blocks every time we parse one value. + + def InnerDecode(buffer, pos): + new_pos = pos + value_size + result = local_unpack(format, buffer[pos:new_pos])[0] + return (result, new_pos) + return _SimpleDecoder(wire_type, InnerDecode) + + +def _FloatDecoder(): + """Returns a decoder for a float field. + + This code works around a bug in struct.unpack for non-finite 32-bit + floating-point values. + """ + + local_unpack = struct.unpack + + def InnerDecode(buffer, pos): + """Decode serialized float to a float and new position. 
+
+    Args:
+      buffer: memoryview of the serialized bytes
+      pos: int, position in the memory view to start at.
+
+    Returns:
+      Tuple[float, int] of the deserialized float value and new position
+      in the serialized data.
+    """
+    # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
+    # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
+    new_pos = pos + 4
+    float_bytes = buffer[pos:new_pos].tobytes()
+
+    # If this value has all its exponent bits set, then it's non-finite.
+    # In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
+    # To avoid that, we parse it specially.
+    if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'):
+      # If at least one significand bit is set...
+      if float_bytes[0:3] != b'\x00\x00\x80':
+        return (math.nan, new_pos)
+      # If sign bit is set...
+      if float_bytes[3:4] == b'\xFF':
+        return (-math.inf, new_pos)
+      return (math.inf, new_pos)
+
+    # Note that we expect someone up-stack to catch struct.error and convert
+    # it to _DecodeError -- this way we don't have to set up exception-
+    # handling blocks every time we parse one value.
+    result = local_unpack('<f', float_bytes)[0]
+    return (result, new_pos)
+  return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
+
+
+def _DoubleDecoder():
+  """Returns a decoder for a double field.
+
+  This code works around a bug in struct.unpack for not-a-number.
+  """
+
+  local_unpack = struct.unpack
+
+  def InnerDecode(buffer, pos):
+    """Decode serialized double to a double and new position.
+
+    Args:
+      buffer: memoryview of the serialized bytes.
+      pos: int, position in the memory view to start at.
+
+    Returns:
+      Tuple[float, int] of the decoded double value and new position
+      in the serialized data.
+    """
+    # We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
+    # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
+    new_pos = pos + 8
+    double_bytes = buffer[pos:new_pos].tobytes()
+
+    # If this value has all its exponent bits set and at least one significand
+    # bit set, it's not a number. In Python 2.4, struct.unpack will treat it
+    # as inf or -inf. To avoid that, we treat it specially.
+    if ((double_bytes[7:8] in b'\x7F\xFF')
+        and (double_bytes[6:7] >= b'\xF0')
+        and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
+      return (math.nan, new_pos)
+
+    # Note that we expect someone up-stack to catch struct.error and convert
+    # it to _DecodeError -- this way we don't have to set up exception-
+    # handling blocks every time we parse one value.
+    result = local_unpack('<d', double_bytes)[0]
+    return (result, new_pos)
+  return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
+
+
+def EnumDecoder(field_number, is_repeated, is_packed, key, new_default,
+                clear_if_default=False):
+  """Returns a decoder for enum field."""
+  enum_type = key.enum_type
+
+  if is_packed:
+    local_DecodeVarint = _DecodeVarint
+    def DecodePackedField(buffer, pos, end, message, field_dict):
+      """Decode serialized packed enum to its value and a new position.
+
+      Args:
+        buffer: memoryview of the serialized bytes.
+        pos: int, position in the memory view to start at.
+        end: int, end position of serialized data
+        message: Message object to store unknown fields in
+        field_dict: Map[Descriptor, Any] to store decoded values in.
+
+      Returns:
+        int, new position in serialized data.
+      """
+      value = field_dict.get(key)
+      if value is None:
+        value = field_dict.setdefault(key, new_default(message))
+      (endpoint, pos) = local_DecodeVarint(buffer, pos)
+      endpoint += pos
+      if endpoint > end:
+        raise _DecodeError('Truncated message.')
+      while pos < endpoint:
+        value_start_pos = pos
+        (element, pos) = _DecodeSignedVarint32(buffer, pos)
+        # pylint: disable=protected-access
+        if element in enum_type.values_by_number:
+          value.append(element)
+        else:
+          if not message._unknown_fields:
+            message._unknown_fields = []
+          tag_bytes = encoder.TagBytes(field_number,
+                                       wire_format.WIRETYPE_VARINT)
+
+          message._unknown_fields.append(
+              (tag_bytes, buffer[value_start_pos:pos].tobytes()))
+          if message._unknown_field_set is None:
+            message._unknown_field_set = containers.UnknownFieldSet()
+          message._unknown_field_set._add(
+              field_number, wire_format.WIRETYPE_VARINT, element)
+        # pylint: enable=protected-access
+      if pos > endpoint:
+        if element in enum_type.values_by_number:
+          del value[-1]   # Discard corrupt value.
+        else:
+          del message._unknown_fields[-1]
+          # pylint: disable=protected-access
+          del message._unknown_field_set._values[-1]
+          # pylint: enable=protected-access
+        raise _DecodeError('Packed element was truncated.')
+      return pos
+    return DecodePackedField
+  elif is_repeated:
+    tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT)
+    tag_len = len(tag_bytes)
+    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
+      """Decode serialized repeated enum to its value and a new position.
+
+      Args:
+        buffer: memoryview of the serialized bytes.
+        pos: int, position in the memory view to start at.
+        end: int, end position of serialized data
+        message: Message object to store unknown fields in
+        field_dict: Map[Descriptor, Any] to store decoded values in.
+
+      Returns:
+        int, new position in serialized data.
+ """ + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (element, new_pos) = _DecodeSignedVarint32(buffer, pos) + # pylint: disable=protected-access + if element in enum_type.values_by_number: + value.append(element) + else: + if not message._unknown_fields: + message._unknown_fields = [] + message._unknown_fields.append( + (tag_bytes, buffer[pos:new_pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + field_number, wire_format.WIRETYPE_VARINT, element) + # pylint: enable=protected-access + # Predict that the next tag is another copy of the same repeated + # field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos >= end: + # Prediction failed. Return. + if new_pos > end: + raise _DecodeError('Truncated message.') + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + """Decode serialized repeated enum to its value and a new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + value_start_pos = pos + (enum_value, pos) = _DecodeSignedVarint32(buffer, pos) + if pos > end: + raise _DecodeError('Truncated message.') + if clear_if_default and not enum_value: + field_dict.pop(key, None) + return pos + # pylint: disable=protected-access + if enum_value in enum_type.values_by_number: + field_dict[key] = enum_value + else: + if not message._unknown_fields: + message._unknown_fields = [] + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_VARINT) + message._unknown_fields.append( + (tag_bytes, buffer[value_start_pos:pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + field_number, wire_format.WIRETYPE_VARINT, enum_value) + # pylint: enable=protected-access + return pos + return DecodeField + + +# -------------------------------------------------------------------- + + +Int32Decoder = _SimpleDecoder( + wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32) + +Int64Decoder = _SimpleDecoder( + wire_format.WIRETYPE_VARINT, _DecodeSignedVarint) + +UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32) +UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint) + +SInt32Decoder = _ModifiedDecoder( + wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode) +SInt64Decoder = _ModifiedDecoder( + wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode) + +# Note that Python conveniently guarantees that when using the '<' prefix on +# formats, they will also have the same size across all platforms (as opposed +# to without the prefix, where their sizes depend on the C compiler's basic +# type sizes). +Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, ' end: + raise _DecodeError('Truncated string.') + value.append(_ConvertToUnicode(buffer[pos:new_pos])) + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. 
+ return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + if clear_if_default and not size: + field_dict.pop(key, None) + else: + field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos]) + return new_pos + return DecodeField + + +def BytesDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + """Returns a decoder for a bytes field.""" + + local_DecodeVarint = _DecodeVarint + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + value.append(buffer[pos:new_pos].tobytes()) + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + if clear_if_default and not size: + field_dict.pop(key, None) + else: + field_dict[key] = buffer[pos:new_pos].tobytes() + return new_pos + return DecodeField + + +def GroupDecoder(field_number, is_repeated, is_packed, key, new_default): + """Returns a decoder for a group field.""" + + end_tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_END_GROUP) + end_tag_len = len(end_tag_bytes) + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_START_GROUP) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read sub-message. + pos = value.add()._InternalParse(buffer, pos, end) + # Read end tag. + new_pos = pos+end_tag_len + if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: + raise _DecodeError('Missing group end tag.') + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read sub-message. + pos = value._InternalParse(buffer, pos, end) + # Read end tag. 
+ new_pos = pos+end_tag_len + if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: + raise _DecodeError('Missing group end tag.') + return new_pos + return DecodeField + + +def MessageDecoder(field_number, is_repeated, is_packed, key, new_default): + """Returns a decoder for a message field.""" + + local_DecodeVarint = _DecodeVarint + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + if value.add()._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + if value._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it encountered + # an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + return new_pos + return DecodeField + + +# -------------------------------------------------------------------- + +MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP) + +def MessageSetItemDecoder(descriptor): + """Returns a decoder for a MessageSet item. + + The parameter is the message Descriptor. + + The message set message looks like this: + message MessageSet { + repeated group Item = 1 { + required int32 type_id = 2; + required string message = 3; + } + } + """ + + type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT) + message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED) + item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP) + + local_ReadTag = ReadTag + local_DecodeVarint = _DecodeVarint + local_SkipField = SkipField + + def DecodeItem(buffer, pos, end, message, field_dict): + """Decode serialized message set to its value and new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + message_set_item_start = pos + type_id = -1 + message_start = -1 + message_end = -1 + + # Technically, type_id and message can appear in any order, so we need + # a little loop here. 
+ while 1: + (tag_bytes, pos) = local_ReadTag(buffer, pos) + if tag_bytes == type_id_tag_bytes: + (type_id, pos) = local_DecodeVarint(buffer, pos) + elif tag_bytes == message_tag_bytes: + (size, message_start) = local_DecodeVarint(buffer, pos) + pos = message_end = message_start + size + elif tag_bytes == item_end_tag_bytes: + break + else: + pos = SkipField(buffer, pos, end, tag_bytes) + if pos == -1: + raise _DecodeError('Missing group end tag.') + + if pos > end: + raise _DecodeError('Truncated message.') + + if type_id == -1: + raise _DecodeError('MessageSet item missing type_id.') + if message_start == -1: + raise _DecodeError('MessageSet item missing message.') + + extension = message.Extensions._FindExtensionByNumber(type_id) + # pylint: disable=protected-access + if extension is not None: + value = field_dict.get(extension) + if value is None: + message_type = extension.message_type + if not hasattr(message_type, '_concrete_class'): + # pylint: disable=protected-access + message._FACTORY.GetPrototype(message_type) + value = field_dict.setdefault( + extension, message_type._concrete_class()) + if value._InternalParse(buffer, message_start,message_end) != message_end: + # The only reason _InternalParse would return early is if it encountered + # an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + else: + if not message._unknown_fields: + message._unknown_fields = [] + message._unknown_fields.append( + (MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + type_id, + wire_format.WIRETYPE_LENGTH_DELIMITED, + buffer[message_start:message_end].tobytes()) + # pylint: enable=protected-access + + return pos + + return DecodeItem + +# -------------------------------------------------------------------- + +def MapDecoder(field_descriptor, new_default, is_message_map): + """Returns a decoder for a map field.""" + + key = field_descriptor + tag_bytes = encoder.TagBytes(field_descriptor.number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + local_DecodeVarint = _DecodeVarint + # Can't read _concrete_class yet; might not be initialized. + message_type = field_descriptor.message_type + + def DecodeMap(buffer, pos, end, message, field_dict): + submsg = message_type._concrete_class() + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + submsg.Clear() + if submsg._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + + if is_message_map: + value[submsg.key].CopyFrom(submsg.value) + else: + value[submsg.key] = submsg.value + + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + + return DecodeMap + +# -------------------------------------------------------------------- +# Optimization is not as heavy here because calls to SkipField() are rare, +# except for handling end-group tags. + +def _SkipVarint(buffer, pos, end): + """Skip a varint value. 
Returns the new position."""
+  # Previously ord(buffer[pos]) raised IndexError when pos is out of range.
+  # With this code, ord(b'') raises TypeError.  Both are handled in
+  # python_message.py to generate a 'Truncated message' error.
+  while ord(buffer[pos:pos+1].tobytes()) & 0x80:
+    pos += 1
+  pos += 1
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+def _SkipFixed64(buffer, pos, end):
+  """Skip a fixed64 value.  Returns the new position."""
+
+  pos += 8
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+
+def _DecodeFixed64(buffer, pos):
+  """Decode a fixed64."""
+  new_pos = pos + 8
+  return (struct.unpack('<Q', buffer[pos:new_pos])[0], new_pos)
+
+
+def _SkipLengthDelimited(buffer, pos, end):
+  """Skip a length-delimited value.  Returns the new position."""
+
+  (size, pos) = _DecodeVarint(buffer, pos)
+  pos += size
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+
+def _SkipGroup(buffer, pos, end):
+  """Skip sub-group.  Returns the new position."""
+
+  while 1:
+    (tag_bytes, pos) = ReadTag(buffer, pos)
+    new_pos = SkipField(buffer, pos, end, tag_bytes)
+    if new_pos == -1:
+      return pos
+    pos = new_pos
+
+
+def _DecodeUnknownFieldSet(buffer, pos, end_pos=None):
+  """Decode UnknownFieldSet.  Returns the UnknownFieldSet and new position."""
+
+  unknown_field_set = containers.UnknownFieldSet()
+  while end_pos is None or pos < end_pos:
+    (tag_bytes, pos) = ReadTag(buffer, pos)
+    (tag, _) = _DecodeVarint(tag_bytes, 0)
+    field_number, wire_type = wire_format.UnpackTag(tag)
+    if wire_type == wire_format.WIRETYPE_END_GROUP:
+      break
+    (data, pos) = _DecodeUnknownField(buffer, pos, wire_type)
+    # pylint: disable=protected-access
+    unknown_field_set._add(field_number, wire_type, data)
+
+  return (unknown_field_set, pos)
+
+
+def _DecodeUnknownField(buffer, pos, wire_type):
+  """Decode a unknown field.  Returns the UnknownField and new position."""
+
+  if wire_type == wire_format.WIRETYPE_VARINT:
+    (data, pos) = _DecodeVarint(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_FIXED64:
+    (data, pos) = _DecodeFixed64(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_FIXED32:
+    (data, pos) = _DecodeFixed32(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED:
+    (size, pos) = _DecodeVarint(buffer, pos)
+    data = buffer[pos:pos+size].tobytes()
+    pos += size
+  elif wire_type == wire_format.WIRETYPE_START_GROUP:
+    (data, pos) = _DecodeUnknownFieldSet(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_END_GROUP:
+    return (0, -1)
+  else:
+    raise _DecodeError('Wrong wire type in tag.')
+
+  return (data, pos)
+
+
+def _EndGroup(buffer, pos, end):
+  """Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
+
+  return -1
+
+
+def _SkipFixed32(buffer, pos, end):
+  """Skip a fixed32 value.  Returns the new position."""
+
+  pos += 4
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+
+def _DecodeFixed32(buffer, pos):
+  """Decode a fixed32."""
+
+  new_pos = pos + 4
+  return (struct.unpack('<I', buffer[pos:new_pos])[0], new_pos)
+
+
+def _VarintEncoder():
+  """Return an encoder for a basic varint value (does not include tag)."""
+
+  local_int2byte = struct.Struct('>B').pack
+
+  def EncodeVarint(write, value, unused_deterministic=None):
+    bits = value & 0x7f
+    value >>= 7
+    while value:
+      write(local_int2byte(0x80|bits))
+      bits = value & 0x7f
+      value >>= 7
+    return write(local_int2byte(bits))
+
+  return EncodeVarint
+
+
+def _SignedVarintEncoder():
+  """Return an encoder for a basic signed varint value (does not include
+  tag)."""
+
+  local_int2byte = struct.Struct('>B').pack
+
+  def EncodeSignedVarint(write, value, unused_deterministic=None):
+    if value < 0:
+      value += (1 << 64)
+    bits = value & 0x7f
+    value >>= 7
+    while value:
+      write(local_int2byte(0x80|bits))
+      bits = value & 0x7f
+      value >>= 7
+    return write(local_int2byte(bits))
+
+  return EncodeSignedVarint
+
+
+_EncodeVarint = _VarintEncoder()
+_EncodeSignedVarint = _SignedVarintEncoder()
+
+
+def _VarintBytes(value):
+  """Encode the given integer as a varint and return the bytes.  This is only
+  called at startup time so it doesn't need to be fast."""
+
+  pieces = []
+  _EncodeVarint(pieces.append, value, True)
+  return b"".join(pieces)
+
+
+def TagBytes(field_number, wire_type):
+  """Encode the given tag and return the bytes.  Only called at startup."""
+
+  return bytes(_VarintBytes(wire_format.PackTag(field_number, wire_type)))
+
+# --------------------------------------------------------------------
+# As with sizers (see above), we have a number of common encoder
+# implementations.
+
+
+def _SimpleEncoder(wire_type, encode_value, compute_value_size):
+  """Return a constructor for an encoder for fields of a particular type.
+
+  Args:
+      wire_type:  The field's wire type, for encoding tags.
+      encode_value:  A function which encodes an individual value, e.g.
+        _EncodeVarint().
+      compute_value_size:  A function which computes the size of an individual
+        value, e.g. _VarintSize().
+  """
+
+  def SpecificEncoder(field_number, is_repeated, is_packed):
+    if is_packed:
+      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
+      local_EncodeVarint = _EncodeVarint
+      def EncodePackedField(write, value, deterministic):
+        write(tag_bytes)
+        size = 0
+        for element in value:
+          size += compute_value_size(element)
+        local_EncodeVarint(write, size, deterministic)
+        for element in value:
+          encode_value(write, element, deterministic)
+      return EncodePackedField
+    elif is_repeated:
+      tag_bytes = TagBytes(field_number, wire_type)
+      def EncodeRepeatedField(write, value, deterministic):
+        for element in value:
+          write(tag_bytes)
+          encode_value(write, element, deterministic)
+      return EncodeRepeatedField
+    else:
+      tag_bytes = TagBytes(field_number, wire_type)
+      def EncodeField(write, value, deterministic):
+        write(tag_bytes)
+        return encode_value(write, value, deterministic)
+      return EncodeField
+
+  return SpecificEncoder
+
+
+def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
+  """Like SimpleEncoder but additionally invokes modify_value on every value
+  before passing it to encode_value.
Usually modify_value is ZigZagEncode.""" + + def SpecificEncoder(field_number, is_repeated, is_packed): + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + size = 0 + for element in value: + size += compute_value_size(modify_value(element)) + local_EncodeVarint(write, size, deterministic) + for element in value: + encode_value(write, modify_value(element), deterministic) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag_bytes) + encode_value(write, modify_value(element), deterministic) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, deterministic): + write(tag_bytes) + return encode_value(write, modify_value(value), deterministic) + return EncodeField + + return SpecificEncoder + + +def _StructPackEncoder(wire_type, format): + """Return a constructor for an encoder for a fixed-width field. + + Args: + wire_type: The field's wire type, for encoding tags. + format: The format string to pass to struct.pack(). + """ + + value_size = struct.calcsize(format) + + def SpecificEncoder(field_number, is_repeated, is_packed): + local_struct_pack = struct.pack + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + local_EncodeVarint(write, len(value) * value_size, deterministic) + for element in value: + write(local_struct_pack(format, element)) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, unused_deterministic=None): + for element in value: + write(tag_bytes) + write(local_struct_pack(format, element)) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, unused_deterministic=None): + write(tag_bytes) + return write(local_struct_pack(format, value)) + return EncodeField + + return SpecificEncoder + + +def _FloatingPointEncoder(wire_type, format): + """Return a constructor for an encoder for float fields. + + This is like StructPackEncoder, but catches errors that may be due to + passing non-finite floating-point values to struct.pack, and makes a + second attempt to encode those values. + + Args: + wire_type: The field's wire type, for encoding tags. + format: The format string to pass to struct.pack(). + """ + + value_size = struct.calcsize(format) + if value_size == 4: + def EncodeNonFiniteOrRaise(write, value): + # Remember that the serialized form uses little-endian byte order. 
+      if value == _POS_INF:
+        write(b'\x00\x00\x80\x7F')
+      elif value == _NEG_INF:
+        write(b'\x00\x00\x80\xFF')
+      elif value != value:  # NaN
+        write(b'\x00\x00\xC0\x7F')
+      else:
+        raise
+  elif value_size == 8:
+    def EncodeNonFiniteOrRaise(write, value):
+      if value == _POS_INF:
+        write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F')
+      elif value == _NEG_INF:
+        write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF')
+      elif value != value:  # NaN
+        write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F')
+      else:
+        raise
+  else:
+    raise ValueError('Can\'t encode floating-point values that are '
+                     '%d bytes long (only 4 or 8)' % value_size)
+
+  def SpecificEncoder(field_number, is_repeated, is_packed):
+    local_struct_pack = struct.pack
+    if is_packed:
+      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
+      local_EncodeVarint = _EncodeVarint
+      def EncodePackedField(write, value, deterministic):
+        write(tag_bytes)
+        local_EncodeVarint(write, len(value) * value_size, deterministic)
+        for element in value:
+          # This try/except block is going to be faster than any code that
+          # we could write to check whether element is finite.
+          try:
+            write(local_struct_pack(format, element))
+          except SystemError:
+            EncodeNonFiniteOrRaise(write, element)
+      return EncodePackedField
+    elif is_repeated:
+      tag_bytes = TagBytes(field_number, wire_type)
+      def EncodeRepeatedField(write, value, unused_deterministic=None):
+        for element in value:
+          write(tag_bytes)
+          try:
+            write(local_struct_pack(format, element))
+          except SystemError:
+            EncodeNonFiniteOrRaise(write, element)
+      return EncodeRepeatedField
+    else:
+      tag_bytes = TagBytes(field_number, wire_type)
+      def EncodeField(write, value, unused_deterministic=None):
+        write(tag_bytes)
+        try:
+          write(local_struct_pack(format, value))
+        except SystemError:
+          EncodeNonFiniteOrRaise(write, value)
+      return EncodeField
+
+  return SpecificEncoder
+
+
+# ====================================================================
+# Here we declare an encoder constructor for each field type.  These work
+# very similarly to sizer constructors, described earlier.
+
+
+Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
+    wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
+
+UInt32Encoder = UInt64Encoder = _SimpleEncoder(
+    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
+
+SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
+    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
+    wire_format.ZigZagEncode)
+
+# Note that Python conveniently guarantees that when using the '<' prefix on
+# formats, they will also have the same size across all platforms (as opposed
+# to without the prefix, where their sizes depend on the C compiler's basic
+# type sizes).
+Fixed32Encoder  = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
+Fixed64Encoder  = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
+SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
+SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
+FloatEncoder    = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
+DoubleEncoder   = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
+  ValueType = int
+
+  def __init__(self, enum_type):
+    """Inits EnumTypeWrapper with an EnumDescriptor."""
+    self._enum_type = enum_type
+    self.DESCRIPTOR = enum_type  # pylint: disable=invalid-name
+
+  def Name(self, number):  # pylint: disable=invalid-name
+    """Returns a string containing the name of an enum value."""
+    try:
+      return self._enum_type.values_by_number[number].name
+    except KeyError:
+      pass  # fall out to break exception chaining
+
+    if not isinstance(number, int):
+      raise TypeError(
+          'Enum value for {} must be an int, but got {} {!r}.'.format(
+              self._enum_type.name, type(number), number))
+    else:
+      # repr here to handle the odd case when you pass in a boolean.
+ raise ValueError('Enum {} has no name defined for value {!r}'.format( + self._enum_type.name, number)) + + def Value(self, name): # pylint: disable=invalid-name + """Returns the value corresponding to the given enum name.""" + try: + return self._enum_type.values_by_name[name].number + except KeyError: + pass # fall out to break exception chaining + raise ValueError('Enum {} has no value defined for name {!r}'.format( + self._enum_type.name, name)) + + def keys(self): + """Return a list of the string names in the enum. + + Returns: + A list of strs, in the order they were defined in the .proto file. + """ + + return [value_descriptor.name + for value_descriptor in self._enum_type.values] + + def values(self): + """Return a list of the integer values in the enum. + + Returns: + A list of ints, in the order they were defined in the .proto file. + """ + + return [value_descriptor.number + for value_descriptor in self._enum_type.values] + + def items(self): + """Return a list of the (name, value) pairs of the enum. + + Returns: + A list of (str, int) pairs, in the order they were defined + in the .proto file. + """ + return [(value_descriptor.name, value_descriptor.number) + for value_descriptor in self._enum_type.values] + + def __getattr__(self, name): + """Returns the value corresponding to the given enum name.""" + try: + return super( + EnumTypeWrapper, + self).__getattribute__('_enum_type').values_by_name[name].number + except KeyError: + pass # fall out to break exception chaining + raise AttributeError('Enum {} has no value defined for name {!r}'.format( + self._enum_type.name, name)) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/extension_dict.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/extension_dict.py new file mode 100644 index 0000000000..b346cf283e --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/extension_dict.py @@ -0,0 +1,213 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains _ExtensionDict class to represent extensions. +""" + +from google.protobuf.internal import type_checkers +from google.protobuf.descriptor import FieldDescriptor + + +def _VerifyExtensionHandle(message, extension_handle): + """Verify that the given extension handle is valid.""" + + if not isinstance(extension_handle, FieldDescriptor): + raise KeyError('HasExtension() expects an extension handle, got: %s' % + extension_handle) + + if not extension_handle.is_extension: + raise KeyError('"%s" is not an extension.' % extension_handle.full_name) + + if not extension_handle.containing_type: + raise KeyError('"%s" is missing a containing_type.' + % extension_handle.full_name) + + if extension_handle.containing_type is not message.DESCRIPTOR: + raise KeyError('Extension "%s" extends message type "%s", but this ' + 'message is of type "%s".' % + (extension_handle.full_name, + extension_handle.containing_type.full_name, + message.DESCRIPTOR.full_name)) + + +# TODO(robinson): Unify error handling of "unknown extension" crap. +# TODO(robinson): Support iteritems()-style iteration over all +# extensions with the "has" bits turned on? +class _ExtensionDict(object): + + """Dict-like container for Extension fields on proto instances. + + Note that in all cases we expect extension handles to be + FieldDescriptors. + """ + + def __init__(self, extended_message): + """ + Args: + extended_message: Message instance for which we are the Extensions dict. + """ + self._extended_message = extended_message + + def __getitem__(self, extension_handle): + """Returns the current value of the given extension handle.""" + + _VerifyExtensionHandle(self._extended_message, extension_handle) + + result = self._extended_message._fields.get(extension_handle) + if result is not None: + return result + + if extension_handle.label == FieldDescriptor.LABEL_REPEATED: + result = extension_handle._default_constructor(self._extended_message) + elif extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + message_type = extension_handle.message_type + if not hasattr(message_type, '_concrete_class'): + # pylint: disable=protected-access + self._extended_message._FACTORY.GetPrototype(message_type) + assert getattr(extension_handle.message_type, '_concrete_class', None), ( + 'Uninitialized concrete class found for field %r (message type %r)' + % (extension_handle.full_name, + extension_handle.message_type.full_name)) + result = extension_handle.message_type._concrete_class() + try: + result._SetListener(self._extended_message._listener_for_children) + except ReferenceError: + pass + else: + # Singular scalar -- just return the default without inserting into the + # dict. + return extension_handle.default_value + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. 
This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + result = self._extended_message._fields.setdefault( + extension_handle, result) + + return result + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + + my_fields = self._extended_message.ListFields() + other_fields = other._extended_message.ListFields() + + # Get rid of non-extension fields. + my_fields = [field for field in my_fields if field.is_extension] + other_fields = [field for field in other_fields if field.is_extension] + + return my_fields == other_fields + + def __ne__(self, other): + return not self == other + + def __len__(self): + fields = self._extended_message.ListFields() + # Get rid of non-extension fields. + extension_fields = [field for field in fields if field[0].is_extension] + return len(extension_fields) + + def __hash__(self): + raise TypeError('unhashable object') + + # Note that this is only meaningful for non-repeated, scalar extension + # fields. Note also that we may have to call _Modified() when we do + # successfully set a field this way, to set any necessary "has" bits in the + # ancestors of the extended message. + def __setitem__(self, extension_handle, value): + """If extension_handle specifies a non-repeated, scalar extension + field, sets the value of that field. + """ + + _VerifyExtensionHandle(self._extended_message, extension_handle) + + if (extension_handle.label == FieldDescriptor.LABEL_REPEATED or + extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE): + raise TypeError( + 'Cannot assign to extension "%s" because it is a repeated or ' + 'composite type.' % extension_handle.full_name) + + # It's slightly wasteful to lookup the type checker each time, + # but we expect this to be a vanishingly uncommon case anyway. + type_checker = type_checkers.GetTypeChecker(extension_handle) + # pylint: disable=protected-access + self._extended_message._fields[extension_handle] = ( + type_checker.CheckValue(value)) + self._extended_message._Modified() + + def __delitem__(self, extension_handle): + self._extended_message.ClearExtension(extension_handle) + + def _FindExtensionByName(self, name): + """Tries to find a known extension with the specified name. + + Args: + name: Extension full name. + + Returns: + Extension field descriptor. + """ + return self._extended_message._extensions_by_name.get(name, None) + + def _FindExtensionByNumber(self, number): + """Tries to find a known extension with the field number. + + Args: + number: Extension field number. + + Returns: + Extension field descriptor. 
+ """ + return self._extended_message._extensions_by_number.get(number, None) + + def __iter__(self): + # Return a generator over the populated extension fields + return (f[0] for f in self._extended_message.ListFields() + if f[0].is_extension) + + def __contains__(self, extension_handle): + _VerifyExtensionHandle(self._extended_message, extension_handle) + + if extension_handle not in self._extended_message._fields: + return False + + if extension_handle.label == FieldDescriptor.LABEL_REPEATED: + return bool(self._extended_message._fields.get(extension_handle)) + + if extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + value = self._extended_message._fields.get(extension_handle) + # pylint: disable=protected-access + return value is not None and value._is_present_in_parent + + return True diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/message_listener.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/message_listener.py new file mode 100644 index 0000000000..0fc255a774 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/message_listener.py @@ -0,0 +1,78 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Defines a listener interface for observing certain +state transitions on Message objects. + +Also defines a null implementation of this interface. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + + +class MessageListener(object): + + """Listens for modifications made to a message. Meant to be registered via + Message._SetListener(). + + Attributes: + dirty: If True, then calling Modified() would be a no-op. This can be + used to avoid these calls entirely in the common case. + """ + + def Modified(self): + """Called every time the message is modified in such a way that the parent + message may need to be updated. 
This currently means either: + (a) The message was modified for the first time, so the parent message + should henceforth mark the message as present. + (b) The message's cached byte size became dirty -- i.e. the message was + modified for the first time after a previous call to ByteSize(). + Therefore the parent should also mark its byte size as dirty. + Note that (a) implies (b), since new objects start out with a client cached + size (zero). However, we document (a) explicitly because it is important. + + Modified() will *only* be called in response to one of these two events -- + not every time the sub-message is modified. + + Note that if the listener's |dirty| attribute is true, then calling + Modified at the moment would be a no-op, so it can be skipped. Performance- + sensitive callers should check this attribute directly before calling since + it will be true most of the time. + """ + + raise NotImplementedError + + +class NullMessageListener(object): + + """No-op MessageListener implementation.""" + + def Modified(self): + pass diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/message_set_extensions_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/message_set_extensions_pb2.py new file mode 100644 index 0000000000..63651a3f19 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/message_set_extensions_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/internal/message_set_extensions.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n5google/protobuf/internal/message_set_extensions.proto\x12\x18google.protobuf.internal\"\x1e\n\x0eTestMessageSet*\x08\x08\x04\x10\xff\xff\xff\xff\x07:\x02\x08\x01\"\xa5\x01\n\x18TestMessageSetExtension1\x12\t\n\x01i\x18\x0f \x01(\x05\x32~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xab\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension1\"\xa7\x01\n\x18TestMessageSetExtension2\x12\x0b\n\x03str\x18\x19 \x01(\t2~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xca\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension2\"(\n\x18TestMessageSetExtension3\x12\x0c\n\x04text\x18# \x01(\t:\x7f\n\x16message_set_extension3\x12(.google.protobuf.internal.TestMessageSet\x18\xdf\xff\xf6. 
\x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.message_set_extensions_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + TestMessageSet.RegisterExtension(message_set_extension3) + TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION1.extensions_by_name['message_set_extension']) + TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION2.extensions_by_name['message_set_extension']) + + DESCRIPTOR._options = None + _TESTMESSAGESET._options = None + _TESTMESSAGESET._serialized_options = b'\010\001' + _TESTMESSAGESET._serialized_start=83 + _TESTMESSAGESET._serialized_end=113 + _TESTMESSAGESETEXTENSION1._serialized_start=116 + _TESTMESSAGESETEXTENSION1._serialized_end=281 + _TESTMESSAGESETEXTENSION2._serialized_start=284 + _TESTMESSAGESETEXTENSION2._serialized_end=451 + _TESTMESSAGESETEXTENSION3._serialized_start=453 + _TESTMESSAGESETEXTENSION3._serialized_end=493 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/missing_enum_values_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/missing_enum_values_pb2.py new file mode 100644 index 0000000000..5497083197 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/missing_enum_values_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/internal/missing_enum_values.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n2google/protobuf/internal/missing_enum_values.proto\x12\x1fgoogle.protobuf.python.internal\"\xc1\x02\n\x0eTestEnumValues\x12X\n\x14optional_nested_enum\x18\x01 \x01(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnum\x12X\n\x14repeated_nested_enum\x18\x02 \x03(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnum\x12Z\n\x12packed_nested_enum\x18\x03 \x03(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnumB\x02\x10\x01\"\x1f\n\nNestedEnum\x12\x08\n\x04ZERO\x10\x00\x12\x07\n\x03ONE\x10\x01\"\xd3\x02\n\x15TestMissingEnumValues\x12_\n\x14optional_nested_enum\x18\x01 \x01(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnum\x12_\n\x14repeated_nested_enum\x18\x02 \x03(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnum\x12\x61\n\x12packed_nested_enum\x18\x03 \x03(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnumB\x02\x10\x01\"\x15\n\nNestedEnum\x12\x07\n\x03TWO\x10\x02\"\x1b\n\nJustString\x12\r\n\x05\x64ummy\x18\x01 \x02(\t') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.missing_enum_values_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _TESTENUMVALUES.fields_by_name['packed_nested_enum']._options = None + _TESTENUMVALUES.fields_by_name['packed_nested_enum']._serialized_options = b'\020\001' + 
_TESTMISSINGENUMVALUES.fields_by_name['packed_nested_enum']._options = None + _TESTMISSINGENUMVALUES.fields_by_name['packed_nested_enum']._serialized_options = b'\020\001' + _TESTENUMVALUES._serialized_start=88 + _TESTENUMVALUES._serialized_end=409 + _TESTENUMVALUES_NESTEDENUM._serialized_start=378 + _TESTENUMVALUES_NESTEDENUM._serialized_end=409 + _TESTMISSINGENUMVALUES._serialized_start=412 + _TESTMISSINGENUMVALUES._serialized_end=751 + _TESTMISSINGENUMVALUES_NESTEDENUM._serialized_start=730 + _TESTMISSINGENUMVALUES_NESTEDENUM._serialized_end=751 + _JUSTSTRING._serialized_start=753 + _JUSTSTRING._serialized_end=780 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py new file mode 100644 index 0000000000..0953706bac --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/internal/more_extensions_dynamic.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf.internal import more_extensions_pb2 as google_dot_protobuf_dot_internal_dot_more__extensions__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6google/protobuf/internal/more_extensions_dynamic.proto\x12\x18google.protobuf.internal\x1a.google/protobuf/internal/more_extensions.proto\"\x1f\n\x12\x44ynamicMessageType\x12\t\n\x01\x61\x18\x01 \x01(\x05:J\n\x17\x64ynamic_int32_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x64 \x01(\x05:z\n\x19\x64ynamic_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x65 \x01(\x0b\x32,.google.protobuf.internal.DynamicMessageType:\x83\x01\n\"repeated_dynamic_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x66 \x03(\x0b\x32,.google.protobuf.internal.DynamicMessageType') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_extensions_dynamic_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(dynamic_int32_extension) + google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(dynamic_message_extension) + google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(repeated_dynamic_message_extension) + + DESCRIPTOR._options = None + _DYNAMICMESSAGETYPE._serialized_start=132 + _DYNAMICMESSAGETYPE._serialized_end=163 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/more_extensions_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/more_extensions_pb2.py new file mode 100644 index 0000000000..1cfa1b7c8b --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/more_extensions_pb2.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/internal/more_extensions.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.google/protobuf/internal/more_extensions.proto\x12\x18google.protobuf.internal\"\x99\x01\n\x0fTopLevelMessage\x12\x41\n\nsubmessage\x18\x01 \x01(\x0b\x32).google.protobuf.internal.ExtendedMessageB\x02(\x01\x12\x43\n\x0enested_message\x18\x02 \x01(\x0b\x32\'.google.protobuf.internal.NestedMessageB\x02(\x01\"R\n\rNestedMessage\x12\x41\n\nsubmessage\x18\x01 \x01(\x0b\x32).google.protobuf.internal.ExtendedMessageB\x02(\x01\"K\n\x0f\x45xtendedMessage\x12\x17\n\x0eoptional_int32\x18\xe9\x07 \x01(\x05\x12\x18\n\x0frepeated_string\x18\xea\x07 \x03(\t*\x05\x08\x01\x10\xe8\x07\"-\n\x0e\x46oreignMessage\x12\x1b\n\x13\x66oreign_message_int\x18\x01 \x01(\x05:I\n\x16optional_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x01 \x01(\x05:w\n\x1aoptional_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x02 \x01(\x0b\x32(.google.protobuf.internal.ForeignMessage:I\n\x16repeated_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x03 \x03(\x05:w\n\x1arepeated_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x04 \x03(\x0b\x32(.google.protobuf.internal.ForeignMessage') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_extensions_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + ExtendedMessage.RegisterExtension(optional_int_extension) + ExtendedMessage.RegisterExtension(optional_message_extension) + ExtendedMessage.RegisterExtension(repeated_int_extension) + ExtendedMessage.RegisterExtension(repeated_message_extension) + + DESCRIPTOR._options = None + _TOPLEVELMESSAGE.fields_by_name['submessage']._options = None + _TOPLEVELMESSAGE.fields_by_name['submessage']._serialized_options = b'(\001' + _TOPLEVELMESSAGE.fields_by_name['nested_message']._options = None + _TOPLEVELMESSAGE.fields_by_name['nested_message']._serialized_options = b'(\001' + _NESTEDMESSAGE.fields_by_name['submessage']._options = None + _NESTEDMESSAGE.fields_by_name['submessage']._serialized_options = b'(\001' + _TOPLEVELMESSAGE._serialized_start=77 + _TOPLEVELMESSAGE._serialized_end=230 + _NESTEDMESSAGE._serialized_start=232 + _NESTEDMESSAGE._serialized_end=314 + _EXTENDEDMESSAGE._serialized_start=316 + _EXTENDEDMESSAGE._serialized_end=391 + _FOREIGNMESSAGE._serialized_start=393 + _FOREIGNMESSAGE._serialized_end=438 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/more_messages_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/more_messages_pb2.py new file mode 100644 index 0000000000..d7f7115609 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/more_messages_pb2.py @@ -0,0 +1,556 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/internal/more_messages.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n,google/protobuf/internal/more_messages.proto\x12\x18google.protobuf.internal\"h\n\x10OutOfOrderFields\x12\x17\n\x0foptional_sint32\x18\x05 \x01(\x11\x12\x17\n\x0foptional_uint32\x18\x03 \x01(\r\x12\x16\n\x0eoptional_int32\x18\x01 \x01(\x05*\x04\x08\x04\x10\x05*\x04\x08\x02\x10\x03\"\xcd\x02\n\x05\x63lass\x12\x1b\n\tint_field\x18\x01 \x01(\x05R\x08json_int\x12\n\n\x02if\x18\x02 \x01(\x05\x12(\n\x02\x61s\x18\x03 \x01(\x0e\x32\x1c.google.protobuf.internal.is\x12\x30\n\nenum_field\x18\x04 \x01(\x0e\x32\x1c.google.protobuf.internal.is\x12>\n\x11nested_enum_field\x18\x05 \x01(\x0e\x32#.google.protobuf.internal.class.for\x12;\n\x0enested_message\x18\x06 \x01(\x0b\x32#.google.protobuf.internal.class.try\x1a\x1c\n\x03try\x12\r\n\x05\x66ield\x18\x01 \x01(\x05*\x06\x08\xe7\x07\x10\x90N\"\x1c\n\x03\x66or\x12\x0b\n\x07\x64\x65\x66\x61ult\x10\x00\x12\x08\n\x04True\x10\x01*\x06\x08\xe7\x07\x10\x90N\"?\n\x0b\x45xtendClass20\n\x06return\x12\x1f.google.protobuf.internal.class\x18\xea\x07 \x01(\x05\"~\n\x0fTestFullKeyword\x12:\n\x06\x66ield1\x18\x01 \x01(\x0b\x32*.google.protobuf.internal.OutOfOrderFields\x12/\n\x06\x66ield2\x18\x02 \x01(\x0b\x32\x1f.google.protobuf.internal.class\"\xa5\x0f\n\x11LotsNestedMessage\x1a\x04\n\x02\x42\x30\x1a\x04\n\x02\x42\x31\x1a\x04\n\x02\x42\x32\x1a\x04\n\x02\x42\x33\x1a\x04\n\x02\x42\x34\x1a\x04\n\x02\x42\x35\x1a\x04\n\x02\x42\x36\x1a\x04\n\x02\x42\x37\x1a\x04\n\x02\x42\x38\x1a\x04\n\x02\x42\x39\x1a\x05\n\x03\x42\x31\x30\x1a\x05\n\x03\x42\x31\x31\x1a\x05\n\x03\x42\x31\x32\x1a\x05\n\x03\x42\x31\x33\x1a\x05\n\x03\x42\x31\x34\x1a\x05\n\x03\x42\x31\x35\x1a\x05\n\x03\x42\x31\x36\x1a\x05\n\x03\x42\x31\x37\x1a\x05\n\x03\x42\x31\x38\x1a\x05\n\x03\x42\x31\x39\x1a\x05\n\x03\x42\x32\x30\x1a\x05\n\x03\x42\x32\x31\x1a\x05\n\x03\x42\x32\x32\x1a\x05\n\x03\x42\x32\x33\x1a\x05\n\x03\x42\x32\x34\x1a\x05\n\x03\x42\x32\x35\x1a\x05\n\x03\x42\x32\x36\x1a\x05\n\x03\x42\x32\x37\x1a\x05\n\x03\x42\x32\x38\x1a\x05\n\x03\x42\x32\x39\x1a\x05\n\x03\x42\x33\x30\x1a\x05\n\x03\x42\x33\x31\x1a\x05\n\x03\x42\x33\x32\x1a\x05\n\x03\x42\x33\x33\x1a\x05\n\x03\x42\x33\x34\x1a\x05\n\x03\x42\x33\x35\x1a\x05\n\x03\x42\x33\x36\x1a\x05\n\x03\x42\x33\x37\x1a\x05\n\x03\x42\x33\x38\x1a\x05\n\x03\x42\x33\x39\x1a\x05\n\x03\x42\x34\x30\x1a\x05\n\x03\x42\x34\x31\x1a\x05\n\x03\x42\x34\x32\x1a\x05\n\x03\x42\x34\x33\x1a\x05\n\x03\x42\x34\x34\x1a\x05\n\x03\x42\x34\x35\x1a\x05\n\x03\x42\x34\x36\x1a\x05\n\x03\x42\x34\x37\x1a\x05\n\x03\x42\x34\x38\x1a\x05\n\x03\x42\x34\x39\x1a\x05\n\x03\x42\x35\x30\x1a\x05\n\x03\x42\x35\x31\x1a\x05\n\x03\x42\x35\x32\x1a\x05\n\x03\x42\x35\x33\x1a\x05\n\x03\x42\x35\x34\x1a\x05\n\x03\x42\x35\x35\x1a\x05\n\x03\x42\x35\x36\x1a\x05\n\x03\x42\x35\x37\x1a\x05\n\x03\x42\x35\x38\x1a\x05\n\x03\x42\x35\x39\x1a\x05\n\x03\x42\x36\x30\x1a\x05\n\x03\x42\x36\x31\x1a\x05\n\x03\x42\x36\x32\x1a\x05\n\x03\x42\x36\x33\x1a\x05\n\x03\x42\x36\x34\x1a\x05\n\x03\x42\x36\x35\x1a\x05\n\x03\x42\x36\x36\x1a\x05\n\x03\x42\x36\x37\x1a\x05\n\x03\x42\x36\x38\x1a\x05\n\x03\x42\x36\x39\x1a\x05\n\x03\x42\x37\x30\x1a\x05\n\x03\x42\x37\x31\x1a\x05\n\x03\x42\x37\
x32\x1a\x05\n\x03\x42\x37\x33\x1a\x05\n\x03\x42\x37\x34\x1a\x05\n\x03\x42\x37\x35\x1a\x05\n\x03\x42\x37\x36\x1a\x05\n\x03\x42\x37\x37\x1a\x05\n\x03\x42\x37\x38\x1a\x05\n\x03\x42\x37\x39\x1a\x05\n\x03\x42\x38\x30\x1a\x05\n\x03\x42\x38\x31\x1a\x05\n\x03\x42\x38\x32\x1a\x05\n\x03\x42\x38\x33\x1a\x05\n\x03\x42\x38\x34\x1a\x05\n\x03\x42\x38\x35\x1a\x05\n\x03\x42\x38\x36\x1a\x05\n\x03\x42\x38\x37\x1a\x05\n\x03\x42\x38\x38\x1a\x05\n\x03\x42\x38\x39\x1a\x05\n\x03\x42\x39\x30\x1a\x05\n\x03\x42\x39\x31\x1a\x05\n\x03\x42\x39\x32\x1a\x05\n\x03\x42\x39\x33\x1a\x05\n\x03\x42\x39\x34\x1a\x05\n\x03\x42\x39\x35\x1a\x05\n\x03\x42\x39\x36\x1a\x05\n\x03\x42\x39\x37\x1a\x05\n\x03\x42\x39\x38\x1a\x05\n\x03\x42\x39\x39\x1a\x06\n\x04\x42\x31\x30\x30\x1a\x06\n\x04\x42\x31\x30\x31\x1a\x06\n\x04\x42\x31\x30\x32\x1a\x06\n\x04\x42\x31\x30\x33\x1a\x06\n\x04\x42\x31\x30\x34\x1a\x06\n\x04\x42\x31\x30\x35\x1a\x06\n\x04\x42\x31\x30\x36\x1a\x06\n\x04\x42\x31\x30\x37\x1a\x06\n\x04\x42\x31\x30\x38\x1a\x06\n\x04\x42\x31\x30\x39\x1a\x06\n\x04\x42\x31\x31\x30\x1a\x06\n\x04\x42\x31\x31\x31\x1a\x06\n\x04\x42\x31\x31\x32\x1a\x06\n\x04\x42\x31\x31\x33\x1a\x06\n\x04\x42\x31\x31\x34\x1a\x06\n\x04\x42\x31\x31\x35\x1a\x06\n\x04\x42\x31\x31\x36\x1a\x06\n\x04\x42\x31\x31\x37\x1a\x06\n\x04\x42\x31\x31\x38\x1a\x06\n\x04\x42\x31\x31\x39\x1a\x06\n\x04\x42\x31\x32\x30\x1a\x06\n\x04\x42\x31\x32\x31\x1a\x06\n\x04\x42\x31\x32\x32\x1a\x06\n\x04\x42\x31\x32\x33\x1a\x06\n\x04\x42\x31\x32\x34\x1a\x06\n\x04\x42\x31\x32\x35\x1a\x06\n\x04\x42\x31\x32\x36\x1a\x06\n\x04\x42\x31\x32\x37\x1a\x06\n\x04\x42\x31\x32\x38\x1a\x06\n\x04\x42\x31\x32\x39\x1a\x06\n\x04\x42\x31\x33\x30\x1a\x06\n\x04\x42\x31\x33\x31\x1a\x06\n\x04\x42\x31\x33\x32\x1a\x06\n\x04\x42\x31\x33\x33\x1a\x06\n\x04\x42\x31\x33\x34\x1a\x06\n\x04\x42\x31\x33\x35\x1a\x06\n\x04\x42\x31\x33\x36\x1a\x06\n\x04\x42\x31\x33\x37\x1a\x06\n\x04\x42\x31\x33\x38\x1a\x06\n\x04\x42\x31\x33\x39\x1a\x06\n\x04\x42\x31\x34\x30\x1a\x06\n\x04\x42\x31\x34\x31\x1a\x06\n\x04\x42\x31\x34\x32\x1a\x06\n\x04\x42\x31\x34\x33\x1a\x06\n\x04\x42\x31\x34\x34\x1a\x06\n\x04\x42\x31\x34\x35\x1a\x06\n\x04\x42\x31\x34\x36\x1a\x06\n\x04\x42\x31\x34\x37\x1a\x06\n\x04\x42\x31\x34\x38\x1a\x06\n\x04\x42\x31\x34\x39\x1a\x06\n\x04\x42\x31\x35\x30\x1a\x06\n\x04\x42\x31\x35\x31\x1a\x06\n\x04\x42\x31\x35\x32\x1a\x06\n\x04\x42\x31\x35\x33\x1a\x06\n\x04\x42\x31\x35\x34\x1a\x06\n\x04\x42\x31\x35\x35\x1a\x06\n\x04\x42\x31\x35\x36\x1a\x06\n\x04\x42\x31\x35\x37\x1a\x06\n\x04\x42\x31\x35\x38\x1a\x06\n\x04\x42\x31\x35\x39\x1a\x06\n\x04\x42\x31\x36\x30\x1a\x06\n\x04\x42\x31\x36\x31\x1a\x06\n\x04\x42\x31\x36\x32\x1a\x06\n\x04\x42\x31\x36\x33\x1a\x06\n\x04\x42\x31\x36\x34\x1a\x06\n\x04\x42\x31\x36\x35\x1a\x06\n\x04\x42\x31\x36\x36\x1a\x06\n\x04\x42\x31\x36\x37\x1a\x06\n\x04\x42\x31\x36\x38\x1a\x06\n\x04\x42\x31\x36\x39\x1a\x06\n\x04\x42\x31\x37\x30\x1a\x06\n\x04\x42\x31\x37\x31\x1a\x06\n\x04\x42\x31\x37\x32\x1a\x06\n\x04\x42\x31\x37\x33\x1a\x06\n\x04\x42\x31\x37\x34\x1a\x06\n\x04\x42\x31\x37\x35\x1a\x06\n\x04\x42\x31\x37\x36\x1a\x06\n\x04\x42\x31\x37\x37\x1a\x06\n\x04\x42\x31\x37\x38\x1a\x06\n\x04\x42\x31\x37\x39\x1a\x06\n\x04\x42\x31\x38\x30\x1a\x06\n\x04\x42\x31\x38\x31\x1a\x06\n\x04\x42\x31\x38\x32\x1a\x06\n\x04\x42\x31\x38\x33\x1a\x06\n\x04\x42\x31\x38\x34\x1a\x06\n\x04\x42\x31\x38\x35\x1a\x06\n\x04\x42\x31\x38\x36\x1a\x06\n\x04\x42\x31\x38\x37\x1a\x06\n\x04\x42\x31\x38\x38\x1a\x06\n\x04\x42\x31\x38\x39\x1a\x06\n\x04\x42\x31\x39\x30\x1a\x06\n\x04\x42\x31\x39\x31\x1a\x06\n\x04\x42\x31\x39\x32\x1a\x06\n\x04\x42\x31\x39\x33\x1a\x06\n\x04\x42\x31\x39\x34
\x1a\x06\n\x04\x42\x31\x39\x35\x1a\x06\n\x04\x42\x31\x39\x36\x1a\x06\n\x04\x42\x31\x39\x37\x1a\x06\n\x04\x42\x31\x39\x38\x1a\x06\n\x04\x42\x31\x39\x39\x1a\x06\n\x04\x42\x32\x30\x30\x1a\x06\n\x04\x42\x32\x30\x31\x1a\x06\n\x04\x42\x32\x30\x32\x1a\x06\n\x04\x42\x32\x30\x33\x1a\x06\n\x04\x42\x32\x30\x34\x1a\x06\n\x04\x42\x32\x30\x35\x1a\x06\n\x04\x42\x32\x30\x36\x1a\x06\n\x04\x42\x32\x30\x37\x1a\x06\n\x04\x42\x32\x30\x38\x1a\x06\n\x04\x42\x32\x30\x39\x1a\x06\n\x04\x42\x32\x31\x30\x1a\x06\n\x04\x42\x32\x31\x31\x1a\x06\n\x04\x42\x32\x31\x32\x1a\x06\n\x04\x42\x32\x31\x33\x1a\x06\n\x04\x42\x32\x31\x34\x1a\x06\n\x04\x42\x32\x31\x35\x1a\x06\n\x04\x42\x32\x31\x36\x1a\x06\n\x04\x42\x32\x31\x37\x1a\x06\n\x04\x42\x32\x31\x38\x1a\x06\n\x04\x42\x32\x31\x39\x1a\x06\n\x04\x42\x32\x32\x30\x1a\x06\n\x04\x42\x32\x32\x31\x1a\x06\n\x04\x42\x32\x32\x32\x1a\x06\n\x04\x42\x32\x32\x33\x1a\x06\n\x04\x42\x32\x32\x34\x1a\x06\n\x04\x42\x32\x32\x35\x1a\x06\n\x04\x42\x32\x32\x36\x1a\x06\n\x04\x42\x32\x32\x37\x1a\x06\n\x04\x42\x32\x32\x38\x1a\x06\n\x04\x42\x32\x32\x39\x1a\x06\n\x04\x42\x32\x33\x30\x1a\x06\n\x04\x42\x32\x33\x31\x1a\x06\n\x04\x42\x32\x33\x32\x1a\x06\n\x04\x42\x32\x33\x33\x1a\x06\n\x04\x42\x32\x33\x34\x1a\x06\n\x04\x42\x32\x33\x35\x1a\x06\n\x04\x42\x32\x33\x36\x1a\x06\n\x04\x42\x32\x33\x37\x1a\x06\n\x04\x42\x32\x33\x38\x1a\x06\n\x04\x42\x32\x33\x39\x1a\x06\n\x04\x42\x32\x34\x30\x1a\x06\n\x04\x42\x32\x34\x31\x1a\x06\n\x04\x42\x32\x34\x32\x1a\x06\n\x04\x42\x32\x34\x33\x1a\x06\n\x04\x42\x32\x34\x34\x1a\x06\n\x04\x42\x32\x34\x35\x1a\x06\n\x04\x42\x32\x34\x36\x1a\x06\n\x04\x42\x32\x34\x37\x1a\x06\n\x04\x42\x32\x34\x38\x1a\x06\n\x04\x42\x32\x34\x39\x1a\x06\n\x04\x42\x32\x35\x30\x1a\x06\n\x04\x42\x32\x35\x31\x1a\x06\n\x04\x42\x32\x35\x32\x1a\x06\n\x04\x42\x32\x35\x33\x1a\x06\n\x04\x42\x32\x35\x34\x1a\x06\n\x04\x42\x32\x35\x35*\x1b\n\x02is\x12\x0b\n\x07\x64\x65\x66\x61ult\x10\x00\x12\x08\n\x04\x65lse\x10\x01:C\n\x0foptional_uint64\x12*.google.protobuf.internal.OutOfOrderFields\x18\x04 \x01(\x04:B\n\x0eoptional_int64\x12*.google.protobuf.internal.OutOfOrderFields\x18\x02 \x01(\x03:2\n\x08\x63ontinue\x12\x1f.google.protobuf.internal.class\x18\xe9\x07 \x01(\x05:2\n\x04with\x12#.google.protobuf.internal.class.try\x18\xe9\x07 \x01(\x05') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_messages_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + OutOfOrderFields.RegisterExtension(optional_uint64) + OutOfOrderFields.RegisterExtension(optional_int64) + globals()['class'].RegisterExtension(globals()['continue']) + getattr(globals()['class'], 'try').RegisterExtension(globals()['with']) + globals()['class'].RegisterExtension(_EXTENDCLASS.extensions_by_name['return']) + + DESCRIPTOR._options = None + _IS._serialized_start=2669 + _IS._serialized_end=2696 + _OUTOFORDERFIELDS._serialized_start=74 + _OUTOFORDERFIELDS._serialized_end=178 + _CLASS._serialized_start=181 + _CLASS._serialized_end=514 + _CLASS_TRY._serialized_start=448 + _CLASS_TRY._serialized_end=476 + _CLASS_FOR._serialized_start=478 + _CLASS_FOR._serialized_end=506 + _EXTENDCLASS._serialized_start=516 + _EXTENDCLASS._serialized_end=579 + _TESTFULLKEYWORD._serialized_start=581 + _TESTFULLKEYWORD._serialized_end=707 + _LOTSNESTEDMESSAGE._serialized_start=710 + _LOTSNESTEDMESSAGE._serialized_end=2667 + _LOTSNESTEDMESSAGE_B0._serialized_start=731 + _LOTSNESTEDMESSAGE_B0._serialized_end=735 + _LOTSNESTEDMESSAGE_B1._serialized_start=737 + 
_LOTSNESTEDMESSAGE_B1._serialized_end=741 + _LOTSNESTEDMESSAGE_B2._serialized_start=743 + _LOTSNESTEDMESSAGE_B2._serialized_end=747 + _LOTSNESTEDMESSAGE_B3._serialized_start=749 + _LOTSNESTEDMESSAGE_B3._serialized_end=753 + _LOTSNESTEDMESSAGE_B4._serialized_start=755 + _LOTSNESTEDMESSAGE_B4._serialized_end=759 + _LOTSNESTEDMESSAGE_B5._serialized_start=761 + _LOTSNESTEDMESSAGE_B5._serialized_end=765 + _LOTSNESTEDMESSAGE_B6._serialized_start=767 + _LOTSNESTEDMESSAGE_B6._serialized_end=771 + _LOTSNESTEDMESSAGE_B7._serialized_start=773 + _LOTSNESTEDMESSAGE_B7._serialized_end=777 + _LOTSNESTEDMESSAGE_B8._serialized_start=779 + _LOTSNESTEDMESSAGE_B8._serialized_end=783 + _LOTSNESTEDMESSAGE_B9._serialized_start=785 + _LOTSNESTEDMESSAGE_B9._serialized_end=789 + _LOTSNESTEDMESSAGE_B10._serialized_start=791 + _LOTSNESTEDMESSAGE_B10._serialized_end=796 + _LOTSNESTEDMESSAGE_B11._serialized_start=798 + _LOTSNESTEDMESSAGE_B11._serialized_end=803 + _LOTSNESTEDMESSAGE_B12._serialized_start=805 + _LOTSNESTEDMESSAGE_B12._serialized_end=810 + _LOTSNESTEDMESSAGE_B13._serialized_start=812 + _LOTSNESTEDMESSAGE_B13._serialized_end=817 + _LOTSNESTEDMESSAGE_B14._serialized_start=819 + _LOTSNESTEDMESSAGE_B14._serialized_end=824 + _LOTSNESTEDMESSAGE_B15._serialized_start=826 + _LOTSNESTEDMESSAGE_B15._serialized_end=831 + _LOTSNESTEDMESSAGE_B16._serialized_start=833 + _LOTSNESTEDMESSAGE_B16._serialized_end=838 + _LOTSNESTEDMESSAGE_B17._serialized_start=840 + _LOTSNESTEDMESSAGE_B17._serialized_end=845 + _LOTSNESTEDMESSAGE_B18._serialized_start=847 + _LOTSNESTEDMESSAGE_B18._serialized_end=852 + _LOTSNESTEDMESSAGE_B19._serialized_start=854 + _LOTSNESTEDMESSAGE_B19._serialized_end=859 + _LOTSNESTEDMESSAGE_B20._serialized_start=861 + _LOTSNESTEDMESSAGE_B20._serialized_end=866 + _LOTSNESTEDMESSAGE_B21._serialized_start=868 + _LOTSNESTEDMESSAGE_B21._serialized_end=873 + _LOTSNESTEDMESSAGE_B22._serialized_start=875 + _LOTSNESTEDMESSAGE_B22._serialized_end=880 + _LOTSNESTEDMESSAGE_B23._serialized_start=882 + _LOTSNESTEDMESSAGE_B23._serialized_end=887 + _LOTSNESTEDMESSAGE_B24._serialized_start=889 + _LOTSNESTEDMESSAGE_B24._serialized_end=894 + _LOTSNESTEDMESSAGE_B25._serialized_start=896 + _LOTSNESTEDMESSAGE_B25._serialized_end=901 + _LOTSNESTEDMESSAGE_B26._serialized_start=903 + _LOTSNESTEDMESSAGE_B26._serialized_end=908 + _LOTSNESTEDMESSAGE_B27._serialized_start=910 + _LOTSNESTEDMESSAGE_B27._serialized_end=915 + _LOTSNESTEDMESSAGE_B28._serialized_start=917 + _LOTSNESTEDMESSAGE_B28._serialized_end=922 + _LOTSNESTEDMESSAGE_B29._serialized_start=924 + _LOTSNESTEDMESSAGE_B29._serialized_end=929 + _LOTSNESTEDMESSAGE_B30._serialized_start=931 + _LOTSNESTEDMESSAGE_B30._serialized_end=936 + _LOTSNESTEDMESSAGE_B31._serialized_start=938 + _LOTSNESTEDMESSAGE_B31._serialized_end=943 + _LOTSNESTEDMESSAGE_B32._serialized_start=945 + _LOTSNESTEDMESSAGE_B32._serialized_end=950 + _LOTSNESTEDMESSAGE_B33._serialized_start=952 + _LOTSNESTEDMESSAGE_B33._serialized_end=957 + _LOTSNESTEDMESSAGE_B34._serialized_start=959 + _LOTSNESTEDMESSAGE_B34._serialized_end=964 + _LOTSNESTEDMESSAGE_B35._serialized_start=966 + _LOTSNESTEDMESSAGE_B35._serialized_end=971 + _LOTSNESTEDMESSAGE_B36._serialized_start=973 + _LOTSNESTEDMESSAGE_B36._serialized_end=978 + _LOTSNESTEDMESSAGE_B37._serialized_start=980 + _LOTSNESTEDMESSAGE_B37._serialized_end=985 + _LOTSNESTEDMESSAGE_B38._serialized_start=987 + _LOTSNESTEDMESSAGE_B38._serialized_end=992 + _LOTSNESTEDMESSAGE_B39._serialized_start=994 + _LOTSNESTEDMESSAGE_B39._serialized_end=999 + 
_LOTSNESTEDMESSAGE_B40._serialized_start=1001 + _LOTSNESTEDMESSAGE_B40._serialized_end=1006 + _LOTSNESTEDMESSAGE_B41._serialized_start=1008 + _LOTSNESTEDMESSAGE_B41._serialized_end=1013 + _LOTSNESTEDMESSAGE_B42._serialized_start=1015 + _LOTSNESTEDMESSAGE_B42._serialized_end=1020 + _LOTSNESTEDMESSAGE_B43._serialized_start=1022 + _LOTSNESTEDMESSAGE_B43._serialized_end=1027 + _LOTSNESTEDMESSAGE_B44._serialized_start=1029 + _LOTSNESTEDMESSAGE_B44._serialized_end=1034 + _LOTSNESTEDMESSAGE_B45._serialized_start=1036 + _LOTSNESTEDMESSAGE_B45._serialized_end=1041 + _LOTSNESTEDMESSAGE_B46._serialized_start=1043 + _LOTSNESTEDMESSAGE_B46._serialized_end=1048 + _LOTSNESTEDMESSAGE_B47._serialized_start=1050 + _LOTSNESTEDMESSAGE_B47._serialized_end=1055 + _LOTSNESTEDMESSAGE_B48._serialized_start=1057 + _LOTSNESTEDMESSAGE_B48._serialized_end=1062 + _LOTSNESTEDMESSAGE_B49._serialized_start=1064 + _LOTSNESTEDMESSAGE_B49._serialized_end=1069 + _LOTSNESTEDMESSAGE_B50._serialized_start=1071 + _LOTSNESTEDMESSAGE_B50._serialized_end=1076 + _LOTSNESTEDMESSAGE_B51._serialized_start=1078 + _LOTSNESTEDMESSAGE_B51._serialized_end=1083 + _LOTSNESTEDMESSAGE_B52._serialized_start=1085 + _LOTSNESTEDMESSAGE_B52._serialized_end=1090 + _LOTSNESTEDMESSAGE_B53._serialized_start=1092 + _LOTSNESTEDMESSAGE_B53._serialized_end=1097 + _LOTSNESTEDMESSAGE_B54._serialized_start=1099 + _LOTSNESTEDMESSAGE_B54._serialized_end=1104 + _LOTSNESTEDMESSAGE_B55._serialized_start=1106 + _LOTSNESTEDMESSAGE_B55._serialized_end=1111 + _LOTSNESTEDMESSAGE_B56._serialized_start=1113 + _LOTSNESTEDMESSAGE_B56._serialized_end=1118 + _LOTSNESTEDMESSAGE_B57._serialized_start=1120 + _LOTSNESTEDMESSAGE_B57._serialized_end=1125 + _LOTSNESTEDMESSAGE_B58._serialized_start=1127 + _LOTSNESTEDMESSAGE_B58._serialized_end=1132 + _LOTSNESTEDMESSAGE_B59._serialized_start=1134 + _LOTSNESTEDMESSAGE_B59._serialized_end=1139 + _LOTSNESTEDMESSAGE_B60._serialized_start=1141 + _LOTSNESTEDMESSAGE_B60._serialized_end=1146 + _LOTSNESTEDMESSAGE_B61._serialized_start=1148 + _LOTSNESTEDMESSAGE_B61._serialized_end=1153 + _LOTSNESTEDMESSAGE_B62._serialized_start=1155 + _LOTSNESTEDMESSAGE_B62._serialized_end=1160 + _LOTSNESTEDMESSAGE_B63._serialized_start=1162 + _LOTSNESTEDMESSAGE_B63._serialized_end=1167 + _LOTSNESTEDMESSAGE_B64._serialized_start=1169 + _LOTSNESTEDMESSAGE_B64._serialized_end=1174 + _LOTSNESTEDMESSAGE_B65._serialized_start=1176 + _LOTSNESTEDMESSAGE_B65._serialized_end=1181 + _LOTSNESTEDMESSAGE_B66._serialized_start=1183 + _LOTSNESTEDMESSAGE_B66._serialized_end=1188 + _LOTSNESTEDMESSAGE_B67._serialized_start=1190 + _LOTSNESTEDMESSAGE_B67._serialized_end=1195 + _LOTSNESTEDMESSAGE_B68._serialized_start=1197 + _LOTSNESTEDMESSAGE_B68._serialized_end=1202 + _LOTSNESTEDMESSAGE_B69._serialized_start=1204 + _LOTSNESTEDMESSAGE_B69._serialized_end=1209 + _LOTSNESTEDMESSAGE_B70._serialized_start=1211 + _LOTSNESTEDMESSAGE_B70._serialized_end=1216 + _LOTSNESTEDMESSAGE_B71._serialized_start=1218 + _LOTSNESTEDMESSAGE_B71._serialized_end=1223 + _LOTSNESTEDMESSAGE_B72._serialized_start=1225 + _LOTSNESTEDMESSAGE_B72._serialized_end=1230 + _LOTSNESTEDMESSAGE_B73._serialized_start=1232 + _LOTSNESTEDMESSAGE_B73._serialized_end=1237 + _LOTSNESTEDMESSAGE_B74._serialized_start=1239 + _LOTSNESTEDMESSAGE_B74._serialized_end=1244 + _LOTSNESTEDMESSAGE_B75._serialized_start=1246 + _LOTSNESTEDMESSAGE_B75._serialized_end=1251 + _LOTSNESTEDMESSAGE_B76._serialized_start=1253 + _LOTSNESTEDMESSAGE_B76._serialized_end=1258 + _LOTSNESTEDMESSAGE_B77._serialized_start=1260 + 
_LOTSNESTEDMESSAGE_B77._serialized_end=1265 + _LOTSNESTEDMESSAGE_B78._serialized_start=1267 + _LOTSNESTEDMESSAGE_B78._serialized_end=1272 + _LOTSNESTEDMESSAGE_B79._serialized_start=1274 + _LOTSNESTEDMESSAGE_B79._serialized_end=1279 + _LOTSNESTEDMESSAGE_B80._serialized_start=1281 + _LOTSNESTEDMESSAGE_B80._serialized_end=1286 + _LOTSNESTEDMESSAGE_B81._serialized_start=1288 + _LOTSNESTEDMESSAGE_B81._serialized_end=1293 + _LOTSNESTEDMESSAGE_B82._serialized_start=1295 + _LOTSNESTEDMESSAGE_B82._serialized_end=1300 + _LOTSNESTEDMESSAGE_B83._serialized_start=1302 + _LOTSNESTEDMESSAGE_B83._serialized_end=1307 + _LOTSNESTEDMESSAGE_B84._serialized_start=1309 + _LOTSNESTEDMESSAGE_B84._serialized_end=1314 + _LOTSNESTEDMESSAGE_B85._serialized_start=1316 + _LOTSNESTEDMESSAGE_B85._serialized_end=1321 + _LOTSNESTEDMESSAGE_B86._serialized_start=1323 + _LOTSNESTEDMESSAGE_B86._serialized_end=1328 + _LOTSNESTEDMESSAGE_B87._serialized_start=1330 + _LOTSNESTEDMESSAGE_B87._serialized_end=1335 + _LOTSNESTEDMESSAGE_B88._serialized_start=1337 + _LOTSNESTEDMESSAGE_B88._serialized_end=1342 + _LOTSNESTEDMESSAGE_B89._serialized_start=1344 + _LOTSNESTEDMESSAGE_B89._serialized_end=1349 + _LOTSNESTEDMESSAGE_B90._serialized_start=1351 + _LOTSNESTEDMESSAGE_B90._serialized_end=1356 + _LOTSNESTEDMESSAGE_B91._serialized_start=1358 + _LOTSNESTEDMESSAGE_B91._serialized_end=1363 + _LOTSNESTEDMESSAGE_B92._serialized_start=1365 + _LOTSNESTEDMESSAGE_B92._serialized_end=1370 + _LOTSNESTEDMESSAGE_B93._serialized_start=1372 + _LOTSNESTEDMESSAGE_B93._serialized_end=1377 + _LOTSNESTEDMESSAGE_B94._serialized_start=1379 + _LOTSNESTEDMESSAGE_B94._serialized_end=1384 + _LOTSNESTEDMESSAGE_B95._serialized_start=1386 + _LOTSNESTEDMESSAGE_B95._serialized_end=1391 + _LOTSNESTEDMESSAGE_B96._serialized_start=1393 + _LOTSNESTEDMESSAGE_B96._serialized_end=1398 + _LOTSNESTEDMESSAGE_B97._serialized_start=1400 + _LOTSNESTEDMESSAGE_B97._serialized_end=1405 + _LOTSNESTEDMESSAGE_B98._serialized_start=1407 + _LOTSNESTEDMESSAGE_B98._serialized_end=1412 + _LOTSNESTEDMESSAGE_B99._serialized_start=1414 + _LOTSNESTEDMESSAGE_B99._serialized_end=1419 + _LOTSNESTEDMESSAGE_B100._serialized_start=1421 + _LOTSNESTEDMESSAGE_B100._serialized_end=1427 + _LOTSNESTEDMESSAGE_B101._serialized_start=1429 + _LOTSNESTEDMESSAGE_B101._serialized_end=1435 + _LOTSNESTEDMESSAGE_B102._serialized_start=1437 + _LOTSNESTEDMESSAGE_B102._serialized_end=1443 + _LOTSNESTEDMESSAGE_B103._serialized_start=1445 + _LOTSNESTEDMESSAGE_B103._serialized_end=1451 + _LOTSNESTEDMESSAGE_B104._serialized_start=1453 + _LOTSNESTEDMESSAGE_B104._serialized_end=1459 + _LOTSNESTEDMESSAGE_B105._serialized_start=1461 + _LOTSNESTEDMESSAGE_B105._serialized_end=1467 + _LOTSNESTEDMESSAGE_B106._serialized_start=1469 + _LOTSNESTEDMESSAGE_B106._serialized_end=1475 + _LOTSNESTEDMESSAGE_B107._serialized_start=1477 + _LOTSNESTEDMESSAGE_B107._serialized_end=1483 + _LOTSNESTEDMESSAGE_B108._serialized_start=1485 + _LOTSNESTEDMESSAGE_B108._serialized_end=1491 + _LOTSNESTEDMESSAGE_B109._serialized_start=1493 + _LOTSNESTEDMESSAGE_B109._serialized_end=1499 + _LOTSNESTEDMESSAGE_B110._serialized_start=1501 + _LOTSNESTEDMESSAGE_B110._serialized_end=1507 + _LOTSNESTEDMESSAGE_B111._serialized_start=1509 + _LOTSNESTEDMESSAGE_B111._serialized_end=1515 + _LOTSNESTEDMESSAGE_B112._serialized_start=1517 + _LOTSNESTEDMESSAGE_B112._serialized_end=1523 + _LOTSNESTEDMESSAGE_B113._serialized_start=1525 + _LOTSNESTEDMESSAGE_B113._serialized_end=1531 + _LOTSNESTEDMESSAGE_B114._serialized_start=1533 + _LOTSNESTEDMESSAGE_B114._serialized_end=1539 + 
_LOTSNESTEDMESSAGE_B115._serialized_start=1541 + _LOTSNESTEDMESSAGE_B115._serialized_end=1547 + _LOTSNESTEDMESSAGE_B116._serialized_start=1549 + _LOTSNESTEDMESSAGE_B116._serialized_end=1555 + _LOTSNESTEDMESSAGE_B117._serialized_start=1557 + _LOTSNESTEDMESSAGE_B117._serialized_end=1563 + _LOTSNESTEDMESSAGE_B118._serialized_start=1565 + _LOTSNESTEDMESSAGE_B118._serialized_end=1571 + _LOTSNESTEDMESSAGE_B119._serialized_start=1573 + _LOTSNESTEDMESSAGE_B119._serialized_end=1579 + _LOTSNESTEDMESSAGE_B120._serialized_start=1581 + _LOTSNESTEDMESSAGE_B120._serialized_end=1587 + _LOTSNESTEDMESSAGE_B121._serialized_start=1589 + _LOTSNESTEDMESSAGE_B121._serialized_end=1595 + _LOTSNESTEDMESSAGE_B122._serialized_start=1597 + _LOTSNESTEDMESSAGE_B122._serialized_end=1603 + _LOTSNESTEDMESSAGE_B123._serialized_start=1605 + _LOTSNESTEDMESSAGE_B123._serialized_end=1611 + _LOTSNESTEDMESSAGE_B124._serialized_start=1613 + _LOTSNESTEDMESSAGE_B124._serialized_end=1619 + _LOTSNESTEDMESSAGE_B125._serialized_start=1621 + _LOTSNESTEDMESSAGE_B125._serialized_end=1627 + _LOTSNESTEDMESSAGE_B126._serialized_start=1629 + _LOTSNESTEDMESSAGE_B126._serialized_end=1635 + _LOTSNESTEDMESSAGE_B127._serialized_start=1637 + _LOTSNESTEDMESSAGE_B127._serialized_end=1643 + _LOTSNESTEDMESSAGE_B128._serialized_start=1645 + _LOTSNESTEDMESSAGE_B128._serialized_end=1651 + _LOTSNESTEDMESSAGE_B129._serialized_start=1653 + _LOTSNESTEDMESSAGE_B129._serialized_end=1659 + _LOTSNESTEDMESSAGE_B130._serialized_start=1661 + _LOTSNESTEDMESSAGE_B130._serialized_end=1667 + _LOTSNESTEDMESSAGE_B131._serialized_start=1669 + _LOTSNESTEDMESSAGE_B131._serialized_end=1675 + _LOTSNESTEDMESSAGE_B132._serialized_start=1677 + _LOTSNESTEDMESSAGE_B132._serialized_end=1683 + _LOTSNESTEDMESSAGE_B133._serialized_start=1685 + _LOTSNESTEDMESSAGE_B133._serialized_end=1691 + _LOTSNESTEDMESSAGE_B134._serialized_start=1693 + _LOTSNESTEDMESSAGE_B134._serialized_end=1699 + _LOTSNESTEDMESSAGE_B135._serialized_start=1701 + _LOTSNESTEDMESSAGE_B135._serialized_end=1707 + _LOTSNESTEDMESSAGE_B136._serialized_start=1709 + _LOTSNESTEDMESSAGE_B136._serialized_end=1715 + _LOTSNESTEDMESSAGE_B137._serialized_start=1717 + _LOTSNESTEDMESSAGE_B137._serialized_end=1723 + _LOTSNESTEDMESSAGE_B138._serialized_start=1725 + _LOTSNESTEDMESSAGE_B138._serialized_end=1731 + _LOTSNESTEDMESSAGE_B139._serialized_start=1733 + _LOTSNESTEDMESSAGE_B139._serialized_end=1739 + _LOTSNESTEDMESSAGE_B140._serialized_start=1741 + _LOTSNESTEDMESSAGE_B140._serialized_end=1747 + _LOTSNESTEDMESSAGE_B141._serialized_start=1749 + _LOTSNESTEDMESSAGE_B141._serialized_end=1755 + _LOTSNESTEDMESSAGE_B142._serialized_start=1757 + _LOTSNESTEDMESSAGE_B142._serialized_end=1763 + _LOTSNESTEDMESSAGE_B143._serialized_start=1765 + _LOTSNESTEDMESSAGE_B143._serialized_end=1771 + _LOTSNESTEDMESSAGE_B144._serialized_start=1773 + _LOTSNESTEDMESSAGE_B144._serialized_end=1779 + _LOTSNESTEDMESSAGE_B145._serialized_start=1781 + _LOTSNESTEDMESSAGE_B145._serialized_end=1787 + _LOTSNESTEDMESSAGE_B146._serialized_start=1789 + _LOTSNESTEDMESSAGE_B146._serialized_end=1795 + _LOTSNESTEDMESSAGE_B147._serialized_start=1797 + _LOTSNESTEDMESSAGE_B147._serialized_end=1803 + _LOTSNESTEDMESSAGE_B148._serialized_start=1805 + _LOTSNESTEDMESSAGE_B148._serialized_end=1811 + _LOTSNESTEDMESSAGE_B149._serialized_start=1813 + _LOTSNESTEDMESSAGE_B149._serialized_end=1819 + _LOTSNESTEDMESSAGE_B150._serialized_start=1821 + _LOTSNESTEDMESSAGE_B150._serialized_end=1827 + _LOTSNESTEDMESSAGE_B151._serialized_start=1829 + _LOTSNESTEDMESSAGE_B151._serialized_end=1835 + 
_LOTSNESTEDMESSAGE_B152._serialized_start=1837 + _LOTSNESTEDMESSAGE_B152._serialized_end=1843 + _LOTSNESTEDMESSAGE_B153._serialized_start=1845 + _LOTSNESTEDMESSAGE_B153._serialized_end=1851 + _LOTSNESTEDMESSAGE_B154._serialized_start=1853 + _LOTSNESTEDMESSAGE_B154._serialized_end=1859 + _LOTSNESTEDMESSAGE_B155._serialized_start=1861 + _LOTSNESTEDMESSAGE_B155._serialized_end=1867 + _LOTSNESTEDMESSAGE_B156._serialized_start=1869 + _LOTSNESTEDMESSAGE_B156._serialized_end=1875 + _LOTSNESTEDMESSAGE_B157._serialized_start=1877 + _LOTSNESTEDMESSAGE_B157._serialized_end=1883 + _LOTSNESTEDMESSAGE_B158._serialized_start=1885 + _LOTSNESTEDMESSAGE_B158._serialized_end=1891 + _LOTSNESTEDMESSAGE_B159._serialized_start=1893 + _LOTSNESTEDMESSAGE_B159._serialized_end=1899 + _LOTSNESTEDMESSAGE_B160._serialized_start=1901 + _LOTSNESTEDMESSAGE_B160._serialized_end=1907 + _LOTSNESTEDMESSAGE_B161._serialized_start=1909 + _LOTSNESTEDMESSAGE_B161._serialized_end=1915 + _LOTSNESTEDMESSAGE_B162._serialized_start=1917 + _LOTSNESTEDMESSAGE_B162._serialized_end=1923 + _LOTSNESTEDMESSAGE_B163._serialized_start=1925 + _LOTSNESTEDMESSAGE_B163._serialized_end=1931 + _LOTSNESTEDMESSAGE_B164._serialized_start=1933 + _LOTSNESTEDMESSAGE_B164._serialized_end=1939 + _LOTSNESTEDMESSAGE_B165._serialized_start=1941 + _LOTSNESTEDMESSAGE_B165._serialized_end=1947 + _LOTSNESTEDMESSAGE_B166._serialized_start=1949 + _LOTSNESTEDMESSAGE_B166._serialized_end=1955 + _LOTSNESTEDMESSAGE_B167._serialized_start=1957 + _LOTSNESTEDMESSAGE_B167._serialized_end=1963 + _LOTSNESTEDMESSAGE_B168._serialized_start=1965 + _LOTSNESTEDMESSAGE_B168._serialized_end=1971 + _LOTSNESTEDMESSAGE_B169._serialized_start=1973 + _LOTSNESTEDMESSAGE_B169._serialized_end=1979 + _LOTSNESTEDMESSAGE_B170._serialized_start=1981 + _LOTSNESTEDMESSAGE_B170._serialized_end=1987 + _LOTSNESTEDMESSAGE_B171._serialized_start=1989 + _LOTSNESTEDMESSAGE_B171._serialized_end=1995 + _LOTSNESTEDMESSAGE_B172._serialized_start=1997 + _LOTSNESTEDMESSAGE_B172._serialized_end=2003 + _LOTSNESTEDMESSAGE_B173._serialized_start=2005 + _LOTSNESTEDMESSAGE_B173._serialized_end=2011 + _LOTSNESTEDMESSAGE_B174._serialized_start=2013 + _LOTSNESTEDMESSAGE_B174._serialized_end=2019 + _LOTSNESTEDMESSAGE_B175._serialized_start=2021 + _LOTSNESTEDMESSAGE_B175._serialized_end=2027 + _LOTSNESTEDMESSAGE_B176._serialized_start=2029 + _LOTSNESTEDMESSAGE_B176._serialized_end=2035 + _LOTSNESTEDMESSAGE_B177._serialized_start=2037 + _LOTSNESTEDMESSAGE_B177._serialized_end=2043 + _LOTSNESTEDMESSAGE_B178._serialized_start=2045 + _LOTSNESTEDMESSAGE_B178._serialized_end=2051 + _LOTSNESTEDMESSAGE_B179._serialized_start=2053 + _LOTSNESTEDMESSAGE_B179._serialized_end=2059 + _LOTSNESTEDMESSAGE_B180._serialized_start=2061 + _LOTSNESTEDMESSAGE_B180._serialized_end=2067 + _LOTSNESTEDMESSAGE_B181._serialized_start=2069 + _LOTSNESTEDMESSAGE_B181._serialized_end=2075 + _LOTSNESTEDMESSAGE_B182._serialized_start=2077 + _LOTSNESTEDMESSAGE_B182._serialized_end=2083 + _LOTSNESTEDMESSAGE_B183._serialized_start=2085 + _LOTSNESTEDMESSAGE_B183._serialized_end=2091 + _LOTSNESTEDMESSAGE_B184._serialized_start=2093 + _LOTSNESTEDMESSAGE_B184._serialized_end=2099 + _LOTSNESTEDMESSAGE_B185._serialized_start=2101 + _LOTSNESTEDMESSAGE_B185._serialized_end=2107 + _LOTSNESTEDMESSAGE_B186._serialized_start=2109 + _LOTSNESTEDMESSAGE_B186._serialized_end=2115 + _LOTSNESTEDMESSAGE_B187._serialized_start=2117 + _LOTSNESTEDMESSAGE_B187._serialized_end=2123 + _LOTSNESTEDMESSAGE_B188._serialized_start=2125 + _LOTSNESTEDMESSAGE_B188._serialized_end=2131 + 
_LOTSNESTEDMESSAGE_B189._serialized_start=2133 + _LOTSNESTEDMESSAGE_B189._serialized_end=2139 + _LOTSNESTEDMESSAGE_B190._serialized_start=2141 + _LOTSNESTEDMESSAGE_B190._serialized_end=2147 + _LOTSNESTEDMESSAGE_B191._serialized_start=2149 + _LOTSNESTEDMESSAGE_B191._serialized_end=2155 + _LOTSNESTEDMESSAGE_B192._serialized_start=2157 + _LOTSNESTEDMESSAGE_B192._serialized_end=2163 + _LOTSNESTEDMESSAGE_B193._serialized_start=2165 + _LOTSNESTEDMESSAGE_B193._serialized_end=2171 + _LOTSNESTEDMESSAGE_B194._serialized_start=2173 + _LOTSNESTEDMESSAGE_B194._serialized_end=2179 + _LOTSNESTEDMESSAGE_B195._serialized_start=2181 + _LOTSNESTEDMESSAGE_B195._serialized_end=2187 + _LOTSNESTEDMESSAGE_B196._serialized_start=2189 + _LOTSNESTEDMESSAGE_B196._serialized_end=2195 + _LOTSNESTEDMESSAGE_B197._serialized_start=2197 + _LOTSNESTEDMESSAGE_B197._serialized_end=2203 + _LOTSNESTEDMESSAGE_B198._serialized_start=2205 + _LOTSNESTEDMESSAGE_B198._serialized_end=2211 + _LOTSNESTEDMESSAGE_B199._serialized_start=2213 + _LOTSNESTEDMESSAGE_B199._serialized_end=2219 + _LOTSNESTEDMESSAGE_B200._serialized_start=2221 + _LOTSNESTEDMESSAGE_B200._serialized_end=2227 + _LOTSNESTEDMESSAGE_B201._serialized_start=2229 + _LOTSNESTEDMESSAGE_B201._serialized_end=2235 + _LOTSNESTEDMESSAGE_B202._serialized_start=2237 + _LOTSNESTEDMESSAGE_B202._serialized_end=2243 + _LOTSNESTEDMESSAGE_B203._serialized_start=2245 + _LOTSNESTEDMESSAGE_B203._serialized_end=2251 + _LOTSNESTEDMESSAGE_B204._serialized_start=2253 + _LOTSNESTEDMESSAGE_B204._serialized_end=2259 + _LOTSNESTEDMESSAGE_B205._serialized_start=2261 + _LOTSNESTEDMESSAGE_B205._serialized_end=2267 + _LOTSNESTEDMESSAGE_B206._serialized_start=2269 + _LOTSNESTEDMESSAGE_B206._serialized_end=2275 + _LOTSNESTEDMESSAGE_B207._serialized_start=2277 + _LOTSNESTEDMESSAGE_B207._serialized_end=2283 + _LOTSNESTEDMESSAGE_B208._serialized_start=2285 + _LOTSNESTEDMESSAGE_B208._serialized_end=2291 + _LOTSNESTEDMESSAGE_B209._serialized_start=2293 + _LOTSNESTEDMESSAGE_B209._serialized_end=2299 + _LOTSNESTEDMESSAGE_B210._serialized_start=2301 + _LOTSNESTEDMESSAGE_B210._serialized_end=2307 + _LOTSNESTEDMESSAGE_B211._serialized_start=2309 + _LOTSNESTEDMESSAGE_B211._serialized_end=2315 + _LOTSNESTEDMESSAGE_B212._serialized_start=2317 + _LOTSNESTEDMESSAGE_B212._serialized_end=2323 + _LOTSNESTEDMESSAGE_B213._serialized_start=2325 + _LOTSNESTEDMESSAGE_B213._serialized_end=2331 + _LOTSNESTEDMESSAGE_B214._serialized_start=2333 + _LOTSNESTEDMESSAGE_B214._serialized_end=2339 + _LOTSNESTEDMESSAGE_B215._serialized_start=2341 + _LOTSNESTEDMESSAGE_B215._serialized_end=2347 + _LOTSNESTEDMESSAGE_B216._serialized_start=2349 + _LOTSNESTEDMESSAGE_B216._serialized_end=2355 + _LOTSNESTEDMESSAGE_B217._serialized_start=2357 + _LOTSNESTEDMESSAGE_B217._serialized_end=2363 + _LOTSNESTEDMESSAGE_B218._serialized_start=2365 + _LOTSNESTEDMESSAGE_B218._serialized_end=2371 + _LOTSNESTEDMESSAGE_B219._serialized_start=2373 + _LOTSNESTEDMESSAGE_B219._serialized_end=2379 + _LOTSNESTEDMESSAGE_B220._serialized_start=2381 + _LOTSNESTEDMESSAGE_B220._serialized_end=2387 + _LOTSNESTEDMESSAGE_B221._serialized_start=2389 + _LOTSNESTEDMESSAGE_B221._serialized_end=2395 + _LOTSNESTEDMESSAGE_B222._serialized_start=2397 + _LOTSNESTEDMESSAGE_B222._serialized_end=2403 + _LOTSNESTEDMESSAGE_B223._serialized_start=2405 + _LOTSNESTEDMESSAGE_B223._serialized_end=2411 + _LOTSNESTEDMESSAGE_B224._serialized_start=2413 + _LOTSNESTEDMESSAGE_B224._serialized_end=2419 + _LOTSNESTEDMESSAGE_B225._serialized_start=2421 + _LOTSNESTEDMESSAGE_B225._serialized_end=2427 + 
_LOTSNESTEDMESSAGE_B226._serialized_start=2429 + _LOTSNESTEDMESSAGE_B226._serialized_end=2435 + _LOTSNESTEDMESSAGE_B227._serialized_start=2437 + _LOTSNESTEDMESSAGE_B227._serialized_end=2443 + _LOTSNESTEDMESSAGE_B228._serialized_start=2445 + _LOTSNESTEDMESSAGE_B228._serialized_end=2451 + _LOTSNESTEDMESSAGE_B229._serialized_start=2453 + _LOTSNESTEDMESSAGE_B229._serialized_end=2459 + _LOTSNESTEDMESSAGE_B230._serialized_start=2461 + _LOTSNESTEDMESSAGE_B230._serialized_end=2467 + _LOTSNESTEDMESSAGE_B231._serialized_start=2469 + _LOTSNESTEDMESSAGE_B231._serialized_end=2475 + _LOTSNESTEDMESSAGE_B232._serialized_start=2477 + _LOTSNESTEDMESSAGE_B232._serialized_end=2483 + _LOTSNESTEDMESSAGE_B233._serialized_start=2485 + _LOTSNESTEDMESSAGE_B233._serialized_end=2491 + _LOTSNESTEDMESSAGE_B234._serialized_start=2493 + _LOTSNESTEDMESSAGE_B234._serialized_end=2499 + _LOTSNESTEDMESSAGE_B235._serialized_start=2501 + _LOTSNESTEDMESSAGE_B235._serialized_end=2507 + _LOTSNESTEDMESSAGE_B236._serialized_start=2509 + _LOTSNESTEDMESSAGE_B236._serialized_end=2515 + _LOTSNESTEDMESSAGE_B237._serialized_start=2517 + _LOTSNESTEDMESSAGE_B237._serialized_end=2523 + _LOTSNESTEDMESSAGE_B238._serialized_start=2525 + _LOTSNESTEDMESSAGE_B238._serialized_end=2531 + _LOTSNESTEDMESSAGE_B239._serialized_start=2533 + _LOTSNESTEDMESSAGE_B239._serialized_end=2539 + _LOTSNESTEDMESSAGE_B240._serialized_start=2541 + _LOTSNESTEDMESSAGE_B240._serialized_end=2547 + _LOTSNESTEDMESSAGE_B241._serialized_start=2549 + _LOTSNESTEDMESSAGE_B241._serialized_end=2555 + _LOTSNESTEDMESSAGE_B242._serialized_start=2557 + _LOTSNESTEDMESSAGE_B242._serialized_end=2563 + _LOTSNESTEDMESSAGE_B243._serialized_start=2565 + _LOTSNESTEDMESSAGE_B243._serialized_end=2571 + _LOTSNESTEDMESSAGE_B244._serialized_start=2573 + _LOTSNESTEDMESSAGE_B244._serialized_end=2579 + _LOTSNESTEDMESSAGE_B245._serialized_start=2581 + _LOTSNESTEDMESSAGE_B245._serialized_end=2587 + _LOTSNESTEDMESSAGE_B246._serialized_start=2589 + _LOTSNESTEDMESSAGE_B246._serialized_end=2595 + _LOTSNESTEDMESSAGE_B247._serialized_start=2597 + _LOTSNESTEDMESSAGE_B247._serialized_end=2603 + _LOTSNESTEDMESSAGE_B248._serialized_start=2605 + _LOTSNESTEDMESSAGE_B248._serialized_end=2611 + _LOTSNESTEDMESSAGE_B249._serialized_start=2613 + _LOTSNESTEDMESSAGE_B249._serialized_end=2619 + _LOTSNESTEDMESSAGE_B250._serialized_start=2621 + _LOTSNESTEDMESSAGE_B250._serialized_end=2627 + _LOTSNESTEDMESSAGE_B251._serialized_start=2629 + _LOTSNESTEDMESSAGE_B251._serialized_end=2635 + _LOTSNESTEDMESSAGE_B252._serialized_start=2637 + _LOTSNESTEDMESSAGE_B252._serialized_end=2643 + _LOTSNESTEDMESSAGE_B253._serialized_start=2645 + _LOTSNESTEDMESSAGE_B253._serialized_end=2651 + _LOTSNESTEDMESSAGE_B254._serialized_start=2653 + _LOTSNESTEDMESSAGE_B254._serialized_end=2659 + _LOTSNESTEDMESSAGE_B255._serialized_start=2661 + _LOTSNESTEDMESSAGE_B255._serialized_end=2667 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/no_package_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/no_package_pb2.py new file mode 100644 index 0000000000..d46dee080a --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/no_package_pb2.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/internal/no_package.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)google/protobuf/internal/no_package.proto\";\n\x10NoPackageMessage\x12\'\n\x0fno_package_enum\x18\x01 \x01(\x0e\x32\x0e.NoPackageEnum*?\n\rNoPackageEnum\x12\x16\n\x12NO_PACKAGE_VALUE_0\x10\x00\x12\x16\n\x12NO_PACKAGE_VALUE_1\x10\x01') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.no_package_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _NOPACKAGEENUM._serialized_start=106 + _NOPACKAGEENUM._serialized_end=169 + _NOPACKAGEMESSAGE._serialized_start=45 + _NOPACKAGEMESSAGE._serialized_end=104 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/python_message.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/python_message.py new file mode 100644 index 0000000000..2921d5cb6e --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/python_message.py @@ -0,0 +1,1539 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This code is meant to work on Python 2.4 and above only. +# +# TODO(robinson): Helpers for verbose, common checks like seeing if a +# descriptor's cpp_type is CPPTYPE_MESSAGE. + +"""Contains a metaclass and helper functions used to create +protocol message classes from Descriptor objects at runtime. + +Recall that a metaclass is the "type" of a class. +(A class is to a metaclass what an instance is to a class.) 
+ +In this case, we use the GeneratedProtocolMessageType metaclass +to inject all the useful functionality into the classes +output by the protocol compiler at compile-time. + +The upshot of all this is that the real implementation +details for ALL pure-Python protocol buffers are *here in +this file*. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +from io import BytesIO +import struct +import sys +import weakref + +# We use "as" to avoid name collisions with variables. +from google.protobuf.internal import api_implementation +from google.protobuf.internal import containers +from google.protobuf.internal import decoder +from google.protobuf.internal import encoder +from google.protobuf.internal import enum_type_wrapper +from google.protobuf.internal import extension_dict +from google.protobuf.internal import message_listener as message_listener_mod +from google.protobuf.internal import type_checkers +from google.protobuf.internal import well_known_types +from google.protobuf.internal import wire_format +from google.protobuf import descriptor as descriptor_mod +from google.protobuf import message as message_mod +from google.protobuf import text_format + +_FieldDescriptor = descriptor_mod.FieldDescriptor +_AnyFullTypeName = 'google.protobuf.Any' +_ExtensionDict = extension_dict._ExtensionDict + +class GeneratedProtocolMessageType(type): + + """Metaclass for protocol message classes created at runtime from Descriptors. + + We add implementations for all methods described in the Message class. We + also create properties to allow getting/setting all fields in the protocol + message. Finally, we create slots to prevent users from accidentally + "setting" nonexistent fields in the protocol message, which then wouldn't get + serialized / deserialized properly. + + The protocol compiler currently uses this metaclass to create protocol + message classes at runtime. Clients can also manually create their own + classes at runtime, as in this example: + + mydescriptor = Descriptor(.....) + factory = symbol_database.Default() + factory.pool.AddDescriptor(mydescriptor) + MyProtoClass = factory.GetPrototype(mydescriptor) + myproto_instance = MyProtoClass() + myproto.foo_field = 23 + ... + """ + + # Must be consistent with the protocol-compiler code in + # proto2/compiler/internal/generator.*. + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __new__(cls, name, bases, dictionary): + """Custom allocation for runtime-generated class types. + + We override __new__ because this is apparently the only place + where we can meaningfully set __slots__ on the class we're creating(?). + (The interplay between metaclasses and slots is not very well-documented). + + Args: + name: Name of the class (ignored, but required by the + metaclass protocol). + bases: Base classes of the class we're constructing. + (Should be message.Message). We ignore this field, but + it's required by the metaclass protocol + dictionary: The class dictionary of the class we're + constructing. dictionary[_DESCRIPTOR_KEY] must contain + a Descriptor object describing this protocol message + type. + + Returns: + Newly-allocated class. + + Raises: + RuntimeError: Generated code only work with python cpp extension. + """ + descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] + + if isinstance(descriptor, str): + raise RuntimeError('The generated code only work with python cpp ' + 'extension, but it is using pure python runtime.') + + # If a concrete class already exists for this descriptor, don't try to + # create another. 
Doing so will break any messages that already exist with + # the existing class. + # + # The C++ implementation appears to have its own internal `PyMessageFactory` + # to achieve similar results. + # + # This most commonly happens in `text_format.py` when using descriptors from + # a custom pool; it calls symbol_database.Global().getPrototype() on a + # descriptor which already has an existing concrete class. + new_class = getattr(descriptor, '_concrete_class', None) + if new_class: + return new_class + + if descriptor.full_name in well_known_types.WKTBASES: + bases += (well_known_types.WKTBASES[descriptor.full_name],) + _AddClassAttributesForNestedExtensions(descriptor, dictionary) + _AddSlots(descriptor, dictionary) + + superclass = super(GeneratedProtocolMessageType, cls) + new_class = superclass.__new__(cls, name, bases, dictionary) + return new_class + + def __init__(cls, name, bases, dictionary): + """Here we perform the majority of our work on the class. + We add enum getters, an __init__ method, implementations + of all Message methods, and properties for all fields + in the protocol type. + + Args: + name: Name of the class (ignored, but required by the + metaclass protocol). + bases: Base classes of the class we're constructing. + (Should be message.Message). We ignore this field, but + it's required by the metaclass protocol + dictionary: The class dictionary of the class we're + constructing. dictionary[_DESCRIPTOR_KEY] must contain + a Descriptor object describing this protocol message + type. + """ + descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] + + # If this is an _existing_ class looked up via `_concrete_class` in the + # __new__ method above, then we don't need to re-initialize anything. + existing_class = getattr(descriptor, '_concrete_class', None) + if existing_class: + assert existing_class is cls, ( + 'Duplicate `GeneratedProtocolMessageType` created for descriptor %r' + % (descriptor.full_name)) + return + + cls._decoders_by_tag = {} + if (descriptor.has_options and + descriptor.GetOptions().message_set_wire_format): + cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = ( + decoder.MessageSetItemDecoder(descriptor), None) + + # Attach stuff to each FieldDescriptor for quick lookup later on. + for field in descriptor.fields: + _AttachFieldHelpers(cls, field) + + descriptor._concrete_class = cls # pylint: disable=protected-access + _AddEnumValues(descriptor, cls) + _AddInitMethod(descriptor, cls) + _AddPropertiesForFields(descriptor, cls) + _AddPropertiesForExtensions(descriptor, cls) + _AddStaticMethods(cls) + _AddMessageMethods(descriptor, cls) + _AddPrivateHelperMethods(descriptor, cls) + + superclass = super(GeneratedProtocolMessageType, cls) + superclass.__init__(name, bases, dictionary) + + +# Stateless helpers for GeneratedProtocolMessageType below. +# Outside clients should not access these directly. +# +# I opted not to make any of these methods on the metaclass, to make it more +# clear that I'm not really using any state there and to keep clients from +# thinking that they have direct access to these construction helpers. + + +def _PropertyName(proto_field_name): + """Returns the name of the public property attribute which + clients can use to get and (in some cases) set the value + of a protocol message field. + + Args: + proto_field_name: The protocol message field name, exactly + as it appears (or would appear) in a .proto file. + """ + # TODO(robinson): Escape Python keywords (e.g., yield), and test this support. 
+ # nnorwitz makes my day by writing: + # """ + # FYI. See the keyword module in the stdlib. This could be as simple as: + # + # if keyword.iskeyword(proto_field_name): + # return proto_field_name + "_" + # return proto_field_name + # """ + # Kenton says: The above is a BAD IDEA. People rely on being able to use + # getattr() and setattr() to reflectively manipulate field values. If we + # rename the properties, then every such user has to also make sure to apply + # the same transformation. Note that currently if you name a field "yield", + # you can still access it just fine using getattr/setattr -- it's not even + # that cumbersome to do so. + # TODO(kenton): Remove this method entirely if/when everyone agrees with my + # position. + return proto_field_name + + +def _AddSlots(message_descriptor, dictionary): + """Adds a __slots__ entry to dictionary, containing the names of all valid + attributes for this message type. + + Args: + message_descriptor: A Descriptor instance describing this message type. + dictionary: Class dictionary to which we'll add a '__slots__' entry. + """ + dictionary['__slots__'] = ['_cached_byte_size', + '_cached_byte_size_dirty', + '_fields', + '_unknown_fields', + '_unknown_field_set', + '_is_present_in_parent', + '_listener', + '_listener_for_children', + '__weakref__', + '_oneofs'] + + +def _IsMessageSetExtension(field): + return (field.is_extension and + field.containing_type.has_options and + field.containing_type.GetOptions().message_set_wire_format and + field.type == _FieldDescriptor.TYPE_MESSAGE and + field.label == _FieldDescriptor.LABEL_OPTIONAL) + + +def _IsMapField(field): + return (field.type == _FieldDescriptor.TYPE_MESSAGE and + field.message_type.has_options and + field.message_type.GetOptions().map_entry) + + +def _IsMessageMapField(field): + value_type = field.message_type.fields_by_name['value'] + return value_type.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE + + +def _AttachFieldHelpers(cls, field_descriptor): + is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED) + is_packable = (is_repeated and + wire_format.IsTypePackable(field_descriptor.type)) + is_proto3 = field_descriptor.containing_type.syntax == 'proto3' + if not is_packable: + is_packed = False + elif field_descriptor.containing_type.syntax == 'proto2': + is_packed = (field_descriptor.has_options and + field_descriptor.GetOptions().packed) + else: + has_packed_false = (field_descriptor.has_options and + field_descriptor.GetOptions().HasField('packed') and + field_descriptor.GetOptions().packed == False) + is_packed = not has_packed_false + is_map_entry = _IsMapField(field_descriptor) + + if is_map_entry: + field_encoder = encoder.MapEncoder(field_descriptor) + sizer = encoder.MapSizer(field_descriptor, + _IsMessageMapField(field_descriptor)) + elif _IsMessageSetExtension(field_descriptor): + field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number) + sizer = encoder.MessageSetItemSizer(field_descriptor.number) + else: + field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type]( + field_descriptor.number, is_repeated, is_packed) + sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type]( + field_descriptor.number, is_repeated, is_packed) + + field_descriptor._encoder = field_encoder + field_descriptor._sizer = sizer + field_descriptor._default_constructor = _DefaultValueConstructorForField( + field_descriptor) + + def AddDecoder(wiretype, is_packed): + tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype) + decode_type = 
field_descriptor.type + if (decode_type == _FieldDescriptor.TYPE_ENUM and + type_checkers.SupportsOpenEnums(field_descriptor)): + decode_type = _FieldDescriptor.TYPE_INT32 + + oneof_descriptor = None + clear_if_default = False + if field_descriptor.containing_oneof is not None: + oneof_descriptor = field_descriptor + elif (is_proto3 and not is_repeated and + field_descriptor.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE): + clear_if_default = True + + if is_map_entry: + is_message_map = _IsMessageMapField(field_descriptor) + + field_decoder = decoder.MapDecoder( + field_descriptor, _GetInitializeDefaultForMap(field_descriptor), + is_message_map) + elif decode_type == _FieldDescriptor.TYPE_STRING: + field_decoder = decoder.StringDecoder( + field_descriptor.number, is_repeated, is_packed, + field_descriptor, field_descriptor._default_constructor, + clear_if_default) + elif field_descriptor.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + field_decoder = type_checkers.TYPE_TO_DECODER[decode_type]( + field_descriptor.number, is_repeated, is_packed, + field_descriptor, field_descriptor._default_constructor) + else: + field_decoder = type_checkers.TYPE_TO_DECODER[decode_type]( + field_descriptor.number, is_repeated, is_packed, + # pylint: disable=protected-access + field_descriptor, field_descriptor._default_constructor, + clear_if_default) + + cls._decoders_by_tag[tag_bytes] = (field_decoder, oneof_descriptor) + + AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type], + False) + + if is_repeated and wire_format.IsTypePackable(field_descriptor.type): + # To support wire compatibility of adding packed = true, add a decoder for + # packed values regardless of the field's options. + AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True) + + +def _AddClassAttributesForNestedExtensions(descriptor, dictionary): + extensions = descriptor.extensions_by_name + for extension_name, extension_field in extensions.items(): + assert extension_name not in dictionary + dictionary[extension_name] = extension_field + + +def _AddEnumValues(descriptor, cls): + """Sets class-level attributes for all enum fields defined in this message. + + Also exporting a class-level object that can name enum values. + + Args: + descriptor: Descriptor object for this message type. + cls: Class we're constructing for this message type. + """ + for enum_type in descriptor.enum_types: + setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type)) + for enum_value in enum_type.values: + setattr(cls, enum_value.name, enum_value.number) + + +def _GetInitializeDefaultForMap(field): + if field.label != _FieldDescriptor.LABEL_REPEATED: + raise ValueError('map_entry set on non-repeated field %s' % ( + field.name)) + fields_by_name = field.message_type.fields_by_name + key_checker = type_checkers.GetTypeChecker(fields_by_name['key']) + + value_field = fields_by_name['value'] + if _IsMessageMapField(field): + def MakeMessageMapDefault(message): + return containers.MessageMap( + message._listener_for_children, value_field.message_type, key_checker, + field.message_type) + return MakeMessageMapDefault + else: + value_checker = type_checkers.GetTypeChecker(value_field) + def MakePrimitiveMapDefault(message): + return containers.ScalarMap( + message._listener_for_children, key_checker, value_checker, + field.message_type) + return MakePrimitiveMapDefault + +def _DefaultValueConstructorForField(field): + """Returns a function which returns a default value for a field. 
+ + Args: + field: FieldDescriptor object for this field. + + The returned function has one argument: + message: Message instance containing this field, or a weakref proxy + of same. + + That function in turn returns a default value for this field. The default + value may refer back to |message| via a weak reference. + """ + + if _IsMapField(field): + return _GetInitializeDefaultForMap(field) + + if field.label == _FieldDescriptor.LABEL_REPEATED: + if field.has_default_value and field.default_value != []: + raise ValueError('Repeated field default value not empty list: %s' % ( + field.default_value)) + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + # We can't look at _concrete_class yet since it might not have + # been set. (Depends on order in which we initialize the classes). + message_type = field.message_type + def MakeRepeatedMessageDefault(message): + return containers.RepeatedCompositeFieldContainer( + message._listener_for_children, field.message_type) + return MakeRepeatedMessageDefault + else: + type_checker = type_checkers.GetTypeChecker(field) + def MakeRepeatedScalarDefault(message): + return containers.RepeatedScalarFieldContainer( + message._listener_for_children, type_checker) + return MakeRepeatedScalarDefault + + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + # _concrete_class may not yet be initialized. + message_type = field.message_type + def MakeSubMessageDefault(message): + assert getattr(message_type, '_concrete_class', None), ( + 'Uninitialized concrete class found for field %r (message type %r)' + % (field.full_name, message_type.full_name)) + result = message_type._concrete_class() + result._SetListener( + _OneofListener(message, field) + if field.containing_oneof is not None + else message._listener_for_children) + return result + return MakeSubMessageDefault + + def MakeScalarDefault(message): + # TODO(protobuf-team): This may be broken since there may not be + # default_value. Combine with has_default_value somehow. + return field.default_value + return MakeScalarDefault + + +def _ReraiseTypeErrorWithFieldName(message_name, field_name): + """Re-raise the currently-handled TypeError with the field name added.""" + exc = sys.exc_info()[1] + if len(exc.args) == 1 and type(exc) is TypeError: + # simple TypeError; add field name to exception message + exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name)) + + # re-raise possibly-amended exception with original traceback: + raise exc.with_traceback(sys.exc_info()[2]) + + +def _AddInitMethod(message_descriptor, cls): + """Adds an __init__ method to cls.""" + + def _GetIntegerEnumValue(enum_type, value): + """Convert a string or integer enum value to an integer. + + If the value is a string, it is converted to the enum value in + enum_type with the same name. If the value is not a string, it's + returned as-is. (No conversion or bounds-checking is done.) + """ + if isinstance(value, str): + try: + return enum_type.values_by_name[value].number + except KeyError: + raise ValueError('Enum type %s: unknown label "%s"' % ( + enum_type.full_name, value)) + return value + + def init(self, **kwargs): + self._cached_byte_size = 0 + self._cached_byte_size_dirty = len(kwargs) > 0 + self._fields = {} + # Contains a mapping from oneof field descriptors to the descriptor + # of the currently set field in that oneof field. + self._oneofs = {} + + # _unknown_fields is () when empty for efficiency, and will be turned into + # a list if fields are added. 
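+    # Illustrative sketch (hypothetical generated class `Person`, not defined
+    # in this module): the keyword arguments processed further below allow
+    # construction such as
+    #
+    #   person = Person(name='Ada', id=1,
+    #                   phones=[{'number': '555-0100'}])
+    #
+    # Repeated message fields accept lists of dicts or message instances,
+    # message fields accept a dict or an instance, and enum fields accept
+    # either a value name or its number.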
+ self._unknown_fields = () + # _unknown_field_set is None when empty for efficiency, and will be + # turned into UnknownFieldSet struct if fields are added. + self._unknown_field_set = None # pylint: disable=protected-access + self._is_present_in_parent = False + self._listener = message_listener_mod.NullMessageListener() + self._listener_for_children = _Listener(self) + for field_name, field_value in kwargs.items(): + field = _GetFieldByName(message_descriptor, field_name) + if field is None: + raise TypeError('%s() got an unexpected keyword argument "%s"' % + (message_descriptor.name, field_name)) + if field_value is None: + # field=None is the same as no field at all. + continue + if field.label == _FieldDescriptor.LABEL_REPEATED: + copy = field._default_constructor(self) + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite + if _IsMapField(field): + if _IsMessageMapField(field): + for key in field_value: + copy[key].MergeFrom(field_value[key]) + else: + copy.update(field_value) + else: + for val in field_value: + if isinstance(val, dict): + copy.add(**val) + else: + copy.add().MergeFrom(val) + else: # Scalar + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + field_value = [_GetIntegerEnumValue(field.enum_type, val) + for val in field_value] + copy.extend(field_value) + self._fields[field] = copy + elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + copy = field._default_constructor(self) + new_val = field_value + if isinstance(field_value, dict): + new_val = field.message_type._concrete_class(**field_value) + try: + copy.MergeFrom(new_val) + except TypeError: + _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) + self._fields[field] = copy + else: + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + field_value = _GetIntegerEnumValue(field.enum_type, field_value) + try: + setattr(self, field_name, field_value) + except TypeError: + _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) + + init.__module__ = None + init.__doc__ = None + cls.__init__ = init + + +def _GetFieldByName(message_descriptor, field_name): + """Returns a field descriptor by field name. + + Args: + message_descriptor: A Descriptor describing all fields in message. + field_name: The name of the field to retrieve. + Returns: + The field descriptor associated with the field name. + """ + try: + return message_descriptor.fields_by_name[field_name] + except KeyError: + raise ValueError('Protocol message %s has no "%s" field.' % + (message_descriptor.name, field_name)) + + +def _AddPropertiesForFields(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + for field in descriptor.fields: + _AddPropertiesForField(field, cls) + + if descriptor.is_extendable: + # _ExtensionDict is just an adaptor with no state so we allocate a new one + # every time it is accessed. + cls.Extensions = property(lambda self: _ExtensionDict(self)) + + +def _AddPropertiesForField(field, cls): + """Adds a public property for a protocol message field. + Clients can use this property to get and (in the case + of non-repeated scalar fields) directly set the value + of a protocol message field. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + # Catch it if we add other types that we should + # handle specially here. 
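+  # Illustrative sketch (hypothetical message class `Person` with a scalar
+  # field `name` and a repeated message field `phones`): this function sets a
+  # class-level constant per field and then installs a property via one of
+  # the helpers below, e.g.
+  #
+  #   Person.NAME_FIELD_NUMBER         # field number from the .proto file
+  #   person.name = 'Ada'              # non-repeated scalars are assignable
+  #   person.phones.add(number='555')  # repeated fields expose a container
+  #
+  # Repeated and composite fields reject direct assignment; only their
+  # contents may be mutated.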
+ assert _FieldDescriptor.MAX_CPPTYPE == 10 + + constant_name = field.name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, field.number) + + if field.label == _FieldDescriptor.LABEL_REPEATED: + _AddPropertiesForRepeatedField(field, cls) + elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + _AddPropertiesForNonRepeatedCompositeField(field, cls) + else: + _AddPropertiesForNonRepeatedScalarField(field, cls) + + +class _FieldProperty(property): + __slots__ = ('DESCRIPTOR',) + + def __init__(self, descriptor, getter, setter, doc): + property.__init__(self, getter, setter, doc=doc) + self.DESCRIPTOR = descriptor + + +def _AddPropertiesForRepeatedField(field, cls): + """Adds a public property for a "repeated" protocol message field. Clients + can use this property to get the value of the field, which will be either a + RepeatedScalarFieldContainer or RepeatedCompositeFieldContainer (see + below). + + Note that when clients add values to these containers, we perform + type-checking in the case of repeated scalar fields, and we also set any + necessary "has" bits as a side-effect. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + + def getter(self): + field_value = self._fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + field_value = self._fields.setdefault(field, field_value) + return field_value + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + # We define a setter just so we can throw an exception with a more + # helpful error message. + def setter(self, new_value): + raise AttributeError('Assignment not allowed to repeated field ' + '"%s" in protocol message object.' % proto_field_name) + + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForNonRepeatedScalarField(field, cls): + """Adds a public property for a nonrepeated, scalar protocol message field. + Clients can use this property to get and directly set the value of the field. + Note that when the client sets the value of a field by using this property, + all necessary "has" bits are set as a side-effect, and we also perform + type-checking. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + type_checker = type_checkers.GetTypeChecker(field) + default_value = field.default_value + is_proto3 = field.containing_type.syntax == 'proto3' + + def getter(self): + # TODO(protobuf-team): This may be broken since there may not be + # default_value. Combine with has_default_value somehow. + return self._fields.get(field, default_value) + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' 
% proto_field_name + + clear_when_set_to_default = is_proto3 and not field.containing_oneof + + def field_setter(self, new_value): + # pylint: disable=protected-access + # Testing the value for truthiness captures all of the proto3 defaults + # (0, 0.0, enum 0, and False). + try: + new_value = type_checker.CheckValue(new_value) + except TypeError as e: + raise TypeError( + 'Cannot set %s to %.1024r: %s' % (field.full_name, new_value, e)) + if clear_when_set_to_default and not new_value: + self._fields.pop(field, None) + else: + self._fields[field] = new_value + # Check _cached_byte_size_dirty inline to improve performance, since scalar + # setters are called frequently. + if not self._cached_byte_size_dirty: + self._Modified() + + if field.containing_oneof: + def setter(self, new_value): + field_setter(self, new_value) + self._UpdateOneofState(field) + else: + setter = field_setter + + setter.__module__ = None + setter.__doc__ = 'Setter for %s.' % proto_field_name + + # Add a property to encapsulate the getter/setter. + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForNonRepeatedCompositeField(field, cls): + """Adds a public property for a nonrepeated, composite protocol message field. + A composite field is a "group" or "message" field. + + Clients can use this property to get the value of the field, but cannot + assign to the property directly. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + # TODO(robinson): Remove duplication with similar method + # for non-repeated scalars. + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + + def getter(self): + field_value = self._fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + field_value = self._fields.setdefault(field, field_value) + return field_value + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + # We define a setter just so we can throw an exception with a more + # helpful error message. + def setter(self, new_value): + raise AttributeError('Assignment not allowed to composite field ' + '"%s" in protocol message object.' % proto_field_name) + + # Add a property to encapsulate the getter. + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForExtensions(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + extensions = descriptor.extensions_by_name + for extension_name, extension_field in extensions.items(): + constant_name = extension_name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, extension_field.number) + + # TODO(amauryfa): Migrate all users of these attributes to functions like + # pool.FindExtensionByNumber(descriptor). + if descriptor.file is not None: + # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available. 
+ pool = descriptor.file.pool + cls._extensions_by_number = pool._extensions_by_number[descriptor] + cls._extensions_by_name = pool._extensions_by_name[descriptor] + +def _AddStaticMethods(cls): + # TODO(robinson): This probably needs to be thread-safe(?) + def RegisterExtension(extension_handle): + extension_handle.containing_type = cls.DESCRIPTOR + # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available. + # pylint: disable=protected-access + cls.DESCRIPTOR.file.pool._AddExtensionDescriptor(extension_handle) + _AttachFieldHelpers(cls, extension_handle) + cls.RegisterExtension = staticmethod(RegisterExtension) + + def FromString(s): + message = cls() + message.MergeFromString(s) + return message + cls.FromString = staticmethod(FromString) + + +def _IsPresent(item): + """Given a (FieldDescriptor, value) tuple from _fields, return true if the + value should be included in the list returned by ListFields().""" + + if item[0].label == _FieldDescriptor.LABEL_REPEATED: + return bool(item[1]) + elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + return item[1]._is_present_in_parent + else: + return True + + +def _AddListFieldsMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def ListFields(self): + all_fields = [item for item in self._fields.items() if _IsPresent(item)] + all_fields.sort(key = lambda item: item[0].number) + return all_fields + + cls.ListFields = ListFields + +_PROTO3_ERROR_TEMPLATE = \ + ('Protocol message %s has no non-repeated submessage field "%s" ' + 'nor marked as optional') +_PROTO2_ERROR_TEMPLATE = 'Protocol message %s has no non-repeated field "%s"' + +def _AddHasFieldMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + is_proto3 = (message_descriptor.syntax == "proto3") + error_msg = _PROTO3_ERROR_TEMPLATE if is_proto3 else _PROTO2_ERROR_TEMPLATE + + hassable_fields = {} + for field in message_descriptor.fields: + if field.label == _FieldDescriptor.LABEL_REPEATED: + continue + # For proto3, only submessages and fields inside a oneof have presence. + if (is_proto3 and field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE and + not field.containing_oneof): + continue + hassable_fields[field.name] = field + + # Has methods are supported for oneof descriptors. + for oneof in message_descriptor.oneofs: + hassable_fields[oneof.name] = oneof + + def HasField(self, field_name): + try: + field = hassable_fields[field_name] + except KeyError: + raise ValueError(error_msg % (message_descriptor.full_name, field_name)) + + if isinstance(field, descriptor_mod.OneofDescriptor): + try: + return HasField(self, self._oneofs[field].name) + except KeyError: + return False + else: + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + value = self._fields.get(field) + return value is not None and value._is_present_in_parent + else: + return field in self._fields + + cls.HasField = HasField + + +def _AddClearFieldMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def ClearField(self, field_name): + try: + field = message_descriptor.fields_by_name[field_name] + except KeyError: + try: + field = message_descriptor.oneofs_by_name[field_name] + if field in self._oneofs: + field = self._oneofs[field] + else: + return + except KeyError: + raise ValueError('Protocol message %s has no "%s" field.' % + (message_descriptor.name, field_name)) + + if field in self._fields: + # To match the C++ implementation, we need to invalidate iterators + # for map fields when ClearField() happens. 
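+      # Illustrative sketch (hypothetical instance `msg` with a singular
+      # submessage field `options`): clearing a field drops its presence bit
+      # and, for oneof members, resets the oneof state, e.g.
+      #
+      #   msg.options.flag = True
+      #   msg.HasField('options')     # True
+      #   msg.ClearField('options')
+      #   msg.HasField('options')     # False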
+ if hasattr(self._fields[field], 'InvalidateIterators'): + self._fields[field].InvalidateIterators() + + # Note: If the field is a sub-message, its listener will still point + # at us. That's fine, because the worst than can happen is that it + # will call _Modified() and invalidate our byte size. Big deal. + del self._fields[field] + + if self._oneofs.get(field.containing_oneof, None) is field: + del self._oneofs[field.containing_oneof] + + # Always call _Modified() -- even if nothing was changed, this is + # a mutating method, and thus calling it should cause the field to become + # present in the parent message. + self._Modified() + + cls.ClearField = ClearField + + +def _AddClearExtensionMethod(cls): + """Helper for _AddMessageMethods().""" + def ClearExtension(self, extension_handle): + extension_dict._VerifyExtensionHandle(self, extension_handle) + + # Similar to ClearField(), above. + if extension_handle in self._fields: + del self._fields[extension_handle] + self._Modified() + cls.ClearExtension = ClearExtension + + +def _AddHasExtensionMethod(cls): + """Helper for _AddMessageMethods().""" + def HasExtension(self, extension_handle): + extension_dict._VerifyExtensionHandle(self, extension_handle) + if extension_handle.label == _FieldDescriptor.LABEL_REPEATED: + raise KeyError('"%s" is repeated.' % extension_handle.full_name) + + if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + value = self._fields.get(extension_handle) + return value is not None and value._is_present_in_parent + else: + return extension_handle in self._fields + cls.HasExtension = HasExtension + +def _InternalUnpackAny(msg): + """Unpacks Any message and returns the unpacked message. + + This internal method is different from public Any Unpack method which takes + the target message as argument. _InternalUnpackAny method does not have + target message type and need to find the message type in descriptor pool. + + Args: + msg: An Any message to be unpacked. + + Returns: + The unpacked message. + """ + # TODO(amauryfa): Don't use the factory of generated messages. + # To make Any work with custom factories, use the message factory of the + # parent message. + # pylint: disable=g-import-not-at-top + from google.protobuf import symbol_database + factory = symbol_database.Default() + + type_url = msg.type_url + + if not type_url: + return None + + # TODO(haberman): For now we just strip the hostname. Better logic will be + # required. + type_name = type_url.split('/')[-1] + descriptor = factory.pool.FindMessageTypeByName(type_name) + + if descriptor is None: + return None + + message_class = factory.GetPrototype(descriptor) + message = message_class() + + message.ParseFromString(msg.value) + return message + + +def _AddEqualsMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __eq__(self, other): + if (not isinstance(other, message_mod.Message) or + other.DESCRIPTOR != self.DESCRIPTOR): + return False + + if self is other: + return True + + if self.DESCRIPTOR.full_name == _AnyFullTypeName: + any_a = _InternalUnpackAny(self) + any_b = _InternalUnpackAny(other) + if any_a and any_b: + return any_a == any_b + + if not self.ListFields() == other.ListFields(): + return False + + # TODO(jieluo): Fix UnknownFieldSet to consider MessageSet extensions, + # then use it for the comparison. 
+ unknown_fields = list(self._unknown_fields) + unknown_fields.sort() + other_unknown_fields = list(other._unknown_fields) + other_unknown_fields.sort() + return unknown_fields == other_unknown_fields + + cls.__eq__ = __eq__ + + +def _AddStrMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __str__(self): + return text_format.MessageToString(self) + cls.__str__ = __str__ + + +def _AddReprMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __repr__(self): + return text_format.MessageToString(self) + cls.__repr__ = __repr__ + + +def _AddUnicodeMethod(unused_message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def __unicode__(self): + return text_format.MessageToString(self, as_utf8=True).decode('utf-8') + cls.__unicode__ = __unicode__ + + +def _BytesForNonRepeatedElement(value, field_number, field_type): + """Returns the number of bytes needed to serialize a non-repeated element. + The returned byte count includes space for tag information and any + other additional space associated with serializing value. + + Args: + value: Value we're serializing. + field_number: Field number of this value. (Since the field number + is stored as part of a varint-encoded tag, this has an impact + on the total bytes required to serialize the value). + field_type: The type of the field. One of the TYPE_* constants + within FieldDescriptor. + """ + try: + fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type] + return fn(field_number, value) + except KeyError: + raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) + + +def _AddByteSizeMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def ByteSize(self): + if not self._cached_byte_size_dirty: + return self._cached_byte_size + + size = 0 + descriptor = self.DESCRIPTOR + if descriptor.GetOptions().map_entry: + # Fields of map entry should always be serialized. + size = descriptor.fields_by_name['key']._sizer(self.key) + size += descriptor.fields_by_name['value']._sizer(self.value) + else: + for field_descriptor, field_value in self.ListFields(): + size += field_descriptor._sizer(field_value) + for tag_bytes, value_bytes in self._unknown_fields: + size += len(tag_bytes) + len(value_bytes) + + self._cached_byte_size = size + self._cached_byte_size_dirty = False + self._listener_for_children.dirty = False + return size + + cls.ByteSize = ByteSize + + +def _AddSerializeToStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def SerializeToString(self, **kwargs): + # Check if the message has all of its required fields set. 
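+    # Illustrative sketch (hypothetical message instance `msg`): a typical
+    # serialize / parse round trip using the methods defined in this module:
+    #
+    #   data = msg.SerializeToString()
+    #   copy = msg.__class__()
+    #   copy.MergeFromString(data)
+    #   assert copy == msg
+    #
+    # SerializeToString raises EncodeError when required (proto2) fields are
+    # unset; SerializePartialToString performs no such check.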
+ if not self.IsInitialized(): + raise message_mod.EncodeError( + 'Message %s is missing required fields: %s' % ( + self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors()))) + return self.SerializePartialToString(**kwargs) + cls.SerializeToString = SerializeToString + + +def _AddSerializePartialToStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def SerializePartialToString(self, **kwargs): + out = BytesIO() + self._InternalSerialize(out.write, **kwargs) + return out.getvalue() + cls.SerializePartialToString = SerializePartialToString + + def InternalSerialize(self, write_bytes, deterministic=None): + if deterministic is None: + deterministic = ( + api_implementation.IsPythonDefaultSerializationDeterministic()) + else: + deterministic = bool(deterministic) + + descriptor = self.DESCRIPTOR + if descriptor.GetOptions().map_entry: + # Fields of map entry should always be serialized. + descriptor.fields_by_name['key']._encoder( + write_bytes, self.key, deterministic) + descriptor.fields_by_name['value']._encoder( + write_bytes, self.value, deterministic) + else: + for field_descriptor, field_value in self.ListFields(): + field_descriptor._encoder(write_bytes, field_value, deterministic) + for tag_bytes, value_bytes in self._unknown_fields: + write_bytes(tag_bytes) + write_bytes(value_bytes) + cls._InternalSerialize = InternalSerialize + + +def _AddMergeFromStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def MergeFromString(self, serialized): + serialized = memoryview(serialized) + length = len(serialized) + try: + if self._InternalParse(serialized, 0, length) != length: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise message_mod.DecodeError('Unexpected end-group tag.') + except (IndexError, TypeError): + # Now ord(buf[p:p+1]) == ord('') gets TypeError. + raise message_mod.DecodeError('Truncated message.') + except struct.error as e: + raise message_mod.DecodeError(e) + return length # Return this for legacy reasons. + cls.MergeFromString = MergeFromString + + local_ReadTag = decoder.ReadTag + local_SkipField = decoder.SkipField + decoders_by_tag = cls._decoders_by_tag + + def InternalParse(self, buffer, pos, end): + """Create a message from serialized bytes. + + Args: + self: Message, instance of the proto message object. + buffer: memoryview of the serialized data. + pos: int, position to start in the serialized data. + end: int, end position of the serialized data. + + Returns: + Message object. + """ + # Guard against internal misuse, since this function is called internally + # quite extensively, and its easy to accidentally pass bytes. 
+ assert isinstance(buffer, memoryview) + self._Modified() + field_dict = self._fields + # pylint: disable=protected-access + unknown_field_set = self._unknown_field_set + while pos != end: + (tag_bytes, new_pos) = local_ReadTag(buffer, pos) + field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None)) + if field_decoder is None: + if not self._unknown_fields: # pylint: disable=protected-access + self._unknown_fields = [] # pylint: disable=protected-access + if unknown_field_set is None: + # pylint: disable=protected-access + self._unknown_field_set = containers.UnknownFieldSet() + # pylint: disable=protected-access + unknown_field_set = self._unknown_field_set + # pylint: disable=protected-access + (tag, _) = decoder._DecodeVarint(tag_bytes, 0) + field_number, wire_type = wire_format.UnpackTag(tag) + if field_number == 0: + raise message_mod.DecodeError('Field number 0 is illegal.') + # TODO(jieluo): remove old_pos. + old_pos = new_pos + (data, new_pos) = decoder._DecodeUnknownField( + buffer, new_pos, wire_type) # pylint: disable=protected-access + if new_pos == -1: + return pos + # pylint: disable=protected-access + unknown_field_set._add(field_number, wire_type, data) + # TODO(jieluo): remove _unknown_fields. + new_pos = local_SkipField(buffer, old_pos, end, tag_bytes) + if new_pos == -1: + return pos + self._unknown_fields.append( + (tag_bytes, buffer[old_pos:new_pos].tobytes())) + pos = new_pos + else: + pos = field_decoder(buffer, new_pos, end, self, field_dict) + if field_desc: + self._UpdateOneofState(field_desc) + return pos + cls._InternalParse = InternalParse + + +def _AddIsInitializedMethod(message_descriptor, cls): + """Adds the IsInitialized and FindInitializationError methods to the + protocol message class.""" + + required_fields = [field for field in message_descriptor.fields + if field.label == _FieldDescriptor.LABEL_REQUIRED] + + def IsInitialized(self, errors=None): + """Checks if all required fields of a message are set. + + Args: + errors: A list which, if provided, will be populated with the field + paths of all missing required fields. + + Returns: + True iff the specified message has all required fields set. + """ + + # Performance is critical so we avoid HasField() and ListFields(). + + for field in required_fields: + if (field not in self._fields or + (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and + not self._fields[field]._is_present_in_parent)): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + + for field, value in list(self._fields.items()): # dict can change size! + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if field.label == _FieldDescriptor.LABEL_REPEATED: + if (field.message_type.has_options and + field.message_type.GetOptions().map_entry): + continue + for element in value: + if not element.IsInitialized(): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + elif value._is_present_in_parent and not value.IsInitialized(): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + + return True + + cls.IsInitialized = IsInitialized + + def FindInitializationErrors(self): + """Finds required fields which are not initialized. + + Returns: + A list of strings. Each string is a path to an uninitialized field from + the top-level message, e.g. "foo.bar[5].baz". 
+ """ + + errors = [] # simplify things + + for field in required_fields: + if not self.HasField(field.name): + errors.append(field.name) + + for field, value in self.ListFields(): + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if field.is_extension: + name = '(%s)' % field.full_name + else: + name = field.name + + if _IsMapField(field): + if _IsMessageMapField(field): + for key in value: + element = value[key] + prefix = '%s[%s].' % (name, key) + sub_errors = element.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + else: + # ScalarMaps can't have any initialization errors. + pass + elif field.label == _FieldDescriptor.LABEL_REPEATED: + for i in range(len(value)): + element = value[i] + prefix = '%s[%d].' % (name, i) + sub_errors = element.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + else: + prefix = name + '.' + sub_errors = value.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + + return errors + + cls.FindInitializationErrors = FindInitializationErrors + + +def _FullyQualifiedClassName(klass): + module = klass.__module__ + name = getattr(klass, '__qualname__', klass.__name__) + if module in (None, 'builtins', '__builtin__'): + return name + return module + '.' + name + + +def _AddMergeFromMethod(cls): + LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED + CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE + + def MergeFrom(self, msg): + if not isinstance(msg, cls): + raise TypeError( + 'Parameter to MergeFrom() must be instance of same class: ' + 'expected %s got %s.' % (_FullyQualifiedClassName(cls), + _FullyQualifiedClassName(msg.__class__))) + + assert msg is not self + self._Modified() + + fields = self._fields + + for field, value in msg._fields.items(): + if field.label == LABEL_REPEATED: + field_value = fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + fields[field] = field_value + field_value.MergeFrom(value) + elif field.cpp_type == CPPTYPE_MESSAGE: + if value._is_present_in_parent: + field_value = fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + fields[field] = field_value + field_value.MergeFrom(value) + else: + self._fields[field] = value + if field.containing_oneof: + self._UpdateOneofState(field) + + if msg._unknown_fields: + if not self._unknown_fields: + self._unknown_fields = [] + self._unknown_fields.extend(msg._unknown_fields) + # pylint: disable=protected-access + if self._unknown_field_set is None: + self._unknown_field_set = containers.UnknownFieldSet() + self._unknown_field_set._extend(msg._unknown_field_set) + + cls.MergeFrom = MergeFrom + + +def _AddWhichOneofMethod(message_descriptor, cls): + def WhichOneof(self, oneof_name): + """Returns the name of the currently set field inside a oneof, or None.""" + try: + field = message_descriptor.oneofs_by_name[oneof_name] + except KeyError: + raise ValueError( + 'Protocol message has no oneof "%s" field.' % oneof_name) + + nested_field = self._oneofs.get(field, None) + if nested_field is not None and self.HasField(nested_field.name): + return nested_field.name + else: + return None + + cls.WhichOneof = WhichOneof + + +def _Clear(self): + # Clear fields. 
+ self._fields = {} + self._unknown_fields = () + # pylint: disable=protected-access + if self._unknown_field_set is not None: + self._unknown_field_set._clear() + self._unknown_field_set = None + + self._oneofs = {} + self._Modified() + + +def _UnknownFields(self): + if self._unknown_field_set is None: # pylint: disable=protected-access + # pylint: disable=protected-access + self._unknown_field_set = containers.UnknownFieldSet() + return self._unknown_field_set # pylint: disable=protected-access + + +def _DiscardUnknownFields(self): + self._unknown_fields = [] + self._unknown_field_set = None # pylint: disable=protected-access + for field, value in self.ListFields(): + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if _IsMapField(field): + if _IsMessageMapField(field): + for key in value: + value[key].DiscardUnknownFields() + elif field.label == _FieldDescriptor.LABEL_REPEATED: + for sub_message in value: + sub_message.DiscardUnknownFields() + else: + value.DiscardUnknownFields() + + +def _SetListener(self, listener): + if listener is None: + self._listener = message_listener_mod.NullMessageListener() + else: + self._listener = listener + + +def _AddMessageMethods(message_descriptor, cls): + """Adds implementations of all Message methods to cls.""" + _AddListFieldsMethod(message_descriptor, cls) + _AddHasFieldMethod(message_descriptor, cls) + _AddClearFieldMethod(message_descriptor, cls) + if message_descriptor.is_extendable: + _AddClearExtensionMethod(cls) + _AddHasExtensionMethod(cls) + _AddEqualsMethod(message_descriptor, cls) + _AddStrMethod(message_descriptor, cls) + _AddReprMethod(message_descriptor, cls) + _AddUnicodeMethod(message_descriptor, cls) + _AddByteSizeMethod(message_descriptor, cls) + _AddSerializeToStringMethod(message_descriptor, cls) + _AddSerializePartialToStringMethod(message_descriptor, cls) + _AddMergeFromStringMethod(message_descriptor, cls) + _AddIsInitializedMethod(message_descriptor, cls) + _AddMergeFromMethod(cls) + _AddWhichOneofMethod(message_descriptor, cls) + # Adds methods which do not depend on cls. + cls.Clear = _Clear + cls.UnknownFields = _UnknownFields + cls.DiscardUnknownFields = _DiscardUnknownFields + cls._SetListener = _SetListener + + +def _AddPrivateHelperMethods(message_descriptor, cls): + """Adds implementation of private helper methods to cls.""" + + def Modified(self): + """Sets the _cached_byte_size_dirty bit to true, + and propagates this to our listener iff this was a state change. + """ + + # Note: Some callers check _cached_byte_size_dirty before calling + # _Modified() as an extra optimization. So, if this method is ever + # changed such that it does stuff even when _cached_byte_size_dirty is + # already true, the callers need to be updated. + if not self._cached_byte_size_dirty: + self._cached_byte_size_dirty = True + self._listener_for_children.dirty = True + self._is_present_in_parent = True + self._listener.Modified() + + def _UpdateOneofState(self, field): + """Sets field as the active field in its containing oneof. + + Will also delete currently active field in the oneof, if it is different + from the argument. Does not mark the message as modified. 
+ """ + other_field = self._oneofs.setdefault(field.containing_oneof, field) + if other_field is not field: + del self._fields[other_field] + self._oneofs[field.containing_oneof] = field + + cls._Modified = Modified + cls.SetInParent = Modified + cls._UpdateOneofState = _UpdateOneofState + + +class _Listener(object): + + """MessageListener implementation that a parent message registers with its + child message. + + In order to support semantics like: + + foo.bar.baz.qux = 23 + assert foo.HasField('bar') + + ...child objects must have back references to their parents. + This helper class is at the heart of this support. + """ + + def __init__(self, parent_message): + """Args: + parent_message: The message whose _Modified() method we should call when + we receive Modified() messages. + """ + # This listener establishes a back reference from a child (contained) object + # to its parent (containing) object. We make this a weak reference to avoid + # creating cyclic garbage when the client finishes with the 'parent' object + # in the tree. + if isinstance(parent_message, weakref.ProxyType): + self._parent_message_weakref = parent_message + else: + self._parent_message_weakref = weakref.proxy(parent_message) + + # As an optimization, we also indicate directly on the listener whether + # or not the parent message is dirty. This way we can avoid traversing + # up the tree in the common case. + self.dirty = False + + def Modified(self): + if self.dirty: + return + try: + # Propagate the signal to our parents iff this is the first field set. + self._parent_message_weakref._Modified() + except ReferenceError: + # We can get here if a client has kept a reference to a child object, + # and is now setting a field on it, but the child's parent has been + # garbage-collected. This is not an error. + pass + + +class _OneofListener(_Listener): + """Special listener implementation for setting composite oneof fields.""" + + def __init__(self, parent_message, field): + """Args: + parent_message: The message whose _Modified() method we should call when + we receive Modified() messages. + field: The descriptor of the field being set in the parent message. + """ + super(_OneofListener, self).__init__(parent_message) + self._field = field + + def Modified(self): + """Also updates the state of the containing oneof in the parent message.""" + try: + self._parent_message_weakref._UpdateOneofState(self._field) + super(_OneofListener, self).Modified() + except ReferenceError: + pass diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/type_checkers.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/type_checkers.py new file mode 100644 index 0000000000..a53e71fe8e --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/type_checkers.py @@ -0,0 +1,435 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides type checking routines. + +This module defines type checking utilities in the forms of dictionaries: + +VALUE_CHECKERS: A dictionary of field types and a value validation object. +TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing + function. +TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization + function. +FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their + corresponding wire types. +TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization + function. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import ctypes +import numbers + +from google.protobuf.internal import decoder +from google.protobuf.internal import encoder +from google.protobuf.internal import wire_format +from google.protobuf import descriptor + +_FieldDescriptor = descriptor.FieldDescriptor + + +def TruncateToFourByteFloat(original): + return ctypes.c_float(original).value + + +def ToShortestFloat(original): + """Returns the shortest float that has same value in wire.""" + # All 4 byte floats have between 6 and 9 significant digits, so we + # start with 6 as the lower bound. + # It has to be iterative because use '.9g' directly can not get rid + # of the noises for most values. For example if set a float_field=0.9 + # use '.9g' will print 0.899999976. + precision = 6 + rounded = float('{0:.{1}g}'.format(original, precision)) + while TruncateToFourByteFloat(rounded) != original: + precision += 1 + rounded = float('{0:.{1}g}'.format(original, precision)) + return rounded + + +def SupportsOpenEnums(field_descriptor): + return field_descriptor.containing_type.syntax == 'proto3' + + +def GetTypeChecker(field): + """Returns a type checker for a message field of the specified types. + + Args: + field: FieldDescriptor object for this field. + + Returns: + An instance of TypeChecker which can be used to verify the types + of values assigned to a field of the specified type. + """ + if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and + field.type == _FieldDescriptor.TYPE_STRING): + return UnicodeValueChecker() + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + if SupportsOpenEnums(field): + # When open enums are supported, any int32 can be assigned. + return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32] + else: + return EnumValueChecker(field.enum_type) + return _VALUE_CHECKERS[field.cpp_type] + + +# None of the typecheckers below make any attempt to guard against people +# subclassing builtin types and doing weird things. 
We're not trying to +# protect against malicious clients here, just people accidentally shooting +# themselves in the foot in obvious ways. +class TypeChecker(object): + + """Type checker used to catch type errors as early as possible + when the client is setting scalar fields in protocol messages. + """ + + def __init__(self, *acceptable_types): + self._acceptable_types = acceptable_types + + def CheckValue(self, proposed_value): + """Type check the provided value and return it. + + The returned value might have been normalized to another type. + """ + if not isinstance(proposed_value, self._acceptable_types): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), self._acceptable_types)) + raise TypeError(message) + return proposed_value + + +class TypeCheckerWithDefault(TypeChecker): + + def __init__(self, default_value, *acceptable_types): + TypeChecker.__init__(self, *acceptable_types) + self._default_value = default_value + + def DefaultValue(self): + return self._default_value + + +class BoolValueChecker(object): + """Type checker used for bool fields.""" + + def CheckValue(self, proposed_value): + if not hasattr(proposed_value, '__index__') or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (bool, int))) + raise TypeError(message) + return bool(proposed_value) + + def DefaultValue(self): + return False + + +# IntValueChecker and its subclasses perform integer type-checks +# and bounds-checks. +class IntValueChecker(object): + + """Checker used for integer fields. Performs type-check and range check.""" + + def CheckValue(self, proposed_value): + if not hasattr(proposed_value, '__index__') or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (int,))) + raise TypeError(message) + + if not self._MIN <= int(proposed_value) <= self._MAX: + raise ValueError('Value out of range: %d' % proposed_value) + # We force all values to int to make alternate implementations where the + # distinction is more significant (e.g. the C++ implementation) simpler. + proposed_value = int(proposed_value) + return proposed_value + + def DefaultValue(self): + return 0 + + +class EnumValueChecker(object): + + """Checker used for enum fields. Performs type-check and range check.""" + + def __init__(self, enum_type): + self._enum_type = enum_type + + def CheckValue(self, proposed_value): + if not isinstance(proposed_value, numbers.Integral): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (int,))) + raise TypeError(message) + if int(proposed_value) not in self._enum_type.values_by_number: + raise ValueError('Unknown enum value: %d' % proposed_value) + return proposed_value + + def DefaultValue(self): + return self._enum_type.values[0].number + + +class UnicodeValueChecker(object): + + """Checker used for string fields. + + Always returns a unicode value, even if the input is of type str. + """ + + def CheckValue(self, proposed_value): + if not isinstance(proposed_value, (bytes, str)): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (bytes, str))) + raise TypeError(message) + + # If the value is of type 'bytes' make sure that it is valid UTF-8 data. 
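These checkers are what reject bad assignments at set time rather than at serialization time. The observable behaviour through a generated message, sketched with the well-known Int32Value wrapper (assuming google.protobuf is importable):

from google.protobuf.wrappers_pb2 import Int32Value

msg = Int32Value()
msg.value = 2147483647        # fits in int32

try:
    msg.value = 2147483648    # out of range for int32
except ValueError:
    pass

try:
    msg.value = '7'           # wrong type entirely
except TypeError:
    pass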
+ if isinstance(proposed_value, bytes): + try: + proposed_value = proposed_value.decode('utf-8') + except UnicodeDecodeError: + raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 ' + 'encoding. Non-UTF-8 strings must be converted to ' + 'unicode objects before being added.' % + (proposed_value)) + else: + try: + proposed_value.encode('utf8') + except UnicodeEncodeError: + raise ValueError('%.1024r isn\'t a valid unicode string and ' + 'can\'t be encoded in UTF-8.'% + (proposed_value)) + + return proposed_value + + def DefaultValue(self): + return u"" + + +class Int32ValueChecker(IntValueChecker): + # We're sure to use ints instead of longs here since comparison may be more + # efficient. + _MIN = -2147483648 + _MAX = 2147483647 + + +class Uint32ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 32) - 1 + + +class Int64ValueChecker(IntValueChecker): + _MIN = -(1 << 63) + _MAX = (1 << 63) - 1 + + +class Uint64ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 64) - 1 + + +# The max 4 bytes float is about 3.4028234663852886e+38 +_FLOAT_MAX = float.fromhex('0x1.fffffep+127') +_FLOAT_MIN = -_FLOAT_MAX +_INF = float('inf') +_NEG_INF = float('-inf') + + +class DoubleValueChecker(object): + """Checker used for double fields. + + Performs type-check and range check. + """ + + def CheckValue(self, proposed_value): + """Check and convert proposed_value to float.""" + if (not hasattr(proposed_value, '__float__') and + not hasattr(proposed_value, '__index__')) or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: int, float' % + (proposed_value, type(proposed_value))) + raise TypeError(message) + return float(proposed_value) + + def DefaultValue(self): + return 0.0 + + +class FloatValueChecker(DoubleValueChecker): + """Checker used for float fields. + + Performs type-check and range check. + + Values exceeding a 32-bit float will be converted to inf/-inf. + """ + + def CheckValue(self, proposed_value): + """Check and convert proposed_value to float.""" + converted_value = super().CheckValue(proposed_value) + # This inf rounding matches the C++ proto SafeDoubleToFloat logic. + if converted_value > _FLOAT_MAX: + return _INF + if converted_value < _FLOAT_MIN: + return _NEG_INF + + return TruncateToFourByteFloat(converted_value) + +# Type-checkers for all scalar CPPTYPEs. +_VALUE_CHECKERS = { + _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(), + _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(), + _FieldDescriptor.CPPTYPE_DOUBLE: DoubleValueChecker(), + _FieldDescriptor.CPPTYPE_FLOAT: FloatValueChecker(), + _FieldDescriptor.CPPTYPE_BOOL: BoolValueChecker(), + _FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes), +} + + +# Map from field type to a function F, such that F(field_num, value) +# gives the total byte size for a value of the given type. This +# byte size includes tag information and any other additional space +# associated with serializing "value". 
+TYPE_TO_BYTE_SIZE_FN = { + _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize, + _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize, + _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize, + _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize, + _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize, + _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize, + _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize, + _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize, + _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize, + _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize, + _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize, + _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize, + _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize, + _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize, + _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize, + _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize, + _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize, + _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize + } + + +# Maps from field types to encoder constructors. +TYPE_TO_ENCODER = { + _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder, + _FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder, + _FieldDescriptor.TYPE_INT64: encoder.Int64Encoder, + _FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder, + _FieldDescriptor.TYPE_INT32: encoder.Int32Encoder, + _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder, + _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder, + _FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder, + _FieldDescriptor.TYPE_STRING: encoder.StringEncoder, + _FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder, + _FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder, + _FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder, + _FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder, + _FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder, + _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder, + _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder, + _FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder, + _FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder, + } + + +# Maps from field types to sizer constructors. +TYPE_TO_SIZER = { + _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer, + _FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer, + _FieldDescriptor.TYPE_INT64: encoder.Int64Sizer, + _FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer, + _FieldDescriptor.TYPE_INT32: encoder.Int32Sizer, + _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer, + _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer, + _FieldDescriptor.TYPE_BOOL: encoder.BoolSizer, + _FieldDescriptor.TYPE_STRING: encoder.StringSizer, + _FieldDescriptor.TYPE_GROUP: encoder.GroupSizer, + _FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer, + _FieldDescriptor.TYPE_BYTES: encoder.BytesSizer, + _FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer, + _FieldDescriptor.TYPE_ENUM: encoder.EnumSizer, + _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer, + _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer, + _FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer, + _FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer, + } + + +# Maps from field type to a decoder constructor. 
+TYPE_TO_DECODER = { + _FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder, + _FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder, + _FieldDescriptor.TYPE_INT64: decoder.Int64Decoder, + _FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder, + _FieldDescriptor.TYPE_INT32: decoder.Int32Decoder, + _FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder, + _FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder, + _FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder, + _FieldDescriptor.TYPE_STRING: decoder.StringDecoder, + _FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder, + _FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder, + _FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder, + _FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder, + _FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder, + _FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder, + _FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder, + _FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder, + _FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder, + } + +# Maps from field type to expected wiretype. +FIELD_TYPE_TO_WIRE_TYPE = { + _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_STRING: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP, + _FieldDescriptor.TYPE_MESSAGE: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_BYTES: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT, + } diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/well_known_types.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/well_known_types.py new file mode 100644 index 0000000000..b581ab750a --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/well_known_types.py @@ -0,0 +1,878 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains well known classes. + +This files defines well known classes which need extra maintenance including: + - Any + - Duration + - FieldMask + - Struct + - Timestamp +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + +import calendar +import collections.abc +import datetime + +from google.protobuf.descriptor import FieldDescriptor + +_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S' +_NANOS_PER_SECOND = 1000000000 +_NANOS_PER_MILLISECOND = 1000000 +_NANOS_PER_MICROSECOND = 1000 +_MILLIS_PER_SECOND = 1000 +_MICROS_PER_SECOND = 1000000 +_SECONDS_PER_DAY = 24 * 3600 +_DURATION_SECONDS_MAX = 315576000000 + + +class Any(object): + """Class for Any Message type.""" + + __slots__ = () + + def Pack(self, msg, type_url_prefix='type.googleapis.com/', + deterministic=None): + """Packs the specified message into current Any message.""" + if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/': + self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) + else: + self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) + self.value = msg.SerializeToString(deterministic=deterministic) + + def Unpack(self, msg): + """Unpacks the current Any message into specified message.""" + descriptor = msg.DESCRIPTOR + if not self.Is(descriptor): + return False + msg.ParseFromString(self.value) + return True + + def TypeName(self): + """Returns the protobuf type name of the inner message.""" + # Only last part is to be used: b/25630112 + return self.type_url.split('/')[-1] + + def Is(self, descriptor): + """Checks if this Any represents the given protobuf type.""" + return '/' in self.type_url and self.TypeName() == descriptor.full_name + + +_EPOCH_DATETIME_NAIVE = datetime.datetime.utcfromtimestamp(0) +_EPOCH_DATETIME_AWARE = datetime.datetime.fromtimestamp( + 0, tz=datetime.timezone.utc) + + +class Timestamp(object): + """Class for Timestamp message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts Timestamp to RFC 3339 date string format. + + Returns: + A string converted from timestamp. The string is always Z-normalized + and uses 3, 6 or 9 fractional digits as required to represent the + exact time. Example of the return format: '1972-01-01T10:00:20.021Z' + """ + nanos = self.nanos % _NANOS_PER_SECOND + total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND + seconds = total_sec % _SECONDS_PER_DAY + days = (total_sec - seconds) // _SECONDS_PER_DAY + dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(days, seconds) + + result = dt.isoformat() + if (nanos % 1e9) == 0: + # If there are 0 fractional digits, the fractional + # point '.' should be omitted when serializing. + return result + 'Z' + if (nanos % 1e6) == 0: + # Serialize 3 fractional digits. 
+ return result + '.%03dZ' % (nanos / 1e6) + if (nanos % 1e3) == 0: + # Serialize 6 fractional digits. + return result + '.%06dZ' % (nanos / 1e3) + # Serialize 9 fractional digits. + return result + '.%09dZ' % nanos + + def FromJsonString(self, value): + """Parse a RFC 3339 date string format to Timestamp. + + Args: + value: A date string. Any fractional digits (or none) and any offset are + accepted as long as they fit into nano-seconds precision. + Example of accepted format: '1972-01-01T10:00:20.021-05:00' + + Raises: + ValueError: On parsing problems. + """ + if not isinstance(value, str): + raise ValueError('Timestamp JSON value not a string: {!r}'.format(value)) + timezone_offset = value.find('Z') + if timezone_offset == -1: + timezone_offset = value.find('+') + if timezone_offset == -1: + timezone_offset = value.rfind('-') + if timezone_offset == -1: + raise ValueError( + 'Failed to parse timestamp: missing valid timezone offset.') + time_value = value[0:timezone_offset] + # Parse datetime and nanos. + point_position = time_value.find('.') + if point_position == -1: + second_value = time_value + nano_value = '' + else: + second_value = time_value[:point_position] + nano_value = time_value[point_position + 1:] + if 't' in second_value: + raise ValueError( + 'time data \'{0}\' does not match format \'%Y-%m-%dT%H:%M:%S\', ' + 'lowercase \'t\' is not accepted'.format(second_value)) + date_object = datetime.datetime.strptime(second_value, _TIMESTAMPFOMAT) + td = date_object - datetime.datetime(1970, 1, 1) + seconds = td.seconds + td.days * _SECONDS_PER_DAY + if len(nano_value) > 9: + raise ValueError( + 'Failed to parse Timestamp: nanos {0} more than ' + '9 fractional digits.'.format(nano_value)) + if nano_value: + nanos = round(float('0.' + nano_value) * 1e9) + else: + nanos = 0 + # Parse timezone offsets. 
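Example of the RFC 3339 handling above (a sketch, assuming google.protobuf is importable): FromJsonString() accepts any UTC offset and folds it into the stored seconds, while ToJsonString() always emits a Z-suffixed string with 0, 3, 6 or 9 fractional digits.

from google.protobuf.timestamp_pb2 import Timestamp

ts = Timestamp()
ts.FromJsonString('1972-01-01T10:00:20.021-05:00')
assert ts.ToJsonString() == '1972-01-01T15:00:20.021Z'   # offset folded into UTC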
+ if value[timezone_offset] == 'Z': + if len(value) != timezone_offset + 1: + raise ValueError('Failed to parse timestamp: invalid trailing' + ' data {0}.'.format(value)) + else: + timezone = value[timezone_offset:] + pos = timezone.find(':') + if pos == -1: + raise ValueError( + 'Invalid timezone offset value: {0}.'.format(timezone)) + if timezone[0] == '+': + seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 + else: + seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 + # Set seconds and nanos + self.seconds = int(seconds) + self.nanos = int(nanos) + + def GetCurrentTime(self): + """Get the current UTC into Timestamp.""" + self.FromDatetime(datetime.datetime.utcnow()) + + def ToNanoseconds(self): + """Converts Timestamp to nanoseconds since epoch.""" + return self.seconds * _NANOS_PER_SECOND + self.nanos + + def ToMicroseconds(self): + """Converts Timestamp to microseconds since epoch.""" + return (self.seconds * _MICROS_PER_SECOND + + self.nanos // _NANOS_PER_MICROSECOND) + + def ToMilliseconds(self): + """Converts Timestamp to milliseconds since epoch.""" + return (self.seconds * _MILLIS_PER_SECOND + + self.nanos // _NANOS_PER_MILLISECOND) + + def ToSeconds(self): + """Converts Timestamp to seconds since epoch.""" + return self.seconds + + def FromNanoseconds(self, nanos): + """Converts nanoseconds since epoch to Timestamp.""" + self.seconds = nanos // _NANOS_PER_SECOND + self.nanos = nanos % _NANOS_PER_SECOND + + def FromMicroseconds(self, micros): + """Converts microseconds since epoch to Timestamp.""" + self.seconds = micros // _MICROS_PER_SECOND + self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND + + def FromMilliseconds(self, millis): + """Converts milliseconds since epoch to Timestamp.""" + self.seconds = millis // _MILLIS_PER_SECOND + self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND + + def FromSeconds(self, seconds): + """Converts seconds since epoch to Timestamp.""" + self.seconds = seconds + self.nanos = 0 + + def ToDatetime(self, tzinfo=None): + """Converts Timestamp to a datetime. + + Args: + tzinfo: A datetime.tzinfo subclass; defaults to None. + + Returns: + If tzinfo is None, returns a timezone-naive UTC datetime (with no timezone + information, i.e. not aware that it's UTC). + + Otherwise, returns a timezone-aware datetime in the input timezone. + """ + delta = datetime.timedelta( + seconds=self.seconds, + microseconds=_RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)) + if tzinfo is None: + return _EPOCH_DATETIME_NAIVE + delta + else: + return _EPOCH_DATETIME_AWARE.astimezone(tzinfo) + delta + + def FromDatetime(self, dt): + """Converts datetime to Timestamp. + + Args: + dt: A datetime. If it's timezone-naive, it's assumed to be in UTC. + """ + # Using this guide: http://wiki.python.org/moin/WorkingWithTime + # And this conversion guide: http://docs.python.org/library/time.html + + # Turn the date parameter into a tuple (struct_time) that can then be + # manipulated into a long value of seconds. During the conversion from + # struct_time to long, the source date in UTC, and so it follows that the + # correct transformation is calendar.timegm() + self.seconds = calendar.timegm(dt.utctimetuple()) + self.nanos = dt.microsecond * _NANOS_PER_MICROSECOND + + +class Duration(object): + """Class for Duration message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts Duration to string format. + + Returns: + A string converted from self. 
The string format will contains + 3, 6, or 9 fractional digits depending on the precision required to + represent the exact Duration value. For example: "1s", "1.010s", + "1.000000100s", "-3.100s" + """ + _CheckDurationValid(self.seconds, self.nanos) + if self.seconds < 0 or self.nanos < 0: + result = '-' + seconds = - self.seconds + int((0 - self.nanos) // 1e9) + nanos = (0 - self.nanos) % 1e9 + else: + result = '' + seconds = self.seconds + int(self.nanos // 1e9) + nanos = self.nanos % 1e9 + result += '%d' % seconds + if (nanos % 1e9) == 0: + # If there are 0 fractional digits, the fractional + # point '.' should be omitted when serializing. + return result + 's' + if (nanos % 1e6) == 0: + # Serialize 3 fractional digits. + return result + '.%03ds' % (nanos / 1e6) + if (nanos % 1e3) == 0: + # Serialize 6 fractional digits. + return result + '.%06ds' % (nanos / 1e3) + # Serialize 9 fractional digits. + return result + '.%09ds' % nanos + + def FromJsonString(self, value): + """Converts a string to Duration. + + Args: + value: A string to be converted. The string must end with 's'. Any + fractional digits (or none) are accepted as long as they fit into + precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s + + Raises: + ValueError: On parsing problems. + """ + if not isinstance(value, str): + raise ValueError('Duration JSON value not a string: {!r}'.format(value)) + if len(value) < 1 or value[-1] != 's': + raise ValueError( + 'Duration must end with letter "s": {0}.'.format(value)) + try: + pos = value.find('.') + if pos == -1: + seconds = int(value[:-1]) + nanos = 0 + else: + seconds = int(value[:pos]) + if value[0] == '-': + nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9)) + else: + nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9)) + _CheckDurationValid(seconds, nanos) + self.seconds = seconds + self.nanos = nanos + except ValueError as e: + raise ValueError( + 'Couldn\'t parse duration: {0} : {1}.'.format(value, e)) + + def ToNanoseconds(self): + """Converts a Duration to nanoseconds.""" + return self.seconds * _NANOS_PER_SECOND + self.nanos + + def ToMicroseconds(self): + """Converts a Duration to microseconds.""" + micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND) + return self.seconds * _MICROS_PER_SECOND + micros + + def ToMilliseconds(self): + """Converts a Duration to milliseconds.""" + millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND) + return self.seconds * _MILLIS_PER_SECOND + millis + + def ToSeconds(self): + """Converts a Duration to seconds.""" + return self.seconds + + def FromNanoseconds(self, nanos): + """Converts nanoseconds to Duration.""" + self._NormalizeDuration(nanos // _NANOS_PER_SECOND, + nanos % _NANOS_PER_SECOND) + + def FromMicroseconds(self, micros): + """Converts microseconds to Duration.""" + self._NormalizeDuration( + micros // _MICROS_PER_SECOND, + (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND) + + def FromMilliseconds(self, millis): + """Converts milliseconds to Duration.""" + self._NormalizeDuration( + millis // _MILLIS_PER_SECOND, + (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND) + + def FromSeconds(self, seconds): + """Converts seconds to Duration.""" + self.seconds = seconds + self.nanos = 0 + + def ToTimedelta(self): + """Converts Duration to timedelta.""" + return datetime.timedelta( + seconds=self.seconds, microseconds=_RoundTowardZero( + self.nanos, _NANOS_PER_MICROSECOND)) + + def FromTimedelta(self, td): + """Converts timedelta to Duration.""" + 
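The same fractional-digit rule applies to Duration, shown here with FromTimedelta() and FromJsonString() (a sketch assuming google.protobuf is importable):

import datetime

from google.protobuf.duration_pb2 import Duration

d = Duration()
d.FromTimedelta(datetime.timedelta(seconds=1, milliseconds=10))
assert d.ToJsonString() == '1.010s'

d.FromJsonString('-3.100s')
assert (d.seconds, d.nanos) == (-3, -100000000)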
self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY, + td.microseconds * _NANOS_PER_MICROSECOND) + + def _NormalizeDuration(self, seconds, nanos): + """Set Duration by seconds and nanos.""" + # Force nanos to be negative if the duration is negative. + if seconds < 0 and nanos > 0: + seconds += 1 + nanos -= _NANOS_PER_SECOND + self.seconds = seconds + self.nanos = nanos + + +def _CheckDurationValid(seconds, nanos): + if seconds < -_DURATION_SECONDS_MAX or seconds > _DURATION_SECONDS_MAX: + raise ValueError( + 'Duration is not valid: Seconds {0} must be in range ' + '[-315576000000, 315576000000].'.format(seconds)) + if nanos <= -_NANOS_PER_SECOND or nanos >= _NANOS_PER_SECOND: + raise ValueError( + 'Duration is not valid: Nanos {0} must be in range ' + '[-999999999, 999999999].'.format(nanos)) + if (nanos < 0 and seconds > 0) or (nanos > 0 and seconds < 0): + raise ValueError( + 'Duration is not valid: Sign mismatch.') + + +def _RoundTowardZero(value, divider): + """Truncates the remainder part after division.""" + # For some languages, the sign of the remainder is implementation + # dependent if any of the operands is negative. Here we enforce + # "rounded toward zero" semantics. For example, for (-5) / 2 an + # implementation may give -3 as the result with the remainder being + # 1. This function ensures we always return -2 (closer to zero). + result = value // divider + remainder = value % divider + if result < 0 and remainder > 0: + return result + 1 + else: + return result + + +class FieldMask(object): + """Class for FieldMask message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts FieldMask to string according to proto3 JSON spec.""" + camelcase_paths = [] + for path in self.paths: + camelcase_paths.append(_SnakeCaseToCamelCase(path)) + return ','.join(camelcase_paths) + + def FromJsonString(self, value): + """Converts string to FieldMask according to proto3 JSON spec.""" + if not isinstance(value, str): + raise ValueError('FieldMask JSON value not a string: {!r}'.format(value)) + self.Clear() + if value: + for path in value.split(','): + self.paths.append(_CamelCaseToSnakeCase(path)) + + def IsValidForDescriptor(self, message_descriptor): + """Checks whether the FieldMask is valid for Message Descriptor.""" + for path in self.paths: + if not _IsValidPath(message_descriptor, path): + return False + return True + + def AllFieldsFromDescriptor(self, message_descriptor): + """Gets all direct fields of Message Descriptor to FieldMask.""" + self.Clear() + for field in message_descriptor.fields: + self.paths.append(field.name) + + def CanonicalFormFromMask(self, mask): + """Converts a FieldMask to the canonical form. + + Removes paths that are covered by another path. For example, + "foo.bar" is covered by "foo" and will be removed if "foo" + is also in the FieldMask. Then sorts all paths in alphabetical order. + + Args: + mask: The original FieldMask to be converted. 
+ """ + tree = _FieldMaskTree(mask) + tree.ToFieldMask(self) + + def Union(self, mask1, mask2): + """Merges mask1 and mask2 into this FieldMask.""" + _CheckFieldMaskMessage(mask1) + _CheckFieldMaskMessage(mask2) + tree = _FieldMaskTree(mask1) + tree.MergeFromFieldMask(mask2) + tree.ToFieldMask(self) + + def Intersect(self, mask1, mask2): + """Intersects mask1 and mask2 into this FieldMask.""" + _CheckFieldMaskMessage(mask1) + _CheckFieldMaskMessage(mask2) + tree = _FieldMaskTree(mask1) + intersection = _FieldMaskTree() + for path in mask2.paths: + tree.IntersectPath(path, intersection) + intersection.ToFieldMask(self) + + def MergeMessage( + self, source, destination, + replace_message_field=False, replace_repeated_field=False): + """Merges fields specified in FieldMask from source to destination. + + Args: + source: Source message. + destination: The destination message to be merged into. + replace_message_field: Replace message field if True. Merge message + field if False. + replace_repeated_field: Replace repeated field if True. Append + elements of repeated field if False. + """ + tree = _FieldMaskTree(self) + tree.MergeMessage( + source, destination, replace_message_field, replace_repeated_field) + + +def _IsValidPath(message_descriptor, path): + """Checks whether the path is valid for Message Descriptor.""" + parts = path.split('.') + last = parts.pop() + for name in parts: + field = message_descriptor.fields_by_name.get(name) + if (field is None or + field.label == FieldDescriptor.LABEL_REPEATED or + field.type != FieldDescriptor.TYPE_MESSAGE): + return False + message_descriptor = field.message_type + return last in message_descriptor.fields_by_name + + +def _CheckFieldMaskMessage(message): + """Raises ValueError if message is not a FieldMask.""" + message_descriptor = message.DESCRIPTOR + if (message_descriptor.name != 'FieldMask' or + message_descriptor.file.name != 'google/protobuf/field_mask.proto'): + raise ValueError('Message {0} is not a FieldMask.'.format( + message_descriptor.full_name)) + + +def _SnakeCaseToCamelCase(path_name): + """Converts a path name from snake_case to camelCase.""" + result = [] + after_underscore = False + for c in path_name: + if c.isupper(): + raise ValueError( + 'Fail to print FieldMask to Json string: Path name ' + '{0} must not contain uppercase letters.'.format(path_name)) + if after_underscore: + if c.islower(): + result.append(c.upper()) + after_underscore = False + else: + raise ValueError( + 'Fail to print FieldMask to Json string: The ' + 'character after a "_" must be a lowercase letter ' + 'in path name {0}.'.format(path_name)) + elif c == '_': + after_underscore = True + else: + result += c + + if after_underscore: + raise ValueError('Fail to print FieldMask to Json string: Trailing "_" ' + 'in path name {0}.'.format(path_name)) + return ''.join(result) + + +def _CamelCaseToSnakeCase(path_name): + """Converts a field name from camelCase to snake_case.""" + result = [] + for c in path_name: + if c == '_': + raise ValueError('Fail to parse FieldMask: Path name ' + '{0} must not contain "_"s.'.format(path_name)) + if c.isupper(): + result += '_' + result += c.lower() + else: + result += c + return ''.join(result) + + +class _FieldMaskTree(object): + """Represents a FieldMask in a tree structure. + + For example, given a FieldMask "foo.bar,foo.baz,bar.baz", + the FieldMaskTree will be: + [_root] -+- foo -+- bar + | | + | +- baz + | + +- bar --- baz + In the tree, each leaf node represents a field path. 
+ """ + + __slots__ = ('_root',) + + def __init__(self, field_mask=None): + """Initializes the tree by FieldMask.""" + self._root = {} + if field_mask: + self.MergeFromFieldMask(field_mask) + + def MergeFromFieldMask(self, field_mask): + """Merges a FieldMask to the tree.""" + for path in field_mask.paths: + self.AddPath(path) + + def AddPath(self, path): + """Adds a field path into the tree. + + If the field path to add is a sub-path of an existing field path + in the tree (i.e., a leaf node), it means the tree already matches + the given path so nothing will be added to the tree. If the path + matches an existing non-leaf node in the tree, that non-leaf node + will be turned into a leaf node with all its children removed because + the path matches all the node's children. Otherwise, a new path will + be added. + + Args: + path: The field path to add. + """ + node = self._root + for name in path.split('.'): + if name not in node: + node[name] = {} + elif not node[name]: + # Pre-existing empty node implies we already have this entire tree. + return + node = node[name] + # Remove any sub-trees we might have had. + node.clear() + + def ToFieldMask(self, field_mask): + """Converts the tree to a FieldMask.""" + field_mask.Clear() + _AddFieldPaths(self._root, '', field_mask) + + def IntersectPath(self, path, intersection): + """Calculates the intersection part of a field path with this tree. + + Args: + path: The field path to calculates. + intersection: The out tree to record the intersection part. + """ + node = self._root + for name in path.split('.'): + if name not in node: + return + elif not node[name]: + intersection.AddPath(path) + return + node = node[name] + intersection.AddLeafNodes(path, node) + + def AddLeafNodes(self, prefix, node): + """Adds leaf nodes begin with prefix to this tree.""" + if not node: + self.AddPath(prefix) + for name in node: + child_path = prefix + '.' + name + self.AddLeafNodes(child_path, node[name]) + + def MergeMessage( + self, source, destination, + replace_message, replace_repeated): + """Merge all fields specified by this tree from source to destination.""" + _MergeMessage( + self._root, source, destination, replace_message, replace_repeated) + + +def _StrConvert(value): + """Converts value to str if it is not.""" + # This file is imported by c extension and some methods like ClearField + # requires string for the field name. py2/py3 has different text + # type and may use unicode. + if not isinstance(value, str): + return value.encode('utf-8') + return value + + +def _MergeMessage( + node, source, destination, replace_message, replace_repeated): + """Merge all fields specified by a sub-tree from source to destination.""" + source_descriptor = source.DESCRIPTOR + for name in node: + child = node[name] + field = source_descriptor.fields_by_name[name] + if field is None: + raise ValueError('Error: Can\'t find field {0} in message {1}.'.format( + name, source_descriptor.full_name)) + if child: + # Sub-paths are only allowed for singular message fields. 
+ if (field.label == FieldDescriptor.LABEL_REPEATED or + field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE): + raise ValueError('Error: Field {0} in message {1} is not a singular ' + 'message field and cannot have sub-fields.'.format( + name, source_descriptor.full_name)) + if source.HasField(name): + _MergeMessage( + child, getattr(source, name), getattr(destination, name), + replace_message, replace_repeated) + continue + if field.label == FieldDescriptor.LABEL_REPEATED: + if replace_repeated: + destination.ClearField(_StrConvert(name)) + repeated_source = getattr(source, name) + repeated_destination = getattr(destination, name) + repeated_destination.MergeFrom(repeated_source) + else: + if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + if replace_message: + destination.ClearField(_StrConvert(name)) + if source.HasField(name): + getattr(destination, name).MergeFrom(getattr(source, name)) + else: + setattr(destination, name, getattr(source, name)) + + +def _AddFieldPaths(node, prefix, field_mask): + """Adds the field paths descended from node to field_mask.""" + if not node and prefix: + field_mask.paths.append(prefix) + return + for name in sorted(node): + if prefix: + child_path = prefix + '.' + name + else: + child_path = name + _AddFieldPaths(node[name], child_path, field_mask) + + +def _SetStructValue(struct_value, value): + if value is None: + struct_value.null_value = 0 + elif isinstance(value, bool): + # Note: this check must come before the number check because in Python + # True and False are also considered numbers. + struct_value.bool_value = value + elif isinstance(value, str): + struct_value.string_value = value + elif isinstance(value, (int, float)): + struct_value.number_value = value + elif isinstance(value, (dict, Struct)): + struct_value.struct_value.Clear() + struct_value.struct_value.update(value) + elif isinstance(value, (list, ListValue)): + struct_value.list_value.Clear() + struct_value.list_value.extend(value) + else: + raise ValueError('Unexpected type') + + +def _GetStructValue(struct_value): + which = struct_value.WhichOneof('kind') + if which == 'struct_value': + return struct_value.struct_value + elif which == 'null_value': + return None + elif which == 'number_value': + return struct_value.number_value + elif which == 'string_value': + return struct_value.string_value + elif which == 'bool_value': + return struct_value.bool_value + elif which == 'list_value': + return struct_value.list_value + elif which is None: + raise ValueError('Value not set') + + +class Struct(object): + """Class for Struct message type.""" + + __slots__ = () + + def __getitem__(self, key): + return _GetStructValue(self.fields[key]) + + def __contains__(self, item): + return item in self.fields + + def __setitem__(self, key, value): + _SetStructValue(self.fields[key], value) + + def __delitem__(self, key): + del self.fields[key] + + def __len__(self): + return len(self.fields) + + def __iter__(self): + return iter(self.fields) + + def keys(self): # pylint: disable=invalid-name + return self.fields.keys() + + def values(self): # pylint: disable=invalid-name + return [self[key] for key in self] + + def items(self): # pylint: disable=invalid-name + return [(key, self[key]) for key in self] + + def get_or_create_list(self, key): + """Returns a list for this key, creating if it didn't exist already.""" + if not self.fields[key].HasField('list_value'): + # Clear will mark list_value modified which will indeed create a list. 
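The Struct wrapper above behaves like a mutable mapping of JSON-style values; numbers are stored as doubles and nested Python lists become ListValue. A small usage sketch (assuming google.protobuf is importable):

from google.protobuf.struct_pb2 import Struct

s = Struct()
s.update({'name': 'shot_010', 'frames': 24, 'approved': True})
assert s['frames'] == 24          # stored as number_value (a double)
s['tags'] = ['layout', 'anim']    # stored as a ListValue
assert list(s['tags']) == ['layout', 'anim']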
+ self.fields[key].list_value.Clear() + return self.fields[key].list_value + + def get_or_create_struct(self, key): + """Returns a struct for this key, creating if it didn't exist already.""" + if not self.fields[key].HasField('struct_value'): + # Clear will mark struct_value modified which will indeed create a struct. + self.fields[key].struct_value.Clear() + return self.fields[key].struct_value + + def update(self, dictionary): # pylint: disable=invalid-name + for key, value in dictionary.items(): + _SetStructValue(self.fields[key], value) + +collections.abc.MutableMapping.register(Struct) + + +class ListValue(object): + """Class for ListValue message type.""" + + __slots__ = () + + def __len__(self): + return len(self.values) + + def append(self, value): + _SetStructValue(self.values.add(), value) + + def extend(self, elem_seq): + for value in elem_seq: + self.append(value) + + def __getitem__(self, index): + """Retrieves item by the specified index.""" + return _GetStructValue(self.values.__getitem__(index)) + + def __setitem__(self, index, value): + _SetStructValue(self.values.__getitem__(index), value) + + def __delitem__(self, key): + del self.values[key] + + def items(self): + for i in range(len(self)): + yield self[i] + + def add_struct(self): + """Appends and returns a struct value as the next value in the list.""" + struct_value = self.values.add().struct_value + # Clear will mark struct_value modified which will indeed create a struct. + struct_value.Clear() + return struct_value + + def add_list(self): + """Appends and returns a list value as the next value in the list.""" + list_value = self.values.add().list_value + # Clear will mark list_value modified which will indeed create a list. + list_value.Clear() + return list_value + +collections.abc.MutableSequence.register(ListValue) + + +WKTBASES = { + 'google.protobuf.Any': Any, + 'google.protobuf.Duration': Duration, + 'google.protobuf.FieldMask': FieldMask, + 'google.protobuf.ListValue': ListValue, + 'google.protobuf.Struct': Struct, + 'google.protobuf.Timestamp': Timestamp, +} diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/wire_format.py b/openpype/hosts/hiero/vendor/google/protobuf/internal/wire_format.py new file mode 100644 index 0000000000..883f525585 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/internal/wire_format.py @@ -0,0 +1,268 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Constants and static functions to support protocol buffer wire format."""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import struct
+from google.protobuf import descriptor
+from google.protobuf import message
+
+
+TAG_TYPE_BITS = 3  # Number of bits used to hold type info in a proto tag.
+TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1  # 0x7
+
+# These numbers identify the wire type of a protocol buffer value.
+# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
+# tag-and-type to store one of these WIRETYPE_* constants.
+# These values must match WireType enum in google/protobuf/wire_format.h.
+WIRETYPE_VARINT = 0
+WIRETYPE_FIXED64 = 1
+WIRETYPE_LENGTH_DELIMITED = 2
+WIRETYPE_START_GROUP = 3
+WIRETYPE_END_GROUP = 4
+WIRETYPE_FIXED32 = 5
+_WIRETYPE_MAX = 5
+
+
+# Bounds for various integer types.
+INT32_MAX = int((1 << 31) - 1)
+INT32_MIN = int(-(1 << 31))
+UINT32_MAX = (1 << 32) - 1
+
+INT64_MAX = (1 << 63) - 1
+INT64_MIN = -(1 << 63)
+UINT64_MAX = (1 << 64) - 1
+
+# "struct" format strings that will encode/decode the specified formats.
+FORMAT_UINT32_LITTLE_ENDIAN = '<I'
+FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
+FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
+FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
+
+
+# We'll have to provide alternate implementations of AppendLittleEndian*() on
+# any architectures where these checks fail.
+if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
+  raise AssertionError('Format "I" is not a 32-bit number.')
+if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
+  raise AssertionError('Format "Q" is not a 64-bit number.')
+
+
+def PackTag(field_number, wire_type):
+  """Returns an unsigned 32-bit integer that encodes the field number and
+  wire type information in standard protocol message wire format.
+
+  Args:
+    field_number: Expected to be an integer in the range [1, 1 << 29)
+    wire_type: One of the WIRETYPE_* constants.
+  """
+  if not 0 <= wire_type <= _WIRETYPE_MAX:
+    raise message.EncodeError('Unknown wire type: %d' % wire_type)
+  return (field_number << TAG_TYPE_BITS) | wire_type
+
+
+def UnpackTag(tag):
+  """The inverse of PackTag(). Returns (field_number, wire_type)."""
+  return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
+
+
+def ZigZagEncode(value):
+  """ZigZag Transform: Encodes signed integers so that they can be
+  effectively used with varint encoding. See wire_format.h for
+  more details.
+  """
+  if value >= 0:
+    return value << 1
+  return (value << 1) ^ (~0)
+
+
+def ZigZagDecode(value):
+  """Inverse of ZigZagEncode()."""
+  if not value & 0x1:
+    return value >> 1
+  return (value >> 1) ^ (~0)
+
+
+
+# The *ByteSize() functions below return the number of bytes required to
+# serialize "field number + type" information and then serialize the value.
+
+
+def Int32ByteSize(field_number, int32):
+  return Int64ByteSize(field_number, int32)
+
+
+def Int32ByteSizeNoTag(int32):
+  return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
+
+
+def Int64ByteSize(field_number, int64):
+  # Have to convert to uint before calling UInt64ByteSize().
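+  # For example, -1 & 0xffffffffffffffff == 0xffffffffffffffff, which is why
+  # negative int32/int64 values always occupy the maximum 10 varint bytes.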
+ return UInt64ByteSize(field_number, 0xffffffffffffffff & int64) + + +def UInt32ByteSize(field_number, uint32): + return UInt64ByteSize(field_number, uint32) + + +def UInt64ByteSize(field_number, uint64): + return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64) + + +def SInt32ByteSize(field_number, int32): + return UInt32ByteSize(field_number, ZigZagEncode(int32)) + + +def SInt64ByteSize(field_number, int64): + return UInt64ByteSize(field_number, ZigZagEncode(int64)) + + +def Fixed32ByteSize(field_number, fixed32): + return TagByteSize(field_number) + 4 + + +def Fixed64ByteSize(field_number, fixed64): + return TagByteSize(field_number) + 8 + + +def SFixed32ByteSize(field_number, sfixed32): + return TagByteSize(field_number) + 4 + + +def SFixed64ByteSize(field_number, sfixed64): + return TagByteSize(field_number) + 8 + + +def FloatByteSize(field_number, flt): + return TagByteSize(field_number) + 4 + + +def DoubleByteSize(field_number, double): + return TagByteSize(field_number) + 8 + + +def BoolByteSize(field_number, b): + return TagByteSize(field_number) + 1 + + +def EnumByteSize(field_number, enum): + return UInt32ByteSize(field_number, enum) + + +def StringByteSize(field_number, string): + return BytesByteSize(field_number, string.encode('utf-8')) + + +def BytesByteSize(field_number, b): + return (TagByteSize(field_number) + + _VarUInt64ByteSizeNoTag(len(b)) + + len(b)) + + +def GroupByteSize(field_number, message): + return (2 * TagByteSize(field_number) # START and END group. + + message.ByteSize()) + + +def MessageByteSize(field_number, message): + return (TagByteSize(field_number) + + _VarUInt64ByteSizeNoTag(message.ByteSize()) + + message.ByteSize()) + + +def MessageSetItemByteSize(field_number, msg): + # First compute the sizes of the tags. + # There are 2 tags for the beginning and ending of the repeated group, that + # is field number 1, one with field number 2 (type_id) and one with field + # number 3 (message). + total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3)) + + # Add the number of bytes for type_id. + total_size += _VarUInt64ByteSizeNoTag(field_number) + + message_size = msg.ByteSize() + + # The number of bytes for encoding the length of the message. + total_size += _VarUInt64ByteSizeNoTag(message_size) + + # The size of the message. + total_size += message_size + return total_size + + +def TagByteSize(field_number): + """Returns the bytes required to serialize a tag with this field number.""" + # Just pass in type 0, since the type won't affect the tag+type size. + return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0)) + + +# Private helper function for the *ByteSize() functions above. + +def _VarUInt64ByteSizeNoTag(uint64): + """Returns the number of bytes required to serialize a single varint + using boundary value comparisons. (unrolled loop optimization -WPierce) + uint64 must be unsigned. 
+ """ + if uint64 <= 0x7f: return 1 + if uint64 <= 0x3fff: return 2 + if uint64 <= 0x1fffff: return 3 + if uint64 <= 0xfffffff: return 4 + if uint64 <= 0x7ffffffff: return 5 + if uint64 <= 0x3ffffffffff: return 6 + if uint64 <= 0x1ffffffffffff: return 7 + if uint64 <= 0xffffffffffffff: return 8 + if uint64 <= 0x7fffffffffffffff: return 9 + if uint64 > UINT64_MAX: + raise message.EncodeError('Value out of range: %d' % uint64) + return 10 + + +NON_PACKABLE_TYPES = ( + descriptor.FieldDescriptor.TYPE_STRING, + descriptor.FieldDescriptor.TYPE_GROUP, + descriptor.FieldDescriptor.TYPE_MESSAGE, + descriptor.FieldDescriptor.TYPE_BYTES +) + + +def IsTypePackable(field_type): + """Return true iff packable = true is valid for fields of this type. + + Args: + field_type: a FieldDescriptor::Type value. + + Returns: + True iff fields of this type are packable. + """ + return field_type not in NON_PACKABLE_TYPES diff --git a/openpype/hosts/hiero/vendor/google/protobuf/json_format.py b/openpype/hosts/hiero/vendor/google/protobuf/json_format.py new file mode 100644 index 0000000000..5024ed89d7 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/json_format.py @@ -0,0 +1,912 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains routines for printing protocol messages in JSON format. + +Simple usage example: + + # Create a proto object and serialize it to a json format string. + message = my_proto_pb2.MyMessage(foo='bar') + json_string = json_format.MessageToJson(message) + + # Parse a json format string to proto object. 
+ message = json_format.Parse(json_string, my_proto_pb2.MyMessage()) +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + + +import base64 +from collections import OrderedDict +import json +import math +from operator import methodcaller +import re +import sys + +from google.protobuf.internal import type_checkers +from google.protobuf import descriptor +from google.protobuf import symbol_database + + +_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S' +_INT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT32, + descriptor.FieldDescriptor.CPPTYPE_UINT32, + descriptor.FieldDescriptor.CPPTYPE_INT64, + descriptor.FieldDescriptor.CPPTYPE_UINT64]) +_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64, + descriptor.FieldDescriptor.CPPTYPE_UINT64]) +_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT, + descriptor.FieldDescriptor.CPPTYPE_DOUBLE]) +_INFINITY = 'Infinity' +_NEG_INFINITY = '-Infinity' +_NAN = 'NaN' + +_UNPAIRED_SURROGATE_PATTERN = re.compile( + u'[\ud800-\udbff](?![\udc00-\udfff])|(? self.max_recursion_depth: + raise ParseError('Message too deep. Max recursion depth is {0}'.format( + self.max_recursion_depth)) + message_descriptor = message.DESCRIPTOR + full_name = message_descriptor.full_name + if not path: + path = message_descriptor.name + if _IsWrapperMessage(message_descriptor): + self._ConvertWrapperMessage(value, message, path) + elif full_name in _WKTJSONMETHODS: + methodcaller(_WKTJSONMETHODS[full_name][1], value, message, path)(self) + else: + self._ConvertFieldValuePair(value, message, path) + self.recursion_depth -= 1 + + def _ConvertFieldValuePair(self, js, message, path): + """Convert field value pairs into regular message. + + Args: + js: A JSON object to convert the field value pairs. + message: A regular protocol message to record the data. + path: parent path to log parse error info. + + Raises: + ParseError: In case of problems converting. + """ + names = [] + message_descriptor = message.DESCRIPTOR + fields_by_json_name = dict((f.json_name, f) + for f in message_descriptor.fields) + for name in js: + try: + field = fields_by_json_name.get(name, None) + if not field: + field = message_descriptor.fields_by_name.get(name, None) + if not field and _VALID_EXTENSION_NAME.match(name): + if not message_descriptor.is_extendable: + raise ParseError( + 'Message type {0} does not have extensions at {1}'.format( + message_descriptor.full_name, path)) + identifier = name[1:-1] # strip [] brackets + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(identifier) + # pylint: enable=protected-access + if not field: + # Try looking for extension by the message type name, dropping the + # field name following the final . separator in full_name. + identifier = '.'.join(identifier.split('.')[:-1]) + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(identifier) + # pylint: enable=protected-access + if not field: + if self.ignore_unknown_fields: + continue + raise ParseError( + ('Message type "{0}" has no field named "{1}" at "{2}".\n' + ' Available Fields(except extensions): "{3}"').format( + message_descriptor.full_name, name, path, + [f.json_name for f in message_descriptor.fields])) + if name in names: + raise ParseError('Message type "{0}" should not have multiple ' + '"{1}" fields at "{2}".'.format( + message.DESCRIPTOR.full_name, name, path)) + names.append(name) + value = js[name] + # Check no other oneof field is parsed. 
+ if field.containing_oneof is not None and value is not None: + oneof_name = field.containing_oneof.name + if oneof_name in names: + raise ParseError('Message type "{0}" should not have multiple ' + '"{1}" oneof fields at "{2}".'.format( + message.DESCRIPTOR.full_name, oneof_name, + path)) + names.append(oneof_name) + + if value is None: + if (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE + and field.message_type.full_name == 'google.protobuf.Value'): + sub_message = getattr(message, field.name) + sub_message.null_value = 0 + elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM + and field.enum_type.full_name == 'google.protobuf.NullValue'): + setattr(message, field.name, 0) + else: + message.ClearField(field.name) + continue + + # Parse field value. + if _IsMapEntry(field): + message.ClearField(field.name) + self._ConvertMapFieldValue(value, message, field, + '{0}.{1}'.format(path, name)) + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + message.ClearField(field.name) + if not isinstance(value, list): + raise ParseError('repeated field {0} must be in [] which is ' + '{1} at {2}'.format(name, value, path)) + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + # Repeated message field. + for index, item in enumerate(value): + sub_message = getattr(message, field.name).add() + # None is a null_value in Value. + if (item is None and + sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'): + raise ParseError('null is not allowed to be used as an element' + ' in a repeated field at {0}.{1}[{2}]'.format( + path, name, index)) + self.ConvertMessage(item, sub_message, + '{0}.{1}[{2}]'.format(path, name, index)) + else: + # Repeated scalar field. + for index, item in enumerate(value): + if item is None: + raise ParseError('null is not allowed to be used as an element' + ' in a repeated field at {0}.{1}[{2}]'.format( + path, name, index)) + getattr(message, field.name).append( + _ConvertScalarFieldValue( + item, field, '{0}.{1}[{2}]'.format(path, name, index))) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + if field.is_extension: + sub_message = message.Extensions[field] + else: + sub_message = getattr(message, field.name) + sub_message.SetInParent() + self.ConvertMessage(value, sub_message, '{0}.{1}'.format(path, name)) + else: + if field.is_extension: + message.Extensions[field] = _ConvertScalarFieldValue( + value, field, '{0}.{1}'.format(path, name)) + else: + setattr( + message, field.name, + _ConvertScalarFieldValue(value, field, + '{0}.{1}'.format(path, name))) + except ParseError as e: + if field and field.containing_oneof is None: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + else: + raise ParseError(str(e)) + except ValueError as e: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + except TypeError as e: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + + def _ConvertAnyMessage(self, value, message, path): + """Convert a JSON representation into Any message.""" + if isinstance(value, dict) and not value: + return + try: + type_url = value['@type'] + except KeyError: + raise ParseError( + '@type is missing when parsing any message at {0}'.format(path)) + + try: + sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool) + except TypeError as e: + raise ParseError('{0} at {1}'.format(e, path)) + message_descriptor = sub_message.DESCRIPTOR + full_name = message_descriptor.full_name + if 
_IsWrapperMessage(message_descriptor): + self._ConvertWrapperMessage(value['value'], sub_message, + '{0}.value'.format(path)) + elif full_name in _WKTJSONMETHODS: + methodcaller(_WKTJSONMETHODS[full_name][1], value['value'], sub_message, + '{0}.value'.format(path))( + self) + else: + del value['@type'] + self._ConvertFieldValuePair(value, sub_message, path) + value['@type'] = type_url + # Sets Any message + message.value = sub_message.SerializeToString() + message.type_url = type_url + + def _ConvertGenericMessage(self, value, message, path): + """Convert a JSON representation into message with FromJsonString.""" + # Duration, Timestamp, FieldMask have a FromJsonString method to do the + # conversion. Users can also call the method directly. + try: + message.FromJsonString(value) + except ValueError as e: + raise ParseError('{0} at {1}'.format(e, path)) + + def _ConvertValueMessage(self, value, message, path): + """Convert a JSON representation into Value message.""" + if isinstance(value, dict): + self._ConvertStructMessage(value, message.struct_value, path) + elif isinstance(value, list): + self._ConvertListValueMessage(value, message.list_value, path) + elif value is None: + message.null_value = 0 + elif isinstance(value, bool): + message.bool_value = value + elif isinstance(value, str): + message.string_value = value + elif isinstance(value, _INT_OR_FLOAT): + message.number_value = value + else: + raise ParseError('Value {0} has unexpected type {1} at {2}'.format( + value, type(value), path)) + + def _ConvertListValueMessage(self, value, message, path): + """Convert a JSON representation into ListValue message.""" + if not isinstance(value, list): + raise ParseError('ListValue must be in [] which is {0} at {1}'.format( + value, path)) + message.ClearField('values') + for index, item in enumerate(value): + self._ConvertValueMessage(item, message.values.add(), + '{0}[{1}]'.format(path, index)) + + def _ConvertStructMessage(self, value, message, path): + """Convert a JSON representation into Struct message.""" + if not isinstance(value, dict): + raise ParseError('Struct must be in a dict which is {0} at {1}'.format( + value, path)) + # Clear will mark the struct as modified so it will be created even if + # there are no values. + message.Clear() + for key in value: + self._ConvertValueMessage(value[key], message.fields[key], + '{0}.{1}'.format(path, key)) + return + + def _ConvertWrapperMessage(self, value, message, path): + """Convert a JSON representation into Wrapper message.""" + field = message.DESCRIPTOR.fields_by_name['value'] + setattr( + message, 'value', + _ConvertScalarFieldValue(value, field, path='{0}.value'.format(path))) + + def _ConvertMapFieldValue(self, value, message, field, path): + """Convert map field value for a message map field. + + Args: + value: A JSON object to convert the map field value. + message: A protocol message to record the converted data. + field: The descriptor of the map field to be converted. + path: parent path to log parse error info. + + Raises: + ParseError: In case of convert problems. 
+ """ + if not isinstance(value, dict): + raise ParseError( + 'Map field {0} must be in a dict which is {1} at {2}'.format( + field.name, value, path)) + key_field = field.message_type.fields_by_name['key'] + value_field = field.message_type.fields_by_name['value'] + for key in value: + key_value = _ConvertScalarFieldValue(key, key_field, + '{0}.key'.format(path), True) + if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + self.ConvertMessage(value[key], + getattr(message, field.name)[key_value], + '{0}[{1}]'.format(path, key_value)) + else: + getattr(message, field.name)[key_value] = _ConvertScalarFieldValue( + value[key], value_field, path='{0}[{1}]'.format(path, key_value)) + + +def _ConvertScalarFieldValue(value, field, path, require_str=False): + """Convert a single scalar field value. + + Args: + value: A scalar value to convert the scalar field value. + field: The descriptor of the field to convert. + path: parent path to log parse error info. + require_str: If True, the field value must be a str. + + Returns: + The converted scalar field value + + Raises: + ParseError: In case of convert problems. + """ + try: + if field.cpp_type in _INT_TYPES: + return _ConvertInteger(value) + elif field.cpp_type in _FLOAT_TYPES: + return _ConvertFloat(value, field) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + return _ConvertBool(value, require_str) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + if isinstance(value, str): + encoded = value.encode('utf-8') + else: + encoded = value + # Add extra padding '=' + padded_value = encoded + b'=' * (4 - len(encoded) % 4) + return base64.urlsafe_b64decode(padded_value) + else: + # Checking for unpaired surrogates appears to be unreliable, + # depending on the specific Python version, so we check manually. + if _UNPAIRED_SURROGATE_PATTERN.search(value): + raise ParseError('Unpaired surrogate') + return value + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + # Convert an enum value. + enum_value = field.enum_type.values_by_name.get(value, None) + if enum_value is None: + try: + number = int(value) + enum_value = field.enum_type.values_by_number.get(number, None) + except ValueError: + raise ParseError('Invalid enum value {0} for enum type {1}'.format( + value, field.enum_type.full_name)) + if enum_value is None: + if field.file.syntax == 'proto3': + # Proto3 accepts unknown enums. + return number + raise ParseError('Invalid enum value {0} for enum type {1}'.format( + value, field.enum_type.full_name)) + return enum_value.number + except ParseError as e: + raise ParseError('{0} at {1}'.format(e, path)) + + +def _ConvertInteger(value): + """Convert an integer. + + Args: + value: A scalar value to convert. + + Returns: + The integer value. + + Raises: + ParseError: If an integer couldn't be consumed. 
+ """ + if isinstance(value, float) and not value.is_integer(): + raise ParseError('Couldn\'t parse integer: {0}'.format(value)) + + if isinstance(value, str) and value.find(' ') != -1: + raise ParseError('Couldn\'t parse integer: "{0}"'.format(value)) + + if isinstance(value, bool): + raise ParseError('Bool value {0} is not acceptable for ' + 'integer field'.format(value)) + + return int(value) + + +def _ConvertFloat(value, field): + """Convert an floating point number.""" + if isinstance(value, float): + if math.isnan(value): + raise ParseError('Couldn\'t parse NaN, use quoted "NaN" instead') + if math.isinf(value): + if value > 0: + raise ParseError('Couldn\'t parse Infinity or value too large, ' + 'use quoted "Infinity" instead') + else: + raise ParseError('Couldn\'t parse -Infinity or value too small, ' + 'use quoted "-Infinity" instead') + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: + # pylint: disable=protected-access + if value > type_checkers._FLOAT_MAX: + raise ParseError('Float value too large') + # pylint: disable=protected-access + if value < type_checkers._FLOAT_MIN: + raise ParseError('Float value too small') + if value == 'nan': + raise ParseError('Couldn\'t parse float "nan", use "NaN" instead') + try: + # Assume Python compatible syntax. + return float(value) + except ValueError: + # Check alternative spellings. + if value == _NEG_INFINITY: + return float('-inf') + elif value == _INFINITY: + return float('inf') + elif value == _NAN: + return float('nan') + else: + raise ParseError('Couldn\'t parse float: {0}'.format(value)) + + +def _ConvertBool(value, require_str): + """Convert a boolean value. + + Args: + value: A scalar value to convert. + require_str: If True, value must be a str. + + Returns: + The bool parsed. + + Raises: + ParseError: If a boolean value couldn't be consumed. + """ + if require_str: + if value == 'true': + return True + elif value == 'false': + return False + else: + raise ParseError('Expected "true" or "false", not {0}'.format(value)) + + if not isinstance(value, bool): + raise ParseError('Expected true or false without quotes') + return value + +_WKTJSONMETHODS = { + 'google.protobuf.Any': ['_AnyMessageToJsonObject', + '_ConvertAnyMessage'], + 'google.protobuf.Duration': ['_GenericMessageToJsonObject', + '_ConvertGenericMessage'], + 'google.protobuf.FieldMask': ['_GenericMessageToJsonObject', + '_ConvertGenericMessage'], + 'google.protobuf.ListValue': ['_ListValueMessageToJsonObject', + '_ConvertListValueMessage'], + 'google.protobuf.Struct': ['_StructMessageToJsonObject', + '_ConvertStructMessage'], + 'google.protobuf.Timestamp': ['_GenericMessageToJsonObject', + '_ConvertGenericMessage'], + 'google.protobuf.Value': ['_ValueMessageToJsonObject', + '_ConvertValueMessage'] +} diff --git a/openpype/hosts/hiero/vendor/google/protobuf/message.py b/openpype/hosts/hiero/vendor/google/protobuf/message.py new file mode 100644 index 0000000000..76c6802f70 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/message.py @@ -0,0 +1,424 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# TODO(robinson): We should just make these methods all "pure-virtual" and move +# all implementation out, into reflection.py for now. + + +"""Contains an abstract base class for protocol messages.""" + +__author__ = 'robinson@google.com (Will Robinson)' + +class Error(Exception): + """Base error type for this module.""" + pass + + +class DecodeError(Error): + """Exception raised when deserializing messages.""" + pass + + +class EncodeError(Error): + """Exception raised when serializing messages.""" + pass + + +class Message(object): + + """Abstract base class for protocol messages. + + Protocol message classes are almost always generated by the protocol + compiler. These generated types subclass Message and implement the methods + shown below. + """ + + # TODO(robinson): Link to an HTML document here. + + # TODO(robinson): Document that instances of this class will also + # have an Extensions attribute with __getitem__ and __setitem__. + # Again, not sure how to best convey this. + + # TODO(robinson): Document that the class must also have a static + # RegisterExtension(extension_field) method. + # Not sure how to best express at this point. + + # TODO(robinson): Document these fields and methods. + + __slots__ = [] + + #: The :class:`google.protobuf.descriptor.Descriptor` for this message type. + DESCRIPTOR = None + + def __deepcopy__(self, memo=None): + clone = type(self)() + clone.MergeFrom(self) + return clone + + def __eq__(self, other_msg): + """Recursively compares two messages by value and structure.""" + raise NotImplementedError + + def __ne__(self, other_msg): + # Can't just say self != other_msg, since that would infinitely recurse. :) + return not self == other_msg + + def __hash__(self): + raise TypeError('unhashable object') + + def __str__(self): + """Outputs a human-readable representation of the message.""" + raise NotImplementedError + + def __unicode__(self): + """Outputs a human-readable representation of the message.""" + raise NotImplementedError + + def MergeFrom(self, other_msg): + """Merges the contents of the specified message into current message. + + This method merges the contents of the specified message into the current + message. Singular fields that are set in the specified message overwrite + the corresponding fields in the current message. Repeated fields are + appended. 
Singular sub-messages and groups are recursively merged. + + Args: + other_msg (Message): A message to merge into the current message. + """ + raise NotImplementedError + + def CopyFrom(self, other_msg): + """Copies the content of the specified message into the current message. + + The method clears the current message and then merges the specified + message using MergeFrom. + + Args: + other_msg (Message): A message to copy into the current one. + """ + if self is other_msg: + return + self.Clear() + self.MergeFrom(other_msg) + + def Clear(self): + """Clears all data that was set in the message.""" + raise NotImplementedError + + def SetInParent(self): + """Mark this as present in the parent. + + This normally happens automatically when you assign a field of a + sub-message, but sometimes you want to make the sub-message + present while keeping it empty. If you find yourself using this, + you may want to reconsider your design. + """ + raise NotImplementedError + + def IsInitialized(self): + """Checks if the message is initialized. + + Returns: + bool: The method returns True if the message is initialized (i.e. all of + its required fields are set). + """ + raise NotImplementedError + + # TODO(robinson): MergeFromString() should probably return None and be + # implemented in terms of a helper that returns the # of bytes read. Our + # deserialization routines would use the helper when recursively + # deserializing, but the end user would almost always just want the no-return + # MergeFromString(). + + def MergeFromString(self, serialized): + """Merges serialized protocol buffer data into this message. + + When we find a field in `serialized` that is already present + in this message: + + - If it's a "repeated" field, we append to the end of our list. + - Else, if it's a scalar, we overwrite our field. + - Else, (it's a nonrepeated composite), we recursively merge + into the existing composite. + + Args: + serialized (bytes): Any object that allows us to call + ``memoryview(serialized)`` to access a string of bytes using the + buffer interface. + + Returns: + int: The number of bytes read from `serialized`. + For non-group messages, this will always be `len(serialized)`, + but for messages which are actually groups, this will + generally be less than `len(serialized)`, since we must + stop when we reach an ``END_GROUP`` tag. Note that if + we *do* stop because of an ``END_GROUP`` tag, the number + of bytes returned does not include the bytes + for the ``END_GROUP`` tag information. + + Raises: + DecodeError: if the input cannot be parsed. + """ + # TODO(robinson): Document handling of unknown fields. + # TODO(robinson): When we switch to a helper, this will return None. + raise NotImplementedError + + def ParseFromString(self, serialized): + """Parse serialized protocol buffer data into this message. + + Like :func:`MergeFromString()`, except we clear the object first. + + Raises: + message.DecodeError if the input cannot be parsed. + """ + self.Clear() + return self.MergeFromString(serialized) + + def SerializeToString(self, **kwargs): + """Serializes the protocol message to a binary string. + + Keyword Args: + deterministic (bool): If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + A binary string representation of the message if all of the required + fields in the message are set (i.e. the message is initialized). + + Raises: + EncodeError: if the message isn't initialized (see :func:`IsInitialized`). 
+ """ + raise NotImplementedError + + def SerializePartialToString(self, **kwargs): + """Serializes the protocol message to a binary string. + + This method is similar to SerializeToString but doesn't check if the + message is initialized. + + Keyword Args: + deterministic (bool): If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + bytes: A serialized representation of the partial message. + """ + raise NotImplementedError + + # TODO(robinson): Decide whether we like these better + # than auto-generated has_foo() and clear_foo() methods + # on the instances themselves. This way is less consistent + # with C++, but it makes reflection-type access easier and + # reduces the number of magically autogenerated things. + # + # TODO(robinson): Be sure to document (and test) exactly + # which field names are accepted here. Are we case-sensitive? + # What do we do with fields that share names with Python keywords + # like 'lambda' and 'yield'? + # + # nnorwitz says: + # """ + # Typically (in python), an underscore is appended to names that are + # keywords. So they would become lambda_ or yield_. + # """ + def ListFields(self): + """Returns a list of (FieldDescriptor, value) tuples for present fields. + + A message field is non-empty if HasField() would return true. A singular + primitive field is non-empty if HasField() would return true in proto2 or it + is non zero in proto3. A repeated field is non-empty if it contains at least + one element. The fields are ordered by field number. + + Returns: + list[tuple(FieldDescriptor, value)]: field descriptors and values + for all fields in the message which are not empty. The values vary by + field type. + """ + raise NotImplementedError + + def HasField(self, field_name): + """Checks if a certain field is set for the message. + + For a oneof group, checks if any field inside is set. Note that if the + field_name is not defined in the message descriptor, :exc:`ValueError` will + be raised. + + Args: + field_name (str): The name of the field to check for presence. + + Returns: + bool: Whether a value has been set for the named field. + + Raises: + ValueError: if the `field_name` is not a member of this message. + """ + raise NotImplementedError + + def ClearField(self, field_name): + """Clears the contents of a given field. + + Inside a oneof group, clears the field set. If the name neither refers to a + defined field or oneof group, :exc:`ValueError` is raised. + + Args: + field_name (str): The name of the field to check for presence. + + Raises: + ValueError: if the `field_name` is not a member of this message. + """ + raise NotImplementedError + + def WhichOneof(self, oneof_group): + """Returns the name of the field that is set inside a oneof group. + + If no field is set, returns None. + + Args: + oneof_group (str): the name of the oneof group to check. + + Returns: + str or None: The name of the group that is set, or None. + + Raises: + ValueError: no group with the given name exists + """ + raise NotImplementedError + + def HasExtension(self, extension_handle): + """Checks if a certain extension is present for this message. + + Extensions are retrieved using the :attr:`Extensions` mapping (if present). + + Args: + extension_handle: The handle for the extension to check. + + Returns: + bool: Whether the extension is present for this message. + + Raises: + KeyError: if the extension is repeated. 
Similar to repeated fields, + there is no separate notion of presence: a "not present" repeated + extension is an empty list. + """ + raise NotImplementedError + + def ClearExtension(self, extension_handle): + """Clears the contents of a given extension. + + Args: + extension_handle: The handle for the extension to clear. + """ + raise NotImplementedError + + def UnknownFields(self): + """Returns the UnknownFieldSet. + + Returns: + UnknownFieldSet: The unknown fields stored in this message. + """ + raise NotImplementedError + + def DiscardUnknownFields(self): + """Clears all fields in the :class:`UnknownFieldSet`. + + This operation is recursive for nested message. + """ + raise NotImplementedError + + def ByteSize(self): + """Returns the serialized size of this message. + + Recursively calls ByteSize() on all contained messages. + + Returns: + int: The number of bytes required to serialize this message. + """ + raise NotImplementedError + + @classmethod + def FromString(cls, s): + raise NotImplementedError + + @staticmethod + def RegisterExtension(extension_handle): + raise NotImplementedError + + def _SetListener(self, message_listener): + """Internal method used by the protocol message implementation. + Clients should not call this directly. + + Sets a listener that this message will call on certain state transitions. + + The purpose of this method is to register back-edges from children to + parents at runtime, for the purpose of setting "has" bits and + byte-size-dirty bits in the parent and ancestor objects whenever a child or + descendant object is modified. + + If the client wants to disconnect this Message from the object tree, she + explicitly sets callback to None. + + If message_listener is None, unregisters any existing listener. Otherwise, + message_listener must implement the MessageListener interface in + internal/message_listener.py, and we discard any listener registered + via a previous _SetListener() call. + """ + raise NotImplementedError + + def __getstate__(self): + """Support the pickle protocol.""" + return dict(serialized=self.SerializePartialToString()) + + def __setstate__(self, state): + """Support the pickle protocol.""" + self.__init__() + serialized = state['serialized'] + # On Python 3, using encoding='latin1' is required for unpickling + # protos pickled by Python 2. + if not isinstance(serialized, bytes): + serialized = serialized.encode('latin1') + self.ParseFromString(serialized) + + def __reduce__(self): + message_descriptor = self.DESCRIPTOR + if message_descriptor.containing_type is None: + return type(self), (), self.__getstate__() + # the message type must be nested. + # Python does not pickle nested classes; use the symbol_database on the + # receiving end. + container = message_descriptor + return (_InternalConstructMessage, (container.full_name,), + self.__getstate__()) + + +def _InternalConstructMessage(full_name): + """Constructs a nested message.""" + from google.protobuf import symbol_database # pylint:disable=g-import-not-at-top + + return symbol_database.Default().GetSymbol(full_name)() diff --git a/openpype/hosts/hiero/vendor/google/protobuf/message_factory.py b/openpype/hosts/hiero/vendor/google/protobuf/message_factory.py new file mode 100644 index 0000000000..3656fa6874 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/message_factory.py @@ -0,0 +1,185 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides a factory class for generating dynamic messages. + +The easiest way to use this class is if you have access to the FileDescriptor +protos containing the messages you want to create you can just do the following: + +message_classes = message_factory.GetMessages(iterable_of_file_descriptors) +my_proto_instance = message_classes['some.proto.package.MessageName']() +""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +from google.protobuf.internal import api_implementation +from google.protobuf import descriptor_pool +from google.protobuf import message + +if api_implementation.Type() == 'cpp': + from google.protobuf.pyext import cpp_message as message_impl +else: + from google.protobuf.internal import python_message as message_impl + + +# The type of all Message classes. +_GENERATED_PROTOCOL_MESSAGE_TYPE = message_impl.GeneratedProtocolMessageType + + +class MessageFactory(object): + """Factory for creating Proto2 messages from descriptors in a pool.""" + + def __init__(self, pool=None): + """Initializes a new factory.""" + self.pool = pool or descriptor_pool.DescriptorPool() + + # local cache of all classes built from protobuf descriptors + self._classes = {} + + def GetPrototype(self, descriptor): + """Obtains a proto2 message class based on the passed in descriptor. + + Passing a descriptor with a fully qualified name matching a previous + invocation will cause the same class to be returned. + + Args: + descriptor: The descriptor to build from. + + Returns: + A class describing the passed in descriptor. + """ + if descriptor not in self._classes: + result_class = self.CreatePrototype(descriptor) + # The assignment to _classes is redundant for the base implementation, but + # might avoid confusion in cases where CreatePrototype gets overridden and + # does not call the base implementation. 
+ self._classes[descriptor] = result_class + return result_class + return self._classes[descriptor] + + def CreatePrototype(self, descriptor): + """Builds a proto2 message class based on the passed in descriptor. + + Don't call this function directly, it always creates a new class. Call + GetPrototype() instead. This method is meant to be overridden in subblasses + to perform additional operations on the newly constructed class. + + Args: + descriptor: The descriptor to build from. + + Returns: + A class describing the passed in descriptor. + """ + descriptor_name = descriptor.name + result_class = _GENERATED_PROTOCOL_MESSAGE_TYPE( + descriptor_name, + (message.Message,), + { + 'DESCRIPTOR': descriptor, + # If module not set, it wrongly points to message_factory module. + '__module__': None, + }) + result_class._FACTORY = self # pylint: disable=protected-access + # Assign in _classes before doing recursive calls to avoid infinite + # recursion. + self._classes[descriptor] = result_class + for field in descriptor.fields: + if field.message_type: + self.GetPrototype(field.message_type) + for extension in result_class.DESCRIPTOR.extensions: + if extension.containing_type not in self._classes: + self.GetPrototype(extension.containing_type) + extended_class = self._classes[extension.containing_type] + extended_class.RegisterExtension(extension) + return result_class + + def GetMessages(self, files): + """Gets all the messages from a specified file. + + This will find and resolve dependencies, failing if the descriptor + pool cannot satisfy them. + + Args: + files: The file names to extract messages from. + + Returns: + A dictionary mapping proto names to the message classes. This will include + any dependent messages as well as any messages defined in the same file as + a specified message. + """ + result = {} + for file_name in files: + file_desc = self.pool.FindFileByName(file_name) + for desc in file_desc.message_types_by_name.values(): + result[desc.full_name] = self.GetPrototype(desc) + + # While the extension FieldDescriptors are created by the descriptor pool, + # the python classes created in the factory need them to be registered + # explicitly, which is done below. + # + # The call to RegisterExtension will specifically check if the + # extension was already registered on the object and either + # ignore the registration if the original was the same, or raise + # an error if they were different. + + for extension in file_desc.extensions_by_name.values(): + if extension.containing_type not in self._classes: + self.GetPrototype(extension.containing_type) + extended_class = self._classes[extension.containing_type] + extended_class.RegisterExtension(extension) + return result + + +_FACTORY = MessageFactory() + + +def GetMessages(file_protos): + """Builds a dictionary of all the messages available in a set of files. + + Args: + file_protos: Iterable of FileDescriptorProto to build messages out of. + + Returns: + A dictionary mapping proto names to the message classes. This will include + any dependent messages as well as any messages defined in the same file as + a specified message. + """ + # The cpp implementation of the protocol buffer library requires to add the + # message in topological order of the dependency graph. + file_by_name = {file_proto.name: file_proto for file_proto in file_protos} + def _AddFile(file_proto): + for dependency in file_proto.dependency: + if dependency in file_by_name: + # Remove from elements to be visited, in order to cut cycles. 
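+        # Dependencies that are not in file_by_name are skipped here; they are
+        # assumed to already be available in the factory's descriptor pool.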
+ _AddFile(file_by_name.pop(dependency)) + _FACTORY.pool.Add(file_proto) + while file_by_name: + _AddFile(file_by_name.popitem()[1]) + return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos]) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/proto_builder.py b/openpype/hosts/hiero/vendor/google/protobuf/proto_builder.py new file mode 100644 index 0000000000..a4667ce63e --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/proto_builder.py @@ -0,0 +1,134 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Dynamic Protobuf class creator.""" + +from collections import OrderedDict +import hashlib +import os + +from google.protobuf import descriptor_pb2 +from google.protobuf import descriptor +from google.protobuf import message_factory + + +def _GetMessageFromFactory(factory, full_name): + """Get a proto class from the MessageFactory by name. + + Args: + factory: a MessageFactory instance. + full_name: str, the fully qualified name of the proto type. + Returns: + A class, for the type identified by full_name. + Raises: + KeyError, if the proto is not found in the factory's descriptor pool. + """ + proto_descriptor = factory.pool.FindMessageTypeByName(full_name) + proto_cls = factory.GetPrototype(proto_descriptor) + return proto_cls + + +def MakeSimpleProtoClass(fields, full_name=None, pool=None): + """Create a Protobuf class whose fields are basic types. + + Note: this doesn't validate field names! + + Args: + fields: dict of {name: field_type} mappings for each field in the proto. If + this is an OrderedDict the order will be maintained, otherwise the + fields will be sorted by name. + full_name: optional str, the fully-qualified name of the proto type. + pool: optional DescriptorPool instance. + Returns: + a class, the new protobuf class with a FileDescriptor. 
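+
+  Example (illustrative only; names are arbitrary):
+    fields = {'foo': descriptor_pb2.FieldDescriptorProto.TYPE_INT64,
+              'bar': descriptor_pb2.FieldDescriptorProto.TYPE_STRING}
+    proto_cls = MakeSimpleProtoClass(fields, full_name='testing.SimpleMessage')
+    msg = proto_cls(foo=42, bar='baz')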
+ """ + factory = message_factory.MessageFactory(pool=pool) + + if full_name is not None: + try: + proto_cls = _GetMessageFromFactory(factory, full_name) + return proto_cls + except KeyError: + # The factory's DescriptorPool doesn't know about this class yet. + pass + + # Get a list of (name, field_type) tuples from the fields dict. If fields was + # an OrderedDict we keep the order, but otherwise we sort the field to ensure + # consistent ordering. + field_items = fields.items() + if not isinstance(fields, OrderedDict): + field_items = sorted(field_items) + + # Use a consistent file name that is unlikely to conflict with any imported + # proto files. + fields_hash = hashlib.sha1() + for f_name, f_type in field_items: + fields_hash.update(f_name.encode('utf-8')) + fields_hash.update(str(f_type).encode('utf-8')) + proto_file_name = fields_hash.hexdigest() + '.proto' + + # If the proto is anonymous, use the same hash to name it. + if full_name is None: + full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' + + fields_hash.hexdigest()) + try: + proto_cls = _GetMessageFromFactory(factory, full_name) + return proto_cls + except KeyError: + # The factory's DescriptorPool doesn't know about this class yet. + pass + + # This is the first time we see this proto: add a new descriptor to the pool. + factory.pool.Add( + _MakeFileDescriptorProto(proto_file_name, full_name, field_items)) + return _GetMessageFromFactory(factory, full_name) + + +def _MakeFileDescriptorProto(proto_file_name, full_name, field_items): + """Populate FileDescriptorProto for MessageFactory's DescriptorPool.""" + package, name = full_name.rsplit('.', 1) + file_proto = descriptor_pb2.FileDescriptorProto() + file_proto.name = os.path.join(package.replace('.', '/'), proto_file_name) + file_proto.package = package + desc_proto = file_proto.message_type.add() + desc_proto.name = name + for f_number, (f_name, f_type) in enumerate(field_items, 1): + field_proto = desc_proto.field.add() + field_proto.name = f_name + # # If the number falls in the reserved range, reassign it to the correct + # # number after the range. + if f_number >= descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER: + f_number += ( + descriptor.FieldDescriptor.LAST_RESERVED_FIELD_NUMBER - + descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER + 1) + field_proto.number = f_number + field_proto.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL + field_proto.type = f_type + return file_proto diff --git a/openpype/hosts/hiero/vendor/google/protobuf/pyext/__init__.py b/openpype/hosts/hiero/vendor/google/protobuf/pyext/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/hiero/vendor/google/protobuf/pyext/cpp_message.py b/openpype/hosts/hiero/vendor/google/protobuf/pyext/cpp_message.py new file mode 100644 index 0000000000..fc8eb32d79 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/pyext/cpp_message.py @@ -0,0 +1,65 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Protocol message implementation hooks for C++ implementation. + +Contains helper functions used to create protocol message classes from +Descriptor objects at runtime backed by the protocol buffer C++ API. +""" + +__author__ = 'tibell@google.com (Johan Tibell)' + +from google.protobuf.pyext import _message + + +class GeneratedProtocolMessageType(_message.MessageMeta): + + """Metaclass for protocol message classes created at runtime from Descriptors. + + The protocol compiler currently uses this metaclass to create protocol + message classes at runtime. Clients can also manually create their own + classes at runtime, as in this example: + + mydescriptor = Descriptor(.....) + factory = symbol_database.Default() + factory.pool.AddDescriptor(mydescriptor) + MyProtoClass = factory.GetPrototype(mydescriptor) + myproto_instance = MyProtoClass() + myproto.foo_field = 23 + ... + + The above example will not work for nested types. If you wish to include them, + use reflection.MakeClass() instead of manually instantiating the class in + order to create the appropriate class structure. + """ + + # Must be consistent with the protocol-compiler code in + # proto2/compiler/internal/generator.*. + _DESCRIPTOR_KEY = 'DESCRIPTOR' diff --git a/openpype/hosts/hiero/vendor/google/protobuf/pyext/python_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/pyext/python_pb2.py new file mode 100644 index 0000000000..2c6ecf4c98 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/pyext/python_pb2.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/pyext/python.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\"google/protobuf/pyext/python.proto\x12\x1fgoogle.protobuf.python.internal\"\xbc\x02\n\x0cTestAllTypes\x12\\\n\x17repeated_nested_message\x18\x01 \x03(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessage\x12\\\n\x17optional_nested_message\x18\x02 \x01(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessage\x12\x16\n\x0eoptional_int32\x18\x03 \x01(\x05\x1aX\n\rNestedMessage\x12\n\n\x02\x62\x62\x18\x01 \x01(\x05\x12;\n\x02\x63\x63\x18\x02 \x01(\x0b\x32/.google.protobuf.python.internal.ForeignMessage\"&\n\x0e\x46oreignMessage\x12\t\n\x01\x63\x18\x01 \x01(\x05\x12\t\n\x01\x64\x18\x02 \x03(\x05\"\x1d\n\x11TestAllExtensions*\x08\x08\x01\x10\x80\x80\x80\x80\x02:\x9a\x01\n!optional_nested_message_extension\x12\x32.google.protobuf.python.internal.TestAllExtensions\x18\x01 \x01(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessage:\x9a\x01\n!repeated_nested_message_extension\x12\x32.google.protobuf.python.internal.TestAllExtensions\x18\x02 \x03(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessageB\x02H\x01') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.pyext.python_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + TestAllExtensions.RegisterExtension(optional_nested_message_extension) + TestAllExtensions.RegisterExtension(repeated_nested_message_extension) + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'H\001' + _TESTALLTYPES._serialized_start=72 + _TESTALLTYPES._serialized_end=388 + _TESTALLTYPES_NESTEDMESSAGE._serialized_start=300 + _TESTALLTYPES_NESTEDMESSAGE._serialized_end=388 + _FOREIGNMESSAGE._serialized_start=390 + _FOREIGNMESSAGE._serialized_end=428 + _TESTALLEXTENSIONS._serialized_start=430 + _TESTALLEXTENSIONS._serialized_end=459 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/reflection.py b/openpype/hosts/hiero/vendor/google/protobuf/reflection.py new file mode 100644 index 0000000000..81e18859a8 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/reflection.py @@ -0,0 +1,95 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This code is meant to work on Python 2.4 and above only. + +"""Contains a metaclass and helper functions used to create +protocol message classes from Descriptor objects at runtime. + +Recall that a metaclass is the "type" of a class. +(A class is to a metaclass what an instance is to a class.) + +In this case, we use the GeneratedProtocolMessageType metaclass +to inject all the useful functionality into the classes +output by the protocol compiler at compile-time. + +The upshot of all this is that the real implementation +details for ALL pure-Python protocol buffers are *here in +this file*. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + + +from google.protobuf import message_factory +from google.protobuf import symbol_database + +# The type of all Message classes. +# Part of the public interface, but normally only used by message factories. +GeneratedProtocolMessageType = message_factory._GENERATED_PROTOCOL_MESSAGE_TYPE + +MESSAGE_CLASS_CACHE = {} + + +# Deprecated. Please NEVER use reflection.ParseMessage(). +def ParseMessage(descriptor, byte_str): + """Generate a new Message instance from this Descriptor and a byte string. + + DEPRECATED: ParseMessage is deprecated because it is using MakeClass(). + Please use MessageFactory.GetPrototype() instead. + + Args: + descriptor: Protobuf Descriptor object + byte_str: Serialized protocol buffer byte string + + Returns: + Newly created protobuf Message object. + """ + result_class = MakeClass(descriptor) + new_msg = result_class() + new_msg.ParseFromString(byte_str) + return new_msg + + +# Deprecated. Please NEVER use reflection.MakeClass(). +def MakeClass(descriptor): + """Construct a class object for a protobuf described by descriptor. + + DEPRECATED: use MessageFactory.GetPrototype() instead. + + Args: + descriptor: A descriptor.Descriptor object describing the protobuf. + Returns: + The Message class object described by the descriptor. + """ + # Original implementation leads to duplicate message classes, which won't play + # well with extensions. Message factory info is also missing. + # Redirect to message_factory. + return symbol_database.Default().GetPrototype(descriptor) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/service.py b/openpype/hosts/hiero/vendor/google/protobuf/service.py new file mode 100644 index 0000000000..5625246324 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/service.py @@ -0,0 +1,228 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""DEPRECATED: Declares the RPC service interfaces. + +This module declares the abstract interfaces underlying proto2 RPC +services. These are intended to be independent of any particular RPC +implementation, so that proto2 services can be used on top of a variety +of implementations. Starting with version 2.3.0, RPC implementations should +not try to build on these, but should instead provide code generator plugins +which generate code specific to the particular RPC implementation. This way +the generated code can be more appropriate for the implementation in use +and can avoid unnecessary layers of indirection. +""" + +__author__ = 'petar@google.com (Petar Petrov)' + + +class RpcException(Exception): + """Exception raised on failed blocking RPC method call.""" + pass + + +class Service(object): + + """Abstract base interface for protocol-buffer-based RPC services. + + Services themselves are abstract classes (implemented either by servers or as + stubs), but they subclass this base interface. The methods of this + interface can be used to call the methods of the service without knowing + its exact type at compile time (analogous to the Message interface). + """ + + def GetDescriptor(): + """Retrieves this service's descriptor.""" + raise NotImplementedError + + def CallMethod(self, method_descriptor, rpc_controller, + request, done): + """Calls a method of the service specified by method_descriptor. + + If "done" is None then the call is blocking and the response + message will be returned directly. Otherwise the call is asynchronous + and "done" will later be called with the response value. + + In the blocking case, RpcException will be raised on error. + + Preconditions: + + * method_descriptor.service == GetDescriptor + * request is of the exact same classes as returned by + GetRequestClass(method). + * After the call has started, the request must not be modified. + * "rpc_controller" is of the correct type for the RPC implementation being + used by this Service. 
For stubs, the "correct type" depends on the + RpcChannel which the stub is using. + + Postconditions: + + * "done" will be called when the method is complete. This may be + before CallMethod() returns or it may be at some point in the future. + * If the RPC failed, the response value passed to "done" will be None. + Further details about the failure can be found by querying the + RpcController. + """ + raise NotImplementedError + + def GetRequestClass(self, method_descriptor): + """Returns the class of the request message for the specified method. + + CallMethod() requires that the request is of a particular subclass of + Message. GetRequestClass() gets the default instance of this required + type. + + Example: + method = service.GetDescriptor().FindMethodByName("Foo") + request = stub.GetRequestClass(method)() + request.ParseFromString(input) + service.CallMethod(method, request, callback) + """ + raise NotImplementedError + + def GetResponseClass(self, method_descriptor): + """Returns the class of the response message for the specified method. + + This method isn't really needed, as the RpcChannel's CallMethod constructs + the response protocol message. It's provided anyway in case it is useful + for the caller to know the response type in advance. + """ + raise NotImplementedError + + +class RpcController(object): + + """An RpcController mediates a single method call. + + The primary purpose of the controller is to provide a way to manipulate + settings specific to the RPC implementation and to find out about RPC-level + errors. The methods provided by the RpcController interface are intended + to be a "least common denominator" set of features which we expect all + implementations to support. Specific implementations may provide more + advanced features (e.g. deadline propagation). + """ + + # Client-side methods below + + def Reset(self): + """Resets the RpcController to its initial state. + + After the RpcController has been reset, it may be reused in + a new call. Must not be called while an RPC is in progress. + """ + raise NotImplementedError + + def Failed(self): + """Returns true if the call failed. + + After a call has finished, returns true if the call failed. The possible + reasons for failure depend on the RPC implementation. Failed() must not + be called before a call has finished. If Failed() returns true, the + contents of the response message are undefined. + """ + raise NotImplementedError + + def ErrorText(self): + """If Failed is true, returns a human-readable description of the error.""" + raise NotImplementedError + + def StartCancel(self): + """Initiate cancellation. + + Advises the RPC system that the caller desires that the RPC call be + canceled. The RPC system may cancel it immediately, may wait awhile and + then cancel it, or may not even cancel the call at all. If the call is + canceled, the "done" callback will still be called and the RpcController + will indicate that the call failed at that time. + """ + raise NotImplementedError + + # Server-side methods below + + def SetFailed(self, reason): + """Sets a failure reason. + + Causes Failed() to return true on the client side. "reason" will be + incorporated into the message returned by ErrorText(). If you find + you need to return machine-readable information about failures, you + should incorporate it into your response protocol buffer and should + NOT call SetFailed(). + """ + raise NotImplementedError + + def IsCanceled(self): + """Checks if the client cancelled the RPC. 
+ + If true, indicates that the client canceled the RPC, so the server may + as well give up on replying to it. The server should still call the + final "done" callback. + """ + raise NotImplementedError + + def NotifyOnCancel(self, callback): + """Sets a callback to invoke on cancel. + + Asks that the given callback be called when the RPC is canceled. The + callback will always be called exactly once. If the RPC completes without + being canceled, the callback will be called after completion. If the RPC + has already been canceled when NotifyOnCancel() is called, the callback + will be called immediately. + + NotifyOnCancel() must be called no more than once per request. + """ + raise NotImplementedError + + +class RpcChannel(object): + + """Abstract interface for an RPC channel. + + An RpcChannel represents a communication line to a service which can be used + to call that service's methods. The service may be running on another + machine. Normally, you should not use an RpcChannel directly, but instead + construct a stub {@link Service} wrapping it. Example: + + Example: + RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234") + RpcController controller = rpcImpl.Controller() + MyService service = MyService_Stub(channel) + service.MyMethod(controller, request, callback) + """ + + def CallMethod(self, method_descriptor, rpc_controller, + request, response_class, done): + """Calls the method identified by the descriptor. + + Call the given method of the remote service. The signature of this + procedure looks the same as Service.CallMethod(), but the requirements + are less strict in one important way: the request object doesn't have to + be of any specific class as long as its descriptor is method.input_type. + """ + raise NotImplementedError diff --git a/openpype/hosts/hiero/vendor/google/protobuf/service_reflection.py b/openpype/hosts/hiero/vendor/google/protobuf/service_reflection.py new file mode 100644 index 0000000000..f82ab7145a --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/service_reflection.py @@ -0,0 +1,295 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains metaclasses used to create protocol service and service stub +classes from ServiceDescriptor objects at runtime. + +The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to +inject all useful functionality into the classes output by the protocol +compiler at compile-time. +""" + +__author__ = 'petar@google.com (Petar Petrov)' + + +class GeneratedServiceType(type): + + """Metaclass for service classes created at runtime from ServiceDescriptors. + + Implementations for all methods described in the Service class are added here + by this class. We also create properties to allow getting/setting all fields + in the protocol message. + + The protocol compiler currently uses this metaclass to create protocol service + classes at runtime. Clients can also manually create their own classes at + runtime, as in this example:: + + mydescriptor = ServiceDescriptor(.....) + class MyProtoService(service.Service): + __metaclass__ = GeneratedServiceType + DESCRIPTOR = mydescriptor + myservice_instance = MyProtoService() + # ... + """ + + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __init__(cls, name, bases, dictionary): + """Creates a message service class. + + Args: + name: Name of the class (ignored, but required by the metaclass + protocol). + bases: Base classes of the class being constructed. + dictionary: The class dictionary of the class being constructed. + dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object + describing this protocol service type. + """ + # Don't do anything if this class doesn't have a descriptor. This happens + # when a service class is subclassed. + if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary: + return + + descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY] + service_builder = _ServiceBuilder(descriptor) + service_builder.BuildService(cls) + cls.DESCRIPTOR = descriptor + + +class GeneratedServiceStubType(GeneratedServiceType): + + """Metaclass for service stubs created at runtime from ServiceDescriptors. + + This class has similar responsibilities as GeneratedServiceType, except that + it creates the service stub classes. + """ + + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __init__(cls, name, bases, dictionary): + """Creates a message service stub class. + + Args: + name: Name of the class (ignored, here). + bases: Base classes of the class being constructed. + dictionary: The class dictionary of the class being constructed. + dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object + describing this protocol service type. + """ + super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary) + # Don't do anything if this class doesn't have a descriptor. This happens + # when a service stub is subclassed. 
+ if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary: + return + + descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY] + service_stub_builder = _ServiceStubBuilder(descriptor) + service_stub_builder.BuildServiceStub(cls) + + +class _ServiceBuilder(object): + + """This class constructs a protocol service class using a service descriptor. + + Given a service descriptor, this class constructs a class that represents + the specified service descriptor. One service builder instance constructs + exactly one service class. That means all instances of that class share the + same builder. + """ + + def __init__(self, service_descriptor): + """Initializes an instance of the service class builder. + + Args: + service_descriptor: ServiceDescriptor to use when constructing the + service class. + """ + self.descriptor = service_descriptor + + def BuildService(builder, cls): + """Constructs the service class. + + Args: + cls: The class that will be constructed. + """ + + # CallMethod needs to operate with an instance of the Service class. This + # internal wrapper function exists only to be able to pass the service + # instance to the method that does the real CallMethod work. + # Making sure to use exact argument names from the abstract interface in + # service.py to match the type signature + def _WrapCallMethod(self, method_descriptor, rpc_controller, request, done): + return builder._CallMethod(self, method_descriptor, rpc_controller, + request, done) + + def _WrapGetRequestClass(self, method_descriptor): + return builder._GetRequestClass(method_descriptor) + + def _WrapGetResponseClass(self, method_descriptor): + return builder._GetResponseClass(method_descriptor) + + builder.cls = cls + cls.CallMethod = _WrapCallMethod + cls.GetDescriptor = staticmethod(lambda: builder.descriptor) + cls.GetDescriptor.__doc__ = 'Returns the service descriptor.' + cls.GetRequestClass = _WrapGetRequestClass + cls.GetResponseClass = _WrapGetResponseClass + for method in builder.descriptor.methods: + setattr(cls, method.name, builder._GenerateNonImplementedMethod(method)) + + def _CallMethod(self, srvc, method_descriptor, + rpc_controller, request, callback): + """Calls the method described by a given method descriptor. + + Args: + srvc: Instance of the service for which this method is called. + method_descriptor: Descriptor that represent the method to call. + rpc_controller: RPC controller to use for this method's execution. + request: Request protocol message. + callback: A callback to invoke after the method has completed. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'CallMethod() given method descriptor for wrong service type.') + method = getattr(srvc, method_descriptor.name) + return method(rpc_controller, request, callback) + + def _GetRequestClass(self, method_descriptor): + """Returns the class of the request protocol message. + + Args: + method_descriptor: Descriptor of the method for which to return the + request protocol message class. + + Returns: + A class that represents the input protocol message of the specified + method. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'GetRequestClass() given method descriptor for wrong service type.') + return method_descriptor.input_type._concrete_class + + def _GetResponseClass(self, method_descriptor): + """Returns the class of the response protocol message. 
+ + Args: + method_descriptor: Descriptor of the method for which to return the + response protocol message class. + + Returns: + A class that represents the output protocol message of the specified + method. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'GetResponseClass() given method descriptor for wrong service type.') + return method_descriptor.output_type._concrete_class + + def _GenerateNonImplementedMethod(self, method): + """Generates and returns a method that can be set for a service methods. + + Args: + method: Descriptor of the service method for which a method is to be + generated. + + Returns: + A method that can be added to the service class. + """ + return lambda inst, rpc_controller, request, callback: ( + self._NonImplementedMethod(method.name, rpc_controller, callback)) + + def _NonImplementedMethod(self, method_name, rpc_controller, callback): + """The body of all methods in the generated service class. + + Args: + method_name: Name of the method being executed. + rpc_controller: RPC controller used to execute this method. + callback: A callback which will be invoked when the method finishes. + """ + rpc_controller.SetFailed('Method %s not implemented.' % method_name) + callback(None) + + +class _ServiceStubBuilder(object): + + """Constructs a protocol service stub class using a service descriptor. + + Given a service descriptor, this class constructs a suitable stub class. + A stub is just a type-safe wrapper around an RpcChannel which emulates a + local implementation of the service. + + One service stub builder instance constructs exactly one class. It means all + instances of that class share the same service stub builder. + """ + + def __init__(self, service_descriptor): + """Initializes an instance of the service stub class builder. + + Args: + service_descriptor: ServiceDescriptor to use when constructing the + stub class. + """ + self.descriptor = service_descriptor + + def BuildServiceStub(self, cls): + """Constructs the stub class. + + Args: + cls: The class that will be constructed. + """ + + def _ServiceStubInit(stub, rpc_channel): + stub.rpc_channel = rpc_channel + self.cls = cls + cls.__init__ = _ServiceStubInit + for method in self.descriptor.methods: + setattr(cls, method.name, self._GenerateStubMethod(method)) + + def _GenerateStubMethod(self, method): + return (lambda inst, rpc_controller, request, callback=None: + self._StubMethod(inst, method, rpc_controller, request, callback)) + + def _StubMethod(self, stub, method_descriptor, + rpc_controller, request, callback): + """The body of all service methods in the generated stub class. + + Args: + stub: Stub instance. + method_descriptor: Descriptor of the invoked method. + rpc_controller: Rpc controller to execute the method. + request: Request protocol message. + callback: A callback to execute when the method finishes. + Returns: + Response message (in case of blocking call). + """ + return stub.rpc_channel.CallMethod( + method_descriptor, rpc_controller, request, + method_descriptor.output_type._concrete_class, callback) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/source_context_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/source_context_pb2.py new file mode 100644 index 0000000000..30cca2e06e --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/source_context_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/source_context.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$google/protobuf/source_context.proto\x12\x0fgoogle.protobuf\"\"\n\rSourceContext\x12\x11\n\tfile_name\x18\x01 \x01(\tB\x8a\x01\n\x13\x63om.google.protobufB\x12SourceContextProtoP\x01Z6google.golang.org/protobuf/types/known/sourcecontextpb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.source_context_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\022SourceContextProtoP\001Z6google.golang.org/protobuf/types/known/sourcecontextpb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _SOURCECONTEXT._serialized_start=57 + _SOURCECONTEXT._serialized_end=91 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/struct_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/struct_pb2.py new file mode 100644 index 0000000000..149728ca08 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/struct_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/struct.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x84\x01\n\x06Struct\x12\x33\n\x06\x66ields\x18\x01 \x03(\x0b\x32#.google.protobuf.Struct.FieldsEntry\x1a\x45\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value:\x02\x38\x01\"\xea\x01\n\x05Value\x12\x30\n\nnull_value\x18\x01 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x12\x16\n\x0cnumber_value\x18\x02 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x12\x14\n\nbool_value\x18\x04 \x01(\x08H\x00\x12/\n\x0cstruct_value\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructH\x00\x12\x30\n\nlist_value\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x42\x06\n\x04kind\"3\n\tListValue\x12&\n\x06values\x18\x01 \x03(\x0b\x32\x16.google.protobuf.Value*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00\x42\x7f\n\x13\x63om.google.protobufB\x0bStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.struct_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = 
b'\n\023com.google.protobufB\013StructProtoP\001Z/google.golang.org/protobuf/types/known/structpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _STRUCT_FIELDSENTRY._options = None + _STRUCT_FIELDSENTRY._serialized_options = b'8\001' + _NULLVALUE._serialized_start=474 + _NULLVALUE._serialized_end=501 + _STRUCT._serialized_start=50 + _STRUCT._serialized_end=182 + _STRUCT_FIELDSENTRY._serialized_start=113 + _STRUCT_FIELDSENTRY._serialized_end=182 + _VALUE._serialized_start=185 + _VALUE._serialized_end=419 + _LISTVALUE._serialized_start=421 + _LISTVALUE._serialized_end=472 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/symbol_database.py b/openpype/hosts/hiero/vendor/google/protobuf/symbol_database.py new file mode 100644 index 0000000000..fdcf8cf06c --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/symbol_database.py @@ -0,0 +1,194 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A database of Python protocol buffer generated symbols. + +SymbolDatabase is the MessageFactory for messages generated at compile time, +and makes it easy to create new instances of a registered type, given only the +type's protocol buffer symbol name. + +Example usage:: + + db = symbol_database.SymbolDatabase() + + # Register symbols of interest, from one or multiple files. 
+ db.RegisterFileDescriptor(my_proto_pb2.DESCRIPTOR) + db.RegisterMessage(my_proto_pb2.MyMessage) + db.RegisterEnumDescriptor(my_proto_pb2.MyEnum.DESCRIPTOR) + + # The database can be used as a MessageFactory, to generate types based on + # their name: + types = db.GetMessages(['my_proto.proto']) + my_message_instance = types['MyMessage']() + + # The database's underlying descriptor pool can be queried, so it's not + # necessary to know a type's filename to be able to generate it: + filename = db.pool.FindFileContainingSymbol('MyMessage') + my_message_instance = db.GetMessages([filename])['MyMessage']() + + # This functionality is also provided directly via a convenience method: + my_message_instance = db.GetSymbol('MyMessage')() +""" + + +from google.protobuf.internal import api_implementation +from google.protobuf import descriptor_pool +from google.protobuf import message_factory + + +class SymbolDatabase(message_factory.MessageFactory): + """A database of Python generated symbols.""" + + def RegisterMessage(self, message): + """Registers the given message type in the local database. + + Calls to GetSymbol() and GetMessages() will return messages registered here. + + Args: + message: A :class:`google.protobuf.message.Message` subclass (or + instance); its descriptor will be registered. + + Returns: + The provided message. + """ + + desc = message.DESCRIPTOR + self._classes[desc] = message + self.RegisterMessageDescriptor(desc) + return message + + def RegisterMessageDescriptor(self, message_descriptor): + """Registers the given message descriptor in the local database. + + Args: + message_descriptor (Descriptor): the message descriptor to add. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddDescriptor(message_descriptor) + + def RegisterEnumDescriptor(self, enum_descriptor): + """Registers the given enum descriptor in the local database. + + Args: + enum_descriptor (EnumDescriptor): The enum descriptor to register. + + Returns: + EnumDescriptor: The provided descriptor. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddEnumDescriptor(enum_descriptor) + return enum_descriptor + + def RegisterServiceDescriptor(self, service_descriptor): + """Registers the given service descriptor in the local database. + + Args: + service_descriptor (ServiceDescriptor): the service descriptor to + register. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddServiceDescriptor(service_descriptor) + + def RegisterFileDescriptor(self, file_descriptor): + """Registers the given file descriptor in the local database. + + Args: + file_descriptor (FileDescriptor): The file descriptor to register. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._InternalAddFileDescriptor(file_descriptor) + + def GetSymbol(self, symbol): + """Tries to find a symbol in the local database. + + Currently, this method only returns message.Message instances, however, if + may be extended in future to support other symbol types. + + Args: + symbol (str): a protocol buffer symbol. + + Returns: + A Python class corresponding to the symbol. + + Raises: + KeyError: if the symbol could not be found. + """ + + return self._classes[self.pool.FindMessageTypeByName(symbol)] + + def GetMessages(self, files): + # TODO(amauryfa): Fix the differences with MessageFactory. + """Gets all registered messages from a specified file. 
+ + Only messages already created and registered will be returned; (this is the + case for imported _pb2 modules) + But unlike MessageFactory, this version also returns already defined nested + messages, but does not register any message extensions. + + Args: + files (list[str]): The file names to extract messages from. + + Returns: + A dictionary mapping proto names to the message classes. + + Raises: + KeyError: if a file could not be found. + """ + + def _GetAllMessages(desc): + """Walk a message Descriptor and recursively yields all message names.""" + yield desc + for msg_desc in desc.nested_types: + for nested_desc in _GetAllMessages(msg_desc): + yield nested_desc + + result = {} + for file_name in files: + file_desc = self.pool.FindFileByName(file_name) + for msg_desc in file_desc.message_types_by_name.values(): + for desc in _GetAllMessages(msg_desc): + try: + result[desc.full_name] = self._classes[desc] + except KeyError: + # This descriptor has no registered class, skip it. + pass + return result + + +_DEFAULT = SymbolDatabase(pool=descriptor_pool.Default()) + + +def Default(): + """Returns the default SymbolDatabase.""" + return _DEFAULT diff --git a/openpype/hosts/hiero/vendor/google/protobuf/text_encoding.py b/openpype/hosts/hiero/vendor/google/protobuf/text_encoding.py new file mode 100644 index 0000000000..759cf11f62 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/text_encoding.py @@ -0,0 +1,110 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Encoding related utilities.""" +import re + +_cescape_chr_to_symbol_map = {} +_cescape_chr_to_symbol_map[9] = r'\t' # optional escape +_cescape_chr_to_symbol_map[10] = r'\n' # optional escape +_cescape_chr_to_symbol_map[13] = r'\r' # optional escape +_cescape_chr_to_symbol_map[34] = r'\"' # necessary escape +_cescape_chr_to_symbol_map[39] = r"\'" # optional escape +_cescape_chr_to_symbol_map[92] = r'\\' # necessary escape + +# Lookup table for unicode +_cescape_unicode_to_str = [chr(i) for i in range(0, 256)] +for byte, string in _cescape_chr_to_symbol_map.items(): + _cescape_unicode_to_str[byte] = string + +# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32) +_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] + + [chr(i) for i in range(32, 127)] + + [r'\%03o' % i for i in range(127, 256)]) +for byte, string in _cescape_chr_to_symbol_map.items(): + _cescape_byte_to_str[byte] = string +del byte, string + + +def CEscape(text, as_utf8): + # type: (...) -> str + """Escape a bytes string for use in an text protocol buffer. + + Args: + text: A byte string to be escaped. + as_utf8: Specifies if result may contain non-ASCII characters. + In Python 3 this allows unescaped non-ASCII Unicode characters. + In Python 2 the return value will be valid UTF-8 rather than only ASCII. + Returns: + Escaped string (str). + """ + # Python's text.encode() 'string_escape' or 'unicode_escape' codecs do not + # satisfy our needs; they encodes unprintable characters using two-digit hex + # escapes whereas our C++ unescaping function allows hex escapes to be any + # length. So, "\0011".encode('string_escape') ends up being "\\x011", which + # will be decoded in C++ as a single-character string with char code 0x11. + text_is_unicode = isinstance(text, str) + if as_utf8 and text_is_unicode: + # We're already unicode, no processing beyond control char escapes. + return text.translate(_cescape_chr_to_symbol_map) + ord_ = ord if text_is_unicode else lambda x: x # bytes iterate as ints. + if as_utf8: + return ''.join(_cescape_unicode_to_str[ord_(c)] for c in text) + return ''.join(_cescape_byte_to_str[ord_(c)] for c in text) + + +_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])') + + +def CUnescape(text): + # type: (str) -> bytes + """Unescape a text string with C-style escape sequences to UTF-8 bytes. + + Args: + text: The data to parse in a str. + Returns: + A byte string. + """ + + def ReplaceHex(m): + # Only replace the match if the number of leading back slashes is odd. i.e. + # the slash itself is not escaped. + if len(m.group(1)) & 1: + return m.group(1) + 'x0' + m.group(2) + return m.group(0) + + # This is required because the 'string_escape' encoding doesn't + # allow single-digit hex escapes (like '\xf'). + result = _CUNESCAPE_HEX.sub(ReplaceHex, text) + + return (result.encode('utf-8') # Make it bytes to allow decode. + .decode('unicode_escape') + # Make it bytes again to return the proper type. + .encode('raw_unicode_escape')) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/text_format.py b/openpype/hosts/hiero/vendor/google/protobuf/text_format.py new file mode 100644 index 0000000000..412385c26f --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/text_format.py @@ -0,0 +1,1795 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains routines for printing protocol messages in text format. + +Simple usage example:: + + # Create a proto object and serialize it to a text proto string. + message = my_proto_pb2.MyMessage(foo='bar') + text_proto = text_format.MessageToString(message) + + # Parse a text proto string. + message = text_format.Parse(text_proto, my_proto_pb2.MyMessage()) +""" + +__author__ = 'kenton@google.com (Kenton Varda)' + +# TODO(b/129989314) Import thread contention leads to test failures. 
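+# The two `encodings` imports below appear to pre-register the
+# 'raw_unicode_escape' and 'unicode_escape' codecs that text_encoding's
+# CUnescape relies on, so they are not pulled in lazily while a text proto
+# is being parsed (see the TODO above).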
+import encodings.raw_unicode_escape # pylint: disable=unused-import +import encodings.unicode_escape # pylint: disable=unused-import +import io +import math +import re + +from google.protobuf.internal import decoder +from google.protobuf.internal import type_checkers +from google.protobuf import descriptor +from google.protobuf import text_encoding + +# pylint: disable=g-import-not-at-top +__all__ = ['MessageToString', 'Parse', 'PrintMessage', 'PrintField', + 'PrintFieldValue', 'Merge', 'MessageToBytes'] + +_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(), + type_checkers.Int32ValueChecker(), + type_checkers.Uint64ValueChecker(), + type_checkers.Int64ValueChecker()) +_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?$', re.IGNORECASE) +_FLOAT_NAN = re.compile('nanf?$', re.IGNORECASE) +_QUOTES = frozenset(("'", '"')) +_ANY_FULL_TYPE_NAME = 'google.protobuf.Any' + + +class Error(Exception): + """Top-level module error for text_format.""" + + +class ParseError(Error): + """Thrown in case of text parsing or tokenizing error.""" + + def __init__(self, message=None, line=None, column=None): + if message is not None and line is not None: + loc = str(line) + if column is not None: + loc += ':{0}'.format(column) + message = '{0} : {1}'.format(loc, message) + if message is not None: + super(ParseError, self).__init__(message) + else: + super(ParseError, self).__init__() + self._line = line + self._column = column + + def GetLine(self): + return self._line + + def GetColumn(self): + return self._column + + +class TextWriter(object): + + def __init__(self, as_utf8): + self._writer = io.StringIO() + + def write(self, val): + return self._writer.write(val) + + def close(self): + return self._writer.close() + + def getvalue(self): + return self._writer.getvalue() + + +def MessageToString( + message, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + indent=0, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + # type: (...) -> str + """Convert protobuf message to text format. + + Double values can be formatted compactly with 15 digits of + precision (which is the most that IEEE 754 "double" can guarantee) + using double_format='.15g'. To ensure that converting to text and back to a + proto will result in an identical value, double_format='.17g' should be used. + + Args: + message: The protocol buffers message. + as_utf8: Return unescaped Unicode for non-ASCII characters. + In Python 3 actual Unicode characters may appear as is in strings. + In Python 2 the return value will be valid UTF-8 rather than only ASCII. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, fields of a proto message will be printed using + the order defined in source code instead of the field number, extensions + will be printed at the end of the message and their relative order is + determined by the extension number. By default, use the field number + order. + float_format (str): If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest float + that has same value in wire will be printed. Also affect double field + if double_format is not set but float_format is set. 
+ double_format (str): If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, use ``str()`` + use_field_number: If True, print field numbers instead of names. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + indent (int): The initial indent level, in terms of spaces, for pretty + print. + message_formatter (function(message, indent, as_one_line) -> unicode|None): + Custom formatter for selected sub-messages (usually based on message + type). Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if the + field is a proto message. + + Returns: + str: A string of the text formatted protocol buffer message. + """ + out = TextWriter(as_utf8) + printer = _Printer( + out, + indent, + as_utf8, + as_one_line, + use_short_repeated_primitives, + pointy_brackets, + use_index_order, + float_format, + double_format, + use_field_number, + descriptor_pool, + message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintMessage(message) + result = out.getvalue() + out.close() + if as_one_line: + return result.rstrip() + return result + + +def MessageToBytes(message, **kwargs): + # type: (...) -> bytes + """Convert protobuf message to encoded text format. See MessageToString.""" + text = MessageToString(message, **kwargs) + if isinstance(text, bytes): + return text + codec = 'utf-8' if kwargs.get('as_utf8') else 'ascii' + return text.encode(codec) + + +def _IsMapEntry(field): + return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.message_type.has_options and + field.message_type.GetOptions().map_entry) + + +def PrintMessage(message, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + printer = _Printer( + out=out, indent=indent, as_utf8=as_utf8, + as_one_line=as_one_line, + use_short_repeated_primitives=use_short_repeated_primitives, + pointy_brackets=pointy_brackets, + use_index_order=use_index_order, + float_format=float_format, + double_format=double_format, + use_field_number=use_field_number, + descriptor_pool=descriptor_pool, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintMessage(message) + + +def PrintField(field, + value, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Print a single field name/value pair.""" + printer = _Printer(out, indent, as_utf8, as_one_line, + use_short_repeated_primitives, pointy_brackets, + use_index_order, float_format, double_format, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintField(field, value) + + +def PrintFieldValue(field, + value, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, 
+ float_format=None, + double_format=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Print a single field value (not including name).""" + printer = _Printer(out, indent, as_utf8, as_one_line, + use_short_repeated_primitives, pointy_brackets, + use_index_order, float_format, double_format, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintFieldValue(field, value) + + +def _BuildMessageFromTypeName(type_name, descriptor_pool): + """Returns a protobuf message instance. + + Args: + type_name: Fully-qualified protobuf message type name string. + descriptor_pool: DescriptorPool instance. + + Returns: + A Message instance of type matching type_name, or None if the a Descriptor + wasn't found matching type_name. + """ + # pylint: disable=g-import-not-at-top + if descriptor_pool is None: + from google.protobuf import descriptor_pool as pool_mod + descriptor_pool = pool_mod.Default() + from google.protobuf import symbol_database + database = symbol_database.Default() + try: + message_descriptor = descriptor_pool.FindMessageTypeByName(type_name) + except KeyError: + return None + message_type = database.GetPrototype(message_descriptor) + return message_type() + + +# These values must match WireType enum in google/protobuf/wire_format.h. +WIRETYPE_LENGTH_DELIMITED = 2 +WIRETYPE_START_GROUP = 3 + + +class _Printer(object): + """Text format printer for protocol message.""" + + def __init__( + self, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Initialize the Printer. + + Double values can be formatted compactly with 15 digits of precision + (which is the most that IEEE 754 "double" can guarantee) using + double_format='.15g'. To ensure that converting to text and back to a proto + will result in an identical value, double_format='.17g' should be used. + + Args: + out: To record the text format result. + indent: The initial indent level for pretty print. + as_utf8: Return unescaped Unicode for non-ASCII characters. + In Python 3 actual Unicode characters may appear as is in strings. + In Python 2 the return value will be valid UTF-8 rather than ASCII. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, print fields of a proto message using the order + defined in source code instead of the field number. By default, use the + field number order. + float_format: If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest + float that has same value in wire will be printed. Also affect double + field if double_format is not set but float_format is set. + double_format: If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, str() is used. + use_field_number: If True, print field numbers instead of names. + descriptor_pool: A DescriptorPool used to resolve Any types. 
+ message_formatter: A function(message, indent, as_one_line): unicode|None + to custom format selected sub-messages (usually based on message type). + Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if + the field is a proto message. + """ + self.out = out + self.indent = indent + self.as_utf8 = as_utf8 + self.as_one_line = as_one_line + self.use_short_repeated_primitives = use_short_repeated_primitives + self.pointy_brackets = pointy_brackets + self.use_index_order = use_index_order + self.float_format = float_format + if double_format is not None: + self.double_format = double_format + else: + self.double_format = float_format + self.use_field_number = use_field_number + self.descriptor_pool = descriptor_pool + self.message_formatter = message_formatter + self.print_unknown_fields = print_unknown_fields + self.force_colon = force_colon + + def _TryPrintAsAnyMessage(self, message): + """Serializes if message is a google.protobuf.Any field.""" + if '/' not in message.type_url: + return False + packed_message = _BuildMessageFromTypeName(message.TypeName(), + self.descriptor_pool) + if packed_message: + packed_message.MergeFromString(message.value) + colon = ':' if self.force_colon else '' + self.out.write('%s[%s]%s ' % (self.indent * ' ', message.type_url, colon)) + self._PrintMessageFieldValue(packed_message) + self.out.write(' ' if self.as_one_line else '\n') + return True + else: + return False + + def _TryCustomFormatMessage(self, message): + formatted = self.message_formatter(message, self.indent, self.as_one_line) + if formatted is None: + return False + + out = self.out + out.write(' ' * self.indent) + out.write(formatted) + out.write(' ' if self.as_one_line else '\n') + return True + + def PrintMessage(self, message): + """Convert protobuf message to text format. + + Args: + message: The protocol buffers message. + """ + if self.message_formatter and self._TryCustomFormatMessage(message): + return + if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and + self._TryPrintAsAnyMessage(message)): + return + fields = message.ListFields() + if self.use_index_order: + fields.sort( + key=lambda x: x[0].number if x[0].is_extension else x[0].index) + for field, value in fields: + if _IsMapEntry(field): + for key in sorted(value): + # This is slow for maps with submessage entries because it copies the + # entire tree. Unfortunately this would take significant refactoring + # of this file to work around. + # + # TODO(haberman): refactor and optimize if this becomes an issue. 
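+          #
+          # Each map item is wrapped in the synthetic map-entry message class
+          # (a message with 'key' and 'value' fields) so it can be printed
+          # through the same PrintField() path as an ordinary submessage.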
+ entry_submsg = value.GetEntryClass()(key=key, value=value[key]) + self.PrintField(field, entry_submsg) + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if (self.use_short_repeated_primitives + and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE + and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_STRING): + self._PrintShortRepeatedPrimitivesValue(field, value) + else: + for element in value: + self.PrintField(field, element) + else: + self.PrintField(field, value) + + if self.print_unknown_fields: + self._PrintUnknownFields(message.UnknownFields()) + + def _PrintUnknownFields(self, unknown_fields): + """Print unknown fields.""" + out = self.out + for field in unknown_fields: + out.write(' ' * self.indent) + out.write(str(field.field_number)) + if field.wire_type == WIRETYPE_START_GROUP: + if self.as_one_line: + out.write(' { ') + else: + out.write(' {\n') + self.indent += 2 + + self._PrintUnknownFields(field.data) + + if self.as_one_line: + out.write('} ') + else: + self.indent -= 2 + out.write(' ' * self.indent + '}\n') + elif field.wire_type == WIRETYPE_LENGTH_DELIMITED: + try: + # If this field is parseable as a Message, it is probably + # an embedded message. + # pylint: disable=protected-access + (embedded_unknown_message, pos) = decoder._DecodeUnknownFieldSet( + memoryview(field.data), 0, len(field.data)) + except Exception: # pylint: disable=broad-except + pos = 0 + + if pos == len(field.data): + if self.as_one_line: + out.write(' { ') + else: + out.write(' {\n') + self.indent += 2 + + self._PrintUnknownFields(embedded_unknown_message) + + if self.as_one_line: + out.write('} ') + else: + self.indent -= 2 + out.write(' ' * self.indent + '}\n') + else: + # A string or bytes field. self.as_utf8 may not work. + out.write(': \"') + out.write(text_encoding.CEscape(field.data, False)) + out.write('\" ' if self.as_one_line else '\"\n') + else: + # varint, fixed32, fixed64 + out.write(': ') + out.write(str(field.data)) + out.write(' ' if self.as_one_line else '\n') + + def _PrintFieldName(self, field): + """Print field name.""" + out = self.out + out.write(' ' * self.indent) + if self.use_field_number: + out.write(str(field.number)) + else: + if field.is_extension: + out.write('[') + if (field.containing_type.GetOptions().message_set_wire_format and + field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL): + out.write(field.message_type.full_name) + else: + out.write(field.full_name) + out.write(']') + elif field.type == descriptor.FieldDescriptor.TYPE_GROUP: + # For groups, use the capitalized name. + out.write(field.message_type.name) + else: + out.write(field.name) + + if (self.force_colon or + field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE): + # The colon is optional in this case, but our cross-language golden files + # don't include it. Here, the colon is only included if force_colon is + # set to True + out.write(':') + + def PrintField(self, field, value): + """Print a single field name/value pair.""" + self._PrintFieldName(field) + self.out.write(' ') + self.PrintFieldValue(field, value) + self.out.write(' ' if self.as_one_line else '\n') + + def _PrintShortRepeatedPrimitivesValue(self, field, value): + """"Prints short repeated primitives value.""" + # Note: this is called only when value has at least one element. 
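+    # The whole field is emitted on one bracketed line, e.g.:
+    #   repeated_field: [1, 2, 3]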
+ self._PrintFieldName(field) + self.out.write(' [') + for i in range(len(value) - 1): + self.PrintFieldValue(field, value[i]) + self.out.write(', ') + self.PrintFieldValue(field, value[-1]) + self.out.write(']') + self.out.write(' ' if self.as_one_line else '\n') + + def _PrintMessageFieldValue(self, value): + if self.pointy_brackets: + openb = '<' + closeb = '>' + else: + openb = '{' + closeb = '}' + + if self.as_one_line: + self.out.write('%s ' % openb) + self.PrintMessage(value) + self.out.write(closeb) + else: + self.out.write('%s\n' % openb) + self.indent += 2 + self.PrintMessage(value) + self.indent -= 2 + self.out.write(' ' * self.indent + closeb) + + def PrintFieldValue(self, field, value): + """Print a single field value (not including name). + + For repeated fields, the value should be a single element. + + Args: + field: The descriptor of the field to be printed. + value: The value of the field. + """ + out = self.out + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + self._PrintMessageFieldValue(value) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + enum_value = field.enum_type.values_by_number.get(value, None) + if enum_value is not None: + out.write(enum_value.name) + else: + out.write(str(value)) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + out.write('\"') + if isinstance(value, str) and not self.as_utf8: + out_value = value.encode('utf-8') + else: + out_value = value + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + # We always need to escape all binary data in TYPE_BYTES fields. + out_as_utf8 = False + else: + out_as_utf8 = self.as_utf8 + out.write(text_encoding.CEscape(out_value, out_as_utf8)) + out.write('\"') + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + if value: + out.write('true') + else: + out.write('false') + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: + if self.float_format is not None: + out.write('{1:{0}}'.format(self.float_format, value)) + else: + if math.isnan(value): + out.write(str(value)) + else: + out.write(str(type_checkers.ToShortestFloat(value))) + elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_DOUBLE and + self.double_format is not None): + out.write('{1:{0}}'.format(self.double_format, value)) + else: + out.write(str(value)) + + +def Parse(text, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + NOTE: for historical reasons this function does not clear the input + message. This is different from what the binary msg.ParseFrom(...) does. + If text contains a field already set in message, the value is appended if the + field is repeated. Otherwise, an error is raised. + + Example:: + + a = MyProto() + a.repeated_field.append('test') + b = MyProto() + + # Repeated fields are combined + text_format.Parse(repr(a), b) + text_format.Parse(repr(a), b) # repeated_field contains ["test", "test"] + + # Non-repeated fields cannot be overwritten + a.singular_field = 1 + b.singular_field = 2 + text_format.Parse(repr(a), b) # ParseError + + # Binary version: + b.ParseFromString(a.SerializeToString()) # repeated_field is now "test" + + Caller is responsible for clearing the message as needed. + + Args: + text (str): Message text representation. + message (Message): A protocol buffer message to merge into. 
+ allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + Message: The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + return ParseLines(text.split(b'\n' if isinstance(text, bytes) else u'\n'), + message, + allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + + +def Merge(text, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + Like Parse(), but allows repeated values for a non-repeated field, and uses + the last one. This means any non-repeated, top-level fields specified in text + replace those in the message. + + Args: + text (str): Message text representation. + message (Message): A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + Message: The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + return MergeLines( + text.split(b'\n' if isinstance(text, bytes) else u'\n'), + message, + allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + + +def ParseLines(lines, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + See Parse() for caveats. + + Args: + lines: An iterable of lines of a message's text representation. + message: A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool: A DescriptorPool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + parser = _Parser(allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + return parser.ParseLines(lines, message) + + +def MergeLines(lines, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + See Merge() for more details. + + Args: + lines: An iterable of lines of a message's text representation. + message: A protocol buffer message to merge into. 
+ allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool: A DescriptorPool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + parser = _Parser(allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + return parser.MergeLines(lines, message) + + +class _Parser(object): + """Text format parser for protocol message.""" + + def __init__(self, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + self.allow_unknown_extension = allow_unknown_extension + self.allow_field_number = allow_field_number + self.descriptor_pool = descriptor_pool + self.allow_unknown_field = allow_unknown_field + + def ParseLines(self, lines, message): + """Parses a text representation of a protocol message into a message.""" + self._allow_multiple_scalars = False + self._ParseOrMerge(lines, message) + return message + + def MergeLines(self, lines, message): + """Merges a text representation of a protocol message into a message.""" + self._allow_multiple_scalars = True + self._ParseOrMerge(lines, message) + return message + + def _ParseOrMerge(self, lines, message): + """Converts a text representation of a protocol message into a message. + + Args: + lines: Lines of a message's text representation. + message: A protocol buffer message to merge into. + + Raises: + ParseError: On text parsing problems. + """ + # Tokenize expects native str lines. + str_lines = ( + line if isinstance(line, str) else line.decode('utf-8') + for line in lines) + tokenizer = Tokenizer(str_lines) + while not tokenizer.AtEnd(): + self._MergeField(tokenizer, message) + + def _MergeField(self, tokenizer, message): + """Merges a single protocol message field into a message. + + Args: + tokenizer: A tokenizer to parse the field name and values. + message: A protocol message to record the data. + + Raises: + ParseError: In case of text parsing problems. + """ + message_descriptor = message.DESCRIPTOR + if (message_descriptor.full_name == _ANY_FULL_TYPE_NAME and + tokenizer.TryConsume('[')): + type_url_prefix, packed_type_name = self._ConsumeAnyTypeUrl(tokenizer) + tokenizer.Consume(']') + tokenizer.TryConsume(':') + if tokenizer.TryConsume('<'): + expanded_any_end_token = '>' + else: + tokenizer.Consume('{') + expanded_any_end_token = '}' + expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name, + self.descriptor_pool) + if not expanded_any_sub_message: + raise ParseError('Type %s not found in descriptor pool' % + packed_type_name) + while not tokenizer.TryConsume(expanded_any_end_token): + if tokenizer.AtEnd(): + raise tokenizer.ParseErrorPreviousToken('Expected "%s".' 
% + (expanded_any_end_token,)) + self._MergeField(tokenizer, expanded_any_sub_message) + deterministic = False + + message.Pack(expanded_any_sub_message, + type_url_prefix=type_url_prefix, + deterministic=deterministic) + return + + if tokenizer.TryConsume('['): + name = [tokenizer.ConsumeIdentifier()] + while tokenizer.TryConsume('.'): + name.append(tokenizer.ConsumeIdentifier()) + name = '.'.join(name) + + if not message_descriptor.is_extendable: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" does not have extensions.' % + message_descriptor.full_name) + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(name) + # pylint: enable=protected-access + + + if not field: + if self.allow_unknown_extension: + field = None + else: + raise tokenizer.ParseErrorPreviousToken( + 'Extension "%s" not registered. ' + 'Did you import the _pb2 module which defines it? ' + 'If you are trying to place the extension in the MessageSet ' + 'field of another message that is in an Any or MessageSet field, ' + 'that message\'s _pb2 module must be imported as well' % name) + elif message_descriptor != field.containing_type: + raise tokenizer.ParseErrorPreviousToken( + 'Extension "%s" does not extend message type "%s".' % + (name, message_descriptor.full_name)) + + tokenizer.Consume(']') + + else: + name = tokenizer.ConsumeIdentifierOrNumber() + if self.allow_field_number and name.isdigit(): + number = ParseInteger(name, True, True) + field = message_descriptor.fields_by_number.get(number, None) + if not field and message_descriptor.is_extendable: + field = message.Extensions._FindExtensionByNumber(number) + else: + field = message_descriptor.fields_by_name.get(name, None) + + # Group names are expected to be capitalized as they appear in the + # .proto file, which actually matches their type names, not their field + # names. + if not field: + field = message_descriptor.fields_by_name.get(name.lower(), None) + if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP: + field = None + + if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and + field.message_type.name != name): + field = None + + if not field and not self.allow_unknown_field: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" has no field named "%s".' % + (message_descriptor.full_name, name)) + + if field: + if not self._allow_multiple_scalars and field.containing_oneof: + # Check if there's a different field set in this oneof. + # Note that we ignore the case if the same field was set before, and we + # apply _allow_multiple_scalars to non-scalar fields as well. + which_oneof = message.WhichOneof(field.containing_oneof.name) + if which_oneof is not None and which_oneof != field.name: + raise tokenizer.ParseErrorPreviousToken( + 'Field "%s" is specified along with field "%s", another member ' + 'of oneof "%s" for message type "%s".' % + (field.name, which_oneof, field.containing_oneof.name, + message_descriptor.full_name)) + + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + tokenizer.TryConsume(':') + merger = self._MergeMessageField + else: + tokenizer.Consume(':') + merger = self._MergeScalarField + + if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and + tokenizer.TryConsume('[')): + # Short repeated format, e.g. 
"foo: [1, 2, 3]" + if not tokenizer.TryConsume(']'): + while True: + merger(tokenizer, message, field) + if tokenizer.TryConsume(']'): + break + tokenizer.Consume(',') + + else: + merger(tokenizer, message, field) + + else: # Proto field is unknown. + assert (self.allow_unknown_extension or self.allow_unknown_field) + _SkipFieldContents(tokenizer) + + # For historical reasons, fields may optionally be separated by commas or + # semicolons. + if not tokenizer.TryConsume(','): + tokenizer.TryConsume(';') + + + def _ConsumeAnyTypeUrl(self, tokenizer): + """Consumes a google.protobuf.Any type URL and returns the type name.""" + # Consume "type.googleapis.com/". + prefix = [tokenizer.ConsumeIdentifier()] + tokenizer.Consume('.') + prefix.append(tokenizer.ConsumeIdentifier()) + tokenizer.Consume('.') + prefix.append(tokenizer.ConsumeIdentifier()) + tokenizer.Consume('/') + # Consume the fully-qualified type name. + name = [tokenizer.ConsumeIdentifier()] + while tokenizer.TryConsume('.'): + name.append(tokenizer.ConsumeIdentifier()) + return '.'.join(prefix), '.'.join(name) + + def _MergeMessageField(self, tokenizer, message, field): + """Merges a single scalar field into a message. + + Args: + tokenizer: A tokenizer to parse the field value. + message: The message of which field is a member. + field: The descriptor of the field to be merged. + + Raises: + ParseError: In case of text parsing problems. + """ + is_map_entry = _IsMapEntry(field) + + if tokenizer.TryConsume('<'): + end_token = '>' + else: + tokenizer.Consume('{') + end_token = '}' + + if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if field.is_extension: + sub_message = message.Extensions[field].add() + elif is_map_entry: + sub_message = getattr(message, field.name).GetEntryClass()() + else: + sub_message = getattr(message, field.name).add() + else: + if field.is_extension: + if (not self._allow_multiple_scalars and + message.HasExtension(field)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" extensions.' % + (message.DESCRIPTOR.full_name, field.full_name)) + sub_message = message.Extensions[field] + else: + # Also apply _allow_multiple_scalars to message field. + # TODO(jieluo): Change to _allow_singular_overwrites. + if (not self._allow_multiple_scalars and + message.HasField(field.name)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" fields.' % + (message.DESCRIPTOR.full_name, field.name)) + sub_message = getattr(message, field.name) + sub_message.SetInParent() + + while not tokenizer.TryConsume(end_token): + if tokenizer.AtEnd(): + raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,)) + self._MergeField(tokenizer, sub_message) + + if is_map_entry: + value_cpptype = field.message_type.fields_by_name['value'].cpp_type + if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + value = getattr(message, field.name)[sub_message.key] + value.CopyFrom(sub_message.value) + else: + getattr(message, field.name)[sub_message.key] = sub_message.value + + @staticmethod + def _IsProto3Syntax(message): + message_descriptor = message.DESCRIPTOR + return (hasattr(message_descriptor, 'syntax') and + message_descriptor.syntax == 'proto3') + + def _MergeScalarField(self, tokenizer, message, field): + """Merges a single scalar field into a message. + + Args: + tokenizer: A tokenizer to parse the field value. + message: A protocol message to record the data. + field: The descriptor of the field to be merged. 
+ + Raises: + ParseError: In case of text parsing problems. + RuntimeError: On runtime errors. + """ + _ = self.allow_unknown_extension + value = None + + if field.type in (descriptor.FieldDescriptor.TYPE_INT32, + descriptor.FieldDescriptor.TYPE_SINT32, + descriptor.FieldDescriptor.TYPE_SFIXED32): + value = _ConsumeInt32(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_INT64, + descriptor.FieldDescriptor.TYPE_SINT64, + descriptor.FieldDescriptor.TYPE_SFIXED64): + value = _ConsumeInt64(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32, + descriptor.FieldDescriptor.TYPE_FIXED32): + value = _ConsumeUint32(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64, + descriptor.FieldDescriptor.TYPE_FIXED64): + value = _ConsumeUint64(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT, + descriptor.FieldDescriptor.TYPE_DOUBLE): + value = tokenizer.ConsumeFloat() + elif field.type == descriptor.FieldDescriptor.TYPE_BOOL: + value = tokenizer.ConsumeBool() + elif field.type == descriptor.FieldDescriptor.TYPE_STRING: + value = tokenizer.ConsumeString() + elif field.type == descriptor.FieldDescriptor.TYPE_BYTES: + value = tokenizer.ConsumeByteString() + elif field.type == descriptor.FieldDescriptor.TYPE_ENUM: + value = tokenizer.ConsumeEnum(field) + else: + raise RuntimeError('Unknown field type %d' % field.type) + + if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if field.is_extension: + message.Extensions[field].append(value) + else: + getattr(message, field.name).append(value) + else: + if field.is_extension: + if (not self._allow_multiple_scalars and + not self._IsProto3Syntax(message) and + message.HasExtension(field)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" extensions.' % + (message.DESCRIPTOR.full_name, field.full_name)) + else: + message.Extensions[field] = value + else: + duplicate_error = False + if not self._allow_multiple_scalars: + if self._IsProto3Syntax(message): + # Proto3 doesn't represent presence so we try best effort to check + # multiple scalars by compare to default values. + duplicate_error = bool(getattr(message, field.name)) + else: + duplicate_error = message.HasField(field.name) + + if duplicate_error: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" fields.' % + (message.DESCRIPTOR.full_name, field.name)) + else: + setattr(message, field.name, value) + + +def _SkipFieldContents(tokenizer): + """Skips over contents (value or message) of a field. + + Args: + tokenizer: A tokenizer to parse the field name and values. + """ + # Try to guess the type of this field. + # If this field is not a message, there should be a ":" between the + # field name and the field value and also the field value should not + # start with "{" or "<" which indicates the beginning of a message body. + # If there is no ":" or there is a "{" or "<" after ":", this field has + # to be a message or the input is ill-formed. + if tokenizer.TryConsume(':') and not tokenizer.LookingAt( + '{') and not tokenizer.LookingAt('<'): + _SkipFieldValue(tokenizer) + else: + _SkipFieldMessage(tokenizer) + + +def _SkipField(tokenizer): + """Skips over a complete field (name and value/message). + + Args: + tokenizer: A tokenizer to parse the field name and values. + """ + if tokenizer.TryConsume('['): + # Consume extension name. 
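+    # Extension names are bracketed, dotted identifiers, e.g. the illustrative
+    # "[my.package.my_ext]": consume the leading identifier, then any
+    # ".identifier" segments, then the closing bracket.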
+ tokenizer.ConsumeIdentifier() + while tokenizer.TryConsume('.'): + tokenizer.ConsumeIdentifier() + tokenizer.Consume(']') + else: + tokenizer.ConsumeIdentifierOrNumber() + + _SkipFieldContents(tokenizer) + + # For historical reasons, fields may optionally be separated by commas or + # semicolons. + if not tokenizer.TryConsume(','): + tokenizer.TryConsume(';') + + +def _SkipFieldMessage(tokenizer): + """Skips over a field message. + + Args: + tokenizer: A tokenizer to parse the field name and values. + """ + + if tokenizer.TryConsume('<'): + delimiter = '>' + else: + tokenizer.Consume('{') + delimiter = '}' + + while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'): + _SkipField(tokenizer) + + tokenizer.Consume(delimiter) + + +def _SkipFieldValue(tokenizer): + """Skips over a field value. + + Args: + tokenizer: A tokenizer to parse the field name and values. + + Raises: + ParseError: In case an invalid field value is found. + """ + # String/bytes tokens can come in multiple adjacent string literals. + # If we can consume one, consume as many as we can. + if tokenizer.TryConsumeByteString(): + while tokenizer.TryConsumeByteString(): + pass + return + + if (not tokenizer.TryConsumeIdentifier() and + not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and + not tokenizer.TryConsumeFloat()): + raise ParseError('Invalid field value: ' + tokenizer.token) + + +class Tokenizer(object): + """Protocol buffer text representation tokenizer. + + This class handles the lower level string parsing by splitting it into + meaningful tokens. + + It was directly ported from the Java protocol buffer API. + """ + + _WHITESPACE = re.compile(r'\s+') + _COMMENT = re.compile(r'(\s*#.*$)', re.MULTILINE) + _WHITESPACE_OR_COMMENT = re.compile(r'(\s|(#.*$))+', re.MULTILINE) + _TOKEN = re.compile('|'.join([ + r'[a-zA-Z_][0-9a-zA-Z_+-]*', # an identifier + r'([0-9+-]|(\.[0-9]))[0-9a-zA-Z_.+-]*', # a number + ] + [ # quoted str for each quote mark + # Avoid backtracking! https://stackoverflow.com/a/844267 + r'{qt}[^{qt}\n\\]*((\\.)+[^{qt}\n\\]*)*({qt}|\\?$)'.format(qt=mark) + for mark in _QUOTES + ])) + + _IDENTIFIER = re.compile(r'[^\d\W]\w*') + _IDENTIFIER_OR_NUMBER = re.compile(r'\w+') + + def __init__(self, lines, skip_comments=True): + self._position = 0 + self._line = -1 + self._column = 0 + self._token_start = None + self.token = '' + self._lines = iter(lines) + self._current_line = '' + self._previous_line = 0 + self._previous_column = 0 + self._more_lines = True + self._skip_comments = skip_comments + self._whitespace_pattern = (skip_comments and self._WHITESPACE_OR_COMMENT + or self._WHITESPACE) + self._SkipWhitespace() + self.NextToken() + + def LookingAt(self, token): + return self.token == token + + def AtEnd(self): + """Checks the end of the text was reached. + + Returns: + True iff the end was reached. + """ + return not self.token + + def _PopLine(self): + while len(self._current_line) <= self._column: + try: + self._current_line = next(self._lines) + except StopIteration: + self._current_line = '' + self._more_lines = False + return + else: + self._line += 1 + self._column = 0 + + def _SkipWhitespace(self): + while True: + self._PopLine() + match = self._whitespace_pattern.match(self._current_line, self._column) + if not match: + break + length = len(match.group(0)) + self._column += length + + def TryConsume(self, token): + """Tries to consume a given piece of text. + + Args: + token: Text to consume. + + Returns: + True iff the text was consumed. 
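+
+    Example (illustrative sketch only):
+      if tokenizer.TryConsume('['):
+        ...  # an extension name or Any type URL follows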
+ """ + if self.token == token: + self.NextToken() + return True + return False + + def Consume(self, token): + """Consumes a piece of text. + + Args: + token: Text to consume. + + Raises: + ParseError: If the text couldn't be consumed. + """ + if not self.TryConsume(token): + raise self.ParseError('Expected "%s".' % token) + + def ConsumeComment(self): + result = self.token + if not self._COMMENT.match(result): + raise self.ParseError('Expected comment.') + self.NextToken() + return result + + def ConsumeCommentOrTrailingComment(self): + """Consumes a comment, returns a 2-tuple (trailing bool, comment str).""" + + # Tokenizer initializes _previous_line and _previous_column to 0. As the + # tokenizer starts, it looks like there is a previous token on the line. + just_started = self._line == 0 and self._column == 0 + + before_parsing = self._previous_line + comment = self.ConsumeComment() + + # A trailing comment is a comment on the same line than the previous token. + trailing = (self._previous_line == before_parsing + and not just_started) + + return trailing, comment + + def TryConsumeIdentifier(self): + try: + self.ConsumeIdentifier() + return True + except ParseError: + return False + + def ConsumeIdentifier(self): + """Consumes protocol message field identifier. + + Returns: + Identifier string. + + Raises: + ParseError: If an identifier couldn't be consumed. + """ + result = self.token + if not self._IDENTIFIER.match(result): + raise self.ParseError('Expected identifier.') + self.NextToken() + return result + + def TryConsumeIdentifierOrNumber(self): + try: + self.ConsumeIdentifierOrNumber() + return True + except ParseError: + return False + + def ConsumeIdentifierOrNumber(self): + """Consumes protocol message field identifier. + + Returns: + Identifier string. + + Raises: + ParseError: If an identifier couldn't be consumed. + """ + result = self.token + if not self._IDENTIFIER_OR_NUMBER.match(result): + raise self.ParseError('Expected identifier or number, got %s.' % result) + self.NextToken() + return result + + def TryConsumeInteger(self): + try: + self.ConsumeInteger() + return True + except ParseError: + return False + + def ConsumeInteger(self): + """Consumes an integer number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an integer couldn't be consumed. + """ + try: + result = _ParseAbstractInteger(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def TryConsumeFloat(self): + try: + self.ConsumeFloat() + return True + except ParseError: + return False + + def ConsumeFloat(self): + """Consumes an floating point number. + + Returns: + The number parsed. + + Raises: + ParseError: If a floating point number couldn't be consumed. + """ + try: + result = ParseFloat(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ConsumeBool(self): + """Consumes a boolean value. + + Returns: + The bool parsed. + + Raises: + ParseError: If a boolean value couldn't be consumed. + """ + try: + result = ParseBool(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def TryConsumeByteString(self): + try: + self.ConsumeByteString() + return True + except ParseError: + return False + + def ConsumeString(self): + """Consumes a string value. + + Returns: + The string parsed. + + Raises: + ParseError: If a string value couldn't be consumed. 
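+
+    Note:
+      Adjacent quoted literals are concatenated (as in C or Python), so the
+      token sequence "foo" "bar" is returned as the single string "foobar".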
+ """ + the_bytes = self.ConsumeByteString() + try: + return str(the_bytes, 'utf-8') + except UnicodeDecodeError as e: + raise self._StringParseError(e) + + def ConsumeByteString(self): + """Consumes a byte array value. + + Returns: + The array parsed (as a string). + + Raises: + ParseError: If a byte array value couldn't be consumed. + """ + the_list = [self._ConsumeSingleByteString()] + while self.token and self.token[0] in _QUOTES: + the_list.append(self._ConsumeSingleByteString()) + return b''.join(the_list) + + def _ConsumeSingleByteString(self): + """Consume one token of a string literal. + + String literals (whether bytes or text) can come in multiple adjacent + tokens which are automatically concatenated, like in C or Python. This + method only consumes one token. + + Returns: + The token parsed. + Raises: + ParseError: When the wrong format data is found. + """ + text = self.token + if len(text) < 1 or text[0] not in _QUOTES: + raise self.ParseError('Expected string but found: %r' % (text,)) + + if len(text) < 2 or text[-1] != text[0]: + raise self.ParseError('String missing ending quote: %r' % (text,)) + + try: + result = text_encoding.CUnescape(text[1:-1]) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ConsumeEnum(self, field): + try: + result = ParseEnum(field, self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ParseErrorPreviousToken(self, message): + """Creates and *returns* a ParseError for the previously read token. + + Args: + message: A message to set for the exception. + + Returns: + A ParseError instance. + """ + return ParseError(message, self._previous_line + 1, + self._previous_column + 1) + + def ParseError(self, message): + """Creates and *returns* a ParseError for the current token.""" + return ParseError('\'' + self._current_line + '\': ' + message, + self._line + 1, self._column + 1) + + def _StringParseError(self, e): + return self.ParseError('Couldn\'t parse string: ' + str(e)) + + def NextToken(self): + """Reads the next meaningful token.""" + self._previous_line = self._line + self._previous_column = self._column + + self._column += len(self.token) + self._SkipWhitespace() + + if not self._more_lines: + self.token = '' + return + + match = self._TOKEN.match(self._current_line, self._column) + if not match and not self._skip_comments: + match = self._COMMENT.match(self._current_line, self._column) + if match: + token = match.group(0) + self.token = token + else: + self.token = self._current_line[self._column] + +# Aliased so it can still be accessed by current visibility violators. +# TODO(dbarnett): Migrate violators to textformat_tokenizer. +_Tokenizer = Tokenizer # pylint: disable=invalid-name + + +def _ConsumeInt32(tokenizer): + """Consumes a signed 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If a signed 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=True, is_long=False) + + +def _ConsumeUint32(tokenizer): + """Consumes an unsigned 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an unsigned 32bit integer couldn't be consumed. 
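+
+  Note:
+    Range checking is delegated to ParseInteger(), which rejects values
+    outside the unsigned 32-bit range.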
+ """ + return _ConsumeInteger(tokenizer, is_signed=False, is_long=False) + + +def _TryConsumeInt64(tokenizer): + try: + _ConsumeInt64(tokenizer) + return True + except ParseError: + return False + + +def _ConsumeInt64(tokenizer): + """Consumes a signed 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If a signed 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=True, is_long=True) + + +def _TryConsumeUint64(tokenizer): + try: + _ConsumeUint64(tokenizer) + return True + except ParseError: + return False + + +def _ConsumeUint64(tokenizer): + """Consumes an unsigned 64bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an unsigned 64bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=False, is_long=True) + + +def _ConsumeInteger(tokenizer, is_signed=False, is_long=False): + """Consumes an integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + is_signed: True if a signed integer must be parsed. + is_long: True if a long integer must be parsed. + + Returns: + The integer parsed. + + Raises: + ParseError: If an integer with given characteristics couldn't be consumed. + """ + try: + result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long) + except ValueError as e: + raise tokenizer.ParseError(str(e)) + tokenizer.NextToken() + return result + + +def ParseInteger(text, is_signed=False, is_long=False): + """Parses an integer. + + Args: + text: The text to parse. + is_signed: True if a signed integer must be parsed. + is_long: True if a long integer must be parsed. + + Returns: + The integer value. + + Raises: + ValueError: Thrown Iff the text is not a valid integer. + """ + # Do the actual parsing. Exception handling is propagated to caller. + result = _ParseAbstractInteger(text) + + # Check if the integer is sane. Exceptions handled by callers. + checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] + checker.CheckValue(result) + return result + + +def _ParseAbstractInteger(text): + """Parses an integer without checking size/signedness. + + Args: + text: The text to parse. + + Returns: + The integer value. + + Raises: + ValueError: Thrown Iff the text is not a valid integer. + """ + # Do the actual parsing. Exception handling is propagated to caller. + orig_text = text + c_octal_match = re.match(r'(-?)0(\d+)$', text) + if c_octal_match: + # Python 3 no longer supports 0755 octal syntax without the 'o', so + # we always use the '0o' prefix for multi-digit numbers starting with 0. + text = c_octal_match.group(1) + '0o' + c_octal_match.group(2) + try: + return int(text, 0) + except ValueError: + raise ValueError('Couldn\'t parse integer: %s' % orig_text) + + +def ParseFloat(text): + """Parse a floating point number. + + Args: + text: Text to parse. + + Returns: + The number parsed. + + Raises: + ValueError: If a floating point number couldn't be parsed. + """ + try: + # Assume Python compatible syntax. + return float(text) + except ValueError: + # Check alternative spellings. 
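+    # Spellings handled below: signed or unsigned infinity (matched by
+    # _FLOAT_INFINITY), "nan" (matched by _FLOAT_NAN), and C-style literals
+    # with a trailing 'f' suffix such as "1.0f".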
+ if _FLOAT_INFINITY.match(text): + if text[0] == '-': + return float('-inf') + else: + return float('inf') + elif _FLOAT_NAN.match(text): + return float('nan') + else: + # assume '1.0f' format + try: + return float(text.rstrip('f')) + except ValueError: + raise ValueError('Couldn\'t parse float: %s' % text) + + +def ParseBool(text): + """Parse a boolean value. + + Args: + text: Text to parse. + + Returns: + Boolean values parsed + + Raises: + ValueError: If text is not a valid boolean. + """ + if text in ('true', 't', '1', 'True'): + return True + elif text in ('false', 'f', '0', 'False'): + return False + else: + raise ValueError('Expected "true" or "false".') + + +def ParseEnum(field, value): + """Parse an enum value. + + The value can be specified by a number (the enum value), or by + a string literal (the enum name). + + Args: + field: Enum field descriptor. + value: String value. + + Returns: + Enum value number. + + Raises: + ValueError: If the enum value could not be parsed. + """ + enum_descriptor = field.enum_type + try: + number = int(value, 0) + except ValueError: + # Identifier. + enum_value = enum_descriptor.values_by_name.get(value, None) + if enum_value is None: + raise ValueError('Enum type "%s" has no value named %s.' % + (enum_descriptor.full_name, value)) + else: + # Numeric value. + if hasattr(field.file, 'syntax'): + # Attribute is checked for compatibility. + if field.file.syntax == 'proto3': + # Proto3 accept numeric unknown enums. + return number + enum_value = enum_descriptor.values_by_number.get(number, None) + if enum_value is None: + raise ValueError('Enum type "%s" has no value with number %d.' % + (enum_descriptor.full_name, number)) + return enum_value.number diff --git a/openpype/hosts/hiero/vendor/google/protobuf/timestamp_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/timestamp_pb2.py new file mode 100644 index 0000000000..558d496941 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/timestamp_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/timestamp.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\"+\n\tTimestamp\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x85\x01\n\x13\x63om.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.timestamp_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\016TimestampProtoP\001Z2google.golang.org/protobuf/types/known/timestamppb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _TIMESTAMP._serialized_start=52 + _TIMESTAMP._serialized_end=95 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/type_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/type_pb2.py new file mode 100644 index 0000000000..19903fb6b4 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/type_pb2.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/type.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1agoogle/protobuf/type.proto\x12\x0fgoogle.protobuf\x1a\x19google/protobuf/any.proto\x1a$google/protobuf/source_context.proto\"\xd7\x01\n\x04Type\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Field\x12\x0e\n\x06oneofs\x18\x03 \x03(\t\x12(\n\x07options\x18\x04 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x06 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x05\n\x05\x46ield\x12)\n\x04kind\x18\x01 \x01(\x0e\x32\x1b.google.protobuf.Field.Kind\x12\x37\n\x0b\x63\x61rdinality\x18\x02 \x01(\x0e\x32\".google.protobuf.Field.Cardinality\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x10\n\x08type_url\x18\x06 \x01(\t\x12\x13\n\x0boneof_index\x18\x07 \x01(\x05\x12\x0e\n\x06packed\x18\x08 \x01(\x08\x12(\n\x07options\x18\t \x03(\x0b\x32\x17.google.protobuf.Option\x12\x11\n\tjson_name\x18\n \x01(\t\x12\x15\n\rdefault_value\x18\x0b 
\x01(\t\"\xc8\x02\n\x04Kind\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"t\n\x0b\x43\x61rdinality\x12\x17\n\x13\x43\x41RDINALITY_UNKNOWN\x10\x00\x12\x18\n\x14\x43\x41RDINALITY_OPTIONAL\x10\x01\x12\x18\n\x14\x43\x41RDINALITY_REQUIRED\x10\x02\x12\x18\n\x14\x43\x41RDINALITY_REPEATED\x10\x03\"\xce\x01\n\x04\x45num\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\tenumvalue\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.EnumValue\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x05 \x01(\x0e\x32\x17.google.protobuf.Syntax\"S\n\tEnumValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\";\n\x06Option\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any*.\n\x06Syntax\x12\x11\n\rSYNTAX_PROTO2\x10\x00\x12\x11\n\rSYNTAX_PROTO3\x10\x01\x42{\n\x13\x63om.google.protobufB\tTypeProtoP\x01Z-google.golang.org/protobuf/types/known/typepb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.type_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\tTypeProtoP\001Z-google.golang.org/protobuf/types/known/typepb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _SYNTAX._serialized_start=1413 + _SYNTAX._serialized_end=1459 + _TYPE._serialized_start=113 + _TYPE._serialized_end=328 + _FIELD._serialized_start=331 + _FIELD._serialized_end=1056 + _FIELD_KIND._serialized_start=610 + _FIELD_KIND._serialized_end=938 + _FIELD_CARDINALITY._serialized_start=940 + _FIELD_CARDINALITY._serialized_end=1056 + _ENUM._serialized_start=1059 + _ENUM._serialized_end=1265 + _ENUMVALUE._serialized_start=1267 + _ENUMVALUE._serialized_end=1350 + _OPTION._serialized_start=1352 + _OPTION._serialized_end=1411 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/util/__init__.py b/openpype/hosts/hiero/vendor/google/protobuf/util/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/hiero/vendor/google/protobuf/util/json_format_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/util/json_format_pb2.py new file mode 100644 index 0000000000..66a5836c82 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/util/json_format_pb2.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/util/json_format.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&google/protobuf/util/json_format.proto\x12\x11protobuf_unittest\"\x89\x01\n\x13TestFlagsAndStrings\x12\t\n\x01\x41\x18\x01 \x02(\x05\x12K\n\rrepeatedgroup\x18\x02 \x03(\n24.protobuf_unittest.TestFlagsAndStrings.RepeatedGroup\x1a\x1a\n\rRepeatedGroup\x12\t\n\x01\x66\x18\x03 \x02(\t\"!\n\x14TestBase64ByteArrays\x12\t\n\x01\x61\x18\x01 \x02(\x0c\"G\n\x12TestJavaScriptJSON\x12\t\n\x01\x61\x18\x01 \x01(\x05\x12\r\n\x05\x66inal\x18\x02 \x01(\x02\x12\n\n\x02in\x18\x03 \x01(\t\x12\x0b\n\x03Var\x18\x04 \x01(\t\"Q\n\x18TestJavaScriptOrderJSON1\x12\t\n\x01\x64\x18\x01 \x01(\x05\x12\t\n\x01\x63\x18\x02 \x01(\x05\x12\t\n\x01x\x18\x03 \x01(\x08\x12\t\n\x01\x62\x18\x04 \x01(\x05\x12\t\n\x01\x61\x18\x05 \x01(\x05\"\x89\x01\n\x18TestJavaScriptOrderJSON2\x12\t\n\x01\x64\x18\x01 \x01(\x05\x12\t\n\x01\x63\x18\x02 \x01(\x05\x12\t\n\x01x\x18\x03 \x01(\x08\x12\t\n\x01\x62\x18\x04 \x01(\x05\x12\t\n\x01\x61\x18\x05 \x01(\x05\x12\x36\n\x01z\x18\x06 \x03(\x0b\x32+.protobuf_unittest.TestJavaScriptOrderJSON1\"$\n\x0cTestLargeInt\x12\t\n\x01\x61\x18\x01 \x02(\x03\x12\t\n\x01\x62\x18\x02 \x02(\x04\"\xa0\x01\n\x0bTestNumbers\x12\x30\n\x01\x61\x18\x01 \x01(\x0e\x32%.protobuf_unittest.TestNumbers.MyType\x12\t\n\x01\x62\x18\x02 \x01(\x05\x12\t\n\x01\x63\x18\x03 \x01(\x02\x12\t\n\x01\x64\x18\x04 \x01(\x08\x12\t\n\x01\x65\x18\x05 \x01(\x01\x12\t\n\x01\x66\x18\x06 \x01(\r\"(\n\x06MyType\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07WARNING\x10\x01\x12\t\n\x05\x45RROR\x10\x02\"T\n\rTestCamelCase\x12\x14\n\x0cnormal_field\x18\x01 \x01(\t\x12\x15\n\rCAPITAL_FIELD\x18\x02 \x01(\x05\x12\x16\n\x0e\x43\x61melCaseField\x18\x03 \x01(\x05\"|\n\x0bTestBoolMap\x12=\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32+.protobuf_unittest.TestBoolMap.BoolMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"O\n\rTestRecursion\x12\r\n\x05value\x18\x01 \x01(\x05\x12/\n\x05\x63hild\x18\x02 \x01(\x0b\x32 .protobuf_unittest.TestRecursion\"\x86\x01\n\rTestStringMap\x12\x43\n\nstring_map\x18\x01 \x03(\x0b\x32/.protobuf_unittest.TestStringMap.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc4\x01\n\x14TestStringSerializer\x12\x15\n\rscalar_string\x18\x01 \x01(\t\x12\x17\n\x0frepeated_string\x18\x02 \x03(\t\x12J\n\nstring_map\x18\x03 \x03(\x0b\x32\x36.protobuf_unittest.TestStringSerializer.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"$\n\x18TestMessageWithExtension*\x08\x08\x64\x10\x80\x80\x80\x80\x02\"z\n\rTestExtension\x12\r\n\x05value\x18\x01 \x01(\t2Z\n\x03\x65xt\x12+.protobuf_unittest.TestMessageWithExtension\x18\x64 \x01(\x0b\x32 .protobuf_unittest.TestExtension\"Q\n\x14TestDefaultEnumValue\x12\x39\n\nenum_value\x18\x01 \x01(\x0e\x32\x1c.protobuf_unittest.EnumValue:\x07\x44\x45\x46\x41ULT*2\n\tEnumValue\x12\x0c\n\x08PROTOCOL\x10\x00\x12\n\n\x06\x42UFFER\x10\x01\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x02') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) 
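+# Readability note for this vendored, generated module: the call above creates
+# the message/enum descriptor objects declared in the serialized file, and the
+# call below generates the corresponding Python message classes and injects
+# them into this module's globals().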
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.util.json_format_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + TestMessageWithExtension.RegisterExtension(_TESTEXTENSION.extensions_by_name['ext']) + + DESCRIPTOR._options = None + _TESTBOOLMAP_BOOLMAPENTRY._options = None + _TESTBOOLMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGMAP_STRINGMAPENTRY._options = None + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._options = None + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_options = b'8\001' + _ENUMVALUE._serialized_start=1607 + _ENUMVALUE._serialized_end=1657 + _TESTFLAGSANDSTRINGS._serialized_start=62 + _TESTFLAGSANDSTRINGS._serialized_end=199 + _TESTFLAGSANDSTRINGS_REPEATEDGROUP._serialized_start=173 + _TESTFLAGSANDSTRINGS_REPEATEDGROUP._serialized_end=199 + _TESTBASE64BYTEARRAYS._serialized_start=201 + _TESTBASE64BYTEARRAYS._serialized_end=234 + _TESTJAVASCRIPTJSON._serialized_start=236 + _TESTJAVASCRIPTJSON._serialized_end=307 + _TESTJAVASCRIPTORDERJSON1._serialized_start=309 + _TESTJAVASCRIPTORDERJSON1._serialized_end=390 + _TESTJAVASCRIPTORDERJSON2._serialized_start=393 + _TESTJAVASCRIPTORDERJSON2._serialized_end=530 + _TESTLARGEINT._serialized_start=532 + _TESTLARGEINT._serialized_end=568 + _TESTNUMBERS._serialized_start=571 + _TESTNUMBERS._serialized_end=731 + _TESTNUMBERS_MYTYPE._serialized_start=691 + _TESTNUMBERS_MYTYPE._serialized_end=731 + _TESTCAMELCASE._serialized_start=733 + _TESTCAMELCASE._serialized_end=817 + _TESTBOOLMAP._serialized_start=819 + _TESTBOOLMAP._serialized_end=943 + _TESTBOOLMAP_BOOLMAPENTRY._serialized_start=897 + _TESTBOOLMAP_BOOLMAPENTRY._serialized_end=943 + _TESTRECURSION._serialized_start=945 + _TESTRECURSION._serialized_end=1024 + _TESTSTRINGMAP._serialized_start=1027 + _TESTSTRINGMAP._serialized_end=1161 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_start=1113 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_end=1161 + _TESTSTRINGSERIALIZER._serialized_start=1164 + _TESTSTRINGSERIALIZER._serialized_end=1360 + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_start=1113 + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_end=1161 + _TESTMESSAGEWITHEXTENSION._serialized_start=1362 + _TESTMESSAGEWITHEXTENSION._serialized_end=1398 + _TESTEXTENSION._serialized_start=1400 + _TESTEXTENSION._serialized_end=1522 + _TESTDEFAULTENUMVALUE._serialized_start=1524 + _TESTDEFAULTENUMVALUE._serialized_end=1605 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/hiero/vendor/google/protobuf/util/json_format_proto3_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/util/json_format_proto3_pb2.py new file mode 100644 index 0000000000..5498deafa9 --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/util/json_format_proto3_pb2.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/util/json_format_proto3.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 +from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.protobuf import unittest_pb2 as google_dot_protobuf_dot_unittest__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n-google/protobuf/util/json_format_proto3.proto\x12\x06proto3\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1egoogle/protobuf/unittest.proto\"\x1c\n\x0bMessageType\x12\r\n\x05value\x18\x01 \x01(\x05\"\x94\x05\n\x0bTestMessage\x12\x12\n\nbool_value\x18\x01 \x01(\x08\x12\x13\n\x0bint32_value\x18\x02 \x01(\x05\x12\x13\n\x0bint64_value\x18\x03 \x01(\x03\x12\x14\n\x0cuint32_value\x18\x04 \x01(\r\x12\x14\n\x0cuint64_value\x18\x05 \x01(\x04\x12\x13\n\x0b\x66loat_value\x18\x06 \x01(\x02\x12\x14\n\x0c\x64ouble_value\x18\x07 \x01(\x01\x12\x14\n\x0cstring_value\x18\x08 \x01(\t\x12\x13\n\x0b\x62ytes_value\x18\t \x01(\x0c\x12$\n\nenum_value\x18\n \x01(\x0e\x32\x10.proto3.EnumType\x12*\n\rmessage_value\x18\x0b \x01(\x0b\x32\x13.proto3.MessageType\x12\x1b\n\x13repeated_bool_value\x18\x15 \x03(\x08\x12\x1c\n\x14repeated_int32_value\x18\x16 \x03(\x05\x12\x1c\n\x14repeated_int64_value\x18\x17 \x03(\x03\x12\x1d\n\x15repeated_uint32_value\x18\x18 \x03(\r\x12\x1d\n\x15repeated_uint64_value\x18\x19 \x03(\x04\x12\x1c\n\x14repeated_float_value\x18\x1a \x03(\x02\x12\x1d\n\x15repeated_double_value\x18\x1b \x03(\x01\x12\x1d\n\x15repeated_string_value\x18\x1c \x03(\t\x12\x1c\n\x14repeated_bytes_value\x18\x1d \x03(\x0c\x12-\n\x13repeated_enum_value\x18\x1e \x03(\x0e\x32\x10.proto3.EnumType\x12\x33\n\x16repeated_message_value\x18\x1f \x03(\x0b\x32\x13.proto3.MessageType\"\x8c\x02\n\tTestOneof\x12\x1b\n\x11oneof_int32_value\x18\x01 \x01(\x05H\x00\x12\x1c\n\x12oneof_string_value\x18\x02 \x01(\tH\x00\x12\x1b\n\x11oneof_bytes_value\x18\x03 \x01(\x0cH\x00\x12,\n\x10oneof_enum_value\x18\x04 \x01(\x0e\x32\x10.proto3.EnumTypeH\x00\x12\x32\n\x13oneof_message_value\x18\x05 \x01(\x0b\x32\x13.proto3.MessageTypeH\x00\x12\x36\n\x10oneof_null_value\x18\x06 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x42\r\n\x0boneof_value\"\xe1\x04\n\x07TestMap\x12.\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32\x1c.proto3.TestMap.BoolMapEntry\x12\x30\n\tint32_map\x18\x02 \x03(\x0b\x32\x1d.proto3.TestMap.Int32MapEntry\x12\x30\n\tint64_map\x18\x03 \x03(\x0b\x32\x1d.proto3.TestMap.Int64MapEntry\x12\x32\n\nuint32_map\x18\x04 \x03(\x0b\x32\x1e.proto3.TestMap.Uint32MapEntry\x12\x32\n\nuint64_map\x18\x05 \x03(\x0b\x32\x1e.proto3.TestMap.Uint64MapEntry\x12\x32\n\nstring_map\x18\x06 
\x03(\x0b\x32\x1e.proto3.TestMap.StringMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\x85\x06\n\rTestNestedMap\x12\x34\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32\".proto3.TestNestedMap.BoolMapEntry\x12\x36\n\tint32_map\x18\x02 \x03(\x0b\x32#.proto3.TestNestedMap.Int32MapEntry\x12\x36\n\tint64_map\x18\x03 \x03(\x0b\x32#.proto3.TestNestedMap.Int64MapEntry\x12\x38\n\nuint32_map\x18\x04 \x03(\x0b\x32$.proto3.TestNestedMap.Uint32MapEntry\x12\x38\n\nuint64_map\x18\x05 \x03(\x0b\x32$.proto3.TestNestedMap.Uint64MapEntry\x12\x38\n\nstring_map\x18\x06 \x03(\x0b\x32$.proto3.TestNestedMap.StringMapEntry\x12\x32\n\x07map_map\x18\x07 \x03(\x0b\x32!.proto3.TestNestedMap.MapMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x44\n\x0bMapMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.proto3.TestNestedMap:\x02\x38\x01\"{\n\rTestStringMap\x12\x38\n\nstring_map\x18\x01 \x03(\x0b\x32$.proto3.TestStringMap.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xee\x07\n\x0bTestWrapper\x12.\n\nbool_value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\x0bint32_value\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x30\n\x0bint64_value\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x32\n\x0cuint32_value\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x32\n\x0cuint64_value\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x30\n\x0b\x66loat_value\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.FloatValue\x12\x32\n\x0c\x64ouble_value\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x32\n\x0cstring_value\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0b\x62ytes_value\x18\t \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x37\n\x13repeated_bool_value\x18\x0b \x03(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x14repeated_int32_value\x18\x0c \x03(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x39\n\x14repeated_int64_value\x18\r \x03(\x0b\x32\x1b.google.protobuf.Int64Value\x12;\n\x15repeated_uint32_value\x18\x0e \x03(\x0b\x32\x1c.google.protobuf.UInt32Value\x12;\n\x15repeated_uint64_value\x18\x0f \x03(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x39\n\x14repeated_float_value\x18\x10 
\x03(\x0b\x32\x1b.google.protobuf.FloatValue\x12;\n\x15repeated_double_value\x18\x11 \x03(\x0b\x32\x1c.google.protobuf.DoubleValue\x12;\n\x15repeated_string_value\x18\x12 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x14repeated_bytes_value\x18\x13 \x03(\x0b\x32\x1b.google.protobuf.BytesValue\"n\n\rTestTimestamp\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.Timestamp\"k\n\x0cTestDuration\x12(\n\x05value\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x31\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x19.google.protobuf.Duration\":\n\rTestFieldMask\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"e\n\nTestStruct\x12&\n\x05value\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12/\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x17.google.protobuf.Struct\"\\\n\x07TestAny\x12#\n\x05value\x18\x01 \x01(\x0b\x32\x14.google.protobuf.Any\x12,\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x14.google.protobuf.Any\"b\n\tTestValue\x12%\n\x05value\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Value\x12.\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Value\"n\n\rTestListValue\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.ListValue\x12\x32\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.ListValue\"\x89\x01\n\rTestBoolValue\x12\x12\n\nbool_value\x18\x01 \x01(\x08\x12\x34\n\x08\x62ool_map\x18\x02 \x03(\x0b\x32\".proto3.TestBoolValue.BoolMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"+\n\x12TestCustomJsonName\x12\x15\n\x05value\x18\x01 \x01(\x05R\x06@value\"J\n\x0eTestExtensions\x12\x38\n\nextensions\x18\x01 \x01(\x0b\x32$.protobuf_unittest.TestAllExtensions\"\x84\x01\n\rTestEnumValue\x12%\n\x0b\x65num_value1\x18\x01 \x01(\x0e\x32\x10.proto3.EnumType\x12%\n\x0b\x65num_value2\x18\x02 \x01(\x0e\x32\x10.proto3.EnumType\x12%\n\x0b\x65num_value3\x18\x03 \x01(\x0e\x32\x10.proto3.EnumType*\x1c\n\x08\x45numType\x12\x07\n\x03\x46OO\x10\x00\x12\x07\n\x03\x42\x41R\x10\x01\x42,\n\x18\x63om.google.protobuf.utilB\x10JsonFormatProto3b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.util.json_format_proto3_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030com.google.protobuf.utilB\020JsonFormatProto3' + _TESTMAP_BOOLMAPENTRY._options = None + _TESTMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTMAP_INT32MAPENTRY._options = None + _TESTMAP_INT32MAPENTRY._serialized_options = b'8\001' + _TESTMAP_INT64MAPENTRY._options = None + _TESTMAP_INT64MAPENTRY._serialized_options = b'8\001' + _TESTMAP_UINT32MAPENTRY._options = None + _TESTMAP_UINT32MAPENTRY._serialized_options = b'8\001' + _TESTMAP_UINT64MAPENTRY._options = None + _TESTMAP_UINT64MAPENTRY._serialized_options = b'8\001' + _TESTMAP_STRINGMAPENTRY._options = None + _TESTMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_BOOLMAPENTRY._options = None + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_INT32MAPENTRY._options = None + _TESTNESTEDMAP_INT32MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_INT64MAPENTRY._options = None + _TESTNESTEDMAP_INT64MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_UINT32MAPENTRY._options = None + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_options = b'8\001' + 
_TESTNESTEDMAP_UINT64MAPENTRY._options = None + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_STRINGMAPENTRY._options = None + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_MAPMAPENTRY._options = None + _TESTNESTEDMAP_MAPMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGMAP_STRINGMAPENTRY._options = None + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTBOOLVALUE_BOOLMAPENTRY._options = None + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_options = b'8\001' + _ENUMTYPE._serialized_start=4849 + _ENUMTYPE._serialized_end=4877 + _MESSAGETYPE._serialized_start=277 + _MESSAGETYPE._serialized_end=305 + _TESTMESSAGE._serialized_start=308 + _TESTMESSAGE._serialized_end=968 + _TESTONEOF._serialized_start=971 + _TESTONEOF._serialized_end=1239 + _TESTMAP._serialized_start=1242 + _TESTMAP._serialized_end=1851 + _TESTMAP_BOOLMAPENTRY._serialized_start=1557 + _TESTMAP_BOOLMAPENTRY._serialized_end=1603 + _TESTMAP_INT32MAPENTRY._serialized_start=1605 + _TESTMAP_INT32MAPENTRY._serialized_end=1652 + _TESTMAP_INT64MAPENTRY._serialized_start=1654 + _TESTMAP_INT64MAPENTRY._serialized_end=1701 + _TESTMAP_UINT32MAPENTRY._serialized_start=1703 + _TESTMAP_UINT32MAPENTRY._serialized_end=1751 + _TESTMAP_UINT64MAPENTRY._serialized_start=1753 + _TESTMAP_UINT64MAPENTRY._serialized_end=1801 + _TESTMAP_STRINGMAPENTRY._serialized_start=1803 + _TESTMAP_STRINGMAPENTRY._serialized_end=1851 + _TESTNESTEDMAP._serialized_start=1854 + _TESTNESTEDMAP._serialized_end=2627 + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_start=1557 + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_end=1603 + _TESTNESTEDMAP_INT32MAPENTRY._serialized_start=1605 + _TESTNESTEDMAP_INT32MAPENTRY._serialized_end=1652 + _TESTNESTEDMAP_INT64MAPENTRY._serialized_start=1654 + _TESTNESTEDMAP_INT64MAPENTRY._serialized_end=1701 + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_start=1703 + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_end=1751 + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_start=1753 + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_end=1801 + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_start=1803 + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_end=1851 + _TESTNESTEDMAP_MAPMAPENTRY._serialized_start=2559 + _TESTNESTEDMAP_MAPMAPENTRY._serialized_end=2627 + _TESTSTRINGMAP._serialized_start=2629 + _TESTSTRINGMAP._serialized_end=2752 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_start=2704 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_end=2752 + _TESTWRAPPER._serialized_start=2755 + _TESTWRAPPER._serialized_end=3761 + _TESTTIMESTAMP._serialized_start=3763 + _TESTTIMESTAMP._serialized_end=3873 + _TESTDURATION._serialized_start=3875 + _TESTDURATION._serialized_end=3982 + _TESTFIELDMASK._serialized_start=3984 + _TESTFIELDMASK._serialized_end=4042 + _TESTSTRUCT._serialized_start=4044 + _TESTSTRUCT._serialized_end=4145 + _TESTANY._serialized_start=4147 + _TESTANY._serialized_end=4239 + _TESTVALUE._serialized_start=4241 + _TESTVALUE._serialized_end=4339 + _TESTLISTVALUE._serialized_start=4341 + _TESTLISTVALUE._serialized_end=4451 + _TESTBOOLVALUE._serialized_start=4454 + _TESTBOOLVALUE._serialized_end=4591 + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_start=1557 + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_end=1603 + _TESTCUSTOMJSONNAME._serialized_start=4593 + _TESTCUSTOMJSONNAME._serialized_end=4636 + _TESTEXTENSIONS._serialized_start=4638 + _TESTEXTENSIONS._serialized_end=4712 + _TESTENUMVALUE._serialized_start=4715 + _TESTENUMVALUE._serialized_end=4847 +# @@protoc_insertion_point(module_scope) diff --git 
a/openpype/hosts/hiero/vendor/google/protobuf/wrappers_pb2.py b/openpype/hosts/hiero/vendor/google/protobuf/wrappers_pb2.py new file mode 100644 index 0000000000..e49eb4c15d --- /dev/null +++ b/openpype/hosts/hiero/vendor/google/protobuf/wrappers_pb2.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/wrappers.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"\x1c\n\x0b\x44oubleValue\x12\r\n\x05value\x18\x01 \x01(\x01\"\x1b\n\nFloatValue\x12\r\n\x05value\x18\x01 \x01(\x02\"\x1b\n\nInt64Value\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1c\n\x0bUInt64Value\x12\r\n\x05value\x18\x01 \x01(\x04\"\x1b\n\nInt32Value\x12\r\n\x05value\x18\x01 \x01(\x05\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1b\n\nBytesValue\x12\r\n\x05value\x18\x01 \x01(\x0c\x42\x83\x01\n\x13\x63om.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.wrappers_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\rWrappersProtoP\001Z1google.golang.org/protobuf/types/known/wrapperspb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _DOUBLEVALUE._serialized_start=51 + _DOUBLEVALUE._serialized_end=79 + _FLOATVALUE._serialized_start=81 + _FLOATVALUE._serialized_end=108 + _INT64VALUE._serialized_start=110 + _INT64VALUE._serialized_end=137 + _UINT64VALUE._serialized_start=139 + _UINT64VALUE._serialized_end=167 + _INT32VALUE._serialized_start=169 + _INT32VALUE._serialized_end=196 + _UINT32VALUE._serialized_start=198 + _UINT32VALUE._serialized_end=226 + _BOOLVALUE._serialized_start=228 + _BOOLVALUE._serialized_end=254 + _STRINGVALUE._serialized_start=256 + _STRINGVALUE._serialized_end=284 + _BYTESVALUE._serialized_start=286 + _BYTESVALUE._serialized_end=313 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/houdini/__init__.py b/openpype/hosts/houdini/__init__.py index a3ee38db8d..38bf1fcc2d 100644 --- a/openpype/hosts/houdini/__init__.py +++ b/openpype/hosts/houdini/__init__.py @@ -1,38 +1,10 @@ -import os +from .addon import ( + HoudiniAddon, + HOUDINI_HOST_DIR, +) -def add_implementation_envs(env, _app): - # Add requirements to HOUDINI_PATH and HOUDINI_MENU_PATH - pype_root = os.environ["OPENPYPE_REPOS_ROOT"] - - startup_path = os.path.join( - pype_root, "openpype", "hosts", "houdini", "startup" - ) - new_houdini_path = [startup_path] - new_houdini_menu_path = [startup_path] - - old_houdini_path = env.get("HOUDINI_PATH") or "" - old_houdini_menu_path = env.get("HOUDINI_MENU_PATH") or "" - - for path in old_houdini_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if 
norm_path not in new_houdini_path: - new_houdini_path.append(norm_path) - - for path in old_houdini_menu_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if norm_path not in new_houdini_menu_path: - new_houdini_menu_path.append(norm_path) - - # Add ampersand for unknown reason (Maybe is needed in Houdini?) - new_houdini_path.append("&") - new_houdini_menu_path.append("&") - - env["HOUDINI_PATH"] = os.pathsep.join(new_houdini_path) - env["HOUDINI_MENU_PATH"] = os.pathsep.join(new_houdini_menu_path) +__all__ = ( + "HoudiniAddon", + "HOUDINI_HOST_DIR", +) diff --git a/openpype/hosts/houdini/addon.py b/openpype/hosts/houdini/addon.py new file mode 100644 index 0000000000..80856b0624 --- /dev/null +++ b/openpype/hosts/houdini/addon.py @@ -0,0 +1,54 @@ +import os +from openpype.modules import OpenPypeModule, IHostAddon + +HOUDINI_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class HoudiniAddon(OpenPypeModule, IHostAddon): + name = "houdini" + host_name = "houdini" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Add requirements to HOUDINI_PATH and HOUDINI_MENU_PATH + startup_path = os.path.join(HOUDINI_HOST_DIR, "startup") + new_houdini_path = [startup_path] + new_houdini_menu_path = [startup_path] + + old_houdini_path = env.get("HOUDINI_PATH") or "" + old_houdini_menu_path = env.get("HOUDINI_MENU_PATH") or "" + + for path in old_houdini_path.split(os.pathsep): + if not path: + continue + + norm_path = os.path.normpath(path) + if norm_path not in new_houdini_path: + new_houdini_path.append(norm_path) + + for path in old_houdini_menu_path.split(os.pathsep): + if not path: + continue + + norm_path = os.path.normpath(path) + if norm_path not in new_houdini_menu_path: + new_houdini_menu_path.append(norm_path) + + # Add ampersand for unknown reason (Maybe is needed in Houdini?) 
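+        # ("&" in Houdini path variables typically expands to the default
+        # search path, so appending it should keep the factory locations
+        # available on HOUDINI_PATH / HOUDINI_MENU_PATH.)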
+ new_houdini_path.append("&") + new_houdini_menu_path.append("&") + + env["HOUDINI_PATH"] = os.pathsep.join(new_houdini_path) + env["HOUDINI_MENU_PATH"] = os.pathsep.join(new_houdini_menu_path) + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(HOUDINI_HOST_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".hip", ".hiplc", ".hipnc"] diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index 603519069a..c8a7f92bb9 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -4,8 +4,9 @@ from contextlib import contextmanager import six -from openpype.api import get_asset +from openpype.client import get_asset_by_name from openpype.pipeline import legacy_io +from openpype.pipeline.context_tools import get_current_project_asset import hou @@ -15,7 +16,7 @@ log = logging.getLogger(__name__) def get_asset_fps(): """Return current asset fps.""" - return get_asset()["data"].get("fps") + return get_current_project_asset()["data"].get("fps") def set_id(node, unique_id, overwrite=False): @@ -74,16 +75,13 @@ def generate_ids(nodes, asset_id=None): """ if asset_id is None: + project_name = legacy_io.active_project() + asset_name = legacy_io.Session["AVALON_ASSET"] # Get the asset ID from the database for the asset of current context - asset_data = legacy_io.find_one( - { - "type": "asset", - "name": legacy_io.Session["AVALON_ASSET"] - }, - projection={"_id": True} - ) - assert asset_data, "No current asset found in Session" - asset_id = asset_data['_id'] + asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"]) + + assert asset_doc, "No current asset found in Session" + asset_id = asset_doc['_id'] node_ids = [] for node in nodes: @@ -130,6 +128,8 @@ def get_output_parameter(node): elif node_type == "arnold": if node.evalParm("ar_ass_export_enable"): return node.parm("ar_ass_file") + elif node_type == "Redshift_Proxy_Output": + return node.parm("RS_archive_file") raise TypeError("Node type '%s' not supported" % node_type) @@ -428,26 +428,29 @@ def maintained_selection(): def reset_framerange(): """Set frame range to current asset""" + project_name = legacy_io.active_project() asset_name = legacy_io.Session["AVALON_ASSET"] - asset = legacy_io.find_one({"name": asset_name, "type": "asset"}) + # Get the asset ID from the database for the asset of current context + asset_doc = get_asset_by_name(project_name, asset_name) + asset_data = asset_doc["data"] - frame_start = asset["data"].get("frameStart") - frame_end = asset["data"].get("frameEnd") + frame_start = asset_data.get("frameStart") + frame_end = asset_data.get("frameEnd") # Backwards compatibility if frame_start is None or frame_end is None: - frame_start = asset["data"].get("edit_in") - frame_end = asset["data"].get("edit_out") + frame_start = asset_data.get("edit_in") + frame_end = asset_data.get("edit_out") if frame_start is None or frame_end is None: log.warning("No edit information found for %s" % asset_name) return - handles = asset["data"].get("handles") or 0 - handle_start = asset["data"].get("handleStart") + handles = asset_data.get("handles") or 0 + handle_start = asset_data.get("handleStart") if handle_start is None: handle_start = handles - handle_end = asset["data"].get("handleEnd") + handle_end = asset_data.get("handleEnd") if handle_end is None: handle_end = handles diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py index 
7048accceb..e4af1913ef 100644 --- a/openpype/hosts/houdini/api/pipeline.py +++ b/openpype/hosts/houdini/api/pipeline.py @@ -12,13 +12,13 @@ from openpype.pipeline import ( register_loader_plugin_path, AVALON_CONTAINER_ID, ) -import openpype.hosts.houdini -from openpype.hosts.houdini.api import lib +from openpype.pipeline.load import any_outdated_containers +from openpype.hosts.houdini import HOUDINI_HOST_DIR +from openpype.hosts.houdini.api import lib, shelves from openpype.lib import ( register_event_callback, emit_event, - any_outdated, ) from .lib import get_asset_fps @@ -28,8 +28,7 @@ log = logging.getLogger("openpype.hosts.houdini") AVALON_CONTAINERS = "/obj/AVALON_CONTAINERS" IS_HEADLESS = not hasattr(hou, "ui") -HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.houdini.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PLUGINS_DIR = os.path.join(HOUDINI_HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") @@ -66,7 +65,7 @@ def install(): self._has_been_setup = True # add houdini vendor packages - hou_pythonpath = os.path.join(os.path.dirname(HOST_DIR), "vendor") + hou_pythonpath = os.path.join(HOUDINI_HOST_DIR, "vendor") sys.path.append(hou_pythonpath) @@ -74,6 +73,7 @@ def install(): # so it initializes into the correct scene FPS, Frame Range, etc. # todo: make sure this doesn't trigger when opening with last workfile _set_context_settings() + shelves.generate_shelves() def uninstall(): @@ -245,7 +245,7 @@ def on_open(): # ensure it is using correct FPS for the asset lib.validate_fps() - if any_outdated(): + if any_outdated_containers(): from openpype.widgets import popup log.warning("Scene has outdated content.") diff --git a/openpype/hosts/houdini/api/shelves.py b/openpype/hosts/houdini/api/shelves.py new file mode 100644 index 0000000000..3ccab964cd --- /dev/null +++ b/openpype/hosts/houdini/api/shelves.py @@ -0,0 +1,182 @@ +import os +import logging +import platform + +from openpype.settings import get_project_settings + +import hou + +log = logging.getLogger("openpype.hosts.houdini.shelves") + + +def generate_shelves(): + """This function generates complete shelves from shelf set to tools + in Houdini from openpype project settings houdini shelf definition. 
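+
+    The settings structure this reads is roughly the following (all values
+    below are illustrative only):
+
+        "shelves": [{
+            "shelf_set_name": "my_shelf_set",
+            "shelf_set_source_path": {"windows": "", "darwin": "", "linux": ""},
+            "shelf_definition": [{
+                "shelf_name": "my_shelf",
+                "tools_list": [{
+                    "name": "my_tool",
+                    "label": "My Tool",
+                    "script": "/path/to/tool_script.py"
+                }]
+            }]
+        }]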
+ """ + current_os = platform.system().lower() + + # load configuration of houdini shelves + project_settings = get_project_settings(os.getenv("AVALON_PROJECT")) + shelves_set_config = project_settings["houdini"]["shelves"] + + if not shelves_set_config: + log.debug("No custom shelves found in project settings.") + return + + for shelf_set_config in shelves_set_config: + shelf_set_filepath = shelf_set_config.get('shelf_set_source_path') + shelf_set_os_filepath = shelf_set_filepath[current_os] + if shelf_set_os_filepath: + if not os.path.isfile(shelf_set_os_filepath): + log.error("Shelf path doesn't exist - " + "{}".format(shelf_set_os_filepath)) + continue + + hou.shelves.newShelfSet(file_path=shelf_set_os_filepath) + continue + + shelf_set_name = shelf_set_config.get('shelf_set_name') + if not shelf_set_name: + log.warning("No name found in shelf set definition.") + continue + + shelves_definition = shelf_set_config.get('shelf_definition') + if not shelves_definition: + log.debug( + "No shelf definition found for shelf set named '{}'".format( + shelf_set_name + ) + ) + continue + + shelf_set = get_or_create_shelf_set(shelf_set_name) + for shelf_definition in shelves_definition: + shelf_name = shelf_definition.get('shelf_name') + if not shelf_name: + log.warning("No name found in shelf definition.") + continue + + shelf = get_or_create_shelf(shelf_name) + + if not shelf_definition.get('tools_list'): + log.debug( + "No tool definition found for shelf named {}".format( + shelf_name + ) + ) + continue + + mandatory_attributes = {'name', 'script'} + for tool_definition in shelf_definition.get('tools_list'): + # We verify that the name and script attibutes of the tool + # are set + if not all( + tool_definition[key] for key in mandatory_attributes + ): + log.warning( + "You need to specify at least the name and the " + "script path of the tool.") + continue + + tool = get_or_create_tool(tool_definition, shelf) + + if not tool: + continue + + # Add the tool to the shelf if not already in it + if tool not in shelf.tools(): + shelf.setTools(list(shelf.tools()) + [tool]) + + # Add the shelf in the shelf set if not already in it + if shelf not in shelf_set.shelves(): + shelf_set.setShelves(shelf_set.shelves() + (shelf,)) + + +def get_or_create_shelf_set(shelf_set_label): + """This function verifies if the shelf set label exists. If not, + creates a new shelf set. + + Arguments: + shelf_set_label (str): The label of the shelf set + + Returns: + hou.ShelfSet: The shelf set existing or the new one + """ + all_shelves_sets = hou.shelves.shelfSets().values() + + shelf_set = next((shelf for shelf in all_shelves_sets if + shelf.label() == shelf_set_label), None) + if shelf_set: + return shelf_set + + shelf_set_name = shelf_set_label.replace(' ', '_').lower() + new_shelf_set = hou.shelves.newShelfSet( + name=shelf_set_name, + label=shelf_set_label + ) + return new_shelf_set + + +def get_or_create_shelf(shelf_label): + """This function verifies if the shelf label exists. If not, creates + a new shelf. 
+ + Arguments: + shelf_label (str): The label of the shelf + + Returns: + hou.Shelf: The shelf existing or the new one + """ + all_shelves = hou.shelves.shelves().values() + + shelf = next((s for s in all_shelves if s.label() == shelf_label), None) + if shelf: + return shelf + + shelf_name = shelf_label.replace(' ', '_').lower() + new_shelf = hou.shelves.newShelf( + name=shelf_name, + label=shelf_label + ) + return new_shelf + + +def get_or_create_tool(tool_definition, shelf): + """This function verifies if the tool exists and updates it. If not, creates + a new one. + + Arguments: + tool_definition (dict): Dict with label, script, icon and help + shelf (hou.Shelf): The parent shelf of the tool + + Returns: + hou.Tool: The tool updated or the new one + """ + existing_tools = shelf.tools() + tool_label = tool_definition.get('label') + + existing_tool = next( + (tool for tool in existing_tools if tool.label() == tool_label), + None + ) + if existing_tool: + tool_definition.pop('name', None) + tool_definition.pop('label', None) + existing_tool.setData(**tool_definition) + return existing_tool + + tool_name = tool_label.replace(' ', '_').lower() + + if not os.path.exists(tool_definition['script']): + log.warning( + "This path doesn't exist - {}".format(tool_definition['script']) + ) + return + + with open(tool_definition['script']) as f: + script = f.read() + tool_definition.update({'script': script}) + + new_tool = hou.shelves.newTool(name=tool_name, **tool_definition) + + return new_tool diff --git a/openpype/hosts/houdini/api/usd.py b/openpype/hosts/houdini/api/usd.py index e9991e38ec..4f4a3d8e6f 100644 --- a/openpype/hosts/houdini/api/usd.py +++ b/openpype/hosts/houdini/api/usd.py @@ -6,6 +6,7 @@ import logging from Qt import QtWidgets, QtCore, QtGui from openpype import style +from openpype.client import get_asset_by_name from openpype.pipeline import legacy_io from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget @@ -46,10 +47,8 @@ class SelectAssetDialog(QtWidgets.QWidget): select_id = None name = self._parm.eval() if name: - db_asset = legacy_io.find_one( - {"name": name, "type": "asset"}, - {"_id": True} - ) + project_name = legacy_io.active_project() + db_asset = get_asset_by_name(project_name, name, fields=["_id"]) if db_asset: select_id = db_asset["_id"] diff --git a/openpype/hosts/houdini/api/workio.py b/openpype/hosts/houdini/api/workio.py index e0213023fd..5f7efff333 100644 --- a/openpype/hosts/houdini/api/workio.py +++ b/openpype/hosts/houdini/api/workio.py @@ -2,11 +2,10 @@ import os import hou -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS def file_extensions(): - return HOST_WORKFILE_EXTENSIONS["houdini"] + return [".hip", ".hiplc", ".hipnc"] def has_unsaved_changes(): diff --git a/openpype/hosts/houdini/hooks/set_paths.py b/openpype/hosts/houdini/hooks/set_paths.py index cd2f98fb76..04a33b1643 100644 --- a/openpype/hosts/houdini/hooks/set_paths.py +++ b/openpype/hosts/houdini/hooks/set_paths.py @@ -1,5 +1,4 @@ from openpype.lib import PreLaunchHook -import os class SetPath(PreLaunchHook): @@ -15,4 +14,4 @@ class SetPath(PreLaunchHook): self.log.warning("BUG: Workdir is not filled.") return - os.chdir(workdir) + self.launch_context.kwargs["cwd"] = workdir diff --git a/openpype/hosts/houdini/plugins/create/create_hda.py b/openpype/hosts/houdini/plugins/create/create_hda.py index 5fc78c7539..b98da8b8bb 100644 --- a/openpype/hosts/houdini/plugins/create/create_hda.py +++ b/openpype/hosts/houdini/plugins/create/create_hda.py @@ -1,6 +1,10 @@ # -*- 
coding: utf-8 -*- import hou +from openpype.client import ( + get_asset_by_name, + get_subsets, +) from openpype.pipeline import legacy_io from openpype.hosts.houdini.api import lib from openpype.hosts.houdini.api import plugin @@ -23,20 +27,16 @@ class CreateHDA(plugin.Creator): # type: (str) -> bool """Check if existing subset name versions already exists.""" # Get all subsets of the current asset - asset_id = legacy_io.find_one( - {"name": self.data["asset"], "type": "asset"}, - projection={"_id": True} - )['_id'] - subset_docs = legacy_io.find( - { - "type": "subset", - "parent": asset_id - }, - {"name": 1} + project_name = legacy_io.active_project() + asset_doc = get_asset_by_name( + project_name, self.data["asset"], fields=["_id"] + ) + subset_docs = get_subsets( + project_name, asset_ids=[asset_doc["_id"]], fields=["name"] ) - existing_subset_names = set(subset_docs.distinct("name")) existing_subset_names_low = { - _name.lower() for _name in existing_subset_names + subset_doc["name"].lower() + for subset_doc in subset_docs } return subset_name.lower() in existing_subset_names_low diff --git a/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py b/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py new file mode 100644 index 0000000000..da4d80bf2b --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py @@ -0,0 +1,48 @@ +from openpype.hosts.houdini.api import plugin + + +class CreateRedshiftProxy(plugin.Creator): + """Redshift Proxy""" + + label = "Redshift Proxy" + family = "redshiftproxy" + icon = "magic" + + def __init__(self, *args, **kwargs): + super(CreateRedshiftProxy, self).__init__(*args, **kwargs) + + # Remove the active, we are checking the bypass flag of the nodes + self.data.pop("active", None) + + # Redshift provides a `Redshift_Proxy_Output` node type which shows + # a limited set of parameters by default and is set to extract a + # Redshift Proxy. However when "imprinting" extra parameters needed + # for OpenPype it starts showing all its parameters again. It's unclear + # why this happens. + # TODO: Somehow enforce so that it only shows the original limited + # attributes of the Redshift_Proxy_Output node type + self.data.update({"node_type": "Redshift_Proxy_Output"}) + + def _process(self, instance): + """Creator main entry point. + + Args: + instance (hou.Node): Created Houdini instance. 
+ + """ + parms = { + "RS_archive_file": '$HIP/pyblish/`chs("subset")`.$F4.rs', + } + + if self.nodes: + node = self.nodes[0] + path = node.path() + parms["RS_archive_sopPath"] = path + + instance.setParms(parms) + + # Lock some Avalon attributes + to_lock = ["family", "id"] + for name in to_lock: + parm = instance.parm(name) + parm.lock(True) diff --git a/openpype/hosts/houdini/plugins/load/load_ass.py b/openpype/hosts/houdini/plugins/load/load_ass.py index 0144bbaefd..557d601677 100644 --- a/openpype/hosts/houdini/plugins/load/load_ass.py +++ b/openpype/hosts/houdini/plugins/load/load_ass.py @@ -1,11 +1,10 @@ import os +import re -import clique from openpype.pipeline import ( load, get_representation_path, ) - from openpype.hosts.houdini.api import pipeline @@ -20,7 +19,6 @@ class AssLoader(load.LoaderPlugin): color = "orange" def load(self, context, name=None, namespace=None, data=None): - import hou # Get the root node @@ -32,7 +30,11 @@ class AssLoader(load.LoaderPlugin): # Create a new geo node procedural = obj.createNode("arnold::procedural", node_name=node_name) - procedural.setParms({"ar_filename": self.get_path(self.fname)}) + + procedural.setParms( + { + "ar_filename": self.format_path(context["representation"]) + }) nodes = [procedural] self[:] = nodes @@ -46,62 +48,43 @@ class AssLoader(load.LoaderPlugin): suffix="", ) - def get_path(self, path): - - # Find all frames in the folder - ext = ".ass.gz" if path.endswith(".ass.gz") else ".ass" - folder = os.path.dirname(path) - frames = [f for f in os.listdir(folder) if f.endswith(ext)] - - # Get the collection of frames to detect frame padding - patterns = [clique.PATTERNS["frames"]] - collections, remainder = clique.assemble(frames, - minimum_items=1, - patterns=patterns) - self.log.debug("Detected collections: {}".format(collections)) - self.log.debug("Detected remainder: {}".format(remainder)) - - if not collections and remainder: - if len(remainder) != 1: - raise ValueError("Frames not correctly detected " - "in: {}".format(remainder)) - - # A single frame without frame range detected - filepath = remainder[0] - return os.path.normpath(filepath).replace("\\", "/") - - # Frames detected with a valid "frame" number pattern - # Then we don't want to have any remainder files found - assert len(collections) == 1 and not remainder - collection = collections[0] - - num_frames = len(collection.indexes) - if num_frames == 1: - # Return the input path without dynamic $F variable - result = path - else: - # More than a single frame detected - use $F{padding} - fname = "{}$F{}{}".format(collection.head, - collection.padding, - collection.tail) - result = os.path.join(folder, fname) - - # Format file name, Houdini only wants forward slashes - return os.path.normpath(result).replace("\\", "/") - def update(self, container, representation): - # Update the file path - file_path = get_representation_path(representation) - file_path = file_path.replace("\\", "/") - procedural = container["node"] - procedural.setParms({"ar_filename": self.get_path(file_path)}) + procedural.setParms({"ar_filename": self.format_path(representation)}) # Update attribute procedural.setParms({"representation": str(representation["_id"])}) def remove(self, container): - node = container["node"] node.destroy() + + @staticmethod + def format_path(representation): + """Format file path correctly for single ass.* or ass.* sequence. + + Args: + representation (dict): representation to be loaded. + + Returns: + str: Formatted path to be used by the input node. 
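+
+        Example (illustrative names): a published sequence file such as
+        'sphere.0001.ass' is rewritten to 'sphere.$F4.ass' so Houdini
+        expands the frame number itself; a single file (no "frame" in the
+        representation context) is returned as-is. Backslashes are always
+        converted to forward slashes.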
+ + """ + path = get_representation_path(representation) + if not os.path.exists(path): + raise RuntimeError("Path does not exist: {}".format(path)) + + is_sequence = bool(representation["context"].get("frame")) + # The path is either a single file or sequence in a folder. + if is_sequence: + dir_path, file_name = os.path.split(path) + path = os.path.join( + dir_path, + re.sub(r"(.*)\.(\d+)\.(ass.*)", "\\1.$F4.\\3", file_name) + ) + + return os.path.normpath(path).replace("\\", "/") + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/load/load_bgeo.py b/openpype/hosts/houdini/plugins/load/load_bgeo.py index a463d51383..b298d423bc 100644 --- a/openpype/hosts/houdini/plugins/load/load_bgeo.py +++ b/openpype/hosts/houdini/plugins/load/load_bgeo.py @@ -44,7 +44,8 @@ class BgeoLoader(load.LoaderPlugin): # Explicitly create a file node file_node = container.createNode("file", node_name=node_name) - file_node.setParms({"file": self.format_path(self.fname, is_sequence)}) + file_node.setParms( + {"file": self.format_path(self.fname, context["representation"])}) # Set display on last node file_node.setDisplayFlag(True) @@ -62,15 +63,15 @@ class BgeoLoader(load.LoaderPlugin): ) @staticmethod - def format_path(path, is_sequence): + def format_path(path, representation): """Format file path correctly for single bgeo or bgeo sequence.""" if not os.path.exists(path): raise RuntimeError("Path does not exist: %s" % path) + is_sequence = bool(representation["context"].get("frame")) # The path is either a single file or sequence in a folder. if not is_sequence: filename = path - print("single") else: filename = re.sub(r"(.*)\.(\d+)\.(bgeo.*)", "\\1.$F4.\\3", path) @@ -94,9 +95,9 @@ class BgeoLoader(load.LoaderPlugin): # Update the file path file_path = get_representation_path(representation) - file_path = self.format_path(file_path) + file_path = self.format_path(file_path, representation) - file_node.setParms({"fileName": file_path}) + file_node.setParms({"file": file_path}) # Update attribute node.setParms({"representation": str(representation["_id"])}) diff --git a/openpype/hosts/houdini/plugins/load/load_image.py b/openpype/hosts/houdini/plugins/load/load_image.py index 928c2ee734..c78798e58a 100644 --- a/openpype/hosts/houdini/plugins/load/load_image.py +++ b/openpype/hosts/houdini/plugins/load/load_image.py @@ -73,7 +73,7 @@ class ImageLoader(load.LoaderPlugin): # Imprint it manually data = { - "schema": "avalon-core:container-2.0", + "schema": "openpype:container-2.0", "id": AVALON_CONTAINER_ID, "name": node_name, "namespace": namespace, diff --git a/openpype/hosts/houdini/plugins/load/load_usd_layer.py b/openpype/hosts/houdini/plugins/load/load_usd_layer.py index 48580fc3aa..2e5079925b 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_layer.py +++ b/openpype/hosts/houdini/plugins/load/load_usd_layer.py @@ -43,7 +43,7 @@ class USDSublayerLoader(load.LoaderPlugin): # Imprint it manually data = { - "schema": "avalon-core:container-2.0", + "schema": "openpype:container-2.0", "id": AVALON_CONTAINER_ID, "name": node_name, "namespace": namespace, diff --git a/openpype/hosts/houdini/plugins/load/load_usd_reference.py b/openpype/hosts/houdini/plugins/load/load_usd_reference.py index 6851c77e6d..c4371db39b 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_reference.py +++ b/openpype/hosts/houdini/plugins/load/load_usd_reference.py @@ -43,7 +43,7 @@ class USDReferenceLoader(load.LoaderPlugin): # Imprint it manually 
data = { - "schema": "avalon-core:container-2.0", + "schema": "openpype:container-2.0", "id": AVALON_CONTAINER_ID, "name": node_name, "namespace": namespace, diff --git a/openpype/hosts/houdini/plugins/load/load_vdb.py b/openpype/hosts/houdini/plugins/load/load_vdb.py index bff0f8b0bf..c558a7a0e7 100644 --- a/openpype/hosts/houdini/plugins/load/load_vdb.py +++ b/openpype/hosts/houdini/plugins/load/load_vdb.py @@ -40,7 +40,8 @@ class VdbLoader(load.LoaderPlugin): # Explicitly create a file node file_node = container.createNode("file", node_name=node_name) - file_node.setParms({"file": self.format_path(self.fname)}) + file_node.setParms( + {"file": self.format_path(self.fname, context["representation"])}) # Set display on last node file_node.setDisplayFlag(True) @@ -57,30 +58,20 @@ class VdbLoader(load.LoaderPlugin): suffix="", ) - def format_path(self, path): + @staticmethod + def format_path(path, representation): """Format file path correctly for single vdb or vdb sequence.""" if not os.path.exists(path): raise RuntimeError("Path does not exist: %s" % path) + is_sequence = bool(representation["context"].get("frame")) # The path is either a single file or sequence in a folder. - is_single_file = os.path.isfile(path) - if is_single_file: + if not is_sequence: filename = path else: - # The path points to the publish .vdb sequence folder so we - # find the first file in there that ends with .vdb - files = sorted(os.listdir(path)) - first = next((x for x in files if x.endswith(".vdb")), None) - if first is None: - raise RuntimeError( - "Couldn't find first .vdb file of " - "sequence in: %s" % path - ) + filename = re.sub(r"(.*)\.(\d+)\.vdb$", "\\1.$F4.vdb", path) - # Set .vdb to $F.vdb - first = re.sub(r"\.(\d+)\.vdb$", ".$F.vdb", first) - - filename = os.path.join(path, first) + filename = os.path.join(path, filename) filename = os.path.normpath(filename) filename = filename.replace("\\", "/") @@ -100,9 +91,9 @@ class VdbLoader(load.LoaderPlugin): # Update the file path file_path = get_representation_path(representation) - file_path = self.format_path(file_path) + file_path = self.format_path(file_path, representation) - file_node.setParms({"fileName": file_path}) + file_node.setParms({"file": file_path}) # Update attribute node.setParms({"representation": str(representation["_id"])}) diff --git a/openpype/hosts/houdini/plugins/publish/collect_current_file.py b/openpype/hosts/houdini/plugins/publish/collect_current_file.py index c0b987ebbc..1383c274a2 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/collect_current_file.py @@ -1,27 +1,28 @@ import os import hou +from openpype.pipeline import legacy_io import pyblish.api class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin): """Inject the current working file into context""" - order = pyblish.api.CollectorOrder - 0.5 + order = pyblish.api.CollectorOrder - 0.01 label = "Houdini Current File" hosts = ["houdini"] def process(self, context): """Inject the current working file""" - filepath = hou.hipFile.path() - if not os.path.exists(filepath): + current_file = hou.hipFile.path() + if not os.path.exists(current_file): # By default Houdini will even point a new scene to a path. # However if the file is not saved at all and does not exist, # we assume the user never set it. 
filepath = "" - elif os.path.basename(filepath) == "untitled.hip": + elif os.path.basename(current_file) == "untitled.hip": # Due to even a new file being called 'untitled.hip' we are unable # to confirm the current scene was ever saved because the file # could have existed already. We will allow it if the file exists, @@ -33,4 +34,43 @@ class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin): "saved correctly." ) - context.data["currentFile"] = filepath + context.data["currentFile"] = current_file + + folder, file = os.path.split(current_file) + filename, ext = os.path.splitext(file) + + task = legacy_io.Session["AVALON_TASK"] + + data = {} + + # create instance + instance = context.create_instance(name=filename) + subset = 'workfile' + task.capitalize() + + data.update({ + "subset": subset, + "asset": os.getenv("AVALON_ASSET", None), + "label": subset, + "publish": True, + "family": 'workfile', + "families": ['workfile'], + "setMembers": [current_file], + "frameStart": context.data['frameStart'], + "frameEnd": context.data['frameEnd'], + "handleStart": context.data['handleStart'], + "handleEnd": context.data['handleEnd'] + }) + + data['representations'] = [{ + 'name': ext.lstrip("."), + 'ext': ext.lstrip("."), + 'files': file, + "stagingDir": folder, + }] + + instance.data.update(data) + + self.log.info('Collected instance: {}'.format(file)) + self.log.info('Scene path: {}'.format(current_file)) + self.log.info('staging Dir: {}'.format(folder)) + self.log.info('subset: {}'.format(subset)) diff --git a/openpype/hosts/houdini/plugins/publish/collect_frames.py b/openpype/hosts/houdini/plugins/publish/collect_frames.py index fac40b4d2b..9bd43d8a09 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_frames.py +++ b/openpype/hosts/houdini/plugins/publish/collect_frames.py @@ -20,7 +20,7 @@ class CollectFrames(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder label = "Collect Frames" - families = ["vdbcache", "imagesequence", "ass"] + families = ["vdbcache", "imagesequence", "ass", "redshiftproxy"] def process(self, instance): diff --git a/openpype/hosts/houdini/plugins/publish/collect_inputs.py b/openpype/hosts/houdini/plugins/publish/collect_inputs.py index 8c7098c710..9ee0248bd9 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_inputs.py +++ b/openpype/hosts/houdini/plugins/publish/collect_inputs.py @@ -1,3 +1,5 @@ +from bson.objectid import ObjectId + import pyblish.api from openpype.pipeline import registered_host @@ -115,7 +117,7 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin): # Collect containers for the given set of nodes containers = collect_input_containers(nodes) - inputs = [c["representation"] for c in containers] - instance.data["inputs"] = inputs + inputs = [ObjectId(c["representation"]) for c in containers] + instance.data["inputRepresentations"] = inputs self.log.info("Collected inputs: %s" % inputs) diff --git a/openpype/hosts/houdini/plugins/publish/collect_output_node.py b/openpype/hosts/houdini/plugins/publish/collect_output_node.py index 938ee81cc3..0130c0a8da 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_output_node.py +++ b/openpype/hosts/houdini/plugins/publish/collect_output_node.py @@ -12,6 +12,7 @@ class CollectOutputSOPPath(pyblish.api.InstancePlugin): "imagesequence", "usd", "usdrender", + "redshiftproxy" ] hosts = ["houdini"] @@ -54,6 +55,8 @@ class CollectOutputSOPPath(pyblish.api.InstancePlugin): else: out_node = node.parm("loppath").evalAsNode() + elif node_type == "Redshift_Proxy_Output": + out_node = 
node.parm("RS_archive_sopPath").evalAsNode() else: raise ValueError( "ROP node type '%s' is" " not supported." % node_type diff --git a/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py b/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py index c635a53074..d56d389be0 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py +++ b/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py @@ -1,7 +1,7 @@ import pyblish.api -import openpype.api import hou +from openpype.pipeline.publish import RepairAction from openpype.hosts.houdini.api import lib @@ -13,7 +13,7 @@ class CollectRemotePublishSettings(pyblish.api.ContextPlugin): hosts = ["houdini"] targets = ["deadline"] label = "Remote Publish Submission Settings" - actions = [openpype.api.RepairAction] + actions = [RepairAction] def process(self, context): diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py index 3f0d10e0ba..cf8d61cda3 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py +++ b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py @@ -1,5 +1,6 @@ import pyblish.api +from openyppe.client import get_subset_by_name, get_asset_by_name from openpype.pipeline import legacy_io import openpype.lib.usdlib as usdlib @@ -50,10 +51,8 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin): self.log.debug("Add bootstrap for: %s" % bootstrap) - asset = legacy_io.find_one({ - "name": instance.data["asset"], - "type": "asset" - }) + project_name = legacy_io.active_project() + asset = get_asset_by_name(project_name, instance.data["asset"]) assert asset, "Asset must exist: %s" % asset # Check which are not about to be created and don't exist yet @@ -70,7 +69,7 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin): self.log.debug("Checking required bootstrap: %s" % required) for subset in required: - if self._subset_exists(instance, subset, asset): + if self._subset_exists(project_name, instance, subset, asset): continue self.log.debug( @@ -93,7 +92,7 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin): for key in ["asset"]: new.data[key] = instance.data[key] - def _subset_exists(self, instance, subset, asset): + def _subset_exists(self, project_name, instance, subset, asset): """Return whether subset exists in current context or in database.""" # Allow it to be created during this publish session context = instance.context @@ -106,9 +105,8 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin): # Or, if they already exist in the database we can # skip them too. 
- return bool( - legacy_io.find_one( - {"name": subset, "type": "subset", "parent": asset["_id"]}, - {"_id": True} - ) - ) + if get_subset_by_name( + project_name, subset, asset["_id"], fields=["_id"] + ): + return True + return False diff --git a/openpype/hosts/houdini/plugins/publish/extract_alembic.py b/openpype/hosts/houdini/plugins/publish/extract_alembic.py index 83b790407f..758d4c560b 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_alembic.py +++ b/openpype/hosts/houdini/plugins/publish/extract_alembic.py @@ -1,11 +1,12 @@ import os import pyblish.api -import openpype.api + +from openpype.pipeline import publish from openpype.hosts.houdini.api.lib import render_rop -class ExtractAlembic(openpype.api.Extractor): +class ExtractAlembic(publish.Extractor): order = pyblish.api.ExtractorOrder label = "Extract Alembic" diff --git a/openpype/hosts/houdini/plugins/publish/extract_ass.py b/openpype/hosts/houdini/plugins/publish/extract_ass.py index e56e40df85..a302b451cb 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_ass.py +++ b/openpype/hosts/houdini/plugins/publish/extract_ass.py @@ -1,11 +1,12 @@ import os import pyblish.api -import openpype.api + +from openpype.pipeline import publish from openpype.hosts.houdini.api.lib import render_rop -class ExtractAss(openpype.api.Extractor): +class ExtractAss(publish.Extractor): order = pyblish.api.ExtractorOrder + 0.1 label = "Extract Ass" diff --git a/openpype/hosts/houdini/plugins/publish/extract_composite.py b/openpype/hosts/houdini/plugins/publish/extract_composite.py index f300b6d28d..23e875f107 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_composite.py +++ b/openpype/hosts/houdini/plugins/publish/extract_composite.py @@ -1,12 +1,12 @@ import os import pyblish.api -import openpype.api +from openpype.pipeline import publish from openpype.hosts.houdini.api.lib import render_rop -class ExtractComposite(openpype.api.Extractor): +class ExtractComposite(publish.Extractor): order = pyblish.api.ExtractorOrder label = "Extract Composite (Image Sequence)" diff --git a/openpype/hosts/houdini/plugins/publish/extract_hda.py b/openpype/hosts/houdini/plugins/publish/extract_hda.py index 301dd4e297..7dd03a92b7 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_hda.py +++ b/openpype/hosts/houdini/plugins/publish/extract_hda.py @@ -4,10 +4,11 @@ import os from pprint import pformat import pyblish.api -import openpype.api + +from openpype.pipeline import publish -class ExtractHDA(openpype.api.Extractor): +class ExtractHDA(publish.Extractor): order = pyblish.api.ExtractorOrder label = "Extract HDA" diff --git a/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py new file mode 100644 index 0000000000..ca9be64a47 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py @@ -0,0 +1,49 @@ +import os + +import pyblish.api + +from openpype.pipeline import publish +from openpype.hosts.houdini.api.lib import render_rop + + +class ExtractRedshiftProxy(publish.Extractor): + + order = pyblish.api.ExtractorOrder + 0.1 + label = "Extract Redshift Proxy" + families = ["redshiftproxy"] + hosts = ["houdini"] + + def process(self, instance): + + ropnode = instance[0] + + # Get the filename from the filename parameter + # `.evalParm(parameter)` will make sure all tokens are resolved + output = ropnode.evalParm("RS_archive_file") + staging_dir = os.path.normpath(os.path.dirname(output)) + instance.data["stagingDir"] = staging_dir 
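+        # e.g. with the creator default of
+        # '$HIP/pyblish/`chs("subset")`.$F4.rs' this resolves to a
+        # "pyblish" folder next to the current .hip file.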
+ file_name = os.path.basename(output) + + self.log.info("Writing Redshift Proxy '%s' to '%s'" % (file_name, + staging_dir)) + + render_rop(ropnode) + + output = instance.data["frames"] + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "rs", + "ext": "rs", + "files": output, + "stagingDir": staging_dir, + } + + # A single frame may also be rendered without start/end frame. + if "frameStart" in instance.data and "frameEnd" in instance.data: + representation["frameStart"] = instance.data["frameStart"] + representation["frameEnd"] = instance.data["frameEnd"] + + instance.data["representations"].append(representation) diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd.py b/openpype/hosts/houdini/plugins/publish/extract_usd.py index 0fc26900fb..78c32affb4 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_usd.py +++ b/openpype/hosts/houdini/plugins/publish/extract_usd.py @@ -1,11 +1,12 @@ import os import pyblish.api -import openpype.api + +from openpype.pipeline import publish from openpype.hosts.houdini.api.lib import render_rop -class ExtractUSD(openpype.api.Extractor): +class ExtractUSD(publish.Extractor): order = pyblish.api.ExtractorOrder label = "Extract USD" diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py index bfcd93c1cb..f686f712bb 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py +++ b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py @@ -5,11 +5,17 @@ import sys from collections import deque import pyblish.api -import openpype.api +from openpype.client import ( + get_asset_by_name, + get_subset_by_name, + get_last_version_by_subset_id, + get_representation_by_name, +) from openpype.pipeline import ( get_representation_path, legacy_io, + publish, ) import openpype.hosts.houdini.api.usd as hou_usdlib from openpype.hosts.houdini.api.lib import render_rop @@ -154,7 +160,7 @@ def parm_values(overrides): parm.set(value) -class ExtractUSDLayered(openpype.api.Extractor): +class ExtractUSDLayered(publish.Extractor): order = pyblish.api.ExtractorOrder label = "Extract Layered USD" @@ -244,11 +250,14 @@ class ExtractUSDLayered(openpype.api.Extractor): # Set up the dependency for publish if they have new content # compared to previous publishes + project_name = legacy_io.active_project() for dependency in active_dependencies: dependency_fname = dependency.data["usdFilename"] filepath = os.path.join(staging_dir, dependency_fname) - similar = self._compare_with_latest_publish(dependency, filepath) + similar = self._compare_with_latest_publish( + project_name, dependency, filepath + ) if similar: # Deactivate this dependency self.log.debug( @@ -268,7 +277,7 @@ class ExtractUSDLayered(openpype.api.Extractor): instance.data["files"] = [] instance.data["files"].append(fname) - def _compare_with_latest_publish(self, dependency, new_file): + def _compare_with_latest_publish(self, project_name, dependency, new_file): import filecmp _, ext = os.path.splitext(new_file) @@ -276,35 +285,29 @@ class ExtractUSDLayered(openpype.api.Extractor): # Compare this dependency with the latest published version # to detect whether we should make this into a new publish # version. If not, skip it. 
- asset = legacy_io.find_one( - {"name": dependency.data["asset"], "type": "asset"} + asset = get_asset_by_name( + project_name, dependency.data["asset"], fields=["_id"] ) - subset = legacy_io.find_one( - { - "name": dependency.data["subset"], - "type": "subset", - "parent": asset["_id"], - } + subset = get_subset_by_name( + project_name, + dependency.data["subset"], + asset["_id"], + fields=["_id"] ) if not subset: # Subset doesn't exist yet. Definitely new file self.log.debug("No existing subset..") return False - version = legacy_io.find_one( - {"type": "version", "parent": subset["_id"], }, - sort=[("name", -1)] + version = get_last_version_by_subset_id( + project_name, subset["_id"], fields=["_id"] ) if not version: self.log.debug("No existing version..") return False - representation = legacy_io.find_one( - { - "name": ext.lstrip("."), - "type": "representation", - "parent": version["_id"], - } + representation = get_representation_by_name( + project_name, ext.lstrip("."), version["_id"] ) if not representation: self.log.debug("No existing representation..") diff --git a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py index 113e1b0bcb..26ec423048 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py +++ b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py @@ -1,11 +1,12 @@ import os import pyblish.api -import openpype.api + +from openpype.pipeline import publish from openpype.hosts.houdini.api.lib import render_rop -class ExtractVDBCache(openpype.api.Extractor): +class ExtractVDBCache(publish.Extractor): order = pyblish.api.ExtractorOrder + 0.1 label = "Extract VDB Cache" diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py index c5cacd1880..c990f481d3 100644 --- a/openpype/hosts/houdini/plugins/publish/increment_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py @@ -1,11 +1,10 @@ import pyblish.api -from openpype.api import version_up -from openpype.action import get_errored_plugins_from_data +from openpype.lib import version_up from openpype.pipeline import registered_host -class IncrementCurrentFile(pyblish.api.InstancePlugin): +class IncrementCurrentFile(pyblish.api.ContextPlugin): """Increment the current file. Saves the current scene with an increased version number. @@ -15,30 +14,10 @@ class IncrementCurrentFile(pyblish.api.InstancePlugin): label = "Increment current file" order = pyblish.api.IntegratorOrder + 9.0 hosts = ["houdini"] - families = ["colorbleed.usdrender", "redshift_rop"] - targets = ["local"] + families = ["workfile"] + optional = True - def process(self, instance): - - # This should be a ContextPlugin, but this is a workaround - # for a bug in pyblish to run once for a family: issue #250 - context = instance.context - key = "__hasRun{}".format(self.__class__.__name__) - if context.data.get(key, False): - return - else: - context.data[key] = True - - context = instance.context - errored_plugins = get_errored_plugins_from_data(context) - if any( - plugin.__name__ == "HoudiniSubmitPublishDeadline" - for plugin in errored_plugins - ): - raise RuntimeError( - "Skipping incrementing current file because " - "submission to deadline failed." 
- ) + def process(self, context): # Filename must not have changed since collecting host = registered_host() diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file_deadline.py b/openpype/hosts/houdini/plugins/publish/increment_current_file_deadline.py deleted file mode 100644 index faa015f739..0000000000 --- a/openpype/hosts/houdini/plugins/publish/increment_current_file_deadline.py +++ /dev/null @@ -1,35 +0,0 @@ -import pyblish.api - -import hou -from openpype.api import version_up -from openpype.action import get_errored_plugins_from_data - - -class IncrementCurrentFileDeadline(pyblish.api.ContextPlugin): - """Increment the current file. - - Saves the current scene with an increased version number. - - """ - - label = "Increment current file" - order = pyblish.api.IntegratorOrder + 9.0 - hosts = ["houdini"] - targets = ["deadline"] - - def process(self, context): - - errored_plugins = get_errored_plugins_from_data(context) - if any( - plugin.__name__ == "HoudiniSubmitPublishDeadline" - for plugin in errored_plugins - ): - raise RuntimeError( - "Skipping incrementing current file because " - "submission to deadline failed." - ) - - current_filepath = context.data["currentFile"] - new_filepath = version_up(current_filepath) - - hou.hipFile.save(file_name=new_filepath, save_to_recent_files=True) diff --git a/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py b/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py index 0ae1bc94eb..ac408bc842 100644 --- a/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py +++ b/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py @@ -1,5 +1,5 @@ import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateVDBInputNode(pyblish.api.InstancePlugin): @@ -16,7 +16,7 @@ class ValidateVDBInputNode(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + 0.1 + order = ValidateContentsOrder + 0.1 families = ["vdbcache"] hosts = ["houdini"] label = "Validate Input Node (VDB)" diff --git a/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py b/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py index 3e17d3e8de..ea800707fb 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py +++ b/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py @@ -1,8 +1,9 @@ import pyblish.api -import openpype.api from collections import defaultdict +from openpype.pipeline.publish import ValidateContentsOrder + class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): """Validate Alembic ROP Primitive to Detail attribute is consistent. 
@@ -15,7 +16,7 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + 0.1 + order = ValidateContentsOrder + 0.1 families = ["pointcache"] hosts = ["houdini"] label = "Validate Primitive to Detail (Abc)" diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py b/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py index e9126ffef0..cbed3ea235 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py +++ b/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py @@ -1,5 +1,6 @@ import pyblish.api -import openpype.api + +from openpype.pipeline.publish import ValidateContentsOrder class ValidateAlembicROPFaceSets(pyblish.api.InstancePlugin): @@ -17,7 +18,7 @@ class ValidateAlembicROPFaceSets(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + 0.1 + order = ValidateContentsOrder + 0.1 families = ["pointcache"] hosts = ["houdini"] label = "Validate Alembic ROP Face Sets" diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py index 8d7e3b611f..2625ae5f83 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py @@ -1,5 +1,6 @@ import pyblish.api -import colorbleed.api + +from openpype.pipeline.publish import ValidateContentsOrder class ValidateAlembicInputNode(pyblish.api.InstancePlugin): @@ -11,7 +12,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin): """ - order = colorbleed.api.ValidateContentsOrder + 0.1 + order = ValidateContentsOrder + 0.1 families = ["pointcache"] hosts = ["houdini"] label = "Validate Input Node (Abc)" diff --git a/openpype/hosts/houdini/plugins/publish/validate_bypass.py b/openpype/hosts/houdini/plugins/publish/validate_bypass.py index fc4e18f701..7cf8da69d6 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_bypass.py +++ b/openpype/hosts/houdini/plugins/publish/validate_bypass.py @@ -1,5 +1,5 @@ import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateBypassed(pyblish.api.InstancePlugin): @@ -11,7 +11,7 @@ class ValidateBypassed(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder - 0.1 + order = ValidateContentsOrder - 0.1 families = ["*"] hosts = ["houdini"] label = "Validate ROP Bypass" diff --git a/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py b/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py index a0919e1323..d414920f8b 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py +++ b/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py @@ -1,11 +1,11 @@ import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateCameraROP(pyblish.api.InstancePlugin): """Validate Camera ROP settings.""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["camera"] hosts = ["houdini"] label = "Camera ROP" diff --git a/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py b/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py index cd72877949..be6a798a95 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py +++ b/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py @@ -1,11 +1,11 @@ import pyblish.api -import 
openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin): """Validate Create Intermediate Directories is enabled on ROP node.""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["pointcache", "camera", "vdbcache"] hosts = ["houdini"] label = "Create Intermediate Directories Checked" diff --git a/openpype/hosts/houdini/plugins/publish/validate_no_errors.py b/openpype/hosts/houdini/plugins/publish/validate_no_errors.py index f58e5f8d7d..76635d4ed5 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_no_errors.py +++ b/openpype/hosts/houdini/plugins/publish/validate_no_errors.py @@ -1,6 +1,6 @@ import pyblish.api -import openpype.api import hou +from openpype.pipeline.publish import ValidateContentsOrder def cook_in_range(node, start, end): @@ -28,7 +28,7 @@ def get_errors(node): class ValidateNoErrors(pyblish.api.InstancePlugin): """Validate the Instance has no current cooking errors.""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["houdini"] label = "Validate no errors" diff --git a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py b/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py index 1eb36763bb..7a8cd04f15 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py +++ b/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py @@ -1,5 +1,5 @@ import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin): @@ -11,7 +11,7 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + 0.1 + order = ValidateContentsOrder + 0.1 families = ["pointcache"] hosts = ["houdini"] label = "Validate Prims Hierarchy Path" diff --git a/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py b/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py index 95c66edff0..0ab182c584 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py +++ b/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py @@ -1,7 +1,7 @@ import pyblish.api -import openpype.api from openpype.hosts.houdini.api import lib +from openpype.pipeline.publish import RepairContextAction import hou @@ -14,7 +14,7 @@ class ValidateRemotePublishOutNode(pyblish.api.ContextPlugin): hosts = ["houdini"] targets = ["deadline"] label = "Remote Publish ROP node" - actions = [openpype.api.RepairContextAction] + actions = [RepairContextAction] def process(self, context): diff --git a/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py b/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py index b681fd0ee1..afc8df7528 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py +++ b/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py @@ -1,7 +1,7 @@ import pyblish.api -import openpype.api import hou +from openpype.pipeline.publish import RepairContextAction class ValidateRemotePublishEnabled(pyblish.api.ContextPlugin): @@ -12,7 +12,7 @@ class ValidateRemotePublishEnabled(pyblish.api.ContextPlugin): hosts = ["houdini"] targets = ["deadline"] label = "Remote Publish ROP enabled" - actions = [openpype.api.RepairContextAction] + actions = [RepairContextAction] def process(self, 
context): diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py index 44719ae488..f08c7c72c5 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py @@ -2,44 +2,37 @@ import re import pyblish.api -import openpype.api +from openpype.client import get_subset_by_name from openpype.pipeline import legacy_io +from openpype.pipeline.publish import ValidateContentsOrder class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin): """Validate the Instance has no current cooking errors.""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["houdini"] families = ["usdShade"] label = "USD Shade model exists" def process(self, instance): - - asset = instance.data["asset"] + project_name = legacy_io.active_project() + asset_name = instance.data["asset"] subset = instance.data["subset"] # Assume shading variation starts after a dot separator shade_subset = subset.split(".", 1)[0] model_subset = re.sub("^usdShade", "usdModel", shade_subset) - asset_doc = legacy_io.find_one( - {"name": asset, "type": "asset"}, - {"_id": True} - ) + asset_doc = instance.data.get("assetEntity") if not asset_doc: - raise RuntimeError("Asset does not exist: %s" % asset) + raise RuntimeError("Asset document is not filled on instance.") - subset_doc = legacy_io.find_one( - { - "name": model_subset, - "type": "subset", - "parent": asset_doc["_id"], - }, - {"_id": True} + subset_doc = get_subset_by_name( + project_name, model_subset, asset_doc["_id"], fields=["_id"] ) if not subset_doc: raise RuntimeError( "USD Model subset not found: " - "%s (%s)" % (model_subset, asset) + "%s (%s)" % (model_subset, asset_name) ) diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py index a77ca2f3cb..a4902b48a9 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py @@ -1,5 +1,5 @@ import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder import hou @@ -12,7 +12,7 @@ class ValidateUsdShadeWorkspace(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["houdini"] families = ["usdShade"] label = "USD Shade Workspace" diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py index 0ae1bc94eb..ac408bc842 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py @@ -1,5 +1,5 @@ import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateVDBInputNode(pyblish.api.InstancePlugin): @@ -16,7 +16,7 @@ class ValidateVDBInputNode(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + 0.1 + order = ValidateContentsOrder + 0.1 families = ["vdbcache"] hosts = ["houdini"] label = "Validate Input Node (VDB)" diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py index 1ba840b71d..55ed581d4c 100644 --- 
a/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py @@ -1,6 +1,6 @@ import pyblish.api -import openpype.api import hou +from openpype.pipeline.publish import ValidateContentsOrder class ValidateVDBOutputNode(pyblish.api.InstancePlugin): @@ -17,7 +17,7 @@ class ValidateVDBOutputNode(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + 0.1 + order = ValidateContentsOrder + 0.1 families = ["vdbcache"] hosts = ["houdini"] label = "Validate Output Node (VDB)" diff --git a/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py b/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py new file mode 100644 index 0000000000..560b355e21 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +import pyblish.api +import hou + +from openpype.pipeline.publish import RepairAction + + +class ValidateWorkfilePaths(pyblish.api.InstancePlugin): + """Validate workfile paths so they are absolute.""" + + order = pyblish.api.ValidatorOrder + families = ["workfile"] + hosts = ["houdini"] + label = "Validate Workfile Paths" + actions = [RepairAction] + optional = True + + node_types = ["file", "alembic"] + prohibited_vars = ["$HIP", "$JOB"] + + def process(self, instance): + invalid = self.get_invalid() + self.log.info( + "node types to check: {}".format(", ".join(self.node_types))) + self.log.info( + "prohibited vars: {}".format(", ".join(self.prohibited_vars)) + ) + if invalid: + for param in invalid: + self.log.error( + "{}: {}".format(param.path(), param.unexpandedString())) + + raise RuntimeError("Invalid paths found") + + @classmethod + def get_invalid(cls): + invalid = [] + for param, _ in hou.fileReferences(): + if param is None: + continue + + # skip nodes we are not interested in + if param.node().type().name() not in cls.node_types: + continue + + if any( + v for v in cls.prohibited_vars + if v in param.unexpandedString()): + invalid.append(param) + + return invalid + + @classmethod + def repair(cls, instance): + invalid = cls.get_invalid() + for param in invalid: + cls.log.info("processing: {}".format(param.path())) + cls.log.info("Replacing {} for {}".format( + param.unexpandedString(), + hou.text.expandString(param.unexpandedString()))) + param.set(hou.text.expandString(param.unexpandedString())) diff --git a/openpype/hosts/houdini/startup/python3.9libs/pythonrc.py b/openpype/hosts/houdini/startup/python3.9libs/pythonrc.py new file mode 100644 index 0000000000..afadbffd3e --- /dev/null +++ b/openpype/hosts/houdini/startup/python3.9libs/pythonrc.py @@ -0,0 +1,10 @@ +from openpype.pipeline import install_host +from openpype.hosts.houdini import api + + +def main(): + print("Installing OpenPype ...") + install_host(api) + + +main() diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py index 01a29472e7..d7d1c79d73 100644 --- a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py +++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py @@ -4,19 +4,8 @@ import husdoutputprocessors.base as base import colorbleed.usdlib as usdlib -from openpype.pipeline import ( - legacy_io, - registered_root, -) - - -def _get_project_publish_template(): - """Return publish template from database for current project""" - project = legacy_io.find_one( - {"type": 
"project"}, - projection={"config.template.publish": True} - ) - return project["config"]["template"]["publish"] +from openpype.client import get_asset_by_name +from openpype.pipeline import legacy_io, Anatomy class AvalonURIOutputProcessor(base.OutputProcessorBase): @@ -35,7 +24,6 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase): ever created in a Houdini session. Therefore be very careful about what data gets put in this object. """ - self._template = None self._use_publish_paths = False self._cache = dict() @@ -60,14 +48,11 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase): return self._parameters def beginSave(self, config_node, t): - self._template = _get_project_publish_template() - parm = self._parms["use_publish_paths"] self._use_publish_paths = config_node.parm(parm).evalAtTime(t) self._cache.clear() def endSave(self): - self._template = None self._use_publish_paths = None self._cache.clear() @@ -138,22 +123,19 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase): """ PROJECT = legacy_io.Session["AVALON_PROJECT"] - asset_doc = legacy_io.find_one({ - "name": asset, - "type": "asset" - }) + anatomy = Anatomy(PROJECT) + asset_doc = get_asset_by_name(PROJECT, asset) if not asset_doc: raise RuntimeError("Invalid asset name: '%s'" % asset) - root = registered_root() - path = self._template.format(**{ - "root": root, + formatted_anatomy = anatomy.format({ "project": PROJECT, "asset": asset_doc["name"], "subset": subset, "representation": ext, "version": 0 # stub version zero }) + path = formatted_anatomy["publish"]["path"] # Remove the version folder subset_folder = os.path.dirname(os.path.dirname(path)) diff --git a/openpype/hosts/maya/__init__.py b/openpype/hosts/maya/__init__.py index c1c82c62e5..bb940a881b 100644 --- a/openpype/hosts/maya/__init__.py +++ b/openpype/hosts/maya/__init__.py @@ -1,27 +1,10 @@ -import os +from .addon import ( + MayaAddon, + MAYA_ROOT_DIR, +) -def add_implementation_envs(env, _app): - # Add requirements to PYTHONPATH - pype_root = os.environ["OPENPYPE_REPOS_ROOT"] - new_python_paths = [ - os.path.join(pype_root, "openpype", "hosts", "maya", "startup") - ] - old_python_path = env.get("PYTHONPATH") or "" - for path in old_python_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if norm_path not in new_python_paths: - new_python_paths.append(norm_path) - - env["PYTHONPATH"] = os.pathsep.join(new_python_paths) - - # Set default values if are not already set via settings - defaults = { - "OPENPYPE_LOG_NO_COLORS": "Yes" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value +__all__ = ( + "MayaAddon", + "MAYA_ROOT_DIR", +) diff --git a/openpype/hosts/maya/addon.py b/openpype/hosts/maya/addon.py new file mode 100644 index 0000000000..b9ecb8279f --- /dev/null +++ b/openpype/hosts/maya/addon.py @@ -0,0 +1,49 @@ +import os +from openpype.modules import OpenPypeModule, IHostAddon + +MAYA_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class MayaAddon(OpenPypeModule, IHostAddon): + name = "maya" + host_name = "maya" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Add requirements to PYTHONPATH + new_python_paths = [ + os.path.join(MAYA_ROOT_DIR, "startup") + ] + old_python_path = env.get("PYTHONPATH") or "" + for path in old_python_path.split(os.pathsep): + if not path: + continue + + norm_path = os.path.normpath(path) + if norm_path not in new_python_paths: + 
new_python_paths.append(norm_path) + + env["PYTHONPATH"] = os.pathsep.join(new_python_paths) + + # Set default environments + envs = { + "OPENPYPE_LOG_NO_COLORS": "Yes", + # For python module 'qtpy' + "QT_API": "PySide2", + # For python module 'Qt' + "QT_PREFERRED_BINDING": "PySide2" + } + for key, value in envs.items(): + env[key] = value + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(MAYA_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".ma", ".mb"] diff --git a/openpype/hosts/maya/api/__init__.py b/openpype/hosts/maya/api/__init__.py index 5d76bf0f04..a6c5f50e1a 100644 --- a/openpype/hosts/maya/api/__init__.py +++ b/openpype/hosts/maya/api/__init__.py @@ -5,11 +5,11 @@ Anything that isn't defined here is INTERNAL and unreliable for external use. """ from .pipeline import ( - install, uninstall, ls, containerise, + MayaHost, ) from .plugin import ( Creator, @@ -40,11 +40,11 @@ from .lib import ( __all__ = [ - "install", "uninstall", "ls", "containerise", + "MayaHost", "Creator", "Loader", diff --git a/openpype/hosts/maya/api/action.py b/openpype/hosts/maya/api/action.py index ca1006b6aa..065fdf3691 100644 --- a/openpype/hosts/maya/api/action.py +++ b/openpype/hosts/maya/api/action.py @@ -3,8 +3,9 @@ from __future__ import absolute_import import pyblish.api +from openpype.client import get_asset_by_name from openpype.pipeline import legacy_io -from openpype.api import get_errored_instances_from_context +from openpype.pipeline.publish import get_errored_instances_from_context class GenerateUUIDsOnInvalidAction(pyblish.api.Action): @@ -74,12 +75,21 @@ class GenerateUUIDsOnInvalidAction(pyblish.api.Action): from . import lib - asset = instance.data['asset'] - asset_id = legacy_io.find_one( - {"name": asset, "type": "asset"}, - projection={"_id": True} - )['_id'] - for node, _id in lib.generate_ids(nodes, asset_id=asset_id): + # Expecting this is called on validators in which case 'assetEntity' + # should be always available, but kept a way to query it by name. + asset_doc = instance.data.get("assetEntity") + if not asset_doc: + asset_name = instance.data["asset"] + project_name = legacy_io.active_project() + self.log.info(( + "Asset is not stored on instance." 
+ " Querying by name \"{}\" from project \"{}\"" + ).format(asset_name, project_name)) + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["_id"] + ) + + for node, _id in lib.generate_ids(nodes, asset_id=asset_doc["_id"]): lib.set_id(node, _id, overwrite=True) diff --git a/openpype/hosts/maya/api/commands.py b/openpype/hosts/maya/api/commands.py index dd616b6dd6..355edf3ae4 100644 --- a/openpype/hosts/maya/api/commands.py +++ b/openpype/hosts/maya/api/commands.py @@ -2,6 +2,7 @@ """OpenPype script commands to be used directly in Maya.""" from maya import cmds +from openpype.client import get_asset_by_name, get_project from openpype.pipeline import legacy_io @@ -79,8 +80,9 @@ def reset_frame_range(): cmds.currentUnit(time=fps) # Set frame start/end + project_name = legacy_io.active_project() asset_name = legacy_io.Session["AVALON_ASSET"] - asset = legacy_io.find_one({"name": asset_name, "type": "asset"}) + asset = get_asset_by_name(project_name, asset_name) frame_start = asset["data"].get("frameStart") frame_end = asset["data"].get("frameEnd") @@ -145,8 +147,9 @@ def reset_resolution(): resolution_height = 1080 # Get resolution from asset + project_name = legacy_io.active_project() asset_name = legacy_io.Session["AVALON_ASSET"] - asset_doc = legacy_io.find_one({"name": asset_name, "type": "asset"}) + asset_doc = get_asset_by_name(project_name, asset_name) resolution = _resolution_from_document(asset_doc) # Try get resolution from project if resolution is None: @@ -155,7 +158,7 @@ def reset_resolution(): "Asset \"{}\" does not have set resolution." " Trying to get resolution from project" ).format(asset_name)) - project_doc = legacy_io.find_one({"type": "project"}) + project_doc = get_project(project_name) resolution = _resolution_from_document(project_doc) if resolution is None: diff --git a/openpype/hosts/maya/api/customize.py b/openpype/hosts/maya/api/customize.py index 683e6b24b0..f66858dfb6 100644 --- a/openpype/hosts/maya/api/customize.py +++ b/openpype/hosts/maya/api/customize.py @@ -8,7 +8,7 @@ from functools import partial import maya.cmds as cmds import maya.mel as mel -from openpype.api import resources +from openpype import resources from openpype.tools.utils import host_tools from .lib import get_main_window diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 088304ab05..2530021eba 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -12,13 +12,18 @@ import contextlib from collections import OrderedDict, defaultdict from math import ceil from six import string_types -import bson from maya import cmds, mel import maya.api.OpenMaya as om -from openpype import lib -from openpype.api import get_anatomy_settings +from openpype.client import ( + get_project, + get_asset_by_name, + get_subsets, + get_last_versions, + get_representation_by_name +) +from openpype.settings import get_project_settings from openpype.pipeline import ( legacy_io, discover_loader_plugins, @@ -27,6 +32,7 @@ from openpype.pipeline import ( load_container, registered_host, ) +from openpype.pipeline.context_tools import get_current_project_asset from .commands import reset_frame_range @@ -1387,15 +1393,11 @@ def generate_ids(nodes, asset_id=None): if asset_id is None: # Get the asset ID from the database for the asset of current context - asset_data = legacy_io.find_one( - { - "type": "asset", - "name": legacy_io.Session["AVALON_ASSET"] - }, - projection={"_id": True} - ) - assert asset_data, "No current asset found in Session" - 
asset_id = asset_data['_id'] + project_name = legacy_io.active_project() + asset_name = legacy_io.Session["AVALON_ASSET"] + asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"]) + assert asset_doc, "No current asset found in Session" + asset_id = asset_doc['_id'] node_ids = [] for node in nodes: @@ -1530,7 +1532,7 @@ def get_container_members(container): if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"): continue - reference_members = cmds.referenceQuery(ref, nodes=True) + reference_members = cmds.referenceQuery(ref, nodes=True, dagPath=True) reference_members = cmds.ls(reference_members, long=True, objectsOnly=True) @@ -1548,13 +1550,15 @@ def list_looks(asset_id): # # get all subsets with look leading in # the name associated with the asset - subset = legacy_io.find({ - "parent": bson.ObjectId(asset_id), - "type": "subset", - "name": {"$regex": "look*"} - }) - - return list(subset) + # TODO this should probably look for family 'look' instead of checking + # subset name that can not start with family + project_name = legacy_io.active_project() + subset_docs = get_subsets(project_name, asset_ids=[asset_id]) + return [ + subset_doc + for subset_doc in subset_docs + if subset_doc["name"].startswith("look") + ] def assign_look_by_version(nodes, version_id): @@ -1570,18 +1574,15 @@ def assign_look_by_version(nodes, version_id): None """ - # Get representations of shader file and relationships - look_representation = legacy_io.find_one({ - "type": "representation", - "parent": version_id, - "name": "ma" - }) + project_name = legacy_io.active_project() - json_representation = legacy_io.find_one({ - "type": "representation", - "parent": version_id, - "name": "json" - }) + # Get representations of shader file and relationships + look_representation = get_representation_by_name( + project_name, "ma", version_id + ) + json_representation = get_representation_by_name( + project_name, "json", version_id + ) # See if representation is already loaded, if so reuse it. 
host = registered_host() @@ -1639,42 +1640,54 @@ def assign_look(nodes, subset="lookDefault"): parts = pype_id.split(":", 1) grouped[parts[0]].append(node) + project_name = legacy_io.active_project() + subset_docs = get_subsets( + project_name, subset_names=[subset], asset_ids=grouped.keys() + ) + subset_docs_by_asset_id = { + str(subset_doc["parent"]): subset_doc + for subset_doc in subset_docs + } + subset_ids = { + subset_doc["_id"] + for subset_doc in subset_docs_by_asset_id.values() + } + last_version_docs = get_last_versions( + project_name, + subset_ids=subset_ids, + fields=["_id", "name", "data.families"] + ) + last_version_docs_by_subset_id = { + last_version_doc["parent"]: last_version_doc + for last_version_doc in last_version_docs + } + for asset_id, asset_nodes in grouped.items(): # create objectId for database - try: - asset_id = bson.ObjectId(asset_id) - except bson.errors.InvalidId: - log.warning("Asset ID is not compatible with bson") - continue - subset_data = legacy_io.find_one({ - "type": "subset", - "name": subset, - "parent": asset_id - }) - - if not subset_data: + subset_doc = subset_docs_by_asset_id.get(asset_id) + if not subset_doc: log.warning("No subset '{}' found for {}".format(subset, asset_id)) continue - # get last version - # with backwards compatibility - version = legacy_io.find_one( - { - "parent": subset_data['_id'], - "type": "version", - "data.families": {"$in": ["look"]} - }, - sort=[("name", -1)], - projection={ - "_id": True, - "name": True - } - ) + last_version = last_version_docs_by_subset_id.get(subset_doc["_id"]) + if not last_version: + log.warning(( + "Not found last version for subset '{}' on asset with id {}" + ).format(subset, asset_id)) + continue - log.debug("Assigning look '{}' ".format(subset, - version["name"])) + families = last_version.get("data", {}).get("families") or [] + if "look" not in families: + log.warning(( + "Last version for subset '{}' on asset with id {}" + " does not have look family" + ).format(subset, asset_id)) + continue - assign_look_by_version(asset_nodes, version['_id']) + log.debug("Assigning look '{}' ".format( + subset, last_version["name"])) + + assign_look_by_version(asset_nodes, last_version["_id"]) def apply_shaders(relationships, shadernodes, nodes): @@ -1737,8 +1750,11 @@ def apply_shaders(relationships, shadernodes, nodes): log.warning("No nodes found for shading engine " "'{0}'".format(id_shading_engines[0])) continue + try: + cmds.sets(filtered_nodes, forceElement=id_shading_engines[0]) + except RuntimeError as rte: + log.error("Error during shader assignment: {}".format(rte)) - cmds.sets(filtered_nodes, forceElement=id_shading_engines[0]) # endregion apply_attributes(attributes, nodes_by_id) @@ -1892,7 +1908,7 @@ def iter_parents(node): """ while True: split = node.rsplit("|", 1) - if len(split) == 1: + if len(split) == 1 or not split[0]: return node = split[0] @@ -2123,9 +2139,11 @@ def set_scene_resolution(width, height, pixelAspect): control_node = "defaultResolution" current_renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer") + aspect_ratio_attr = "deviceAspectRatio" # Give VRay a helping hand as it is slightly different from the rest if current_renderer == "vray": + aspect_ratio_attr = "aspectRatio" vray_node = "vraySettings" if cmds.objExists(vray_node): control_node = vray_node @@ -2138,7 +2156,8 @@ def set_scene_resolution(width, height, pixelAspect): cmds.setAttr("%s.height" % control_node, height) deviceAspectRatio = ((float(width) / float(height)) * float(pixelAspect)) - 
cmds.setAttr("%s.deviceAspectRatio" % control_node, deviceAspectRatio) + cmds.setAttr( + "{}.{}".format(control_node, aspect_ratio_attr), deviceAspectRatio) cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect) @@ -2152,9 +2171,10 @@ def reset_scene_resolution(): None """ - project_doc = legacy_io.find_one({"type": "project"}) + project_name = legacy_io.active_project() + project_doc = get_project(project_name) project_data = project_doc["data"] - asset_data = lib.get_asset()["data"] + asset_data = get_current_project_asset()["data"] # Set project resolution width_key = "resolutionWidth" @@ -2185,9 +2205,11 @@ def set_context_settings(): """ # Todo (Wijnand): apply renderer and resolution of project - project_doc = legacy_io.find_one({"type": "project"}) + project_name = legacy_io.active_project() + project_doc = get_project(project_name) project_data = project_doc["data"] - asset_data = lib.get_asset()["data"] + asset_doc = get_current_project_asset(fields=["data.fps"]) + asset_data = asset_doc.get("data", {}) # Set project fps fps = asset_data.get("fps", project_data.get("fps", 25)) @@ -2212,7 +2234,7 @@ def validate_fps(): """ - fps = lib.get_asset()["data"]["fps"] + fps = get_current_project_asset(fields=["data.fps"])["data"]["fps"] # TODO(antirotor): This is hack as for framerates having multiple # decimal places. FTrack is ceiling decimal values on # fps to two decimal places but Maya 2019+ is reporting those fps @@ -2437,105 +2459,120 @@ def bake_to_world_space(nodes, def load_capture_preset(data=None): + """Convert OpenPype Extract Playblast settings to `capture` arguments + + Input data is the settings from: + `project_settings/maya/publish/ExtractPlayblast/capture_preset` + + Args: + data (dict): Capture preset settings from OpenPype settings + + Returns: + dict: `capture.capture` compatible keyword arguments + + """ + import capture - preset = data - options = dict() + viewport_options = dict() + viewport2_options = dict() + camera_options = dict() - # CODEC - id = 'Codec' - for key in preset[id]: - options[str(key)] = preset[id][key] + # Straight key-value match from settings to capture arguments + options.update(data["Codec"]) + options.update(data["Generic"]) + options.update(data["Resolution"]) - # GENERIC - id = 'Generic' - for key in preset[id]: - options[str(key)] = preset[id][key] - - # RESOLUTION - id = 'Resolution' - options['height'] = preset[id]['height'] - options['width'] = preset[id]['width'] + camera_options.update(data['Camera Options']) + viewport_options.update(data["Renderer"]) # DISPLAY OPTIONS - id = 'Display Options' disp_options = {} - for key in preset['Display Options']: + for key, value in data['Display Options'].items(): if key.startswith('background'): - disp_options[key] = preset['Display Options'][key] - if len(disp_options[key]) == 4: - disp_options[key][0] = (float(disp_options[key][0])/255) - disp_options[key][1] = (float(disp_options[key][1])/255) - disp_options[key][2] = (float(disp_options[key][2])/255) - disp_options[key].pop() + # Convert background, backgroundTop, backgroundBottom colors + if len(value) == 4: + # Ignore alpha + convert RGB to float + value = [ + float(value[0]) / 255, + float(value[1]) / 255, + float(value[2]) / 255 + ] + disp_options[key] = value else: disp_options['displayGradient'] = True options['display_options'] = disp_options - # VIEWPORT OPTIONS - temp_options = {} - id = 'Renderer' - for key in preset[id]: - temp_options[str(key)] = preset[id][key] + # Viewport Options has a mixture of Viewport2 Options and 
Viewport Options + # to pass along to capture. So we'll need to differentiate between the two + VIEWPORT2_OPTIONS = { + "textureMaxResolution", + "renderDepthOfField", + "ssaoEnable", + "ssaoSamples", + "ssaoAmount", + "ssaoRadius", + "ssaoFilterRadius", + "hwFogStart", + "hwFogEnd", + "hwFogAlpha", + "hwFogFalloff", + "hwFogColorR", + "hwFogColorG", + "hwFogColorB", + "hwFogDensity", + "motionBlurEnable", + "motionBlurSampleCount", + "motionBlurShutterOpenFraction", + "lineAAEnable" + } + for key, value in data['Viewport Options'].items(): - temp_options2 = {} - id = 'Viewport Options' - for key in preset[id]: + # There are some keys we want to ignore + if key in {"override_viewport_options", "high_quality"}: + continue + + # First handle special cases where we do value conversion to + # separate option values if key == 'textureMaxResolution': - if preset[id][key] > 0: - temp_options2['textureMaxResolution'] = preset[id][key] - temp_options2['enableTextureMaxRes'] = True - temp_options2['textureMaxResMode'] = 1 + viewport2_options['textureMaxResolution'] = value + if value > 0: + viewport2_options['enableTextureMaxRes'] = True + viewport2_options['textureMaxResMode'] = 1 else: - temp_options2['textureMaxResolution'] = preset[id][key] - temp_options2['enableTextureMaxRes'] = False - temp_options2['textureMaxResMode'] = 0 + viewport2_options['enableTextureMaxRes'] = False + viewport2_options['textureMaxResMode'] = 0 - if key == 'multiSample': - if preset[id][key] > 0: - temp_options2['multiSampleEnable'] = True - temp_options2['multiSampleCount'] = preset[id][key] - else: - temp_options2['multiSampleEnable'] = False - temp_options2['multiSampleCount'] = preset[id][key] + elif key == 'multiSample': + viewport2_options['multiSampleEnable'] = value > 0 + viewport2_options['multiSampleCount'] = value - if key == 'ssaoEnable': - if preset[id][key] is True: - temp_options2['ssaoEnable'] = True - else: - temp_options2['ssaoEnable'] = False + elif key == 'alphaCut': + viewport2_options['transparencyAlgorithm'] = 5 + viewport2_options['transparencyQuality'] = 1 - if key == 'alphaCut': - temp_options2['transparencyAlgorithm'] = 5 - temp_options2['transparencyQuality'] = 1 + elif key == 'hwFogFalloff': + # Settings enum value string to integer + viewport2_options['hwFogFalloff'] = int(value) - if key == 'headsUpDisplay': - temp_options['headsUpDisplay'] = True + # Then handle Viewport 2.0 Options + elif key in VIEWPORT2_OPTIONS: + viewport2_options[key] = value + # Then assume remainder is Viewport Options else: - temp_options[str(key)] = preset[id][key] + viewport_options[key] = value - for key in ['override_viewport_options', - 'high_quality', - 'alphaCut', - 'gpuCacheDisplayFilter', - 'multiSample', - 'ssaoEnable', - 'textureMaxResolution' - ]: - temp_options.pop(key, None) - - options['viewport_options'] = temp_options - options['viewport2_options'] = temp_options2 + options['viewport_options'] = viewport_options + options['viewport2_options'] = viewport2_options + options['camera_options'] = camera_options # use active sound track scene = capture.parse_active_scene() options['sound'] = scene['sound'] - # options['display_options'] = temp_options - return options @@ -2953,8 +2990,9 @@ def update_content_on_context_change(): This will update scene content to match new asset on context change """ scene_sets = cmds.listSets(allSets=True) - new_asset = legacy_io.Session["AVALON_ASSET"] - new_data = lib.get_asset()["data"] + asset_doc = get_current_project_asset() + new_asset = asset_doc["name"] + 
new_data = asset_doc["data"] for s in scene_sets: try: if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance": @@ -3059,7 +3097,7 @@ def set_colorspace(): """Set Colorspace from project configuration """ project_name = os.getenv("AVALON_PROJECT") - imageio = get_anatomy_settings(project_name)["imageio"]["maya"] + imageio = get_project_settings(project_name)["maya"]["imageio"] # Maya 2022+ introduces new OCIO v2 color management settings that # can override the old color managenement preferences. OpenPype has @@ -3192,3 +3230,209 @@ def parent_nodes(nodes, parent=None): node[0].setParent(node[1]) if delete_parent: pm.delete(parent_node) + + +@contextlib.contextmanager +def maintained_time(): + ct = cmds.currentTime(query=True) + try: + yield + finally: + cmds.currentTime(ct, edit=True) + + +def iter_visible_nodes_in_range(nodes, start, end): + """Yield nodes that are visible in start-end frame range. + + - Ignores intermediateObjects completely. + - Considers animated visibility attributes + upstream visibilities. + + This is optimized for large scenes where some nodes in the parent + hierarchy might have some input connections to the visibilities, + e.g. key, driven keys, connections to other attributes, etc. + + This only does a single time step to `start` if current frame is + not inside frame range since the assumption is made that changing + a frame isn't so slow that it beats querying all visibility + plugs through MDGContext on another frame. + + Args: + nodes (list): List of node names to consider. + start (int, float): Start frame. + end (int, float): End frame. + + Returns: + list: List of node names. These will be long full path names so + might have a longer name than the input nodes. + + """ + # States we consider per node + VISIBLE = 1 # always visible + INVISIBLE = 0 # always invisible + ANIMATED = -1 # animated visibility + + # Ensure integers + start = int(start) + end = int(end) + + # Consider only non-intermediate dag nodes and use the "long" names. + nodes = cmds.ls(nodes, long=True, noIntermediate=True, type="dagNode") + if not nodes: + return + + with maintained_time(): + # Go to first frame of the range if the current time is outside + # the queried range so can directly query all visible nodes on + # that frame. + current_time = cmds.currentTime(query=True) + if not (start <= current_time <= end): + cmds.currentTime(start) + + visible = cmds.ls(nodes, long=True, visible=True) + for node in visible: + yield node + if len(visible) == len(nodes) or start == end: + # All are visible on frame one, so they are at least visible once + # inside the frame range. + return + + # For the invisible ones check whether its visibility and/or + # any of its parents visibility attributes are animated. If so, it might + # get visible on other frames in the range. + def memodict(f): + """Memoization decorator for a function taking a single argument. 
+ + See: http://code.activestate.com/recipes/ + 578231-probably-the-fastest-memoization-decorator-in-the-/ + """ + + class memodict(dict): + def __missing__(self, key): + ret = self[key] = f(key) + return ret + + return memodict().__getitem__ + + @memodict + def get_state(node): + plug = node + ".visibility" + connections = cmds.listConnections(plug, + source=True, + destination=False) + if connections: + return ANIMATED + else: + return VISIBLE if cmds.getAttr(plug) else INVISIBLE + + visible = set(visible) + invisible = [node for node in nodes if node not in visible] + always_invisible = set() + # Iterate over the nodes by short to long names to iterate the highest + # in hierarchy nodes first. So the collected data can be used from the + # cache for parent queries in next iterations. + node_dependencies = dict() + for node in sorted(invisible, key=len): + + state = get_state(node) + if state == INVISIBLE: + always_invisible.add(node) + continue + + # If not always invisible by itself we should go through and check + # the parents to see if any of them are always invisible. For those + # that are "ANIMATED" we consider that this node is dependent on + # that attribute, we store them as dependency. + dependencies = set() + if state == ANIMATED: + dependencies.add(node) + + traversed_parents = list() + for parent in iter_parents(node): + + if parent in always_invisible or get_state(parent) == INVISIBLE: + # When parent is always invisible then consider this parent, + # this node we started from and any of the parents we + # have traversed in-between to be *always invisible* + always_invisible.add(parent) + always_invisible.add(node) + always_invisible.update(traversed_parents) + break + + # If we have traversed the parent before and its visibility + # was dependent on animated visibilities then we can just extend + # its dependencies for to those for this node and break further + # iteration upwards. + parent_dependencies = node_dependencies.get(parent, None) + if parent_dependencies is not None: + dependencies.update(parent_dependencies) + break + + state = get_state(parent) + if state == ANIMATED: + dependencies.add(parent) + + traversed_parents.append(parent) + + if node not in always_invisible and dependencies: + node_dependencies[node] = dependencies + + if not node_dependencies: + return + + # Now we only have to check the visibilities for nodes that have animated + # visibility dependencies upstream. The fastest way to check these + # visibility attributes across different frames is with Python api 2.0 + # so we do that. + @memodict + def get_visibility_mplug(node): + """Return api 2.0 MPlug with cached memoize decorator""" + sel = om.MSelectionList() + sel.add(node) + dag = sel.getDagPath(0) + return om.MFnDagNode(dag).findPlug("visibility", True) + + @contextlib.contextmanager + def dgcontext(mtime): + """MDGContext context manager""" + context = om.MDGContext(mtime) + try: + previous = context.makeCurrent() + yield context + finally: + previous.makeCurrent() + + # We skip the first frame as we already used that frame to check for + # overall visibilities. And end+1 to include the end frame. 
+ scene_units = om.MTime.uiUnit() + for frame in range(start + 1, end + 1): + mtime = om.MTime(frame, unit=scene_units) + + # Build little cache so we don't query the same MPlug's value + # again if it was checked on this frame and also is a dependency + # for another node + frame_visibilities = {} + with dgcontext(mtime) as context: + for node, dependencies in list(node_dependencies.items()): + for dependency in dependencies: + dependency_visible = frame_visibilities.get(dependency, + None) + if dependency_visible is None: + mplug = get_visibility_mplug(dependency) + dependency_visible = mplug.asBool(context) + frame_visibilities[dependency] = dependency_visible + + if not dependency_visible: + # One dependency is not visible, thus the + # node is not visible. + break + + else: + # All dependencies are visible. + yield node + # Remove node with dependencies for next frame iterations + # because it was visible at least once. + node_dependencies.pop(node) + + # If no more nodes to process break the frame iterations.. + if not node_dependencies: + break diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py index ff04fa7aa2..cd204445b7 100644 --- a/openpype/hosts/maya/api/lib_renderproducts.py +++ b/openpype/hosts/maya/api/lib_renderproducts.py @@ -80,7 +80,15 @@ IMAGE_PREFIXES = { "mayahardware2": "defaultRenderGlobals.imageFilePrefix" } -RENDERMAN_IMAGE_DIR = "maya//" +RENDERMAN_IMAGE_DIR = "/" + + +def has_tokens(string, tokens): + """Return whether any of tokens is in input string (case-insensitive)""" + pattern = "({})".format("|".join(re.escape(token) for token in tokens)) + match = re.search(pattern, string, re.IGNORECASE) + return bool(match) + + @attr.s class LayerMetadata(object): @@ -99,6 +107,12 @@ class LayerMetadata(object): # Render Products products = attr.ib(init=False, default=attr.Factory(list)) + # The AOV separator token. Note that not all renderers define an explicit + # render separator but allow to put the AOV/RenderPass token anywhere in + # the file path prefix. For those renderers we'll fall back to whatever + # is between the last occurrences of <RenderLayer> and <RenderPass> tokens. + aov_separator = attr.ib(default="_") + @attr.s class RenderProduct(object): @@ -183,7 +197,6 @@ class ARenderProducts: self.layer = layer self.render_instance = render_instance self.multipart = False - self.aov_separator = render_instance.data.get("aovSeparator", "_") # Initialize self.layer_data = self._get_layer_data() @@ -247,20 +260,20 @@ class ARenderProducts: """ try: - file_prefix_attr = IMAGE_PREFIXES[self.renderer] + prefix_attr = IMAGE_PREFIXES[self.renderer] except KeyError: raise UnsupportedRendererException( "Unsupported renderer {}".format(self.renderer) ) - file_prefix = self._get_attr(file_prefix_attr) + prefix = self._get_attr(prefix_attr) - if not file_prefix: + if not prefix: # Fall back to scene name by default log.debug("Image prefix not set, using <Scene>") file_prefix = "<Scene>" - return file_prefix + return prefix def get_render_attribute(self, attribute): """Get attribute from render options. @@ -296,6 +309,42 @@ class ARenderProducts: return lib.get_attr_in_layer(plug, layer=self.layer) + @staticmethod + def extract_separator(file_prefix): + """Extract AOV separator character from the prefix. 
+ + Default behavior extracts the part between + last occurrences of <RenderLayer> and <RenderPass> + + Todo: + This code also triggers for V-Ray which overrides it explicitly + so this code will invalidly debug log it couldn't extract the + AOV separator even though it does set it in RenderProductsVray. + + Args: + file_prefix (str): File prefix with tokens. + + Returns: + str or None: prefix character if it can be extracted. + """ + layer_tokens = ["<renderlayer>", "<layer>"] + aov_tokens = ["<aov>", "<renderpass>"] + + def match_last(tokens, text): + """regex match the last occurrence from a list of tokens""" + pattern = "(?:.*)({})".format("|".join(tokens)) + return re.search(pattern, text, re.IGNORECASE) + + layer_match = match_last(layer_tokens, file_prefix) + aov_match = match_last(aov_tokens, file_prefix) + separator = None + if layer_match and aov_match: + matches = sorted((layer_match, aov_match), + key=lambda match: match.end(1)) + separator = file_prefix[matches[0].end(1):matches[1].start(1)] + return separator + + def _get_layer_data(self): # type: () -> LayerMetadata # ______________________________________________ @@ -304,7 +353,7 @@ class ARenderProducts: # ____________________/ _, scene_basename = os.path.split(cmds.file(q=True, loc=True)) scene_name, _ = os.path.splitext(scene_basename) - + kwargs = {} file_prefix = self.get_renderer_prefix() # If the Render Layer belongs to a Render Setup layer then the @@ -319,6 +368,13 @@ class ARenderProducts: # defaultRenderLayer renders as masterLayer layer_name = "masterLayer" + separator = self.extract_separator(file_prefix) + if separator: + kwargs["aov_separator"] = separator + else: + log.debug("Couldn't extract aov separator from " + "file prefix: {}".format(file_prefix)) + # todo: Support Custom Frames sequences 0,5-10,100-120 # Deadline allows submitting renders with a custom frame list # to support those cases we might want to allow 'custom frames' @@ -335,7 +391,8 @@ class ARenderProducts: layerName=layer_name, renderer=self.renderer, defaultExt=self._get_attr("defaultRenderGlobals.imfPluginKey"), - filePrefix=file_prefix + filePrefix=file_prefix, + **kwargs ) def _generate_file_sequence( @@ -673,16 +730,27 @@ class RenderProductsVray(ARenderProducts): """Get image prefix for V-Ray. This overrides :func:`ARenderProducts.get_renderer_prefix()` as - we must add `<aov>` token manually. + we must add `<aov>` token manually. This is done only for + non-multipart outputs, where `<aov>` token doesn't make sense. See also: :func:`ARenderProducts.get_renderer_prefix()` """ prefix = super(RenderProductsVray, self).get_renderer_prefix() - prefix = "{}{}".format(prefix, self.aov_separator) + if self.multipart: + return prefix + aov_separator = self._get_aov_separator() + prefix = "{}{}".format(prefix, aov_separator) return prefix + def _get_aov_separator(self): + # type: () -> str + """Return the V-Ray AOV/Render Elements separator""" + return self._get_attr( + "vraySettings.fileNameRenderElementSeparator" + ) + def _get_layer_data(self): # type: () -> LayerMetadata """Override to get vray specific extension.""" @@ -694,6 +762,8 @@ class RenderProductsVray(ARenderProducts): layer_data.defaultExt = default_ext layer_data.padding = self._get_attr("vraySettings.fileNamePadding") + layer_data.aov_separator = self._get_aov_separator() + return layer_data def get_render_products(self): @@ -907,14 +977,18 @@ class RenderProductsRedshift(ARenderProducts): """Get image prefix for Redshift. This overrides :func:`ARenderProducts.get_renderer_prefix()` as - we must add `<aov>` token manually. 
This is done only for + non-multipart outputs, where `<aov>` token doesn't make sense. See also: :func:`ARenderProducts.get_renderer_prefix()` """ prefix = super(RenderProductsRedshift, self).get_renderer_prefix() - prefix = "{}{}".format(prefix, self.aov_separator) + if self.multipart: + return prefix + separator = self.extract_separator(prefix) + prefix = "{}{}".format(prefix, separator or "_") return prefix def get_render_products(self): @@ -1087,12 +1161,17 @@ class RenderProductsRenderman(ARenderProducts): "d_tiff": "tif" } - displays = get_displays()["displays"] + displays = get_displays(override_dst="render")["displays"] for name, display in displays.items(): enabled = display["params"]["enable"]["value"] if not enabled: continue + # Skip display types not producing any file output. + # Is there a better way to do it? + if not display_types.get(display["driverNode"]["type"]): + continue + aov_name = name if aov_name == "rmanDefaultDisplay": aov_name = "beauty" @@ -1101,9 +1180,33 @@ class RenderProductsRenderman(ARenderProducts): display["driverNode"]["type"], "exr") for camera in cameras: - product = RenderProduct(productName=aov_name, - ext=extensions, - camera=camera) + # Create render product and set it as multipart only on + # display types supporting it. In all other cases, Renderman + # will create separate output per channel. + if display["driverNode"]["type"] in ["d_openexr", "d_deepexr", "d_tiff"]: # noqa + product = RenderProduct( + productName=aov_name, + ext=extensions, + camera=camera, + multipart=True + ) + else: + # this code should handle the case where no multipart + # capable format is selected. But since it involves + # shady logic to determine what channel becomes what + # let's not do that as all productions will use exr anyway. + """ + for channel in display['params']['displayChannels']['value']: # noqa + product = RenderProduct( + productName="{}_{}".format(aov_name, channel), + ext=extensions, + camera=camera, + multipart=False + ) + """ + raise UnsupportedImageFormatException( + "Only exr, deep exr and tiff formats are supported.") + products.append(product) return products @@ -1196,3 +1299,7 @@ class UnsupportedRendererException(Exception): Raised when requesting data from unsupported renderer. 
""" + + +class UnsupportedImageFormatException(Exception): + """Custom exception to report unsupported output image format.""" diff --git a/openpype/hosts/maya/api/lib_rendersettings.py b/openpype/hosts/maya/api/lib_rendersettings.py new file mode 100644 index 0000000000..2b996702c3 --- /dev/null +++ b/openpype/hosts/maya/api/lib_rendersettings.py @@ -0,0 +1,239 @@ +# -*- coding: utf-8 -*- +"""Class for handling Render Settings.""" +from maya import cmds # noqa +import maya.mel as mel +import six +import sys + +from openpype.lib import Logger +from openpype.settings import ( + get_project_settings, + get_current_project_settings +) + +from openpype.pipeline import legacy_io +from openpype.pipeline import CreatorError +from openpype.pipeline.context_tools import get_current_project_asset +from openpype.hosts.maya.api.commands import reset_frame_range + + +class RenderSettings(object): + + _image_prefix_nodes = { + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'defaultRenderGlobals.imageFilePrefix', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' + } + + _image_prefixes = { + 'vray': get_current_project_settings()["maya"]["RenderSettings"]["vray_renderer"]["image_prefix"], # noqa + 'arnold': get_current_project_settings()["maya"]["RenderSettings"]["arnold_renderer"]["image_prefix"], # noqa + 'renderman': '//{aov_separator}', + 'redshift': get_current_project_settings()["maya"]["RenderSettings"]["redshift_renderer"]["image_prefix"] # noqa + } + + _aov_chars = { + "dot": ".", + "dash": "-", + "underscore": "_" + } + + log = Logger.get_logger("RenderSettings") + + @classmethod + def get_image_prefix_attr(cls, renderer): + return cls._image_prefix_nodes[renderer] + + def __init__(self, project_settings=None): + self._project_settings = project_settings + if not self._project_settings: + self._project_settings = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) + + def set_default_renderer_settings(self, renderer=None): + """Set basic settings based on renderer.""" + if not renderer: + renderer = cmds.getAttr( + 'defaultRenderGlobals.currentRenderer').lower() + + asset_doc = get_current_project_asset() + # project_settings/maya/create/CreateRender/aov_separator + try: + aov_separator = self._aov_chars[( + self._project_settings["maya"] + ["RenderSettings"] + ["aov_separator"] + )] + except KeyError: + aov_separator = "_" + reset_frame = self._project_settings["maya"]["RenderSettings"]["reset_current_frame"] # noqa + + if reset_frame: + start_frame = cmds.getAttr("defaultRenderGlobals.startFrame") + cmds.currentTime(start_frame, edit=True) + + if renderer in self._image_prefix_nodes: + prefix = self._image_prefixes[renderer] + prefix = prefix.replace("{aov_separator}", aov_separator) + cmds.setAttr(self._image_prefix_nodes[renderer], + prefix, type="string") # noqa + else: + print("{0} isn't a supported renderer to autoset settings.".format(renderer)) # noqa + + # TODO: handle not having res values in the doc + width = asset_doc["data"].get("resolutionWidth") + height = asset_doc["data"].get("resolutionHeight") + + if renderer == "arnold": + # set renderer settings for Arnold from project settings + self._set_arnold_settings(width, height) + + if renderer == "vray": + self._set_vray_settings(aov_separator, width, height) + + if renderer == "redshift": + self._set_redshift_settings(width, height) + + def _set_arnold_settings(self, width, height): + """Sets settings for Arnold.""" + from mtoa.core import createOptions # 
noqa + from mtoa.aovs import AOVInterface # noqa + createOptions() + arnold_render_presets = self._project_settings["maya"]["RenderSettings"]["arnold_renderer"] # noqa + # Force resetting settings and AOV list to avoid having to deal with + # AOV checking logic, for now. + # This is a work around because the standard + # function to revert render settings does not reset AOVs list in MtoA + # Fetch current aovs in case there's any. + current_aovs = AOVInterface().getAOVs() + # Remove fetched AOVs + AOVInterface().removeAOVs(current_aovs) + mel.eval("unifiedRenderGlobalsRevertToDefault") + img_ext = arnold_render_presets["image_format"] + img_prefix = arnold_render_presets["image_prefix"] + aovs = arnold_render_presets["aov_list"] + img_tiled = arnold_render_presets["tiled"] + multi_exr = arnold_render_presets["multilayer_exr"] + additional_options = arnold_render_presets["additional_options"] + for aov in aovs: + AOVInterface('defaultArnoldRenderOptions').addAOV(aov) + + cmds.setAttr("defaultResolution.width", width) + cmds.setAttr("defaultResolution.height", height) + + self._set_global_output_settings() + + cmds.setAttr( + "defaultRenderGlobals.imageFilePrefix", img_prefix, type="string") + + cmds.setAttr( + "defaultArnoldDriver.ai_translator", img_ext, type="string") + + cmds.setAttr( + "defaultArnoldDriver.exrTiled", img_tiled) + + cmds.setAttr( + "defaultArnoldDriver.mergeAOVs", multi_exr) + self._additional_attribs_setter(additional_options) + reset_frame_range() + + def _set_redshift_settings(self, width, height): + """Sets settings for Redshift.""" + redshift_render_presets = ( + self._project_settings + ["maya"] + ["RenderSettings"] + ["redshift_renderer"] + ) + additional_options = redshift_render_presets["additional_options"] + ext = redshift_render_presets["image_format"] + img_exts = ["iff", "exr", "tif", "png", "tga", "jpg"] + img_ext = img_exts.index(ext) + + self._set_global_output_settings() + cmds.setAttr("redshiftOptions.imageFormat", img_ext) + cmds.setAttr("defaultResolution.width", width) + cmds.setAttr("defaultResolution.height", height) + self._additional_attribs_setter(additional_options) + + def _set_vray_settings(self, aov_separator, width, height): + # type: (str, int, int) -> None + """Sets important settings for Vray.""" + settings = cmds.ls(type="VRaySettingsNode") + node = settings[0] if settings else cmds.createNode("VRaySettingsNode") + vray_render_presets = ( + self._project_settings + ["maya"] + ["RenderSettings"] + ["vray_renderer"] + ) + # Set aov separator + # First we need to explicitly set the UI items in Render Settings + # because that is also what V-Ray updates to when that Render Settings + # UI did initialize before and refreshes again. + MENU = "vrayRenderElementSeparator" + if cmds.optionMenuGrp(MENU, query=True, exists=True): + items = cmds.optionMenuGrp(MENU, query=True, ill=True) + separators = [cmds.menuItem(i, query=True, label=True) for i in items] # noqa: E501 + try: + sep_idx = separators.index(aov_separator) + except ValueError as e: + six.reraise( + CreatorError, + CreatorError( + "AOV character {} not in {}".format( + aov_separator, separators)), + sys.exc_info()[2]) + + cmds.optionMenuGrp(MENU, edit=True, select=sep_idx + 1) + + # Set the render element attribute as string. 
This is also what V-Ray + # sets whenever the `vrayRenderElementSeparator` menu items switch + cmds.setAttr( + "{}.fileNameRenderElementSeparator".format(node), + aov_separator, + type="string" + ) + + # Set render file format to exr + cmds.setAttr("{}.imageFormatStr".format(node), "exr", type="string") + + # animType + cmds.setAttr("{}.animType".format(node), 1) + + # resolution + cmds.setAttr("{}.width".format(node), width) + cmds.setAttr("{}.height".format(node), height) + + additional_options = vray_render_presets["additional_options"] + + self._additional_attribs_setter(additional_options) + + @staticmethod + def _set_global_output_settings(): + # enable animation + cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) + cmds.setAttr("defaultRenderGlobals.animation", 1) + cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) + cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) + + def _additional_attribs_setter(self, additional_attribs): + for item in additional_attribs: + attribute, value = item + attribute = str(attribute) # ensure str conversion from settings + attribute_type = cmds.getAttr(attribute, type=True) + if attribute_type in {"long", "bool"}: + cmds.setAttr(attribute, int(value)) + elif attribute_type == "string": + cmds.setAttr(attribute, str(value), type="string") + elif attribute_type in {"double", "doubleAngle", "doubleLinear"}: + cmds.setAttr(attribute, float(value)) + else: + self.log.error( + "Attribute {attribute} can not be set due to unsupported " + "type: {attribute_type}".format( + attribute=attribute, + attribute_type=attribute_type) + ) diff --git a/openpype/hosts/maya/api/lib_rendersetup.py b/openpype/hosts/maya/api/lib_rendersetup.py index 0fdc54a068..e616f26e1b 100644 --- a/openpype/hosts/maya/api/lib_rendersetup.py +++ b/openpype/hosts/maya/api/lib_rendersetup.py @@ -348,3 +348,71 @@ def get_attr_overrides(node_attr, layer, break return reversed(plug_overrides) + + +def get_shader_in_layer(node, layer): + """Return the assigned shader in a renderlayer without switching layers. + + This has been developed and tested for Legacy Renderlayers and *not* for + Render Setup. + + Note: This will also return the shader for any face assignments, however + it will *not* return the components they are assigned to. This could + be implemented, but since Maya's renderlayers are famous for breaking + with face assignments there has been no need for this function to + support that. + + Returns: + list: The list of assigned shaders in the given layer. + + """ + + def _get_connected_shader(plug): + """Return current shader""" + return cmds.listConnections(plug, + source=False, + destination=True, + plugs=False, + connections=False, + type="shadingEngine") or [] + + # We check the instObjGroups (shader connection) for layer overrides. 
+ plug = node + ".instObjGroups" + + # Ignore complex query if we're in the layer anyway (optimization) + current_layer = cmds.editRenderLayerGlobals(query=True, + currentRenderLayer=True) + if layer == current_layer: + return _get_connected_shader(plug) + + connections = cmds.listConnections(plug, + plugs=True, + source=False, + destination=True, + type="renderLayer") or [] + connections = filter(lambda x: x.endswith(".outPlug"), connections) + if not connections: + # If no overrides anywhere on the shader, just get the current shader + return _get_connected_shader(plug) + + def _get_override(connections, layer): + """Return the overridden connection for that layer in connections""" + # If there's an override on that layer, return that. + for connection in connections: + if (connection.startswith(layer + ".outAdjustments") and + connection.endswith(".outPlug")): + + # This is a shader override on that layer so get the shader + # connected to .outValue of the .outAdjustment[i] + out_adjustment = connection.rsplit(".", 1)[0] + connection_attr = out_adjustment + ".outValue" + override = cmds.listConnections(connection_attr) or [] + + return override + + override_shader = _get_override(connections, layer) + if override_shader is not None: + return override_shader + else: + # Get the override for "defaultRenderLayer" (=masterLayer) + return _get_override(connections, layer="defaultRenderLayer") diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py index 97f06c43af..e20f29049b 100644 --- a/openpype/hosts/maya/api/menu.py +++ b/openpype/hosts/maya/api/menu.py @@ -6,14 +6,20 @@ from Qt import QtWidgets, QtGui import maya.utils import maya.cmds as cmds -from openpype.api import BuildWorkfile from openpype.settings import get_project_settings from openpype.pipeline import legacy_io +from openpype.pipeline.workfile import BuildWorkfile from openpype.tools.utils import host_tools -from openpype.hosts.maya.api import lib +from openpype.hosts.maya.api import lib, lib_rendersettings from .lib import get_main_window, IS_HEADLESS from .commands import reset_frame_range +from .workfile_template_builder import ( + create_placeholder, + update_placeholder, + build_workfile_template, + update_workfile_template, +) log = logging.getLogger(__name__) @@ -44,6 +50,7 @@ def install(): parent="MayaWindow" ) + renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower() # Create context menu context_label = "{}, {}".format( legacy_io.Session["AVALON_ASSET"], @@ -119,6 +126,12 @@ def install(): "Set Colorspace", command=lambda *args: lib.set_colorspace(), ) + + cmds.menuItem( + "Set Render Settings", + command=lambda *args: lib_rendersettings.RenderSettings().set_default_renderer_settings() # noqa + ) + cmds.menuItem(divider=True, parent=MENU_NAME) cmds.menuItem( "Build First Workfile", @@ -139,6 +152,34 @@ def install(): parent_widget ) ) + + builder_menu = cmds.menuItem( + "Template Builder", + subMenu=True, + tearOff=True, + parent=MENU_NAME + ) + cmds.menuItem( + "Create Placeholder", + parent=builder_menu, + command=create_placeholder + ) + cmds.menuItem( + "Update Placeholder", + parent=builder_menu, + command=update_placeholder + ) + cmds.menuItem( + "Build Workfile from template", + parent=builder_menu, + command=build_workfile_template + ) + cmds.menuItem( + "Update Workfile from template", + parent=builder_menu, + command=update_workfile_template + ) + cmds.setParent(MENU_NAME, menu=True) def add_scripts_menu(): diff --git a/openpype/hosts/maya/api/obj.py 
b/openpype/hosts/maya/api/obj.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index b0e8fac635..b3bf738a2b 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -1,21 +1,26 @@ import os -import sys import errno import logging +import contextlib from maya import utils, cmds, OpenMaya import maya.api.OpenMaya as om import pyblish.api -import openpype.hosts.maya +from openpype.settings import get_project_settings +from openpype.host import ( + HostBase, + IWorkfileHost, + ILoadHost, + HostDirmap, +) from openpype.tools.utils import host_tools +from openpype.tools.workfiles.lock_dialog import WorkfileLockDialog from openpype.lib import ( - any_outdated, register_event_callback, emit_event ) -from openpype.lib.path_tools import HostDirmap from openpype.pipeline import ( legacy_io, register_loader_plugin_path, @@ -26,13 +31,30 @@ from openpype.pipeline import ( deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) -from openpype.hosts.maya.lib import copy_workspace_mel +from openpype.pipeline.load import any_outdated_containers +from openpype.pipeline.workfile.lock_workfile import ( + create_workfile_lock, + remove_workfile_lock, + is_workfile_locked, + is_workfile_lock_enabled +) +from openpype.hosts.maya import MAYA_ROOT_DIR +from openpype.hosts.maya.lib import create_workspace_mel + from . import menu, lib +from .workfile_template_builder import MayaPlaceholderLoadPlugin +from .workio import ( + open_file, + save_file, + file_extensions, + has_unsaved_changes, + work_root, + current_file +) log = logging.getLogger("openpype.hosts.maya") -HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.maya.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PLUGINS_DIR = os.path.join(MAYA_ROOT_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") @@ -40,50 +62,159 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") AVALON_CONTAINERS = ":AVALON_CONTAINERS" -self = sys.modules[__name__] -self._ignore_lock = False -self._events = {} +class MayaHost(HostBase, IWorkfileHost, ILoadHost): + name = "maya" -def install(): - from openpype.settings import get_project_settings + def __init__(self): + super(MayaHost, self).__init__() + self._op_events = {} - project_settings = get_project_settings(os.getenv("AVALON_PROJECT")) - # process path mapping - dirmap_processor = MayaDirmap("maya", project_settings) - dirmap_processor.process_dirmap() + def install(self): + project_name = legacy_io.active_project() + project_settings = get_project_settings(project_name) + # process path mapping + dirmap_processor = MayaDirmap("maya", project_name, project_settings) + dirmap_processor.process_dirmap() - pyblish.api.register_plugin_path(PUBLISH_PATH) - pyblish.api.register_host("mayabatch") - pyblish.api.register_host("mayapy") - pyblish.api.register_host("maya") + pyblish.api.register_plugin_path(PUBLISH_PATH) + pyblish.api.register_host("mayabatch") + pyblish.api.register_host("mayapy") + pyblish.api.register_host("maya") - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - register_inventory_action_path(INVENTORY_PATH) - log.info(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + register_inventory_action_path(INVENTORY_PATH) + self.log.info(PUBLISH_PATH) - log.info("Installing 
callbacks ... ") - register_event_callback("init", on_init) + self.log.info("Installing callbacks ... ") + register_event_callback("init", on_init) - # Callbacks below are not required for headless mode, the `init` however - # is important to load referenced Alembics correctly at rendertime. - if lib.IS_HEADLESS: - log.info(("Running in headless mode, skipping Maya " - "save/open/new callback installation..")) - return + if lib.IS_HEADLESS: + self.log.info(( + "Running in headless mode, skipping Maya save/open/new" + " callback installation.." + )) - _set_project() - _register_callbacks() + return - menu.install() + _set_project() + self._register_callbacks() - register_event_callback("save", on_save) - register_event_callback("open", on_open) - register_event_callback("new", on_new) - register_event_callback("before.save", on_before_save) - register_event_callback("taskChanged", on_task_changed) - register_event_callback("workfile.save.before", before_workfile_save) + menu.install() + + register_event_callback("save", on_save) + register_event_callback("open", on_open) + register_event_callback("new", on_new) + register_event_callback("before.save", on_before_save) + register_event_callback("after.save", on_after_save) + register_event_callback("before.close", on_before_close) + register_event_callback("before.file.open", before_file_open) + register_event_callback("taskChanged", on_task_changed) + register_event_callback("workfile.open.before", before_workfile_open) + register_event_callback("workfile.save.before", before_workfile_save) + register_event_callback("workfile.save.before", after_workfile_save) + + def open_workfile(self, filepath): + return open_file(filepath) + + def save_workfile(self, filepath=None): + return save_file(filepath) + + def work_root(self, session): + return work_root(session) + + def get_current_workfile(self): + return current_file() + + def workfile_has_unsaved_changes(self): + return has_unsaved_changes() + + def get_workfile_extensions(self): + return file_extensions() + + def get_containers(self): + return ls() + + def get_workfile_build_placeholder_plugins(self): + return [ + MayaPlaceholderLoadPlugin + ] + + @contextlib.contextmanager + def maintained_selection(self): + with lib.maintained_selection(): + yield + + def _register_callbacks(self): + for handler, event in self._op_events.copy().items(): + if event is None: + continue + + try: + OpenMaya.MMessage.removeCallback(event) + self._op_events[handler] = None + except RuntimeError as exc: + self.log.info(exc) + + self._op_events[_on_scene_save] = OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save + ) + + self._op_events[_after_scene_save] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kAfterSave, + _after_scene_save + ) + ) + + self._op_events[_before_scene_save] = ( + OpenMaya.MSceneMessage.addCheckCallback( + OpenMaya.MSceneMessage.kBeforeSaveCheck, + _before_scene_save + ) + ) + + self._op_events[_on_scene_new] = OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kAfterNew, _on_scene_new + ) + + self._op_events[_on_maya_initialized] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kMayaInitialized, + _on_maya_initialized + ) + ) + + self._op_events[_on_scene_open] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kAfterOpen, + _on_scene_open + ) + ) + + self._op_events[_before_scene_open] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kBeforeOpen, + _before_scene_open + ) + 
) + + self._op_events[_before_close_maya] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kMayaExiting, + _before_close_maya + ) + ) + + self.log.info("Installed event handler _on_scene_save..") + self.log.info("Installed event handler _before_scene_save..") + self.log.info("Installed event handler _on_after_save..") + self.log.info("Installed event handler _on_scene_new..") + self.log.info("Installed event handler _on_maya_initialized..") + self.log.info("Installed event handler _on_scene_open..") + self.log.info("Installed event handler _check_lock_file..") + self.log.info("Installed event handler _before_close_maya..") def _set_project(): @@ -107,44 +238,6 @@ def _set_project(): cmds.workspace(workdir, openWorkspace=True) -def _register_callbacks(): - for handler, event in self._events.copy().items(): - if event is None: - continue - - try: - OpenMaya.MMessage.removeCallback(event) - self._events[handler] = None - except RuntimeError as e: - log.info(e) - - self._events[_on_scene_save] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save - ) - - self._events[_before_scene_save] = OpenMaya.MSceneMessage.addCheckCallback( - OpenMaya.MSceneMessage.kBeforeSaveCheck, _before_scene_save - ) - - self._events[_on_scene_new] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kAfterNew, _on_scene_new - ) - - self._events[_on_maya_initialized] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kMayaInitialized, _on_maya_initialized - ) - - self._events[_on_scene_open] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kAfterOpen, _on_scene_open - ) - - log.info("Installed event handler _on_scene_save..") - log.info("Installed event handler _before_scene_save..") - log.info("Installed event handler _on_scene_new..") - log.info("Installed event handler _on_maya_initialized..") - log.info("Installed event handler _on_scene_open..") - - def _on_maya_initialized(*args): emit_event("init") @@ -160,6 +253,10 @@ def _on_scene_new(*args): emit_event("new") +def _after_scene_save(*arg): + emit_event("after.save") + + def _on_scene_save(*args): emit_event("save") @@ -168,6 +265,14 @@ def _on_scene_open(*args): emit_event("open") +def _before_close_maya(*args): + emit_event("before.close") + + +def _before_scene_open(*args): + emit_event("before.file.open") + + def _before_scene_save(return_code, client_data): # Default to allowing the action. Registered @@ -181,6 +286,23 @@ def _before_scene_save(return_code, client_data): ) +def _remove_workfile_lock(): + """Remove workfile lock on current file""" + if not handle_workfile_locks(): + return + filepath = current_file() + log.info("Removing lock on current file {}...".format(filepath)) + if filepath: + remove_workfile_lock(filepath) + + +def handle_workfile_locks(): + if lib.IS_HEADLESS: + return False + project_name = legacy_io.active_project() + return is_workfile_lock_enabled(MayaHost.name, project_name) + + def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) pyblish.api.deregister_host("mayabatch") @@ -301,21 +423,13 @@ def containerise(name, ("id", AVALON_CONTAINER_ID), ("name", name), ("namespace", namespace), - ("loader", str(loader)), + ("loader", loader), ("representation", context["representation"]["_id"]), ] for key, value in data: - if not value: - continue - - if isinstance(value, (int, float)): - cmds.addAttr(container, longName=key, attributeType="short") - cmds.setAttr(container + "." 
+ key, value) - - else: - cmds.addAttr(container, longName=key, dataType="string") - cmds.setAttr(container + "." + key, value, type="string") + cmds.addAttr(container, longName=key, dataType="string") + cmds.setAttr(container + "." + key, str(value), type="string") main_container = cmds.ls(AVALON_CONTAINERS, type="objectSet") if not main_container: @@ -386,6 +500,46 @@ def on_before_save(): return lib.validate_fps() +def on_after_save(): + """Check if there is a lockfile after save""" + check_lock_on_current_file() + + +def check_lock_on_current_file(): + + """Check if there is a user opening the file""" + if not handle_workfile_locks(): + return + log.info("Running callback on checking the lock file...") + + # add the lock file when opening the file + filepath = current_file() + + if is_workfile_locked(filepath): + # add lockfile dialog + workfile_dialog = WorkfileLockDialog(filepath) + if not workfile_dialog.exec_(): + cmds.file(new=True) + return + + create_workfile_lock(filepath) + + +def on_before_close(): + """Delete the lock file after user quitting the Maya Scene""" + log.info("Closing Maya...") + # delete the lock file + filepath = current_file() + if handle_workfile_locks(): + remove_workfile_lock(filepath) + + +def before_file_open(): + """check lock file when the file changed""" + # delete the lock file + _remove_workfile_lock() + + def on_save(): """Automatically add IDs to new nodes @@ -394,6 +548,8 @@ def on_save(): """ log.info("Running callback on save..") + # remove lockfile if users jumps over from one scene to another + _remove_workfile_lock() # # Update current task for the current scene # update_task_from_path(cmds.file(query=True, sceneName=True)) @@ -427,7 +583,7 @@ def on_open(): lib.validate_fps() lib.fix_incompatible_containers() - if any_outdated(): + if any_outdated_containers(): log.warning("Scene has outdated content.") # Find maya main window @@ -451,6 +607,9 @@ def on_open(): dialog.on_clicked.connect(_on_show_inventory) dialog.show() + # create lock file for the maya scene + check_lock_on_current_file() + def on_new(): """Set project resolution and fps when create a new file""" @@ -466,6 +625,7 @@ def on_new(): "from openpype.hosts.maya.api import lib;" "lib.add_render_layer_change_observer()") lib.set_context_settings() + _remove_workfile_lock() def on_task_changed(): @@ -476,7 +636,6 @@ def on_task_changed(): workdir = legacy_io.Session["AVALON_WORKDIR"] if os.path.exists(workdir): log.info("Updating Maya workspace for task change to %s", workdir) - _set_project() # Set Maya fileDialog's start-dir to /scenes @@ -494,7 +653,7 @@ def on_task_changed(): lib.update_content_on_context_change() msg = " project: {}\n asset: {}\n task:{}".format( - legacy_io.Session["AVALON_PROJECT"], + legacy_io.active_project(), legacy_io.Session["AVALON_ASSET"], legacy_io.Session["AVALON_TASK"] ) @@ -505,10 +664,26 @@ def on_task_changed(): ) +def before_workfile_open(): + if handle_workfile_locks(): + _remove_workfile_lock() + + def before_workfile_save(event): + project_name = legacy_io.active_project() + if handle_workfile_locks(): + _remove_workfile_lock() workdir_path = event["workdir_path"] if workdir_path: - copy_workspace_mel(workdir_path) + create_workspace_mel(workdir_path, project_name) + + +def after_workfile_save(event): + workfile_name = event["filename"] + if handle_workfile_locks(): + if workfile_name: + if not is_workfile_locked(workfile_name): + create_workfile_lock(workfile_name) class MayaDirmap(HostDirmap): diff --git a/openpype/hosts/maya/api/plugin.py 
b/openpype/hosts/maya/api/plugin.py index 3721868823..39d821f620 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -4,13 +4,15 @@ from maya import cmds import qargparse +from openpype.lib import Logger from openpype.pipeline import ( LegacyCreator, LoaderPlugin, get_representation_path, AVALON_CONTAINER_ID, + Anatomy, ) - +from openpype.settings import get_project_settings from .pipeline import containerise from . import lib @@ -49,9 +51,7 @@ def get_reference_node(members, log=None): # Warn the user when we're taking the highest reference node if len(references) > 1: if not log: - from openpype.lib import PypeLogger - - log = PypeLogger().get_logger(__name__) + log = Logger.get_logger(__name__) log.warning("More than one reference node found in " "container, using highest reference node: " @@ -207,7 +207,8 @@ class ReferenceLoader(Loader): file_type = { "ma": "mayaAscii", "mb": "mayaBinary", - "abc": "Alembic" + "abc": "Alembic", + "fbx": "FBX" }.get(representation["name"]) assert file_type, "Unsupported representation: %s" % representation @@ -230,6 +231,10 @@ class ReferenceLoader(Loader): self.log.debug("No alembic nodes found in {}".format(members)) try: + path = self.prepare_root_value(path, + representation["context"] + ["project"] + ["name"]) content = cmds.file(path, loadReference=reference_node, type=file_type, @@ -319,6 +324,29 @@ class ReferenceLoader(Loader): except RuntimeError: pass + def prepare_root_value(self, file_url, project_name): + """Replace root value with env var placeholder. + + Use ${OPENPYPE_ROOT_WORK} (or any other root) instead of proper root + value when storing referenced url into a workfile. + Useful for remote workflows with SiteSync. + + Args: + file_url (str) + project_name (dict) + Returns: + (str) + """ + settings = get_project_settings(project_name) + use_env_var_as_root = (settings["maya"] + ["maya-dirmap"] + ["use_env_var_as_root"]) + if use_env_var_as_root: + anatomy = Anatomy(project_name) + file_url = anatomy.replace_root_with_env_key(file_url, '${{{}}}') + + return file_url + @staticmethod def _organize_containers(nodes, container): # type: (list, str) -> None diff --git a/openpype/hosts/maya/api/setdress.py b/openpype/hosts/maya/api/setdress.py index f8d3ed79b8..159bfe9eb3 100644 --- a/openpype/hosts/maya/api/setdress.py +++ b/openpype/hosts/maya/api/setdress.py @@ -6,10 +6,16 @@ import contextlib import copy import six -from bson.objectid import ObjectId from maya import cmds +from openpype.client import ( + get_version_by_name, + get_last_version_by_subset_id, + get_representation_by_id, + get_representation_by_name, + get_representation_parents, +) from openpype.pipeline import ( schema, legacy_io, @@ -283,36 +289,32 @@ def update_package_version(container, version): """ # Versioning (from `core.maya.pipeline`) - current_representation = legacy_io.find_one({ - "_id": ObjectId(container["representation"]) - }) + project_name = legacy_io.active_project() + current_representation = get_representation_by_id( + project_name, container["representation"] + ) assert current_representation is not None, "This is a bug" - version_, subset, asset, project = legacy_io.parenthood( - current_representation + version_doc, subset_doc, asset_doc, project_doc = ( + get_representation_parents(project_name, current_representation) ) if version == -1: - new_version = legacy_io.find_one({ - "type": "version", - "parent": subset["_id"] - }, sort=[("name", -1)]) + new_version = get_last_version_by_subset_id( + project_name, 
subset_doc["_id"] + ) else: - new_version = legacy_io.find_one({ - "type": "version", - "parent": subset["_id"], - "name": version, - }) + new_version = get_version_by_name( + project_name, version, subset_doc["_id"] + ) assert new_version is not None, "This is a bug" # Get the new representation (new file) - new_representation = legacy_io.find_one({ - "type": "representation", - "parent": new_version["_id"], - "name": current_representation["name"] - }) + new_representation = get_representation_by_name( + project_name, current_representation["name"], new_version["_id"] + ) update_package(container, new_representation) @@ -330,10 +332,10 @@ def update_package(set_container, representation): """ # Load the original package data - current_representation = legacy_io.find_one({ - "_id": ObjectId(set_container['representation']), - "type": "representation" - }) + project_name = legacy_io.active_project() + current_representation = get_representation_by_id( + project_name, set_container["representation"] + ) current_file = get_representation_path(current_representation) assert current_file.endswith(".json") @@ -380,6 +382,7 @@ def update_scene(set_container, containers, current_data, new_data, new_file): from openpype.hosts.maya.lib import DEFAULT_MATRIX, get_container_transforms set_namespace = set_container['namespace'] + project_name = legacy_io.active_project() # Update the setdress hierarchy alembic set_root = get_container_transforms(set_container, root=True) @@ -481,12 +484,12 @@ def update_scene(set_container, containers, current_data, new_data, new_file): # Check whether the conversion can be done by the Loader. # They *must* use the same asset, subset and Loader for # `update_container` to make sense. - old = legacy_io.find_one({ - "_id": ObjectId(representation_current) - }) - new = legacy_io.find_one({ - "_id": ObjectId(representation_new) - }) + old = get_representation_by_id( + project_name, representation_current + ) + new = get_representation_by_id( + project_name, representation_new + ) is_valid = compare_representations(old=old, new=new) if not is_valid: log.error("Skipping: %s. See log for details.", diff --git a/openpype/hosts/maya/api/shader_definition_editor.py b/openpype/hosts/maya/api/shader_definition_editor.py index 911db48ac2..6ea5e1a127 100644 --- a/openpype/hosts/maya/api/shader_definition_editor.py +++ b/openpype/hosts/maya/api/shader_definition_editor.py @@ -6,7 +6,7 @@ Shader names are stored as simple text file over GridFS in mongodb. 
""" import os from Qt import QtWidgets, QtCore, QtGui -from openpype.lib.mongo import OpenPypeMongoConnection +from openpype.client.mongo import OpenPypeMongoConnection from openpype import resources import gridfs diff --git a/openpype/hosts/maya/api/workfile_template_builder.py b/openpype/hosts/maya/api/workfile_template_builder.py new file mode 100644 index 0000000000..ef043ed0f4 --- /dev/null +++ b/openpype/hosts/maya/api/workfile_template_builder.py @@ -0,0 +1,330 @@ +import json + +from maya import cmds + +from openpype.pipeline import registered_host +from openpype.pipeline.workfile.workfile_template_builder import ( + TemplateAlreadyImported, + AbstractTemplateBuilder, + PlaceholderPlugin, + LoadPlaceholderItem, + PlaceholderLoadMixin, +) +from openpype.tools.workfile_template_build import ( + WorkfileBuildPlaceholderDialog, +) + +from .lib import read, imprint + +PLACEHOLDER_SET = "PLACEHOLDERS_SET" + + +class MayaTemplateBuilder(AbstractTemplateBuilder): + """Concrete implementation of AbstractTemplateBuilder for maya""" + + def import_template(self, path): + """Import template into current scene. + Block if a template is already loaded. + + Args: + path (str): A path to current template (usually given by + get_template_path implementation) + + Returns: + bool: Wether the template was succesfully imported or not + """ + + if cmds.objExists(PLACEHOLDER_SET): + raise TemplateAlreadyImported(( + "Build template already loaded\n" + "Clean scene if needed (File > New Scene)" + )) + + cmds.sets(name=PLACEHOLDER_SET, empty=True) + cmds.file(path, i=True, returnNewNodes=True) + + cmds.setAttr(PLACEHOLDER_SET + ".hiddenInOutliner", True) + + return True + + +class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin): + identifier = "maya.load" + label = "Maya load" + + def _collect_scene_placeholders(self): + # Cache placeholder data to shared data + placeholder_nodes = self.builder.get_shared_populate_data( + "placeholder_nodes" + ) + if placeholder_nodes is None: + attributes = cmds.ls("*.plugin_identifier", long=True) + placeholder_nodes = {} + for attribute in attributes: + node_name = attribute.rpartition(".")[0] + placeholder_nodes[node_name] = ( + self._parse_placeholder_node_data(node_name) + ) + + self.builder.set_shared_populate_data( + "placeholder_nodes", placeholder_nodes + ) + return placeholder_nodes + + def _parse_placeholder_node_data(self, node_name): + placeholder_data = read(node_name) + parent_name = ( + cmds.getAttr(node_name + ".parent", asString=True) + or node_name.rpartition("|")[0] + or "" + ) + if parent_name: + siblings = cmds.listRelatives(parent_name, children=True) + else: + siblings = cmds.ls(assemblies=True) + node_shortname = node_name.rpartition("|")[2] + current_index = cmds.getAttr(node_name + ".index", asString=True) + if current_index < 0: + current_index = siblings.index(node_shortname) + + placeholder_data.update({ + "parent": parent_name, + "index": current_index + }) + return placeholder_data + + def _create_placeholder_name(self, placeholder_data): + placeholder_name_parts = placeholder_data["builder_type"].split("_") + + pos = 1 + # add famlily in any + placeholder_family = placeholder_data["family"] + if placeholder_family: + placeholder_name_parts.insert(pos, placeholder_family) + pos += 1 + + # add loader arguments if any + loader_args = placeholder_data["loader_args"] + if loader_args: + loader_args = json.loads(loader_args.replace('\'', '\"')) + values = [v for v in loader_args.values()] + for value in values: + 
+                placeholder_name_parts.insert(pos, value)
+                pos += 1
+
+        placeholder_name = "_".join(placeholder_name_parts)
+
+        return placeholder_name.capitalize()
+
+    def _get_loaded_repre_ids(self):
+        loaded_representation_ids = self.builder.get_shared_populate_data(
+            "loaded_representation_ids"
+        )
+        if loaded_representation_ids is None:
+            try:
+                containers = cmds.sets("AVALON_CONTAINERS", q=True)
+            except ValueError:
+                containers = []
+
+            loaded_representation_ids = {
+                cmds.getAttr(container + ".representation")
+                for container in containers
+            }
+            self.builder.set_shared_populate_data(
+                "loaded_representation_ids", loaded_representation_ids
+            )
+        return loaded_representation_ids
+
+    def create_placeholder(self, placeholder_data):
+        selection = cmds.ls(selection=True)
+        if not selection:
+            raise ValueError("Nothing is selected")
+        if len(selection) > 1:
+            raise ValueError("More than one item is selected")
+
+        placeholder_data["plugin_identifier"] = self.identifier
+
+        placeholder_name = self._create_placeholder_name(placeholder_data)
+
+        placeholder = cmds.spaceLocator(name=placeholder_name)[0]
+        # TODO: this can crash if selection can't be used
+        cmds.parent(placeholder, selection[0])
+
+        # get the long name of the placeholder (with the groups)
+        placeholder_full_name = (
+            cmds.ls(selection[0], long=True)[0]
+            + "|"
+            + placeholder.replace("|", "")
+        )
+
+        imprint(placeholder_full_name, placeholder_data)
+
+        # Add helper attributes to keep placeholder info
+        cmds.addAttr(
+            placeholder_full_name,
+            longName="parent",
+            hidden=True,
+            dataType="string"
+        )
+        cmds.addAttr(
+            placeholder_full_name,
+            longName="index",
+            hidden=True,
+            attributeType="short",
+            defaultValue=-1
+        )
+
+        cmds.setAttr(placeholder_full_name + ".parent", "", type="string")
+
+    def update_placeholder(self, placeholder_item, placeholder_data):
+        node_name = placeholder_item.scene_identifier
+        new_values = {}
+        for key, value in placeholder_data.items():
+            placeholder_value = placeholder_item.data.get(key)
+            if value != placeholder_value:
+                new_values[key] = value
+                placeholder_item.data[key] = value
+
+        for key in new_values.keys():
+            cmds.deleteAttr(node_name + "." + key)
+
+        imprint(node_name, new_values)
+
+    def collect_placeholders(self):
+        output = []
+        scene_placeholders = self._collect_scene_placeholders()
+        for node_name, placeholder_data in scene_placeholders.items():
+            if placeholder_data.get("plugin_identifier") != self.identifier:
+                continue
+
+            # TODO: do data validations and maybe upgrades if they are invalid
+            output.append(
+                LoadPlaceholderItem(node_name, placeholder_data, self)
+            )
+
+        return output
+
+    def populate_placeholder(self, placeholder):
+        self.populate_load_placeholder(placeholder)
+
+    def repopulate_placeholder(self, placeholder):
+        repre_ids = self._get_loaded_repre_ids()
+        self.populate_load_placeholder(placeholder, repre_ids)
+
+    def get_placeholder_options(self, options=None):
+        return self.get_load_plugin_options(options)
+
+    def cleanup_placeholder(self, placeholder, failed):
+        """Hide the placeholder, parent it to the root,
+        add it to the placeholder set and register the placeholder's parent
+        to keep the placeholder info available for future use
+        """
+
+        node = placeholder._scene_identifier
+        node_parent = placeholder.data["parent"]
+        if node_parent:
+            cmds.setAttr(node + ".parent", node_parent, type="string")
+
+        if cmds.getAttr(node + ".index") < 0:
+            cmds.setAttr(node + ".index", placeholder.data["index"])
+
+        holding_sets = cmds.listSets(object=node)
+        if holding_sets:
+            for holding_set in holding_sets:
+                cmds.sets(node, remove=holding_set)
+
+        if cmds.listRelatives(node, p=True):
+            node = cmds.parent(node, world=True)[0]
+        cmds.sets(node, addElement=PLACEHOLDER_SET)
+        cmds.hide(node)
+        cmds.setAttr(node + ".hiddenInOutliner", True)
+
+    def load_succeed(self, placeholder, container):
+        self._parent_in_hierarchy(placeholder, container)
+
+    def _parent_in_hierarchy(self, placeholder, container):
+        """Parent loaded container to placeholder's parent.
+ + ie : Set loaded content as placeholder's sibling + + Args: + container (str): Placeholder loaded containers + """ + + if not container: + return + + roots = cmds.sets(container, q=True) + nodes_to_parent = [] + for root in roots: + if root.endswith("_RN"): + refRoot = cmds.referenceQuery(root, n=True)[0] + refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot] + nodes_to_parent.extend(refRoot) + elif root not in cmds.listSets(allSets=True): + nodes_to_parent.append(root) + + elif not cmds.sets(root, q=True): + return + + if placeholder.data["parent"]: + cmds.parent(nodes_to_parent, placeholder.data["parent"]) + # Move loaded nodes to correct index in outliner hierarchy + placeholder_form = cmds.xform( + placeholder.scene_identifier, + q=True, + matrix=True, + worldSpace=True + ) + for node in set(nodes_to_parent): + cmds.reorder(node, front=True) + cmds.reorder(node, relative=placeholder.data["index"]) + cmds.xform(node, matrix=placeholder_form, ws=True) + + holding_sets = cmds.listSets(object=placeholder.scene_identifier) + if not holding_sets: + return + for holding_set in holding_sets: + cmds.sets(roots, forceElement=holding_set) + + +def build_workfile_template(*args): + builder = MayaTemplateBuilder(registered_host()) + builder.build_template() + + +def update_workfile_template(*args): + builder = MayaTemplateBuilder(registered_host()) + builder.rebuild_template() + + +def create_placeholder(*args): + host = registered_host() + builder = MayaTemplateBuilder(host) + window = WorkfileBuildPlaceholderDialog(host, builder) + window.exec_() + + +def update_placeholder(*args): + host = registered_host() + builder = MayaTemplateBuilder(host) + placeholder_items_by_id = { + placeholder_item.scene_identifier: placeholder_item + for placeholder_item in builder.get_placeholders() + } + placeholder_items = [] + for node_name in cmds.ls(selection=True, long=True): + if node_name in placeholder_items_by_id: + placeholder_items.append(placeholder_items_by_id[node_name]) + + # TODO show UI at least + if len(placeholder_items) == 0: + raise ValueError("No node selected") + + if len(placeholder_items) > 1: + raise ValueError("Too many selected nodes") + + placeholder_item = placeholder_items[0] + window = WorkfileBuildPlaceholderDialog(host, builder) + window.set_update_mode(placeholder_item) + window.exec_() diff --git a/openpype/hosts/maya/api/workio.py b/openpype/hosts/maya/api/workio.py index fd4961c4bf..8c31974c73 100644 --- a/openpype/hosts/maya/api/workio.py +++ b/openpype/hosts/maya/api/workio.py @@ -2,11 +2,9 @@ import os from maya import cmds -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS - def file_extensions(): - return HOST_WORKFILE_EXTENSIONS["maya"] + return [".ma", ".mb"] def has_unsaved_changes(): diff --git a/openpype/hosts/maya/hooks/pre_copy_mel.py b/openpype/hosts/maya/hooks/pre_copy_mel.py index b11e18241e..6f90af4b7c 100644 --- a/openpype/hosts/maya/hooks/pre_copy_mel.py +++ b/openpype/hosts/maya/hooks/pre_copy_mel.py @@ -1,5 +1,5 @@ from openpype.lib import PreLaunchHook -from openpype.hosts.maya.lib import copy_workspace_mel +from openpype.hosts.maya.lib import create_workspace_mel class PreCopyMel(PreLaunchHook): @@ -10,9 +10,10 @@ class PreCopyMel(PreLaunchHook): app_groups = ["maya"] def execute(self): + project_name = self.launch_context.env.get("AVALON_PROJECT") workdir = self.launch_context.env.get("AVALON_WORKDIR") if not workdir: self.log.warning("BUG: Workdir is not filled.") return - copy_workspace_mel(workdir) + create_workspace_mel(workdir, 
project_name) diff --git a/openpype/hosts/maya/lib.py b/openpype/hosts/maya/lib.py index 6c142053e6..ffb2f0b27c 100644 --- a/openpype/hosts/maya/lib.py +++ b/openpype/hosts/maya/lib.py @@ -1,26 +1,24 @@ import os -import shutil +from openpype.settings import get_project_settings +from openpype.lib import Logger -def copy_workspace_mel(workdir): - # Check that source mel exists - current_dir = os.path.dirname(os.path.abspath(__file__)) - src_filepath = os.path.join(current_dir, "resources", "workspace.mel") - if not os.path.exists(src_filepath): - print("Source mel file does not exist. {}".format(src_filepath)) - return - - # Skip if workspace.mel already exists +def create_workspace_mel(workdir, project_name): dst_filepath = os.path.join(workdir, "workspace.mel") if os.path.exists(dst_filepath): return - # Create workdir if does not exists yet if not os.path.exists(workdir): os.makedirs(workdir) - # Copy file - print("Copying workspace mel \"{}\" -> \"{}\"".format( - src_filepath, dst_filepath - )) - shutil.copy(src_filepath, dst_filepath) + project_setting = get_project_settings(project_name) + mel_script = project_setting["maya"].get("mel_workspace") + + # Skip if mel script in settings is empty + if not mel_script: + log = Logger.get_logger("create_workspace_mel") + log.debug("File 'workspace.mel' not created. Settings value is empty.") + return + + with open(dst_filepath, "w") as mel_file: + mel_file.write(mel_script) diff --git a/openpype/hosts/maya/plugins/create/create_animation.py b/openpype/hosts/maya/plugins/create/create_animation.py index 11a668cfc8..5ef5f61ab1 100644 --- a/openpype/hosts/maya/plugins/create/create_animation.py +++ b/openpype/hosts/maya/plugins/create/create_animation.py @@ -11,6 +11,8 @@ class CreateAnimation(plugin.Creator): label = "Animation" family = "animation" icon = "male" + write_color_sets = False + write_face_sets = False def __init__(self, *args, **kwargs): super(CreateAnimation, self).__init__(*args, **kwargs) @@ -22,8 +24,8 @@ class CreateAnimation(plugin.Creator): self.data[key] = value # Write vertex colors with the geometry. - self.data["writeColorSets"] = False - self.data["writeFaceSets"] = False + self.data["writeColorSets"] = self.write_color_sets + self.data["writeFaceSets"] = self.write_face_sets # Include only renderable visible shapes. # Skips locators and empty transforms @@ -38,3 +40,7 @@ class CreateAnimation(plugin.Creator): # Default to exporting world-space self.data["worldSpace"] = True + + # Default to not send to farm. 
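+        # "farm" toggles submitting this instance to the render farm on
+        # publish instead of extracting it locally; "priority" is presumably
+        # the job priority used for that farm submission.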
+ self.data["farm"] = False + self.data["priority"] = 50 diff --git a/openpype/hosts/maya/plugins/create/create_model.py b/openpype/hosts/maya/plugins/create/create_model.py index 37faad23a0..520e962f74 100644 --- a/openpype/hosts/maya/plugins/create/create_model.py +++ b/openpype/hosts/maya/plugins/create/create_model.py @@ -9,13 +9,14 @@ class CreateModel(plugin.Creator): family = "model" icon = "cube" defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"] - + write_color_sets = False + write_face_sets = False def __init__(self, *args, **kwargs): super(CreateModel, self).__init__(*args, **kwargs) # Vertex colors with the geometry - self.data["writeColorSets"] = False - self.data["writeFaceSets"] = False + self.data["writeColorSets"] = self.write_color_sets + self.data["writeFaceSets"] = self.write_face_sets # Include attributes by attribute name or prefix self.data["attr"] = "" diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_look.py b/openpype/hosts/maya/plugins/create/create_multiverse_look.py new file mode 100644 index 0000000000..f47c88a93b --- /dev/null +++ b/openpype/hosts/maya/plugins/create/create_multiverse_look.py @@ -0,0 +1,15 @@ +from openpype.hosts.maya.api import plugin + + +class CreateMultiverseLook(plugin.Creator): + """Create Multiverse Look""" + + name = "mvLook" + label = "Multiverse Look" + family = "mvLook" + icon = "cubes" + + def __init__(self, *args, **kwargs): + super(CreateMultiverseLook, self).__init__(*args, **kwargs) + self.data["fileFormat"] = ["usda", "usd"] + self.data["publishMipMap"] = True diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py index b2266e5a57..5290d5143f 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py @@ -2,11 +2,11 @@ from openpype.hosts.maya.api import plugin, lib class CreateMultiverseUsd(plugin.Creator): - """Multiverse USD data""" + """Create Multiverse USD Asset""" - name = "usdMain" - label = "Multiverse USD" - family = "usd" + name = "mvUsdMain" + label = "Multiverse USD Asset" + family = "mvUsd" icon = "cubes" def __init__(self, *args, **kwargs): @@ -15,7 +15,8 @@ class CreateMultiverseUsd(plugin.Creator): # Add animation data first, since it maintains order. 
self.data.update(lib.collect_animation_data(True)) - self.data["stripNamespaces"] = False + self.data["fileFormat"] = ["usd", "usda", "usdz"] + self.data["stripNamespaces"] = True self.data["mergeTransformAndShape"] = False self.data["writeAncestors"] = True self.data["flattenParentXforms"] = False @@ -36,15 +37,16 @@ class CreateMultiverseUsd(plugin.Creator): self.data["writeUVs"] = True self.data["writeColorSets"] = False self.data["writeTangents"] = False - self.data["writeRefPositions"] = False + self.data["writeRefPositions"] = True self.data["writeBlendShapes"] = False - self.data["writeDisplayColor"] = False + self.data["writeDisplayColor"] = True self.data["writeSkinWeights"] = False self.data["writeMaterialAssignment"] = False self.data["writeHardwareShader"] = False self.data["writeShadingNetworks"] = False self.data["writeTransformMatrix"] = True - self.data["writeUsdAttributes"] = False + self.data["writeUsdAttributes"] = True + self.data["writeInstancesAsReferences"] = False self.data["timeVaryingTopology"] = False self.data["customMaterialNamespace"] = '' self.data["numTimeSamples"] = 1 diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py index 77b808c459..ed466a8068 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py @@ -4,9 +4,9 @@ from openpype.hosts.maya.api import plugin, lib class CreateMultiverseUsdComp(plugin.Creator): """Create Multiverse USD Composition""" - name = "usdCompositionMain" + name = "mvUsdCompositionMain" label = "Multiverse USD Composition" - family = "usdComposition" + family = "mvUsdComposition" icon = "cubes" def __init__(self, *args, **kwargs): @@ -15,9 +15,12 @@ class CreateMultiverseUsdComp(plugin.Creator): # Add animation data first, since it maintains order. self.data.update(lib.collect_animation_data(True)) + # Order of `fileFormat` must match extract_multiverse_usd_comp.py + self.data["fileFormat"] = ["usda", "usd"] self.data["stripNamespaces"] = False self.data["mergeTransformAndShape"] = False self.data["flattenContent"] = False + self.data["writeAsCompoundLayers"] = False self.data["writePendingOverrides"] = False self.data["numTimeSamples"] = 1 self.data["timeSamplesSpan"] = 0.0 diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py index bb82ab2039..06e22df295 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py @@ -2,11 +2,11 @@ from openpype.hosts.maya.api import plugin, lib class CreateMultiverseUsdOver(plugin.Creator): - """Multiverse USD data""" + """Create Multiverse USD Override""" - name = "usdOverrideMain" + name = "mvUsdOverrideMain" label = "Multiverse USD Override" - family = "usdOverride" + family = "mvUsdOverride" icon = "cubes" def __init__(self, *args, **kwargs): @@ -15,6 +15,8 @@ class CreateMultiverseUsdOver(plugin.Creator): # Add animation data first, since it maintains order. 
self.data.update(lib.collect_animation_data(True)) + # Order of `fileFormat` must match extract_multiverse_usd_over.py + self.data["fileFormat"] = ["usda", "usd"] self.data["writeAll"] = False self.data["writeTransforms"] = True self.data["writeVisibility"] = True diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py index ede152f1ef..ab8fe12079 100644 --- a/openpype/hosts/maya/plugins/create/create_pointcache.py +++ b/openpype/hosts/maya/plugins/create/create_pointcache.py @@ -11,6 +11,8 @@ class CreatePointCache(plugin.Creator): label = "Point Cache" family = "pointcache" icon = "gears" + write_color_sets = False + write_face_sets = False def __init__(self, *args, **kwargs): super(CreatePointCache, self).__init__(*args, **kwargs) @@ -18,8 +20,10 @@ class CreatePointCache(plugin.Creator): # Add animation data self.data.update(lib.collect_animation_data()) - self.data["writeColorSets"] = False # Vertex colors with the geometry. - self.data["writeFaceSets"] = False # Vertex colors with the geometry. + # Vertex colors with the geometry. + self.data["writeColorSets"] = self.write_color_sets + # Vertex colors with the geometry. + self.data["writeFaceSets"] = self.write_face_sets self.data["renderableOnly"] = False # Only renderable visible shapes self.data["visibleOnly"] = False # only nodes that are visible self.data["includeParentHierarchy"] = False # Include parent groups @@ -28,3 +32,7 @@ class CreatePointCache(plugin.Creator): # Add options for custom attributes self.data["attr"] = "" self.data["attrPrefix"] = "" + + # Default to not send to farm. + self.data["farm"] = False + self.data["priority"] = 50 diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 93ee6679e5..a3e1272652 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -1,26 +1,25 @@ # -*- coding: utf-8 -*- """Create ``Render`` instance in Maya.""" -import os import json +import os + import appdirs import requests from maya import cmds -import maya.app.renderSetup.model.renderSetup as renderSetup +from maya.app.renderSetup.model import renderSetup -from openpype.hosts.maya.api import ( - lib, - plugin -) -from openpype.lib import requests_get -from openpype.api import ( +from openpype.settings import ( get_system_settings, get_project_settings, - get_asset) +) +from openpype.lib import requests_get from openpype.modules import ModulesManager -from openpype.pipeline import ( - CreatorError, - legacy_io, +from openpype.pipeline import legacy_io +from openpype.hosts.maya.api import ( + lib, + lib_rendersettings, + plugin ) @@ -64,40 +63,10 @@ class CreateRender(plugin.Creator): label = "Render" family = "rendering" icon = "eye" - _token = None _user = None _password = None - # renderSetup instance - _rs = None - - _image_prefix_nodes = { - 'mentalray': 'defaultRenderGlobals.imageFilePrefix', - 'vray': 'vraySettings.fileNamePrefix', - 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'rmanGlobals.imageFileFormat', - 'redshift': 'defaultRenderGlobals.imageFilePrefix', - 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', - } - - _image_prefixes = { - 'mentalray': 'maya///{aov_separator}', # noqa - 'vray': 'maya///', - 'arnold': 'maya///{aov_separator}', # noqa - # this needs `imageOutputDir` - # (/renders/maya/) set separately - 'renderman': '_..', - 'redshift': 'maya///', # noqa - 'mayahardware2': 
'maya///', # noqa - } - - _aov_chars = { - "dot": ".", - "dash": "-", - "underscore": "_" - } - _project_settings = None def __init__(self, *args, **kwargs): @@ -109,18 +78,8 @@ class CreateRender(plugin.Creator): return self._project_settings = get_project_settings( legacy_io.Session["AVALON_PROJECT"]) - - # project_settings/maya/create/CreateRender/aov_separator - try: - self.aov_separator = self._aov_chars[( - self._project_settings["maya"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - self.aov_separator = "_" - + if self._project_settings["maya"]["RenderSettings"]["apply_render_settings"]: # noqa + lib_rendersettings.RenderSettings().set_default_renderer_settings() manager = ModulesManager() self.deadline_module = manager.modules_by_name["deadline"] try: @@ -177,13 +136,13 @@ class CreateRender(plugin.Creator): ]) cmds.setAttr("{}.machineList".format(self.instance), lock=True) - self._rs = renderSetup.instance() - layers = self._rs.getRenderLayers() + rs = renderSetup.instance() + layers = rs.getRenderLayers() if use_selection: - print(">>> processing existing layers") + self.log.info("Processing existing layers") sets = [] for layer in layers: - print(" - creating set for {}:{}".format( + self.log.info(" - creating set for {}:{}".format( namespace, layer.name())) render_set = cmds.sets( n="{}:{}".format(namespace, layer.name())) @@ -193,17 +152,10 @@ class CreateRender(plugin.Creator): # if no render layers are present, create default one with # asterisk selector if not layers: - render_layer = self._rs.createRenderLayer('Main') + render_layer = rs.createRenderLayer('Main') collection = render_layer.createCollection("defaultCollection") collection.getSelector().setPattern('*') - renderer = cmds.getAttr( - 'defaultRenderGlobals.currentRenderer').lower() - # handle various renderman names - if renderer.startswith('renderman'): - renderer = 'renderman' - - self._set_default_renderer_settings(renderer) return self.instance def _deadline_webservice_changed(self): @@ -237,7 +189,7 @@ class CreateRender(plugin.Creator): def _create_render_settings(self): """Create instance settings.""" - # get pools + # get pools (slave machines of the render farm) pool_names = [] default_priority = 50 @@ -259,6 +211,12 @@ class CreateRender(plugin.Creator): self.data["tilesY"] = 2 self.data["convertToScanline"] = False self.data["useReferencedAovs"] = False + self.data["renderSetupIncludeLights"] = ( + self._project_settings.get( + "maya", {}).get( + "RenderSettings", {}).get( + "enable_all_lights", False) + ) # Disable for now as this feature is not working yet # self.data["assScene"] = False @@ -281,7 +239,8 @@ class CreateRender(plugin.Creator): # if 'default' server is not between selected, # use first one for initial list of pools. deadline_url = next(iter(self.deadline_servers.values())) - + # Uses function to get pool machines from the assigned deadline + # url in settings pool_names = self.deadline_module.get_deadline_pools(deadline_url, self.log) maya_submit_dl = self._project_settings.get( @@ -400,102 +359,36 @@ class CreateRender(plugin.Creator): self.log.error("Cannot show login form to Muster") raise Exception("Cannot show login form to Muster") - def _set_default_renderer_settings(self, renderer): - """Set basic settings based on renderer. + def _requests_post(self, *args, **kwargs): + """Wrap request post method. - Args: - renderer (str): Renderer name. + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. 
This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. + + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. """ - prefix = self._image_prefixes[renderer] - prefix = prefix.replace("{aov_separator}", self.aov_separator) - cmds.setAttr(self._image_prefix_nodes[renderer], - prefix, - type="string") + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.post(*args, **kwargs) - asset = get_asset() + def _requests_get(self, *args, **kwargs): + """Wrap request get method. - if renderer == "arnold": - # set format to exr + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. - cmds.setAttr( - "defaultArnoldDriver.ai_translator", "exr", type="string") - self._set_global_output_settings() - # resolution - cmds.setAttr( - "defaultResolution.width", - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "defaultResolution.height", - asset["data"].get("resolutionHeight")) + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. - if renderer == "vray": - self._set_vray_settings(asset) - if renderer == "redshift": - cmds.setAttr("redshiftOptions.imageFormat", 1) - - # resolution - cmds.setAttr( - "defaultResolution.width", - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "defaultResolution.height", - asset["data"].get("resolutionHeight")) - - self._set_global_output_settings() - - if renderer == "renderman": - cmds.setAttr("rmanGlobals.imageOutputDir", - "maya//", type="string") - - def _set_vray_settings(self, asset): - # type: (dict) -> None - """Sets important settings for Vray.""" - settings = cmds.ls(type="VRaySettingsNode") - node = settings[0] if settings else cmds.createNode("VRaySettingsNode") - - # set separator - # set it in vray menu - if cmds.optionMenuGrp("vrayRenderElementSeparator", exists=True, - q=True): - items = cmds.optionMenuGrp( - "vrayRenderElementSeparator", ill=True, query=True) - - separators = [cmds.menuItem(i, label=True, query=True) for i in items] # noqa: E501 - try: - sep_idx = separators.index(self.aov_separator) - except ValueError: - raise CreatorError( - "AOV character {} not in {}".format( - self.aov_separator, separators)) - - cmds.optionMenuGrp( - "vrayRenderElementSeparator", sl=sep_idx + 1, edit=True) - cmds.setAttr( - "{}.fileNameRenderElementSeparator".format(node), - self.aov_separator, - type="string" - ) - # set format to exr - cmds.setAttr( - "{}.imageFormatStr".format(node), "exr", type="string") - - # animType - cmds.setAttr( - "{}.animType".format(node), 1) - - # resolution - cmds.setAttr( - "{}.width".format(node), - asset["data"].get("resolutionWidth")) - cmds.setAttr( - "{}.height".format(node), - asset["data"].get("resolutionHeight")) - - @staticmethod - def _set_global_output_settings(): - # enable animation - cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) - cmds.setAttr("defaultRenderGlobals.animation", 1) - cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) - cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) + """ + if "verify" not in kwargs: + kwargs["verify"] = not 
os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.get(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/create/create_review.py b/openpype/hosts/maya/plugins/create/create_review.py index fbf3399f61..ba51ffa009 100644 --- a/openpype/hosts/maya/plugins/create/create_review.py +++ b/openpype/hosts/maya/plugins/create/create_review.py @@ -15,6 +15,8 @@ class CreateReview(plugin.Creator): keepImages = False isolate = False imagePlane = True + Width = 0 + Height = 0 transparency = [ "preset", "simple", @@ -33,6 +35,8 @@ class CreateReview(plugin.Creator): for key, value in animation_data.items(): data[key] = value + data["review_width"] = self.Width + data["review_height"] = self.Height data["isolate"] = self.isolate data["keepImages"] = self.keepImages data["imagePlane"] = self.imagePlane diff --git a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py b/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py index 4e4417ff34..44cbee0502 100644 --- a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py +++ b/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Creator for Unreal Static Meshes.""" from openpype.hosts.maya.api import plugin, lib -from openpype.api import get_project_settings +from openpype.settings import get_project_settings from openpype.pipeline import legacy_io from maya import cmds # noqa diff --git a/openpype/hosts/maya/plugins/create/create_vrayscene.py b/openpype/hosts/maya/plugins/create/create_vrayscene.py index 45c4b7e443..59d80e6d5b 100644 --- a/openpype/hosts/maya/plugins/create/create_vrayscene.py +++ b/openpype/hosts/maya/plugins/create/create_vrayscene.py @@ -12,7 +12,7 @@ from openpype.hosts.maya.api import ( lib, plugin ) -from openpype.api import ( +from openpype.settings import ( get_system_settings, get_project_settings ) diff --git a/openpype/hosts/maya/plugins/create/create_yeti_cache.py b/openpype/hosts/maya/plugins/create/create_yeti_cache.py index 86e13b95b2..e8c3203f21 100644 --- a/openpype/hosts/maya/plugins/create/create_yeti_cache.py +++ b/openpype/hosts/maya/plugins/create/create_yeti_cache.py @@ -22,7 +22,8 @@ class CreateYetiCache(plugin.Creator): # Add animation data without step and handles anim_data = lib.collect_animation_data() anim_data.pop("step") - anim_data.pop("handles") + anim_data.pop("handleStart") + anim_data.pop("handleEnd") self.data.update(anim_data) # Add samples diff --git a/openpype/hosts/maya/plugins/inventory/import_modelrender.py b/openpype/hosts/maya/plugins/inventory/import_modelrender.py index a5367f16e5..8a7390bc8d 100644 --- a/openpype/hosts/maya/plugins/inventory/import_modelrender.py +++ b/openpype/hosts/maya/plugins/inventory/import_modelrender.py @@ -1,6 +1,10 @@ +import re import json -from bson.objectid import ObjectId +from openpype.client import ( + get_representation_by_id, + get_representations +) from openpype.pipeline import ( InventoryAction, get_representation_context, @@ -31,6 +35,7 @@ class ImportModelRender(InventoryAction): def process(self, containers): from maya import cmds + project_name = legacy_io.active_project() for container in containers: con_name = container["objectName"] nodes = [] @@ -40,9 +45,9 @@ class ImportModelRender(InventoryAction): else: nodes.append(n) - repr_doc = legacy_io.find_one({ - "_id": ObjectId(container["representation"]), - }) + repr_doc = get_representation_by_id( + project_name, container["representation"], fields=["parent"] + ) version_id = repr_doc["parent"] 
print("Importing render sets for model %r" % con_name) @@ -63,26 +68,38 @@ class ImportModelRender(InventoryAction): from maya import cmds + project_name = legacy_io.active_project() + repre_docs = get_representations( + project_name, version_ids=[version_id], fields=["_id", "name"] + ) # Get representations of shader file and relationships - look_repr = legacy_io.find_one({ - "type": "representation", - "parent": version_id, - "name": {"$regex": self.scene_type_regex}, - }) - if not look_repr: + json_repre = None + look_repres = [] + scene_type_regex = re.compile(self.scene_type_regex) + for repre_doc in repre_docs: + repre_name = repre_doc["name"] + if repre_name == self.look_data_type: + json_repre = repre_doc + continue + + if scene_type_regex.fullmatch(repre_name): + look_repres.append(repre_doc) + + # QUESTION should we care if there is more then one look + # representation? (since it's based on regex match) + look_repre = None + if look_repres: + look_repre = look_repres[0] + + # QUESTION shouldn't be json representation validated too? + if not look_repre: print("No model render sets for this model version..") return - json_repr = legacy_io.find_one({ - "type": "representation", - "parent": version_id, - "name": self.look_data_type, - }) - - context = get_representation_context(look_repr["_id"]) + context = get_representation_context(look_repre["_id"]) maya_file = self.filepath_from_context(context) - context = get_representation_context(json_repr["_id"]) + context = get_representation_context(json_repre["_id"]) json_file = self.filepath_from_context(context) # Import the look file diff --git a/openpype/hosts/maya/plugins/inventory/select_containers.py b/openpype/hosts/maya/plugins/inventory/select_containers.py new file mode 100644 index 0000000000..f85bf17ab0 --- /dev/null +++ b/openpype/hosts/maya/plugins/inventory/select_containers.py @@ -0,0 +1,46 @@ +from maya import cmds + +from openpype.pipeline import InventoryAction, registered_host +from openpype.hosts.maya.api.lib import get_container_members + + +class SelectInScene(InventoryAction): + """Select nodes in the scene from selected containers in scene inventory""" + + label = "Select in scene" + icon = "search" + color = "#888888" + order = 99 + + def process(self, containers): + + all_members = [] + for container in containers: + members = get_container_members(container) + all_members.extend(members) + cmds.select(all_members, replace=True, noExpand=True) + + +class HighlightBySceneSelection(InventoryAction): + """Select containers in scene inventory from the current scene selection""" + + label = "Highlight by scene selection" + icon = "search" + color = "#888888" + order = 100 + + def process(self, containers): + + selection = set(cmds.ls(selection=True, long=True, objectsOnly=True)) + host = registered_host() + + to_select = [] + for container in host.get_containers(): + members = get_container_members(container) + if any(member in selection for member in members): + to_select.append(container["objectName"]) + + return { + "objectNames": to_select, + "options": {"clear": True} + } diff --git a/openpype/hosts/maya/plugins/load/_load_animation.py b/openpype/hosts/maya/plugins/load/_load_animation.py index 9c37e498ef..b419a730b5 100644 --- a/openpype/hosts/maya/plugins/load/_load_animation.py +++ b/openpype/hosts/maya/plugins/load/_load_animation.py @@ -35,8 +35,9 @@ class AbcLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): # hero_001 (abc) # asset_counter{optional} - - nodes = cmds.file(self.fname, + file_url = 
self.prepare_root_value(self.fname, + context["project"]["name"]) + nodes = cmds.file(file_url, namespace=namespace, sharedReferenceFile=False, groupReference=True, diff --git a/openpype/hosts/maya/plugins/load/actions.py b/openpype/hosts/maya/plugins/load/actions.py index 4b7871a40c..eca1b27f34 100644 --- a/openpype/hosts/maya/plugins/load/actions.py +++ b/openpype/hosts/maya/plugins/load/actions.py @@ -1,7 +1,7 @@ """A module containing generic loader actions that will display in the Loader. """ - +import qargparse from openpype.pipeline import load from openpype.hosts.maya.api.lib import ( maintained_selection, @@ -90,7 +90,7 @@ class ImportMayaLoader(load.LoaderPlugin): so you could also use it as a new base. """ - representations = ["ma", "mb"] + representations = ["ma", "mb", "obj"] families = ["*"] label = "Import" @@ -98,6 +98,15 @@ class ImportMayaLoader(load.LoaderPlugin): icon = "arrow-circle-down" color = "#775555" + options = [ + qargparse.Boolean( + "clean_import", + label="Clean import", + default=False, + help="Should all occurrences of cbId be purged?" + ) + ] + def load(self, context, name=None, namespace=None, data=None): import maya.cmds as cmds @@ -114,13 +123,22 @@ class ImportMayaLoader(load.LoaderPlugin): ) with maintained_selection(): - cmds.file(self.fname, - i=True, - preserveReferences=True, - namespace=namespace, - returnNewNodes=True, - groupReference=True, - groupName="{}:{}".format(namespace, name)) + nodes = cmds.file(self.fname, + i=True, + preserveReferences=True, + namespace=namespace, + returnNewNodes=True, + groupReference=True, + groupName="{}:{}".format(namespace, name)) + + if data.get("clean_import", False): + remove_attributes = ["cbId"] + for node in nodes: + for attr in remove_attributes: + if cmds.attributeQuery(attr, node=node, exists=True): + full_attr = "{}.{}".format(node, attr) + print("Removing {}".format(full_attr)) + cmds.deleteAttr(full_attr) # We do not containerize imported content, it remains unmanaged return diff --git a/openpype/hosts/maya/plugins/load/load_abc_to_standin.py b/openpype/hosts/maya/plugins/load/load_abc_to_standin.py new file mode 100644 index 0000000000..605a492e4d --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_abc_to_standin.py @@ -0,0 +1,132 @@ +import os + +from openpype.pipeline import ( + legacy_io, + load, + get_representation_path +) +from openpype.settings import get_project_settings + + +class AlembicStandinLoader(load.LoaderPlugin): + """Load Alembic as Arnold Standin""" + + families = ["animation", "model", "pointcache"] + representations = ["abc"] + + label = "Import Alembic as Arnold Standin" + order = -5 + icon = "code-fork" + color = "orange" + + def load(self, context, name, namespace, options): + + import maya.cmds as cmds + import mtoa.ui.arnoldmenu + from openpype.hosts.maya.api.pipeline import containerise + from openpype.hosts.maya.api.lib import unique_namespace + + version = context["version"] + version_data = version.get("data", {}) + family = version["data"]["families"] + self.log.info("version_data: {}\n".format(version_data)) + self.log.info("family: {}\n".format(family)) + frameStart = version_data.get("frameStart", None) + + asset = context["asset"]["name"] + namespace = namespace or unique_namespace( + asset + "_", + prefix="_" if asset[0].isdigit() else "", + suffix="_", + ) + + # Root group + label = "{}:{}".format(namespace, name) + root = cmds.group(name=label, empty=True) + + settings = get_project_settings(os.environ['AVALON_PROJECT']) + colors = 
settings["maya"]["load"]["colors"] + fps = legacy_io.Session["AVALON_FPS"] + c = colors.get(family[0]) + if c is not None: + r = (float(c[0]) / 255) + g = (float(c[1]) / 255) + b = (float(c[2]) / 255) + cmds.setAttr(root + ".useOutlinerColor", 1) + cmds.setAttr(root + ".outlinerColor", + r, g, b) + + transform_name = label + "_ABC" + + standinShape = cmds.ls(mtoa.ui.arnoldmenu.createStandIn())[0] + standin = cmds.listRelatives(standinShape, parent=True, + typ="transform") + standin = cmds.rename(standin, transform_name) + standinShape = cmds.listRelatives(standin, children=True)[0] + + cmds.parent(standin, root) + + # Set the standin filepath + cmds.setAttr(standinShape + ".dso", self.fname, type="string") + cmds.setAttr(standinShape + ".abcFPS", float(fps)) + + if frameStart is None: + cmds.setAttr(standinShape + ".useFrameExtension", 0) + + elif "model" in family: + cmds.setAttr(standinShape + ".useFrameExtension", 0) + + else: + cmds.setAttr(standinShape + ".useFrameExtension", 1) + + nodes = [root, standin] + self[:] = nodes + + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__) + + def update(self, container, representation): + + import pymel.core as pm + + path = get_representation_path(representation) + fps = legacy_io.Session["AVALON_FPS"] + # Update the standin + standins = list() + members = pm.sets(container['objectName'], query=True) + self.log.info("container:{}".format(container)) + for member in members: + shape = member.getShape() + if (shape and shape.type() == "aiStandIn"): + standins.append(shape) + + for standin in standins: + standin.dso.set(path) + standin.abcFPS.set(float(fps)) + if "modelMain" in container['objectName']: + standin.useFrameExtension.set(0) + else: + standin.useFrameExtension.set(1) + + container = pm.PyNode(container["objectName"]) + container.representation.set(str(representation["_id"])) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + import maya.cmds as cmds + members = cmds.sets(container['objectName'], query=True) + cmds.lockNode(members, lock=False) + cmds.delete([container['objectName']] + members) + + # Clean up the namespace + try: + cmds.namespace(removeNamespace=container['namespace'], + deleteNamespaceContent=True) + except RuntimeError: + pass diff --git a/openpype/hosts/maya/plugins/load/load_ass.py b/openpype/hosts/maya/plugins/load/load_ass.py index a284b7ec1f..5db6fc3dfa 100644 --- a/openpype/hosts/maya/plugins/load/load_ass.py +++ b/openpype/hosts/maya/plugins/load/load_ass.py @@ -1,7 +1,7 @@ import os import clique -from openpype.api import get_project_settings +from openpype.settings import get_project_settings from openpype.pipeline import ( load, get_representation_path @@ -64,9 +64,12 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): path = os.path.join(publish_folder, filename) proxyPath = proxyPath_base + ".ma" - self.log.info - nodes = cmds.file(proxyPath, + project_name = context["project"]["name"] + file_url = self.prepare_root_value(proxyPath, + project_name) + + nodes = cmds.file(file_url, namespace=namespace, reference=True, returnNewNodes=True, @@ -83,7 +86,7 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): proxyShape.dso.set(path) proxyShape.aiOverrideShaders.set(0) - settings = get_project_settings(os.environ['AVALON_PROJECT']) + settings = get_project_settings(project_name) colors = settings['maya']['load']['colors'] c = 
colors.get(family) @@ -123,7 +126,11 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): assert os.path.exists(proxyPath), "%s does not exist." % proxyPath try: - content = cmds.file(proxyPath, + file_url = self.prepare_root_value(proxyPath, + representation["context"] + ["project"] + ["name"]) + content = cmds.file(file_url, loadReference=reference_node, type="mayaAscii", returnNewNodes=True) diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py index ce814e1299..6f60cb5726 100644 --- a/openpype/hosts/maya/plugins/load/load_audio.py +++ b/openpype/hosts/maya/plugins/load/load_audio.py @@ -1,5 +1,10 @@ from maya import cmds, mel +from openpype.client import ( + get_asset_by_id, + get_subset_by_id, + get_version_by_id, +) from openpype.pipeline import ( legacy_io, load, @@ -65,9 +70,16 @@ class AudioLoader(load.LoaderPlugin): ) # Set frame range. - version = legacy_io.find_one({"_id": representation["parent"]}) - subset = legacy_io.find_one({"_id": version["parent"]}) - asset = legacy_io.find_one({"_id": subset["parent"]}) + project_name = legacy_io.active_project() + version = get_version_by_id( + project_name, representation["parent"], fields=["parent"] + ) + subset = get_subset_by_id( + project_name, version["parent"], fields=["parent"] + ) + asset = get_asset_by_id( + project_name, subset["parent"], fields=["parent"] + ) audio_node.sourceStart.set(1 - asset["data"]["frameStart"]) audio_node.sourceEnd.set(asset["data"]["frameEnd"]) diff --git a/openpype/hosts/maya/plugins/load/load_gpucache.py b/openpype/hosts/maya/plugins/load/load_gpucache.py index 6d5e945508..a09f924c7b 100644 --- a/openpype/hosts/maya/plugins/load/load_gpucache.py +++ b/openpype/hosts/maya/plugins/load/load_gpucache.py @@ -4,13 +4,13 @@ from openpype.pipeline import ( load, get_representation_path ) -from openpype.api import get_project_settings +from openpype.settings import get_project_settings class GpuCacheLoader(load.LoaderPlugin): """Load Alembic as gpuCache""" - families = ["model"] + families = ["model", "animation", "pointcache"] representations = ["abc"] label = "Import Gpu Cache" diff --git a/openpype/hosts/maya/plugins/load/load_image_plane.py b/openpype/hosts/maya/plugins/load/load_image_plane.py index b67c2cb209..b267921bdc 100644 --- a/openpype/hosts/maya/plugins/load/load_image_plane.py +++ b/openpype/hosts/maya/plugins/load/load_image_plane.py @@ -1,5 +1,10 @@ from Qt import QtWidgets, QtCore +from openpype.client import ( + get_asset_by_id, + get_subset_by_id, + get_version_by_id, +) from openpype.pipeline import ( legacy_io, load, @@ -83,7 +88,7 @@ class ImagePlaneLoader(load.LoaderPlugin): families = ["image", "plate", "render"] label = "Load imagePlane" - representations = ["mov", "exr", "preview", "png"] + representations = ["mov", "exr", "preview", "png", "jpg"] icon = "image" color = "orange" @@ -216,9 +221,16 @@ class ImagePlaneLoader(load.LoaderPlugin): ) # Set frame range. 
- version = legacy_io.find_one({"_id": representation["parent"]}) - subset = legacy_io.find_one({"_id": version["parent"]}) - asset = legacy_io.find_one({"_id": subset["parent"]}) + project_name = legacy_io.active_project() + version = get_version_by_id( + project_name, representation["parent"], fields=["parent"] + ) + subset = get_subset_by_id( + project_name, version["parent"], fields=["parent"] + ) + asset = get_asset_by_id( + project_name, subset["parent"], fields=["parent"] + ) start_frame = asset["data"]["frameStart"] end_frame = asset["data"]["frameEnd"] image_plane_shape.frameOffset.set(1 - start_frame) diff --git a/openpype/hosts/maya/plugins/load/load_look.py b/openpype/hosts/maya/plugins/load/load_look.py index 80eac8e0b5..3ef19ad96f 100644 --- a/openpype/hosts/maya/plugins/load/load_look.py +++ b/openpype/hosts/maya/plugins/load/load_look.py @@ -5,6 +5,7 @@ from collections import defaultdict from Qt import QtWidgets +from openpype.client import get_representation_by_name from openpype.pipeline import ( legacy_io, get_representation_path, @@ -31,7 +32,9 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): import maya.cmds as cmds with lib.maintained_selection(): - nodes = cmds.file(self.fname, + file_url = self.prepare_root_value(self.fname, + context["project"]["name"]) + nodes = cmds.file(file_url, namespace=namespace, reference=True, returnNewNodes=True) @@ -73,11 +76,10 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): shader_nodes = cmds.ls(members, type='shadingEngine') nodes = set(self._get_nodes_with_shader(shader_nodes)) - json_representation = legacy_io.find_one({ - "type": "representation", - "parent": representation['parent'], - "name": "json" - }) + project_name = legacy_io.active_project() + json_representation = get_representation_by_name( + project_name, "json", representation["parent"] + ) # Load relationships shader_relation = get_representation_path(json_representation) diff --git a/openpype/hosts/maya/plugins/load/load_multiverse_usd.py b/openpype/hosts/maya/plugins/load/load_multiverse_usd.py index c03f2c5d92..3350dc6ac9 100644 --- a/openpype/hosts/maya/plugins/load/load_multiverse_usd.py +++ b/openpype/hosts/maya/plugins/load/load_multiverse_usd.py @@ -14,13 +14,13 @@ from openpype.hosts.maya.api.pipeline import containerise class MultiverseUsdLoader(load.LoaderPlugin): - """Load the USD by Multiverse""" + """Read USD data in a Multiverse Compound""" - families = ["model", "usd", "usdComposition", "usdOverride", + families = ["model", "mvUsd", "mvUsdComposition", "mvUsdOverride", "pointcache", "animation"] representations = ["usd", "usda", "usdc", "usdz", "abc"] - label = "Read USD by Multiverse" + label = "Load USD to Multiverse" order = -10 icon = "code-fork" color = "orange" diff --git a/openpype/hosts/maya/plugins/load/load_redshift_proxy.py b/openpype/hosts/maya/plugins/load/load_redshift_proxy.py index d93a9f02a2..c288e23ded 100644 --- a/openpype/hosts/maya/plugins/load/load_redshift_proxy.py +++ b/openpype/hosts/maya/plugins/load/load_redshift_proxy.py @@ -5,7 +5,7 @@ import clique import maya.cmds as cmds -from openpype.api import get_project_settings +from openpype.settings import get_project_settings from openpype.pipeline import ( load, get_representation_path diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index d65b5a2c1e..c762a29326 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ 
b/openpype/hosts/maya/plugins/load/load_reference.py @@ -1,11 +1,11 @@ import os from maya import cmds -from openpype.api import get_project_settings -from openpype.lib import get_creator_by_name -from openpype.pipeline import ( - legacy_io, +from openpype.settings import get_project_settings +from openpype.pipeline import legacy_io +from openpype.pipeline.create import ( legacy_create, + get_legacy_creator_by_name, ) import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api.lib import maintained_selection @@ -51,7 +51,9 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): with maintained_selection(): cmds.loadPlugin("AbcImport.mll", quiet=True) - nodes = cmds.file(self.fname, + file_url = self.prepare_root_value(self.fname, + context["project"]["name"]) + nodes = cmds.file(file_url, namespace=namespace, sharedReferenceFile=False, reference=True, @@ -151,7 +153,9 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): self.log.info("Creating subset: {}".format(namespace)) # Create the animation instance - creator_plugin = get_creator_by_name(self.animation_creator_name) + creator_plugin = get_legacy_creator_by_name( + self.animation_creator_name + ) with maintained_selection(): cmds.select([output, controls] + roots, noExpand=True) legacy_create( diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_arnold.py b/openpype/hosts/maya/plugins/load/load_vdb_to_arnold.py new file mode 100644 index 0000000000..8a386cecfd --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_vdb_to_arnold.py @@ -0,0 +1,139 @@ +import os + +from openpype.settings import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) +# TODO aiVolume doesn't automatically set velocity fps correctly, set manual? 
+ + +class LoadVDBtoArnold(load.LoaderPlugin): + """Load OpenVDB for Arnold in aiVolume""" + + families = ["vdbcache"] + representations = ["vdb"] + + label = "Load VDB to Arnold" + icon = "cloud" + color = "orange" + + def load(self, context, name, namespace, data): + + from maya import cmds + from openpype.hosts.maya.api.pipeline import containerise + from openpype.hosts.maya.api.lib import unique_namespace + + try: + family = context["representation"]["context"]["family"] + except ValueError: + family = "vdbcache" + + # Check if the plugin for arnold is available on the pc + try: + cmds.loadPlugin("mtoa", quiet=True) + except Exception as exc: + self.log.error("Encountered exception:\n%s" % exc) + return + + asset = context['asset'] + asset_name = asset["name"] + namespace = namespace or unique_namespace( + asset_name + "_", + prefix="_" if asset_name[0].isdigit() else "", + suffix="_", + ) + + # Root group + label = "{}:{}".format(namespace, name) + root = cmds.group(name=label, empty=True) + + settings = get_project_settings(os.environ['AVALON_PROJECT']) + colors = settings['maya']['load']['colors'] + + c = colors.get(family) + if c is not None: + cmds.setAttr(root + ".useOutlinerColor", 1) + cmds.setAttr(root + ".outlinerColor", + (float(c[0]) / 255), + (float(c[1]) / 255), + (float(c[2]) / 255) + ) + + # Create VRayVolumeGrid + grid_node = cmds.createNode("aiVolume", + name="{}Shape".format(root), + parent=root) + + self._set_path(grid_node, + path=self.fname, + representation=context["representation"]) + + # Lock the shape node so the user can't delete the transform/shape + # as if it was referenced + cmds.lockNode(grid_node, lock=True) + + nodes = [root, grid_node] + self[:] = nodes + + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__) + + def update(self, container, representation): + + from maya import cmds + + path = get_representation_path(representation) + + # Find VRayVolumeGrid + members = cmds.sets(container['objectName'], query=True) + grid_nodes = cmds.ls(members, type="aiVolume", long=True) + assert len(grid_nodes) == 1, "This is a bug" + + # Update the VRayVolumeGrid + self._set_path(grid_nodes[0], path=path, representation=representation) + + # Update container representation + cmds.setAttr(container["objectName"] + ".representation", + str(representation["_id"]), + type="string") + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + + from maya import cmds + + # Get all members of the avalon container, ensure they are unlocked + # and delete everything + members = cmds.sets(container['objectName'], query=True) + cmds.lockNode(members, lock=False) + cmds.delete([container['objectName']] + members) + + # Clean up the namespace + try: + cmds.namespace(removeNamespace=container['namespace'], + deleteNamespaceContent=True) + except RuntimeError: + pass + + @staticmethod + def _set_path(grid_node, + path, + representation): + """Apply the settings for the VDB path to the aiVolume node""" + from maya import cmds + + if not os.path.exists(path): + raise RuntimeError("Path does not exist: %s" % path) + + is_sequence = bool(representation["context"].get("frame")) + cmds.setAttr(grid_node + ".useFrameExtension", is_sequence) + + # Set file path + cmds.setAttr(grid_node + ".filename", path, type="string") diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py b/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py 
index 70bd9d22e2..1f02321dc8 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py +++ b/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py @@ -1,11 +1,21 @@ import os -from openpype.api import get_project_settings -from openpype.pipeline import load +from openpype.settings import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) class LoadVDBtoRedShift(load.LoaderPlugin): - """Load OpenVDB in a Redshift Volume Shape""" + """Load OpenVDB in a Redshift Volume Shape + + Note that the RedshiftVolumeShape is created without a RedshiftVolume + shader assigned. To get the Redshift volume to render correctly assign + a RedshiftVolume shader (in the Hypershade) and set the density, scatter + and emission channels to the channel names of the volumes in the VDB file. + + """ families = ["vdbcache"] representations = ["vdb"] @@ -55,7 +65,7 @@ class LoadVDBtoRedShift(load.LoaderPlugin): # Root group label = "{}:{}".format(namespace, name) - root = cmds.group(name=label, empty=True) + root = cmds.createNode("transform", name=label) settings = get_project_settings(os.environ['AVALON_PROJECT']) colors = settings['maya']['load']['colors'] @@ -74,9 +84,9 @@ class LoadVDBtoRedShift(load.LoaderPlugin): name="{}RVSShape".format(label), parent=root) - cmds.setAttr("{}.fileName".format(volume_node), - self.fname, - type="string") + self._set_path(volume_node, + path=self.fname, + representation=context["representation"]) nodes = [root, volume_node] self[:] = nodes @@ -87,3 +97,56 @@ class LoadVDBtoRedShift(load.LoaderPlugin): nodes=nodes, context=context, loader=self.__class__.__name__) + + def update(self, container, representation): + from maya import cmds + + path = get_representation_path(representation) + + # Find VRayVolumeGrid + members = cmds.sets(container['objectName'], query=True) + grid_nodes = cmds.ls(members, type="RedshiftVolumeShape", long=True) + assert len(grid_nodes) == 1, "This is a bug" + + # Update the VRayVolumeGrid + self._set_path(grid_nodes[0], path=path, representation=representation) + + # Update container representation + cmds.setAttr(container["objectName"] + ".representation", + str(representation["_id"]), + type="string") + + def remove(self, container): + from maya import cmds + + # Get all members of the avalon container, ensure they are unlocked + # and delete everything + members = cmds.sets(container['objectName'], query=True) + cmds.lockNode(members, lock=False) + cmds.delete([container['objectName']] + members) + + # Clean up the namespace + try: + cmds.namespace(removeNamespace=container['namespace'], + deleteNamespaceContent=True) + except RuntimeError: + pass + + def switch(self, container, representation): + self.update(container, representation) + + @staticmethod + def _set_path(grid_node, + path, + representation): + """Apply the settings for the VDB path to the RedshiftVolumeShape""" + from maya import cmds + + if not os.path.exists(path): + raise RuntimeError("Path does not exist: %s" % path) + + is_sequence = bool(representation["context"].get("frame")) + cmds.setAttr(grid_node + ".useFrameExtension", is_sequence) + + # Set file path + cmds.setAttr(grid_node + ".fileName", path, type="string") diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py index 3a16264ec0..9267c59c02 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py +++ b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py @@ -1,6 +1,6 @@ import os -from 
openpype.api import get_project_settings +from openpype.settings import get_project_settings from openpype.pipeline import ( load, get_representation_path diff --git a/openpype/hosts/maya/plugins/load/load_vrayproxy.py b/openpype/hosts/maya/plugins/load/load_vrayproxy.py index 22d56139f6..720a132aa7 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayproxy.py +++ b/openpype/hosts/maya/plugins/load/load_vrayproxy.py @@ -7,11 +7,10 @@ loader will use them instead of native vray vrmesh format. """ import os -from bson.objectid import ObjectId - import maya.cmds as cmds -from openpype.api import get_project_settings +from openpype.client import get_representation_by_name +from openpype.settings import get_project_settings from openpype.pipeline import ( legacy_io, load, @@ -185,12 +184,8 @@ class VRayProxyLoader(load.LoaderPlugin): """ self.log.debug( "Looking for abc in published representations of this version.") - abc_rep = legacy_io.find_one({ - "type": "representation", - "parent": ObjectId(version_id), - "name": "abc" - }) - + project_name = legacy_io.active_project() + abc_rep = get_representation_by_name(project_name, "abc", version_id) if abc_rep: self.log.debug("Found, we'll link alembic to vray proxy.") file_name = get_representation_path(abc_rep) diff --git a/openpype/hosts/maya/plugins/load/load_vrayscene.py b/openpype/hosts/maya/plugins/load/load_vrayscene.py index 61132088cc..d87992f9a7 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayscene.py +++ b/openpype/hosts/maya/plugins/load/load_vrayscene.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import os import maya.cmds as cmds # noqa -from openpype.api import get_project_settings +from openpype.settings import get_project_settings from openpype.pipeline import ( load, get_representation_path diff --git a/openpype/hosts/maya/plugins/load/load_yeti_cache.py b/openpype/hosts/maya/plugins/load/load_yeti_cache.py index fb903785ae..5ba381050a 100644 --- a/openpype/hosts/maya/plugins/load/load_yeti_cache.py +++ b/openpype/hosts/maya/plugins/load/load_yeti_cache.py @@ -1,15 +1,13 @@ import os import json import re -import glob from collections import defaultdict -from pprint import pprint +import clique from maya import cmds -from openpype.api import get_project_settings +from openpype.settings import get_project_settings from openpype.pipeline import ( - legacy_io, load, get_representation_path ) @@ -17,7 +15,15 @@ from openpype.hosts.maya.api import lib from openpype.hosts.maya.api.pipeline import containerise +def set_attribute(attr, value, node): + """Wrapper around lib.set_attribute which ignores None values""" + if value is None: + return + lib.set_attribute(attr, value, node) + + class YetiCacheLoader(load.LoaderPlugin): + """Load Yeti Cache with one or more Yeti nodes""" families = ["yeticache", "yetiRig"] representations = ["fur"] @@ -28,6 +34,16 @@ class YetiCacheLoader(load.LoaderPlugin): color = "orange" def load(self, context, name=None, namespace=None, data=None): + """Loads a .fursettings file defining how to load .fur sequences + + A single yeticache or yetiRig can contain more than one pgYetiMaya + node and thus load more than one .fur sequence. + + The .fursettings file defines what the node names should be and also + what "cbId" attribute they should receive to match the original source + and allow published looks to also work for Yeti rigs and their caches.
+ + """ try: family = context["representation"]["context"]["family"] @@ -43,22 +59,11 @@ class YetiCacheLoader(load.LoaderPlugin): if not cmds.pluginInfo("pgYetiMaya", query=True, loaded=True): cmds.loadPlugin("pgYetiMaya", quiet=True) - # Get JSON - fbase = re.search(r'^(.+)\.(\d+|#+)\.fur', self.fname) - if not fbase: - raise RuntimeError('Cannot determine fursettings file path') - settings_fname = "{}.fursettings".format(fbase.group(1)) - with open(settings_fname, "r") as fp: - fursettings = json.load(fp) - - # Check if resources map exists - # Get node name from JSON - if "nodes" not in fursettings: - raise RuntimeError("Encountered invalid data, expect 'nodes' in " - "fursettings.") - - node_data = fursettings["nodes"] - nodes = self.create_nodes(namespace, node_data) + # Create Yeti cache nodes according to settings + settings = self.read_settings(self.fname) + nodes = [] + for node in settings["nodes"]: + nodes.extend(self.create_node(namespace, node)) group_name = "{}:{}".format(namespace, name) group_node = cmds.group(nodes, name=group_name) @@ -68,8 +73,8 @@ class YetiCacheLoader(load.LoaderPlugin): c = colors.get(family) if c is not None: - cmds.setAttr(group_name + ".useOutlinerColor", 1) - cmds.setAttr(group_name + ".outlinerColor", + cmds.setAttr(group_node + ".useOutlinerColor", 1) + cmds.setAttr(group_node + ".outlinerColor", (float(c[0])/255), (float(c[1])/255), (float(c[2])/255) @@ -111,28 +116,14 @@ class YetiCacheLoader(load.LoaderPlugin): def update(self, container, representation): - legacy_io.install() namespace = container["namespace"] container_node = container["objectName"] - fur_settings = legacy_io.find_one( - {"parent": representation["parent"], "name": "fursettings"} - ) - - pprint({"parent": representation["parent"], "name": "fursettings"}) - pprint(fur_settings) - assert fur_settings is not None, ( - "cannot find fursettings representation" - ) - - settings_fname = get_representation_path(fur_settings) path = get_representation_path(representation) - # Get all node data - with open(settings_fname, "r") as fp: - settings = json.load(fp) + settings = self.read_settings(path) # Collect scene information of asset - set_members = cmds.sets(container["objectName"], query=True) + set_members = lib.get_container_members(container) container_root = lib.get_container_transforms(container, members=set_members, root=True) @@ -147,7 +138,7 @@ class YetiCacheLoader(load.LoaderPlugin): # Re-assemble metadata with cbId as keys meta_data_lookup = {n["cbId"]: n for n in settings["nodes"]} - # Compare look ups and get the nodes which ar not relevant any more + # Delete nodes by "cbId" that are not in the updated version to_delete_lookup = {cb_id for cb_id in scene_lookup.keys() if cb_id not in meta_data_lookup} if to_delete_lookup: @@ -163,25 +154,18 @@ class YetiCacheLoader(load.LoaderPlugin): fullPath=True) or [] to_remove.extend(shapes + transforms) - # Remove id from look uop + # Remove id from lookup scene_lookup.pop(_id, None) cmds.delete(to_remove) - # replace frame in filename with %04d - RE_frame = re.compile(r"(\d+)(\.fur)$") - file_name = re.sub(RE_frame, r"%04d\g<2>", os.path.basename(path)) - for cb_id, data in meta_data_lookup.items(): - - # Update cache file name - data["attrs"]["cacheFileName"] = os.path.join( - os.path.dirname(path), file_name) + for cb_id, node_settings in meta_data_lookup.items(): if cb_id not in scene_lookup: - + # Create new nodes self.log.info("Creating new nodes ..") - new_nodes = self.create_nodes(namespace, [data]) + new_nodes = 
self.create_node(namespace, node_settings) + cmds.sets(new_nodes, addElement=container_node) + cmds.parent(new_nodes, container_root) @@ -218,14 +202,8 @@ class YetiCacheLoader(load.LoaderPlugin): children=True) yeti_node = yeti_nodes[0] - for attr, value in data["attrs"].items(): - # handle empty attribute strings. Those are reported - # as None, so their type is NoneType and this is not - # supported on attributes in Maya. We change it to - # empty string. - if value is None: - value = "" - lib.set_attribute(attr, value, yeti_node) + for attr, value in node_settings["attrs"].items(): + set_attribute(attr, value, yeti_node) cmds.setAttr("{}.representation".format(container_node), str(representation["_id"]), @@ -235,7 +213,6 @@ class YetiCacheLoader(load.LoaderPlugin): self.update(container, representation) # helper functions - def create_namespace(self, asset): """Create a unique namespace Args: @@ -253,100 +230,122 @@ class YetiCacheLoader(load.LoaderPlugin): return namespace - def validate_cache(self, filename, pattern="%04d"): - """Check if the cache has more than 1 frame + def get_cache_node_filepath(self, root, node_name): - All caches with more than 1 frame need to be called with `%04d` - If the cache has only one frame we return that file name as we assume + """Get the cache file path for one of the yeti nodes. + All caches with more than 1 frame need the cache file name set with `%04d` + If the cache has only one frame we return the file name as we assume it is a snapshot. + This expects the files to be named after the "node name" through + exports from within Yeti. + Args: - filename(str) - pattern(str) + root(str): Folder containing cache files to search in. + node_name(str): Node name to search cache files for Returns: - str + str: Cache file path value needed for cacheFileName attribute """ - glob_pattern = filename.replace(pattern, "*") + name = node_name.replace(":", "_") + pattern = r"^({name})(\.[0-9]+)?(\.fur)$".format(name=re.escape(name)) - escaped = re.escape(filename) - re_pattern = escaped.replace(pattern, "-?[0-9]+") - - files = glob.glob(glob_pattern) - files = [str(f) for f in files if re.match(re_pattern, f)] + files = [fname for fname in os.listdir(root) if re.match(pattern, + fname)] + if not files: + self.log.error("Could not find cache files for '{}' " + "with pattern {}".format(node_name, pattern)) + return if len(files) == 1: - return files[0] - elif len(files) == 0: - self.log.error("Could not find cache files for '%s'" % filename) + # Single file + return os.path.join(root, files[0]) - return filename + # Get filename for the sequence with padding + collections, remainder = clique.assemble(files) + assert not remainder, "This is a bug" + assert len(collections) == 1, "This is a bug" + collection = collections[0] - def create_nodes(self, namespace, settings): + # Formats name as {head}%d{tail} like cache.%04d.fur + fname = collection.format("{head}{padding}{tail}") + return os.path.join(root, fname) + + def create_node(self, namespace, node_settings): """Create nodes with the correct namespace and settings Args: namespace(str): namespace - settings(list): list of dictionaries + node_settings(dict): Single "nodes" entry from .fursettings file.
Returns: - list + list: Created nodes """ - nodes = [] - for node_settings in settings: - # Create pgYetiMaya node - original_node = node_settings["name"] - node_name = "{}:{}".format(namespace, original_node) - yeti_node = cmds.createNode("pgYetiMaya", name=node_name) + # Get original names and ids + orig_transform_name = node_settings["transform"]["name"] + orig_shape_name = node_settings["name"] - # Create transform node - transform_node = node_name.rstrip("Shape") + # Add namespace + transform_name = "{}:{}".format(namespace, orig_transform_name) + shape_name = "{}:{}".format(namespace, orig_shape_name) - lib.set_id(transform_node, node_settings["transform"]["cbId"]) - lib.set_id(yeti_node, node_settings["cbId"]) + # Create pgYetiMaya node + transform_node = cmds.createNode("transform", + name=transform_name) + yeti_node = cmds.createNode("pgYetiMaya", + name=shape_name, + parent=transform_node) - nodes.extend([transform_node, yeti_node]) + lib.set_id(transform_node, node_settings["transform"]["cbId"]) + lib.set_id(yeti_node, node_settings["cbId"]) - # Ensure the node has no namespace identifiers - attributes = node_settings["attrs"] + nodes.extend([transform_node, yeti_node]) - # Check if cache file name is stored + # Update attributes with defaults + attributes = node_settings["attrs"] + attributes.update({ + "viewportDensity": 0.1, + "verbosity": 2, + "fileMode": 1, - # get number of # in path and convert it to C prinf format - # like %04d expected by Yeti - fbase = re.search(r'^(.+)\.(\d+|#+)\.fur', self.fname) - if not fbase: - raise RuntimeError('Cannot determine file path') - padding = len(fbase.group(2)) - if "cacheFileName" not in attributes: - cache = "{}.%0{}d.fur".format(fbase.group(1), padding) + # Fix render stats, like Yeti's own + # ../scripts/pgYetiNode.mel script + "visibleInReflections": True, + "visibleInRefractions": True + }) - self.validate_cache(cache) - attributes["cacheFileName"] = cache + # Apply attributes to pgYetiMaya node + for attr, value in attributes.items(): + set_attribute(attr, value, yeti_node) - # Update attributes with requirements - attributes.update({"viewportDensity": 0.1, - "verbosity": 2, - "fileMode": 1}) - - # Apply attributes to pgYetiMaya node - for attr, value in attributes.items(): - if value is None: - continue - lib.set_attribute(attr, value, yeti_node) - - # Fix for : YETI-6 - # Fixes the render stats (this is literally taken from Perigrene's - # ../scripts/pgYetiNode.mel script) - cmds.setAttr("{}.visibleInReflections".format(yeti_node), True) - cmds.setAttr("{}.visibleInRefractions".format(yeti_node), True) - - # Connect to the time node - cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node) + # Connect to the time node + cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node) return nodes + + def read_settings(self, path): + """Read .fursettings file and compute some additional attributes""" + + with open(path, "r") as fp: + fur_settings = json.load(fp) + + if "nodes" not in fur_settings: + raise RuntimeError("Encountered invalid data, " + "expected 'nodes' in fursettings.") + + # Compute the cache file name values we want to set for the nodes + root = os.path.dirname(path) + for node in fur_settings["nodes"]: + cache_filename = self.get_cache_node_filepath( + root=root, node_name=node["name"]) + + attrs = node.get("attrs", {}) # allow 'attrs' to not exist + attrs["cacheFileName"] = cache_filename + node["attrs"] = attrs + + return fur_settings diff --git a/openpype/hosts/maya/plugins/load/load_yeti_rig.py 
b/openpype/hosts/maya/plugins/load/load_yeti_rig.py index b4d31b473f..651607de8a 100644 --- a/openpype/hosts/maya/plugins/load/load_yeti_rig.py +++ b/openpype/hosts/maya/plugins/load/load_yeti_rig.py @@ -1,7 +1,7 @@ import os from collections import defaultdict -from openpype.api import get_project_settings +from openpype.settings import get_project_settings import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api import lib @@ -53,7 +53,9 @@ class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): # load rig with lib.maintained_selection(): - nodes = cmds.file(self.fname, + file_url = self.prepare_root_value(self.fname, + context["project"]["name"]) + nodes = cmds.file(file_url, namespace=namespace, reference=True, returnNewNodes=True, diff --git a/openpype/hosts/maya/plugins/publish/collect_animation.py b/openpype/hosts/maya/plugins/publish/collect_animation.py index 9b1e38fd0a..549098863f 100644 --- a/openpype/hosts/maya/plugins/publish/collect_animation.py +++ b/openpype/hosts/maya/plugins/publish/collect_animation.py @@ -55,3 +55,6 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin): # Store data in the instance for the validator instance.data["out_hierarchy"] = hierarchy + + if instance.data.get("farm"): + instance.data["families"].append("publish.farm") diff --git a/openpype/hosts/maya/plugins/publish/collect_assembly.py b/openpype/hosts/maya/plugins/publish/collect_assembly.py index 1a65bf1fde..2aef9ab908 100644 --- a/openpype/hosts/maya/plugins/publish/collect_assembly.py +++ b/openpype/hosts/maya/plugins/publish/collect_assembly.py @@ -70,7 +70,7 @@ class CollectAssembly(pyblish.api.InstancePlugin): data[representation_id].append(instance_data) instance.data["scenedata"] = dict(data) - instance.data["hierarchy"] = list(set(hierarchy_nodes)) + instance.data["nodesHierarchy"] = list(set(hierarchy_nodes)) def get_file_rule(self, rule): return mel.eval('workspace -query -fileRuleEntry "{}"'.format(rule)) diff --git a/openpype/hosts/maya/plugins/publish/collect_fbx_camera.py b/openpype/hosts/maya/plugins/publish/collect_fbx_camera.py new file mode 100644 index 0000000000..bfa5bccbb9 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_fbx_camera.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +from maya import cmds # noqa +import pyblish.api + + +class CollectFbxCamera(pyblish.api.InstancePlugin): + """Collect Camera for FBX export.""" + + order = pyblish.api.CollectorOrder + 0.2 + label = "Collect Camera for FBX export" + families = ["camera"] + + def process(self, instance): + if not instance.data.get("families"): + instance.data["families"] = [] + + if "fbx" not in instance.data["families"]: + instance.data["families"].append("fbx") + + instance.data["cameras"] = True diff --git a/openpype/hosts/maya/plugins/publish/collect_inputs.py b/openpype/hosts/maya/plugins/publish/collect_inputs.py new file mode 100644 index 0000000000..470fceffc9 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_inputs.py @@ -0,0 +1,215 @@ +import copy +from bson.objectid import ObjectId + +from maya import cmds +import maya.api.OpenMaya as om +import pyblish.api + +from openpype.pipeline import registered_host +from openpype.hosts.maya.api.lib import get_container_members +from openpype.hosts.maya.api.lib_rendersetup import get_shader_in_layer + + +def iter_history(nodes, + filter=om.MFn.kInvalid, + direction=om.MItDependencyGraph.kUpstream): + """Iterate unique upstream history for list of nodes. 
+ + This acts as a replacement to maya.cmds.listHistory. + It's faster by about 2x-3x. It returns less than + maya.cmds.listHistory as it excludes the input nodes + from the output (unless an input node was history + for another input node). It also excludes duplicates. + + Args: + nodes (list): Maya node names to start search from. + filter (om.MFn.Type): Filter to only specific types. + e.g. to dag nodes using om.MFn.kDagNode + direction (om.MItDependencyGraph.Direction): Direction to traverse in. + Defaults to upstream. + + Yields: + str: Node names in upstream history. + + """ + if not nodes: + return + + sel = om.MSelectionList() + for node in nodes: + sel.add(node) + + it = om.MItDependencyGraph(sel.getDependNode(0)) # init iterator + handle = om.MObjectHandle + + traversed = set() + fn_dep = om.MFnDependencyNode() + fn_dag = om.MFnDagNode() + for i in range(sel.length()): + + start_node = sel.getDependNode(i) + start_node_hash = handle(start_node).hashCode() + if start_node_hash in traversed: + continue + + it.resetTo(start_node, + filter=filter, + direction=direction) + while not it.isDone(): + + node = it.currentNode() + node_hash = handle(node).hashCode() + + if node_hash in traversed: + it.prune() + it.next() # noqa: B305 + continue + + traversed.add(node_hash) + + if node.hasFn(om.MFn.kDagNode): + fn_dag.setObject(node) + yield fn_dag.fullPathName() + else: + fn_dep.setObject(node) + yield fn_dep.name() + + it.next() # noqa: B305 + + +def collect_input_containers(containers, nodes): + """Collect containers that contain any of the node in `nodes`. + + This will return any loaded Avalon container that contains at least one of + the nodes. As such, the Avalon container is an input for it. Or in short, + there are member nodes of that container. + + Returns: + list: Input avalon containers + + """ + # Assume the containers have collected their cached '_members' data + # in the collector. + return [container for container in containers + if any(node in container["_members"] for node in nodes)] + + +class CollectUpstreamInputs(pyblish.api.InstancePlugin): + """Collect input source inputs for this publish. + + This will include `inputs` data of which loaded publishes were used in the + generation of this publish. This leaves an upstream trace to what was used + as input. + + """ + + label = "Collect Inputs" + order = pyblish.api.CollectorOrder + 0.34 + hosts = ["maya"] + + def process(self, instance): + + # For large scenes the querying of "host.ls()" can be relatively slow + # e.g. up to a second. Many instances calling it easily slows this + # down. As such, we cache it so we trigger it only once. 
+ # todo: Instead of hidden cache make "CollectContainers" plug-in + cache_key = "__cache_containers" + scene_containers = instance.context.data.get(cache_key, None) + if scene_containers is None: + # Query the scenes' containers if there's no cache yet + host = registered_host() + scene_containers = list(host.ls()) + for container in scene_containers: + # Embed the members into the container dictionary + container_members = set(get_container_members(container)) + container["_members"] = container_members + instance.context.data["__cache_containers"] = scene_containers + + # Collect the relevant input containers for this instance + if "renderlayer" in set(instance.data.get("families", [])): + # Special behavior for renderlayers + self.log.debug("Collecting renderlayer inputs....") + containers = self._collect_renderlayer_inputs(scene_containers, + instance) + + else: + # Basic behavior + nodes = instance[:] + + # Include any input connections of history with long names + # For optimization purposes only trace upstream from shape nodes + # looking for used dag nodes. This way having just a constraint + # on a transform is also ignored which tended to give irrelevant + # inputs for the majority of our use cases. We tend to care more + # about geometry inputs. + shapes = cmds.ls(nodes, + type=("mesh", "nurbsSurface", "nurbsCurve"), + noIntermediate=True) + if shapes: + history = list(iter_history(shapes, filter=om.MFn.kShape)) + history = cmds.ls(history, long=True) + + # Include the transforms in the collected history as shapes + # are excluded from containers + transforms = cmds.listRelatives(cmds.ls(history, shapes=True), + parent=True, + fullPath=True, + type="transform") + if transforms: + history.extend(transforms) + + if history: + nodes = list(set(nodes + history)) + + # Collect containers for the given set of nodes + containers = collect_input_containers(scene_containers, + nodes) + + inputs = [ObjectId(c["representation"]) for c in containers] + instance.data["inputRepresentations"] = inputs + + self.log.info("Collected inputs: %s" % inputs) + + def _collect_renderlayer_inputs(self, scene_containers, instance): + """Collects inputs from nodes in renderlayer, incl. 
shaders + camera""" + + # Get the renderlayer + renderlayer = instance.data.get("setMembers") + + if renderlayer == "defaultRenderLayer": + # Assume all loaded containers in the scene are inputs + # for the masterlayer + return copy.deepcopy(scene_containers) + else: + # Get the members of the layer + members = cmds.editRenderLayerMembers(renderlayer, + query=True, + fullNames=True) or [] + + # In some cases invalid objects are returned from + # `editRenderLayerMembers` so we filter them out + members = cmds.ls(members, long=True) + + # Include all children + children = cmds.listRelatives(members, + allDescendents=True, + fullPath=True) or [] + members.extend(children) + + # Include assigned shaders in renderlayer + shapes = cmds.ls(members, shapes=True, long=True) + shaders = set() + for shape in shapes: + shape_shaders = get_shader_in_layer(shape, layer=renderlayer) + if not shape_shaders: + continue + shaders.update(shape_shaders) + members.extend(shaders) + + # Explicitly include the camera being rendered in renderlayer + cameras = instance.data.get("cameras") + members.extend(cameras) + + containers = collect_input_containers(scene_containers, members) + + return containers diff --git a/openpype/hosts/maya/plugins/publish/collect_instances.py b/openpype/hosts/maya/plugins/publish/collect_instances.py index 1d59a68bf6..ad1f794680 100644 --- a/openpype/hosts/maya/plugins/publish/collect_instances.py +++ b/openpype/hosts/maya/plugins/publish/collect_instances.py @@ -1,9 +1,50 @@ from maya import cmds +import maya.api.OpenMaya as om import pyblish.api import json +def get_all_children(nodes): + """Return all children of `nodes` including each instanced child. + Using maya.cmds.listRelatives(allDescendents=True) includes only the first + instance. As such, this function acts as an optimal replacement with a + focus on a fast query. + + """ + + sel = om.MSelectionList() + traversed = set() + iterator = om.MItDag(om.MItDag.kDepthFirst) + for node in nodes: + + if node in traversed: + # Ignore if already processed as a child + # before + continue + + sel.clear() + sel.add(node) + dag = sel.getDagPath(0) + + iterator.reset(dag) + # ignore self + iterator.next() # noqa: B305 + while not iterator.isDone(): + + path = iterator.fullPathName() + + if path in traversed: + iterator.prune() + iterator.next() # noqa: B305 + continue + + traversed.add(path) + iterator.next() # noqa: B305 + + return list(traversed) + + class CollectInstances(pyblish.api.ContextPlugin): """Gather instances by objectSet and pre-defined attribute @@ -86,12 +127,8 @@ class CollectInstances(pyblish.api.ContextPlugin): # Collect members members = cmds.ls(members, long=True) or [] - # `maya.cmds.listRelatives(noIntermediate=True)` only works when - # `shapes=True` argument is passed, since we also want to include - # transforms we filter afterwards. 
- children = cmds.listRelatives(members, - allDescendents=True, - fullPath=True) or [] + dag_members = cmds.ls(members, type="dagNode", long=True) + children = get_all_children(dag_members) children = cmds.ls(children, noIntermediate=True, long=True) parents = [] diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py index b6a76f1e21..157be5717b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -22,10 +22,46 @@ RENDERER_NODE_TYPES = [ # redshift "RedshiftMeshParameters" ] - SHAPE_ATTRS = set(SHAPE_ATTRS) +def get_pxr_multitexture_file_attrs(node): + attrs = [] + for i in range(9): + if cmds.attributeQuery("filename{}".format(i), node=node, ex=True): + file = cmds.getAttr("{}.filename{}".format(node, i)) + if file: + attrs.append("filename{}".format(i)) + return attrs + + +FILE_NODES = { + "file": "fileTextureName", + + "aiImage": "filename", + + "RedshiftNormalMap": "tex0", + + "PxrBump": "filename", + "PxrNormalMap": "filename", + "PxrMultiTexture": get_pxr_multitexture_file_attrs, + "PxrPtexture": "filename", + "PxrTexture": "filename" +} + + +def get_attributes(dictionary, attr, node=None): + # type: (dict, str, str) -> list + if callable(dictionary[attr]): + val = dictionary[attr](node) + else: + val = dictionary.get(attr, []) + + if not isinstance(val, list): + return [val] + return val + + def get_look_attrs(node): """Returns attributes of a node that are important for the look. @@ -51,15 +87,14 @@ def get_look_attrs(node): if cmds.objectType(node, isAType="shape"): attrs = cmds.listAttr(node, changedSinceFileOpen=True) or [] for attr in attrs: - if attr in SHAPE_ATTRS: + if attr in SHAPE_ATTRS or \ + attr not in SHAPE_ATTRS and attr.startswith('ai'): result.append(attr) - elif attr.startswith('ai'): - result.append(attr) - return result -def node_uses_image_sequence(node): +def node_uses_image_sequence(node, node_path): + # type: (str) -> bool """Return whether file node uses an image sequence or single image. Determine if a node uses an image sequence or just a single image, @@ -74,13 +109,18 @@ def node_uses_image_sequence(node): """ # useFrameExtension indicates an explicit image sequence - node_path = get_file_node_path(node).lower() + try: + use_frame_extension = cmds.getAttr('%s.useFrameExtension' % node) + except ValueError: + use_frame_extension = False + if use_frame_extension: + return True # The following tokens imply a sequence - patterns = ["", "", "", "u_v", "", "", "", + "u_v", ""] + node_path_lowered = node_path.lower() + return any(pattern in node_path_lowered for pattern in patterns) def seq_to_glob(path): @@ -137,14 +177,15 @@ def seq_to_glob(path): return path -def get_file_node_path(node): +def get_file_node_paths(node): + # type: (str) -> list """Get the file path used by a Maya file node. 
Args: node (str): Name of the Maya file node Returns: - str: the file path in use + list: the file paths in use """ # if the path appears to be sequence, use computedFileTextureNamePattern, @@ -163,15 +204,20 @@ def get_file_node_path(node): ""] lower = texture_pattern.lower() if any(pattern in lower for pattern in patterns): - return texture_pattern + return [texture_pattern] - if cmds.nodeType(node) == 'aiImage': - return cmds.getAttr('{0}.filename'.format(node)) - if cmds.nodeType(node) == 'RedshiftNormalMap': - return cmds.getAttr('{}.tex0'.format(node)) + try: + file_attributes = get_attributes( + FILE_NODES, cmds.nodeType(node), node) + except AttributeError: + file_attributes = "fileTextureName" - # otherwise use fileTextureName - return cmds.getAttr('{0}.fileTextureName'.format(node)) + files = [] + for file_attr in file_attributes: + if cmds.attributeQuery(file_attr, node=node, exists=True): + files.append(cmds.getAttr("{}.{}".format(node, file_attr))) + + return files def get_file_node_files(node): @@ -185,16 +231,21 @@ def get_file_node_files(node): list: List of full file paths. """ + paths = get_file_node_paths(node) + sequences = [] + replaces = [] + for index, path in enumerate(paths): + if node_uses_image_sequence(node, path): + glob_pattern = seq_to_glob(path) + sequences.extend(glob.glob(glob_pattern)) + replaces.append(index) - path = get_file_node_path(node) - path = cmds.workspace(expandName=path) - if node_uses_image_sequence(node): - glob_pattern = seq_to_glob(path) - return glob.glob(glob_pattern) - elif os.path.exists(path): - return [path] - else: - return [] + for index in replaces: + paths.pop(index) + + paths.extend(sequences) + + return [p for p in paths if os.path.exists(p)] class CollectLook(pyblish.api.InstancePlugin): @@ -238,13 +289,13 @@ class CollectLook(pyblish.api.InstancePlugin): "for %s" % instance.data['name']) # Discover related object sets - self.log.info("Gathering sets..") + self.log.info("Gathering sets ...") sets = self.collect_sets(instance) # Lookup set (optimization) instance_lookup = set(cmds.ls(instance, long=True)) - self.log.info("Gathering set relations..") + self.log.info("Gathering set relations ...") # Ensure iteration happen in a list so we can remove keys from the # dict within the loop @@ -326,7 +377,10 @@ class CollectLook(pyblish.api.InstancePlugin): "volumeShader", "displacementShader", "aiSurfaceShader", - "aiVolumeShader"] + "aiVolumeShader", + "rman__surface", + "rman__displacement" + ] if look_sets: materials = [] @@ -374,15 +428,17 @@ class CollectLook(pyblish.api.InstancePlugin): or [] ) - files = cmds.ls(history, type="file", long=True) - files.extend(cmds.ls(history, type="aiImage", long=True)) - files.extend(cmds.ls(history, type="RedshiftNormalMap", long=True)) + all_supported_nodes = FILE_NODES.keys() + files = [] + for node_type in all_supported_nodes: + files.extend(cmds.ls(history, type=node_type, long=True)) self.log.info("Collected file nodes:\n{}".format(files)) # Collect textures if any file nodes are found instance.data["resources"] = [] for n in files: - instance.data["resources"].append(self.collect_resource(n)) + for res in self.collect_resources(n): + instance.data["resources"].append(res) self.log.info("Collected resources: {}".format(instance.data["resources"])) @@ -495,14 +551,16 @@ class CollectLook(pyblish.api.InstancePlugin): if cmds.getAttr(attribute, type=True) == "message": continue node_attributes[attr] = cmds.getAttr(attribute) - + # Only include if there are any properties we care about + if 
not node_attributes: + continue attributes.append({"name": node, "uuid": lib.get_id(node), "attributes": node_attributes}) return attributes - def collect_resource(self, node): + def collect_resources(self, node): """Collect the link to the file(s) used (resource) Args: node (str): name of the node @@ -510,68 +568,81 @@ class CollectLook(pyblish.api.InstancePlugin): Returns: dict """ - self.log.debug("processing: {}".format(node)) - if cmds.nodeType(node) not in ["file", "aiImage", "RedshiftNormalMap"]: + all_supported_nodes = FILE_NODES.keys() + if cmds.nodeType(node) not in all_supported_nodes: self.log.error( "Unsupported file node: {}".format(cmds.nodeType(node))) raise AssertionError("Unsupported file node") - if cmds.nodeType(node) == 'file': - self.log.debug(" - file node") - attribute = "{}.fileTextureName".format(node) - computed_attribute = "{}.computedFileTextureNamePattern".format(node) - elif cmds.nodeType(node) == 'aiImage': - self.log.debug("aiImage node") - attribute = "{}.filename".format(node) - computed_attribute = attribute - elif cmds.nodeType(node) == 'RedshiftNormalMap': - self.log.debug("RedshiftNormalMap node") - attribute = "{}.tex0".format(node) - computed_attribute = attribute + self.log.debug(" - got {}".format(cmds.nodeType(node))) - source = cmds.getAttr(attribute) - self.log.info(" - file source: {}".format(source)) - color_space_attr = "{}.colorSpace".format(node) - try: - color_space = cmds.getAttr(color_space_attr) - except ValueError: - # node doesn't have colorspace attribute - color_space = "Raw" - # Compare with the computed file path, e.g. the one with the - # pattern in it, to generate some logging information about this - # difference - # computed_attribute = "{}.computedFileTextureNamePattern".format(node) - computed_source = cmds.getAttr(computed_attribute) - if source != computed_source: - self.log.debug("Detected computed file pattern difference " - "from original pattern: {0} " - "({1} -> {2})".format(node, - source, - computed_source)) + attributes = get_attributes(FILE_NODES, cmds.nodeType(node), node) + for attribute in attributes: + source = cmds.getAttr("{}.{}".format( + node, + attribute + )) + computed_attribute = "{}.{}".format(node, attribute) + if attribute == "fileTextureName": + computed_attribute = node + ".computedFileTextureNamePattern" - # We replace backslashes with forward slashes because V-Ray - # can't handle the UDIM files with the backslashes in the - # paths as the computed patterns - source = source.replace("\\", "/") + self.log.info(" - file source: {}".format(source)) + color_space_attr = "{}.colorSpace".format(node) + try: + color_space = cmds.getAttr(color_space_attr) + except ValueError: + # node doesn't have colorspace attribute + color_space = "Raw" + # Compare with the computed file path, e.g. the one with + # the pattern in it, to generate some logging information + # about this difference + computed_source = cmds.getAttr(computed_attribute) + if source != computed_source: + self.log.debug("Detected computed file pattern difference " + "from original pattern: {0} " + "({1} -> {2})".format(node, + source, + computed_source)) - files = get_file_node_files(node) - if len(files) == 0: - self.log.error("No valid files found from node `%s`" % node) + # renderman allows nodes to have filename attribute empty while + # you can have another incoming connection from different node. 
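            # Illustrative sketch (assumes `node`, `attribute`, `source` and
            # `self.log` from the enclosing scope): the upstream connection
            # mentioned above can also be detected directly with a Maya query,
            # rather than by checking the RenderMan node types as done below.
            incoming = cmds.listConnections(
                "{}.{}".format(node, attribute),
                source=True, destination=False, plugs=True) or []
            if not source and incoming:
                # Empty filename, but the attribute is driven by another node.
                self.log.debug("Attribute driven by: {}".format(incoming))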
+ pxr_nodes = set() + if cmds.pluginInfo("RenderMan_for_Maya", query=True, loaded=True): + pxr_nodes = set( + cmds.pluginInfo("RenderMan_for_Maya", + query=True, + dependNode=True) + ) + if not source and cmds.nodeType(node) in pxr_nodes: + self.log.info("Renderman: source is empty, skipping...") + continue + # We replace backslashes with forward slashes because V-Ray + # can't handle the UDIM files with the backslashes in the + # paths as the computed patterns + source = source.replace("\\", "/") - self.log.info("collection of resource done:") - self.log.info(" - node: {}".format(node)) - self.log.info(" - attribute: {}".format(attribute)) - self.log.info(" - source: {}".format(source)) - self.log.info(" - file: {}".format(files)) - self.log.info(" - color space: {}".format(color_space)) + files = get_file_node_files(node) + if len(files) == 0: + self.log.error("No valid files found from node `%s`" % node) - # Define the resource - return {"node": node, - "attribute": attribute, + self.log.info("collection of resource done:") + self.log.info(" - node: {}".format(node)) + self.log.info(" - attribute: {}".format(attribute)) + self.log.info(" - source: {}".format(source)) + self.log.info(" - file: {}".format(files)) + self.log.info(" - color space: {}".format(color_space)) + + # Define the resource + yield { + "node": node, + # here we are passing not only attribute, but with node again + # this should be simplified and changed extractor. + "attribute": "{}.{}".format(node, attribute), "source": source, # required for resources "files": files, - "color_space": color_space} # required for resources + "color_space": color_space + } # required for resources class CollectModelRenderSets(CollectLook): diff --git a/openpype/hosts/maya/plugins/publish/collect_maya_scene.py b/openpype/hosts/maya/plugins/publish/collect_maya_scene.py deleted file mode 100644 index eb21b17989..0000000000 --- a/openpype/hosts/maya/plugins/publish/collect_maya_scene.py +++ /dev/null @@ -1,25 +0,0 @@ -from maya import cmds - -import pyblish.api - - -class CollectMayaScene(pyblish.api.InstancePlugin): - """Collect Maya Scene Data - - """ - - order = pyblish.api.CollectorOrder + 0.2 - label = 'Collect Model Data' - families = ["mayaScene"] - - def process(self, instance): - # Extract only current frame (override) - frame = cmds.currentTime(query=True) - instance.data["frameStart"] = frame - instance.data["frameEnd"] = frame - - # make ftrack publishable - if instance.data.get('families'): - instance.data['families'].append('ftrack') - else: - instance.data['families'] = ['ftrack'] diff --git a/openpype/hosts/maya/plugins/publish/collect_maya_scene_time.py b/openpype/hosts/maya/plugins/publish/collect_maya_scene_time.py new file mode 100644 index 0000000000..7e198df14d --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_maya_scene_time.py @@ -0,0 +1,26 @@ +from maya import cmds + +import pyblish.api + + +class CollectMayaSceneTime(pyblish.api.InstancePlugin): + """Collect Maya Scene playback range + + This allows to reproduce the playback range for the content to be loaded. + It does *not* limit the extracted data to only data inside that time range. 
+ + """ + + order = pyblish.api.CollectorOrder + 0.2 + label = 'Collect Maya Scene Time' + families = ["mayaScene"] + + def process(self, instance): + instance.data.update({ + "frameStart": cmds.playbackOptions(query=True, minTime=True), + "frameEnd": cmds.playbackOptions(query=True, maxTime=True), + "frameStartHandle": cmds.playbackOptions(query=True, + animationStartTime=True), + "frameEndHandle": cmds.playbackOptions(query=True, + animationEndTime=True) + }) diff --git a/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py b/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py new file mode 100644 index 0000000000..edf40a27a6 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py @@ -0,0 +1,372 @@ +import glob +import os +import re + +from maya import cmds +import pyblish.api +from openpype.hosts.maya.api import lib + +SHAPE_ATTRS = ["castsShadows", + "receiveShadows", + "motionBlur", + "primaryVisibility", + "smoothShading", + "visibleInReflections", + "visibleInRefractions", + "doubleSided", + "opposite"] + +SHAPE_ATTRS = set(SHAPE_ATTRS) +COLOUR_SPACES = ['sRGB', 'linear', 'auto'] +MIPMAP_EXTENSIONS = ['tdl'] + + +def get_look_attrs(node): + """Returns attributes of a node that are important for the look. + + These are the "changed" attributes (those that have edits applied + in the current scene). + + Returns: + list: Attribute names to extract + + """ + # When referenced get only attributes that are "changed since file open" + # which includes any reference edits, otherwise take *all* user defined + # attributes + is_referenced = cmds.referenceQuery(node, isNodeReferenced=True) + result = cmds.listAttr(node, userDefined=True, + changedSinceFileOpen=is_referenced) or [] + + # `cbId` is added when a scene is saved, ignore by default + if "cbId" in result: + result.remove("cbId") + + # For shapes allow render stat changes + if cmds.objectType(node, isAType="shape"): + attrs = cmds.listAttr(node, changedSinceFileOpen=True) or [] + for attr in attrs: + if attr in SHAPE_ATTRS: + result.append(attr) + elif attr.startswith('ai'): + result.append(attr) + + return result + + +def node_uses_image_sequence(node): + """Return whether file node uses an image sequence or single image. + + Determine if a node uses an image sequence or just a single image, + not always obvious from its file path alone. + + Args: + node (str): Name of the Maya node + + Returns: + bool: True if node uses an image sequence + + """ + + # useFrameExtension indicates an explicit image sequence + node_path = get_file_node_path(node).lower() + + # The following tokens imply a sequence + patterns = ["", "", "", "u_v", ".tif will return as /path/to/texture.*.tif. + + Args: + path (str): the image sequence path + + Returns: + str: Return glob string that matches the filename pattern. 
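    Example (illustrative; the <UDIM> token below is an assumption based on
    the usual Mari-style naming, not taken verbatim from this file):

        >>> seq_to_glob("/path/to/texture.1001.exr")
        '/path/to/texture.*.exr'
        >>> seq_to_glob("/path/to/texture.<UDIM>.tif")
        '/path/to/texture.*.tif'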
+ + """ + + if path is None: + return path + + # If any of the patterns, convert the pattern + patterns = { + "": "", + "": "", + "": "", + "#": "#", + "u_v": "|", + "", # noqa - copied from collect_look.py + "": "" + } + + lower = path.lower() + has_pattern = False + for pattern, regex_pattern in patterns.items(): + if pattern in lower: + path = re.sub(regex_pattern, "*", path, flags=re.IGNORECASE) + has_pattern = True + + if has_pattern: + return path + + base = os.path.basename(path) + matches = list(re.finditer(r'\d+', base)) + if matches: + match = matches[-1] + new_base = '{0}*{1}'.format(base[:match.start()], + base[match.end():]) + head = os.path.dirname(path) + return os.path.join(head, new_base) + else: + return path + + +def get_file_node_path(node): + """Get the file path used by a Maya file node. + + Args: + node (str): Name of the Maya file node + + Returns: + str: the file path in use + + """ + # if the path appears to be sequence, use computedFileTextureNamePattern, + # this preserves the <> tag + if cmds.attributeQuery('computedFileTextureNamePattern', + node=node, + exists=True): + plug = '{0}.computedFileTextureNamePattern'.format(node) + texture_pattern = cmds.getAttr(plug) + + patterns = ["", + "", + "u_v", + "", + ""] + lower = texture_pattern.lower() + if any(pattern in lower for pattern in patterns): + return texture_pattern + + if cmds.nodeType(node) == 'aiImage': + return cmds.getAttr('{0}.filename'.format(node)) + if cmds.nodeType(node) == 'RedshiftNormalMap': + return cmds.getAttr('{}.tex0'.format(node)) + + # otherwise use fileTextureName + return cmds.getAttr('{0}.fileTextureName'.format(node)) + + +def get_file_node_files(node): + """Return the file paths related to the file node + + Note: + Will only return existing files. Returns an empty list + if not valid existing files are linked. + + Returns: + list: List of full file paths. + + """ + + path = get_file_node_path(node) + path = cmds.workspace(expandName=path) + if node_uses_image_sequence(node): + glob_pattern = seq_to_glob(path) + return glob.glob(glob_pattern) + elif os.path.exists(path): + return [path] + else: + return [] + + +def get_mipmap(fname): + for colour_space in COLOUR_SPACES: + for mipmap_ext in MIPMAP_EXTENSIONS: + mipmap_fname = '.'.join([fname, colour_space, mipmap_ext]) + if os.path.exists(mipmap_fname): + return mipmap_fname + return None + + +def is_mipmap(fname): + ext = os.path.splitext(fname)[1][1:] + if ext in MIPMAP_EXTENSIONS: + return True + return False + + +class CollectMultiverseLookData(pyblish.api.InstancePlugin): + """Collect Multiverse Look + + """ + + order = pyblish.api.CollectorOrder + 0.2 + label = 'Collect Multiverse Look' + families = ["mvLook"] + + def process(self, instance): + # Load plugin first + cmds.loadPlugin("MultiverseForMaya", quiet=True) + import multiverse + + self.log.info("Processing mvLook for '{}'".format(instance)) + + nodes = set() + for node in instance: + # We want only mvUsdCompoundShape nodes. + nodes_of_interest = cmds.ls(node, + dag=True, + shapes=False, + type="mvUsdCompoundShape", + noIntermediate=True, + long=True) + nodes.update(nodes_of_interest) + + files = [] + sets = {} + instance.data["resources"] = [] + publishMipMap = instance.data["publishMipMap"] + + for node in nodes: + self.log.info("Getting resources for '{}'".format(node)) + + # We know what nodes need to be collected, now we need to + # extract the materials overrides. 
+ overrides = multiverse.ListMaterialOverridePrims(node) + for override in overrides: + matOver = multiverse.GetMaterialOverride(node, override) + + if isinstance(matOver, multiverse.MaterialSourceShadingGroup): + # We now need to grab the shadingGroup so add it to the + # sets we pass down the pipe. + shadingGroup = matOver.shadingGroupName + self.log.debug("ShadingGroup = '{}'".format(shadingGroup)) + sets[shadingGroup] = {"uuid": lib.get_id( + shadingGroup), "members": list()} + + # The SG may reference files, add those too! + history = cmds.listHistory(shadingGroup) + files = cmds.ls(history, type="file", long=True) + + for f in files: + resources = self.collect_resource(f, publishMipMap) + instance.data["resources"].append(resources) + + elif isinstance(matOver, multiverse.MaterialSourceUsdPath): + # TODO: Handle this later. + pass + + # Store data on the instance for validators, extractos, etc. + instance.data["lookData"] = { + "attributes": [], + "relationships": sets + } + + def collect_resource(self, node, publishMipMap): + """Collect the link to the file(s) used (resource) + Args: + node (str): name of the node + + Returns: + dict + """ + + self.log.debug("processing: {}".format(node)) + if cmds.nodeType(node) not in ["file", "aiImage", "RedshiftNormalMap"]: + self.log.error( + "Unsupported file node: {}".format(cmds.nodeType(node))) + raise AssertionError("Unsupported file node") + + if cmds.nodeType(node) == 'file': + self.log.debug(" - file node") + attribute = "{}.fileTextureName".format(node) + computed_attribute = "{}.computedFileTextureNamePattern".format( + node) + elif cmds.nodeType(node) == 'aiImage': + self.log.debug("aiImage node") + attribute = "{}.filename".format(node) + computed_attribute = attribute + elif cmds.nodeType(node) == 'RedshiftNormalMap': + self.log.debug("RedshiftNormalMap node") + attribute = "{}.tex0".format(node) + computed_attribute = attribute + + source = cmds.getAttr(attribute) + self.log.info(" - file source: {}".format(source)) + color_space_attr = "{}.colorSpace".format(node) + try: + color_space = cmds.getAttr(color_space_attr) + except ValueError: + # node doesn't have colorspace attribute + color_space = "Raw" + # Compare with the computed file path, e.g. 
the one with the + # pattern in it, to generate some logging information about this + # difference + # computed_attribute = "{}.computedFileTextureNamePattern".format(node) + computed_source = cmds.getAttr(computed_attribute) + if source != computed_source: + self.log.debug("Detected computed file pattern difference " + "from original pattern: {0} " + "({1} -> {2})".format(node, + source, + computed_source)) + + # We replace backslashes with forward slashes because V-Ray + # can't handle the UDIM files with the backslashes in the + # paths as the computed patterns + source = source.replace("\\", "/") + + files = get_file_node_files(node) + files = self.handle_files(files, publishMipMap) + if len(files) == 0: + self.log.error("No valid files found from node `%s`" % node) + + self.log.info("collection of resource done:") + self.log.info(" - node: {}".format(node)) + self.log.info(" - attribute: {}".format(attribute)) + self.log.info(" - source: {}".format(source)) + self.log.info(" - file: {}".format(files)) + self.log.info(" - color space: {}".format(color_space)) + + # Define the resource + return {"node": node, + "attribute": attribute, + "source": source, # required for resources + "files": files, + "color_space": color_space} # required for resources + + def handle_files(self, files, publishMipMap): + """This will go through all the files and make sure that they are + either already mipmapped or have a corresponding mipmap sidecar and + add that to the list.""" + if not publishMipMap: + return files + + extra_files = [] + self.log.debug("Expecting MipMaps, going to look for them.") + for fname in files: + self.log.info("Checking '{}' for mipmaps".format(fname)) + if is_mipmap(fname): + self.log.debug(" - file is already MipMap, skipping.") + continue + + mipmap = get_mipmap(fname) + if mipmap: + self.log.info(" mipmap found for '{}'".format(fname)) + extra_files.append(mipmap) + else: + self.log.warning(" no mipmap found for '{}'".format(fname)) + return files + extra_files diff --git a/openpype/hosts/maya/plugins/publish/collect_pointcache.py b/openpype/hosts/maya/plugins/publish/collect_pointcache.py new file mode 100644 index 0000000000..a841341f72 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_pointcache.py @@ -0,0 +1,14 @@ +import pyblish.api + + +class CollectPointcache(pyblish.api.InstancePlugin): + """Collect pointcache data for instance.""" + + order = pyblish.api.CollectorOrder + 0.4 + families = ["pointcache"] + label = "Collect Pointcache" + hosts = ["maya"] + + def process(self, instance): + if instance.data.get("farm"): + instance.data["families"].append("publish.farm") diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index e66983780e..b1ad3ca58e 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -72,7 +72,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): def process(self, context): """Entry point to collector.""" render_instance = None - deadline_url = None for instance in context: if "rendering" in instance.data["families"]: @@ -96,41 +95,33 @@ class CollectMayaRender(pyblish.api.ContextPlugin): asset = legacy_io.Session["AVALON_ASSET"] workspace = context.data["workspaceDir"] - deadline_settings = ( - context.data - ["system_settings"] - ["modules"] - ["deadline"] - ) - - if deadline_settings["enabled"]: - deadline_url = render_instance.data.get("deadlineUrl") - self._rs = renderSetup.instance() 
- current_layer = self._rs.getVisibleRenderLayer() + # Retrieve render setup layers + rs = renderSetup.instance() maya_render_layers = { - layer.name(): layer for layer in self._rs.getRenderLayers() + layer.name(): layer for layer in rs.getRenderLayers() } - self.maya_layers = maya_render_layers - for layer in collected_render_layers: - try: - if layer.startswith("LAYER_"): - # this is support for legacy mode where render layers - # started with `LAYER_` prefix. - expected_layer_name = re.search( - r"^LAYER_(.*)", layer).group(1) - else: - # new way is to prefix render layer name with instance - # namespace. - expected_layer_name = re.search( - r"^.+:(.*)", layer).group(1) - except IndexError: + if layer.startswith("LAYER_"): + # this is support for legacy mode where render layers + # started with `LAYER_` prefix. + layer_name_pattern = r"^LAYER_(.*)" + else: + # new way is to prefix render layer name with instance + # namespace. + layer_name_pattern = r"^.+:(.*)" + + # todo: We should have a more explicit way to link the renderlayer + match = re.match(layer_name_pattern, layer) + if not match: msg = "Invalid layer name in set [ {} ]".format(layer) self.log.warning(msg) continue - self.log.info("processing %s" % layer) + expected_layer_name = match.group(1) + self.log.info("Processing '{}' as layer [ {} ]" + "".format(layer, expected_layer_name)) + # check if layer is part of renderSetup if expected_layer_name not in maya_render_layers: msg = "Render layer [ {} ] is not in " "Render Setup".format( @@ -147,49 +138,28 @@ class CollectMayaRender(pyblish.api.ContextPlugin): self.log.warning(msg) continue - # test if there are sets (subsets) to attach render to + # detect if there are sets (subsets) to attach render to sets = cmds.sets(layer, query=True) or [] attach_to = [] - if sets: - for s in sets: - if "family" not in cmds.listAttr(s): - continue + for s in sets: + if not cmds.attributeQuery("family", node=s, exists=True): + continue - attach_to.append( - { - "version": None, # we need integrator for that - "subset": s, - "family": cmds.getAttr("{}.family".format(s)), - } - ) - self.log.info(" -> attach render to: {}".format(s)) + attach_to.append( + { + "version": None, # we need integrator for that + "subset": s, + "family": cmds.getAttr("{}.family".format(s)), + } + ) + self.log.info(" -> attach render to: {}".format(s)) layer_name = "rs_{}".format(expected_layer_name) # collect all frames we are expecting to be rendered - renderer = cmds.getAttr( - "defaultRenderGlobals.currentRenderer" - ).lower() - # handle various renderman names - if renderer.startswith("renderman"): - renderer = "renderman" - - try: - aov_separator = self._aov_chars[( - context.data["project_settings"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "_" - - render_instance.data["aovSeparator"] = aov_separator - # return all expected files for all cameras and aovs in given # frame range - layer_render_products = get_layer_render_products( - layer_name, render_instance) + layer_render_products = get_layer_render_products(layer_name) render_products = layer_render_products.layer_data.products assert render_products, "no render products generated" exp_files = [] @@ -226,13 +196,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin): ) # append full path - full_exp_files = [] aov_dict = {} default_render_file = context.data.get('project_settings')\ .get('maya')\ - .get('create')\ - .get('CreateRender')\ - .get('default_render_image_folder') + .get('RenderSettings')\ + 
.get('default_render_image_folder') or "" # replace relative paths with absolute. Render products are # returned as list of dictionaries. publish_meta_path = None @@ -246,6 +214,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): full_paths.append(full_path) publish_meta_path = os.path.dirname(full_path) aov_dict[aov_first_key] = full_paths + full_exp_files = [aov_dict] frame_start_render = int(self.get_render_attribute( "startFrame", layer=layer_name)) @@ -269,8 +238,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): frame_start_handle = frame_start_render frame_end_handle = frame_end_render - full_exp_files.append(aov_dict) - # find common path to store metadata # so if image prefix is branching to many directories # metadata file will be located in top-most common @@ -299,16 +266,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): self.log.info("collecting layer: {}".format(layer_name)) # Get layer specific settings, might be overrides - try: - aov_separator = self._aov_chars[( - context.data["project_settings"] - ["create"] - ["CreateRender"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "_" - data = { "subset": expected_layer_name, "attachTo": attach_to, @@ -339,9 +296,16 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "source": filepath, "expectedFiles": full_exp_files, "publishRenderMetadataFolder": common_publish_meta_path, - "resolutionWidth": cmds.getAttr("defaultResolution.width"), - "resolutionHeight": cmds.getAttr("defaultResolution.height"), - "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect"), + "renderProducts": layer_render_products, + "resolutionWidth": lib.get_attr_in_layer( + "defaultResolution.width", layer=layer_name + ), + "resolutionHeight": lib.get_attr_in_layer( + "defaultResolution.height", layer=layer_name + ), + "pixelAspect": lib.get_attr_in_layer( + "defaultResolution.pixelAspect", layer=layer_name + ), "tileRendering": render_instance.data.get("tileRendering") or False, # noqa: E501 "tilesX": render_instance.data.get("tilesX") or 2, "tilesY": render_instance.data.get("tilesY") or 2, @@ -351,11 +315,18 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "useReferencedAovs": render_instance.data.get( "useReferencedAovs") or render_instance.data.get( "vrayUseReferencedAovs") or False, - "aovSeparator": aov_separator + "aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501 + "renderSetupIncludeLights": render_instance.data.get( + "renderSetupIncludeLights" + ) } - if deadline_url: - data["deadlineUrl"] = deadline_url + # Collect Deadline url if Deadline module is enabled + deadline_settings = ( + context.data["system_settings"]["modules"]["deadline"] + ) + if deadline_settings["enabled"]: + data["deadlineUrl"] = render_instance.data.get("deadlineUrl") if self.sync_workfile_version: data["version"] = context.data["version"] @@ -364,19 +335,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): if instance.data['family'] == "workfile": instance.data["version"] = context.data["version"] - # Apply each user defined attribute as data - for attr in cmds.listAttr(layer, userDefined=True) or list(): - try: - value = cmds.getAttr("{}.{}".format(layer, attr)) - except Exception: - # Some attributes cannot be read directly, - # such as mesh and color attributes. These - # are considered non-essential to this - # particular publishing pipeline. 
- value = None - - data[attr] = value - # handle standalone renderers if render_instance.data.get("vrayScene") is True: data["families"].append("vrayscene_render") @@ -403,8 +361,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin): instance = context.create_instance(expected_layer_name) instance.data["label"] = label + instance.data["farm"] = True instance.data.update(data) - self.log.debug("data: {}".format(json.dumps(data, indent=4))) def parse_options(self, render_globals): """Get all overrides with a value, skip those without. @@ -484,10 +442,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): return pool_a, pool_b - def _get_overrides(self, layer): - rset = self.maya_layers[layer].renderSettingsCollectionInstance() - return rset.getOverrides() - @staticmethod def get_render_attribute(attr, layer): """Get attribute from render options. diff --git a/openpype/hosts/maya/plugins/publish/collect_review.py b/openpype/hosts/maya/plugins/publish/collect_review.py index 1af92c3bfc..eb872c2935 100644 --- a/openpype/hosts/maya/plugins/publish/collect_review.py +++ b/openpype/hosts/maya/plugins/publish/collect_review.py @@ -3,6 +3,7 @@ import pymel.core as pm import pyblish.api +from openpype.client import get_subset_by_name from openpype.pipeline import legacy_io @@ -70,6 +71,8 @@ class CollectReview(pyblish.api.InstancePlugin): data['handles'] = instance.data.get('handles', None) data['step'] = instance.data['step'] data['fps'] = instance.data['fps'] + data['review_width'] = instance.data['review_width'] + data['review_height'] = instance.data['review_height'] data["isolate"] = instance.data["isolate"] cmds.setAttr(str(instance) + '.active', 1) self.log.debug('data {}'.format(instance.context[i].data)) @@ -77,15 +80,18 @@ class CollectReview(pyblish.api.InstancePlugin): instance.data['remove'] = True self.log.debug('isntance data {}'.format(instance.data)) else: - if self.legacy: - instance.data['subset'] = task + 'Review' - else: - subset = "{}{}{}".format( - task, - instance.data["subset"][0].upper(), - instance.data["subset"][1:] - ) - instance.data['subset'] = subset + legacy_subset_name = task + 'Review' + asset_doc = instance.context.data['assetEntity'] + project_name = legacy_io.active_project() + subset_doc = get_subset_by_name( + project_name, + legacy_subset_name, + asset_doc["_id"], + fields=["_id"] + ) + if subset_doc: + self.log.debug("Existing subsets found, keep legacy name.") + instance.data['subset'] = legacy_subset_name instance.data['review_camera'] = camera instance.data['frameStartFtrack'] = \ diff --git a/openpype/hosts/maya/plugins/publish/collect_rig.py b/openpype/hosts/maya/plugins/publish/collect_rig.py deleted file mode 100644 index 98ae1e8009..0000000000 --- a/openpype/hosts/maya/plugins/publish/collect_rig.py +++ /dev/null @@ -1,22 +0,0 @@ -from maya import cmds - -import pyblish.api - - -class CollectRigData(pyblish.api.InstancePlugin): - """Collect rig data - - Ensures rigs are published to Ftrack. 
- - """ - - order = pyblish.api.CollectorOrder + 0.2 - label = 'Collect Rig Data' - families = ["rig"] - - def process(self, instance): - # make ftrack publishable - if instance.data.get('families'): - instance.data['families'].append('ftrack') - else: - instance.data['families'] = ['ftrack'] diff --git a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py index afdb570cbc..0bae9656f3 100644 --- a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py +++ b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py @@ -124,9 +124,15 @@ class CollectVrayScene(pyblish.api.InstancePlugin): # Add source to allow tracing back to the scene from # which was submitted originally "source": context.data["currentFile"].replace("\\", "/"), - "resolutionWidth": cmds.getAttr("defaultResolution.width"), - "resolutionHeight": cmds.getAttr("defaultResolution.height"), - "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect"), + "resolutionWidth": lib.get_attr_in_layer( + "defaultResolution.height", layer=layer_name + ), + "resolutionHeight": lib.get_attr_in_layer( + "defaultResolution.width", layer=layer_name + ), + "pixelAspect": lib.get_attr_in_layer( + "defaultResolution.pixelAspect", layer=layer_name + ), "priority": instance.data.get("priority"), "useMultipleSceneFiles": instance.data.get( "vraySceneMultipleFiles") diff --git a/openpype/hosts/maya/plugins/publish/collect_yeti_rig.py b/openpype/hosts/maya/plugins/publish/collect_yeti_rig.py index 029432223b..bc15edd9e0 100644 --- a/openpype/hosts/maya/plugins/publish/collect_yeti_rig.py +++ b/openpype/hosts/maya/plugins/publish/collect_yeti_rig.py @@ -43,11 +43,12 @@ class CollectYetiRig(pyblish.api.InstancePlugin): instance.data["resources"] = yeti_resources - # Force frame range for export - instance.data["frameStart"] = cmds.playbackOptions( - query=True, animationStartTime=True) - instance.data["frameEnd"] = cmds.playbackOptions( - query=True, animationStartTime=True) + # Force frame range for yeti cache export for the rig + start = cmds.playbackOptions(query=True, animationStartTime=True) + for key in ["frameStart", "frameEnd", + "frameStartHandle", "frameEndHandle"]: + instance.data[key] = start + instance.data["preroll"] = 0 def collect_input_connections(self, instance): """Collect the inputs for all nodes in the input_SET""" diff --git a/openpype/hosts/maya/plugins/publish/extract_animation.py b/openpype/hosts/maya/plugins/publish/extract_animation.py deleted file mode 100644 index 8a8bd67cd8..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_animation.py +++ /dev/null @@ -1,92 +0,0 @@ -import os - -from maya import cmds - -import openpype.api -from openpype.hosts.maya.api.lib import ( - extract_alembic, - suspended_refresh, - maintained_selection -) - - -class ExtractAnimation(openpype.api.Extractor): - """Produce an alembic of just point positions and normals. - - Positions and normals, uvs, creases are preserved, but nothing more, - for plain and predictable point caches. 
- - """ - - label = "Extract Animation" - hosts = ["maya"] - families = ["animation"] - - def process(self, instance): - - # Collect the out set nodes - out_sets = [node for node in instance if node.endswith("out_SET")] - if len(out_sets) != 1: - raise RuntimeError("Couldn't find exactly one out_SET: " - "{0}".format(out_sets)) - out_set = out_sets[0] - roots = cmds.sets(out_set, query=True) - - # Include all descendants - nodes = roots + cmds.listRelatives(roots, - allDescendents=True, - fullPath=True) or [] - - # Collect the start and end including handles - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - self.log.info("Extracting animation..") - dirname = self.staging_dir(instance) - - parent_dir = self.staging_dir(instance) - filename = "{name}.abc".format(**instance.data) - path = os.path.join(parent_dir, filename) - - options = { - "step": instance.data.get("step", 1.0) or 1.0, - "attr": ["cbId"], - "writeVisibility": True, - "writeCreases": True, - "uvWrite": True, - "selection": True, - "worldSpace": instance.data.get("worldSpace", True), - "writeColorSets": instance.data.get("writeColorSets", False), - "writeFaceSets": instance.data.get("writeFaceSets", False) - } - - if not instance.data.get("includeParentHierarchy", True): - # Set the root nodes if we don't want to include parents - # The roots are to be considered the ones that are the actual - # direct members of the set - options["root"] = roots - - if int(cmds.about(version=True)) >= 2017: - # Since Maya 2017 alembic supports multiple uv sets - write them. - options["writeUVSets"] = True - - with suspended_refresh(): - with maintained_selection(): - cmds.select(nodes, noExpand=True) - extract_alembic(file=path, - startFrame=float(start), - endFrame=float(end), - **options) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, - "stagingDir": dirname, - } - instance.data["representations"].append(representation) - - self.log.info("Extracted {} to {}".format(instance, dirname)) diff --git a/openpype/hosts/maya/plugins/publish/extract_ass.py b/openpype/hosts/maya/plugins/publish/extract_ass.py index 760f410f91..5c21a4ff08 100644 --- a/openpype/hosts/maya/plugins/publish/extract_ass.py +++ b/openpype/hosts/maya/plugins/publish/extract_ass.py @@ -1,12 +1,12 @@ import os -import openpype.api - from maya import cmds + +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import maintained_selection -class ExtractAssStandin(openpype.api.Extractor): +class ExtractAssStandin(publish.Extractor): """Extract the content of the instance to a ass file Things to pay attention to: diff --git a/openpype/hosts/maya/plugins/publish/extract_assembly.py b/openpype/hosts/maya/plugins/publish/extract_assembly.py index 482930b76e..35932003ee 100644 --- a/openpype/hosts/maya/plugins/publish/extract_assembly.py +++ b/openpype/hosts/maya/plugins/publish/extract_assembly.py @@ -1,14 +1,13 @@ +import os import json -import os - -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import extract_alembic from maya import cmds -class ExtractAssembly(openpype.api.Extractor): +class ExtractAssembly(publish.Extractor): """Produce an alembic of just point positions and normals. 
Positions and normals are preserved, but nothing more, @@ -33,7 +32,7 @@ class ExtractAssembly(openpype.api.Extractor): json.dump(instance.data["scenedata"], filepath, ensure_ascii=False) self.log.info("Extracting point cache ..") - cmds.select(instance.data["hierarchy"]) + cmds.select(instance.data["nodesHierarchy"]) # Run basic alembic exporter extract_alembic(file=hierarchy_path, diff --git a/openpype/hosts/maya/plugins/publish/extract_assproxy.py b/openpype/hosts/maya/plugins/publish/extract_assproxy.py index 93720dbb82..4937a28a9e 100644 --- a/openpype/hosts/maya/plugins/publish/extract_assproxy.py +++ b/openpype/hosts/maya/plugins/publish/extract_assproxy.py @@ -3,17 +3,17 @@ import contextlib from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import maintained_selection -class ExtractAssProxy(openpype.api.Extractor): +class ExtractAssProxy(publish.Extractor): """Extract proxy model as Maya Ascii to use as arnold standin """ - order = openpype.api.Extractor.order + 0.2 + order = publish.Extractor.order + 0.2 label = "Ass Proxy (Maya ASCII)" hosts = ["maya"] families = ["ass"] diff --git a/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py b/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py index 5ad6b79d5c..aa445a0387 100644 --- a/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py +++ b/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py @@ -2,11 +2,11 @@ import os from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api import lib -class ExtractCameraAlembic(openpype.api.Extractor): +class ExtractCameraAlembic(publish.Extractor): """Extract a Camera as Alembic. The cameras gets baked to world space by default. 
Only when the instance's @@ -30,11 +30,10 @@ class ExtractCameraAlembic(openpype.api.Extractor): # get cameras members = instance.data['setMembers'] - cameras = cmds.ls(members, leaf=True, shapes=True, long=True, + cameras = cmds.ls(members, leaf=True, long=True, dag=True, type="camera") # validate required settings - assert len(cameras) == 1, "Not a single camera found in extraction" assert isinstance(step, float), "Step must be a float value" camera = cameras[0] @@ -44,8 +43,12 @@ class ExtractCameraAlembic(openpype.api.Extractor): path = os.path.join(dir_path, filename) # Perform alembic extraction + member_shapes = cmds.ls( + members, leaf=True, shapes=True, long=True, dag=True) with lib.maintained_selection(): - cmds.select(camera, replace=True, noExpand=True) + cmds.select( + member_shapes, + replace=True, noExpand=True) # Enforce forward slashes for AbcExport because we're # embedding it into a job string @@ -57,10 +60,32 @@ class ExtractCameraAlembic(openpype.api.Extractor): job_str += ' -step {0} '.format(step) if bake_to_worldspace: - transform = cmds.listRelatives(camera, - parent=True, - fullPath=True)[0] - job_str += ' -worldSpace -root {0}'.format(transform) + job_str += ' -worldSpace' + + # if baked, drop the camera hierarchy to maintain + # clean output and backwards compatibility + camera_root = cmds.listRelatives( + camera, parent=True, fullPath=True)[0] + job_str += ' -root {0}'.format(camera_root) + + for member in members: + descendants = cmds.listRelatives(member, + allDescendents=True, + fullPath=True) or [] + shapes = cmds.ls(descendants, shapes=True, + noIntermediate=True, long=True) + cameras = cmds.ls(shapes, type="camera", long=True) + if cameras: + if not set(shapes) - set(cameras): + continue + self.log.warning(( + "Camera hierarchy contains additional geometry. " + "Extraction will fail.") + ) + transform = cmds.listRelatives( + member, parent=True, fullPath=True) + transform = transform[0] if transform else member + job_str += ' -root {0}'.format(transform) job_str += ' -file "{0}"'.format(path) diff --git a/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py b/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py index 49c156f9cd..7467fa027d 100644 --- a/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py +++ b/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py @@ -5,7 +5,7 @@ import itertools from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api import lib @@ -78,7 +78,7 @@ def unlock(plug): cmds.disconnectAttr(source, destination) -class ExtractCameraMayaScene(openpype.api.Extractor): +class ExtractCameraMayaScene(publish.Extractor): """Extract a Camera as Maya Scene. 
This will create a duplicate of the camera that will be baked *with* @@ -131,12 +131,12 @@ class ExtractCameraMayaScene(openpype.api.Extractor): "bake to world space is ignored...") # get cameras - members = instance.data['setMembers'] + members = cmds.ls(instance.data['setMembers'], leaf=True, shapes=True, + long=True, dag=True) cameras = cmds.ls(members, leaf=True, shapes=True, long=True, dag=True, type="camera") # validate required settings - assert len(cameras) == 1, "Single camera must be found in extraction" assert isinstance(step, float), "Step must be a float value" camera = cameras[0] transform = cmds.listRelatives(camera, parent=True, fullPath=True) @@ -158,26 +158,37 @@ class ExtractCameraMayaScene(openpype.api.Extractor): frame_range=[start, end], step=step ) - baked_shapes = cmds.ls(baked, + baked_camera_shapes = cmds.ls(baked, type="camera", dag=True, shapes=True, long=True) + + members = members + baked_camera_shapes + members.remove(camera) else: - baked_shapes = cameras + baked_camera_shapes = cmds.ls(cameras, + type="camera", + dag=True, + shapes=True, + long=True) + + attrs = {"backgroundColorR": 0.0, + "backgroundColorG": 0.0, + "backgroundColorB": 0.0, + "overscan": 1.0} + # Fix PLN-178: Don't allow background color to be non-black - for cam in baked_shapes: - attrs = {"backgroundColorR": 0.0, - "backgroundColorG": 0.0, - "backgroundColorB": 0.0, - "overscan": 1.0} - for attr, value in attrs.items(): - plug = "{0}.{1}".format(cam, attr) - unlock(plug) - cmds.setAttr(plug, value) + for cam, (attr, value) in itertools.product(cmds.ls( + baked_camera_shapes, type="camera", dag=True, + long=True), attrs.items()): + plug = "{0}.{1}".format(cam, attr) + unlock(plug) + cmds.setAttr(plug, value) self.log.info("Performing extraction..") - cmds.select(baked_shapes, noExpand=True) + cmds.select(cmds.ls(members, dag=True, + shapes=True, long=True), noExpand=True) cmds.file(path, force=True, typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 diff --git a/openpype/hosts/maya/plugins/publish/extract_fbx.py b/openpype/hosts/maya/plugins/publish/extract_fbx.py index fbbe8e06b0..9af3acef65 100644 --- a/openpype/hosts/maya/plugins/publish/extract_fbx.py +++ b/openpype/hosts/maya/plugins/publish/extract_fbx.py @@ -4,13 +4,13 @@ import os from maya import cmds # noqa import maya.mel as mel # noqa import pyblish.api -import openpype.api -from openpype.hosts.maya.api.lib import maintained_selection +from openpype.pipeline import publish +from openpype.hosts.maya.api.lib import maintained_selection from openpype.hosts.maya.api import fbx -class ExtractFBX(openpype.api.Extractor): +class ExtractFBX(publish.Extractor): """Extract FBX from Maya. 
This extracts reproducible FBX exports ignoring any of the diff --git a/openpype/hosts/maya/plugins/publish/extract_layout.py b/openpype/hosts/maya/plugins/publish/extract_layout.py new file mode 100644 index 0000000000..a801d99f42 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_layout.py @@ -0,0 +1,149 @@ +import math +import os +import json + +from maya import cmds +from maya.api import OpenMaya as om + +from openpype.client import get_representation_by_id +from openpype.pipeline import legacy_io, publish + + +class ExtractLayout(publish.Extractor): + """Extract a layout.""" + + label = "Extract Layout" + hosts = ["maya"] + families = ["layout"] + optional = True + + def process(self, instance): + # Define extract output file path + stagingdir = self.staging_dir(instance) + + # Perform extraction + self.log.info("Performing extraction..") + + if "representations" not in instance.data: + instance.data["representations"] = [] + + json_data = [] + # TODO representation queries can be refactored to be faster + project_name = legacy_io.active_project() + + for asset in cmds.sets(str(instance), query=True): + # Find the container + grp_name = asset.split(':')[0] + containers = cmds.ls("{}*_CON".format(grp_name)) + + assert len(containers) == 1, \ + "More than one container found for {}".format(asset) + + container = containers[0] + + representation_id = cmds.getAttr( + "{}.representation".format(container)) + + representation = get_representation_by_id( + project_name, + representation_id, + fields=["parent", "context.family"] + ) + + self.log.info(representation) + + version_id = representation.get("parent") + family = representation.get("context").get("family") + + json_element = { + "family": family, + "instance_name": cmds.getAttr( + "{}.namespace".format(container)), + "representation": str(representation_id), + "version": str(version_id) + } + + loc = cmds.xform(asset, query=True, translation=True) + rot = cmds.xform(asset, query=True, rotation=True, euler=True) + scl = cmds.xform(asset, query=True, relative=True, scale=True) + + json_element["transform"] = { + "translation": { + "x": loc[0], + "y": loc[1], + "z": loc[2] + }, + "rotation": { + "x": math.radians(rot[0]), + "y": math.radians(rot[1]), + "z": math.radians(rot[2]) + }, + "scale": { + "x": scl[0], + "y": scl[1], + "z": scl[2] + } + } + + row_length = 4 + t_matrix_list = cmds.xform(asset, query=True, matrix=True) + + transform_mm = om.MMatrix(t_matrix_list) + transform = om.MTransformationMatrix(transform_mm) + + t = transform.translation(om.MSpace.kWorld) + t = om.MVector(t.x, t.z, -t.y) + transform.setTranslation(t, om.MSpace.kWorld) + transform.rotateBy( + om.MEulerRotation(math.radians(-90), 0, 0), om.MSpace.kWorld) + transform.scaleBy([1.0, 1.0, -1.0], om.MSpace.kObject) + + t_matrix_list = list(transform.asMatrix()) + + t_matrix = [] + for i in range(0, len(t_matrix_list), row_length): + t_matrix.append(t_matrix_list[i:i + row_length]) + + json_element["transform_matrix"] = [ + list(row) + for row in t_matrix + ] + + basis_list = [ + 1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, -1, 0, + 0, 0, 0, 1 + ] + + basis_mm = om.MMatrix(basis_list) + basis = om.MTransformationMatrix(basis_mm) + + b_matrix_list = list(basis.asMatrix()) + b_matrix = [] + + for i in range(0, len(b_matrix_list), row_length): + b_matrix.append(b_matrix_list[i:i + row_length]) + + json_element["basis"] = [] + for row in b_matrix: + json_element["basis"].append(list(row)) + + json_data.append(json_element) + + json_filename = 
"{}.json".format(instance.name) + json_path = os.path.join(stagingdir, json_filename) + + with open(json_path, "w+") as file: + json.dump(json_data, fp=file, indent=2) + + json_representation = { + 'name': 'json', + 'ext': 'json', + 'files': json_filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(json_representation) + + self.log.info("Extracted instance '%s' to: %s", + instance.name, json_representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index 881705b92c..403b4ee6bc 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -13,8 +13,8 @@ from maya import cmds # noqa import pyblish.api -import openpype.api -from openpype.pipeline import legacy_io +from openpype.lib import source_hash, run_subprocess +from openpype.pipeline import legacy_io, publish from openpype.hosts.maya.api import lib # Modes for transfer @@ -27,6 +27,31 @@ def escape_space(path): return '"{}"'.format(path) if " " in path else path +def get_ocio_config_path(profile_folder): + """Path to OpenPype vendorized OCIO. + + Vendorized OCIO config file path is grabbed from the specific path + hierarchy specified below. + + "{OPENPYPE_ROOT}/vendor/OpenColorIO-Configs/{profile_folder}/config.ocio" + Args: + profile_folder (str): Name of folder to grab config file from. + + Returns: + str: Path to vendorized config file. + """ + + return os.path.join( + os.environ["OPENPYPE_ROOT"], + "vendor", + "bin", + "ocioconfig", + "OpenColorIOConfigs", + profile_folder, + "config.ocio" + ) + + def find_paths_by_hash(texture_hash): """Find the texture hash key in the dictionary. @@ -43,7 +68,7 @@ def find_paths_by_hash(texture_hash): return legacy_io.distinct(key, {"type": "version"}) -def maketx(source, destination, *args): +def maketx(source, destination, args, logger): """Make `.tx` using `maketx` with some default settings. The settings are based on default as used in Arnold's @@ -54,7 +79,8 @@ def maketx(source, destination, *args): Args: source (str): Path to source file. destination (str): Writing destination path. - *args: Additional arguments for `maketx`. + args (list): Additional arguments for `maketx`. + logger (logging.Logger): Logger to log messages to. Returns: str: Output of `maketx` command. 
@@ -69,7 +95,7 @@ def maketx(source, destination, *args): "OIIO tool not found in {}".format(maketx_path)) raise AssertionError("OIIO tool not found") - cmd = [ + subprocess_args = [ maketx_path, "-v", # verbose "-u", # update mode @@ -78,26 +104,20 @@ def maketx(source, destination, *args): "--checknan", # use oiio-optimized settings for tile-size, planarconfig, metadata "--oiio", - "--filter lanczos3", + "--filter", "lanczos3", + source ] - cmd.extend(args) - cmd.extend(["-o", escape_space(destination), escape_space(source)]) + subprocess_args.extend(args) + subprocess_args.extend(["-o", destination]) - cmd = " ".join(cmd) + cmd = " ".join(subprocess_args) + logger.debug(cmd) - CREATE_NO_WINDOW = 0x08000000 # noqa - kwargs = dict(args=cmd, stderr=subprocess.STDOUT) - - if sys.platform == "win32": - kwargs["creationflags"] = CREATE_NO_WINDOW try: - out = subprocess.check_output(**kwargs) - except subprocess.CalledProcessError as exc: - print(exc) - import traceback - - traceback.print_exc() + out = run_subprocess(subprocess_args) + except Exception: + logger.error("Maketx converion failed", exc_info=True) raise return out @@ -135,7 +155,7 @@ def no_workspace_dir(): os.rmdir(fake_workspace_dir) -class ExtractLook(openpype.api.Extractor): +class ExtractLook(publish.Extractor): """Extract Look (Maya Scene + JSON) Only extracts the sets (shadingEngines and alike) alongside a .json file @@ -146,7 +166,7 @@ class ExtractLook(openpype.api.Extractor): label = "Extract Look (Maya Scene + JSON)" hosts = ["maya"] - families = ["look"] + families = ["look", "mvLook"] order = pyblish.api.ExtractorOrder + 0.2 scene_type = "ma" look_data_type = "json" @@ -372,10 +392,12 @@ class ExtractLook(openpype.api.Extractor): if mode == COPY: transfers.append((source, destination)) - self.log.info('copying') + self.log.info('file will be copied {} -> {}'.format( + source, destination)) elif mode == HARDLINK: hardlinks.append((source, destination)) - self.log.info('hardlinking') + self.log.info('file will be hardlinked {} -> {}'.format( + source, destination)) # Store the hashes from hash to destination to include in the # database @@ -403,7 +425,19 @@ class ExtractLook(openpype.api.Extractor): # node doesn't have color space attribute color_space = "Raw" else: - if files_metadata[source]["color_space"] == "Raw": + # get the resolved files + metadata = files_metadata.get(source) + # if the files are unresolved from `source` + # assume color space from the first file of + # the resource + if not metadata: + first_file = next(iter(resource.get( + "files", [])), None) + if not first_file: + continue + first_filepath = os.path.normpath(first_file) + metadata = files_metadata[first_filepath] + if metadata["color_space"] == "Raw": # set color space to raw if we linearized it color_space = "Raw" # Remap file node filename to destination @@ -465,7 +499,7 @@ class ExtractLook(openpype.api.Extractor): args = [] if do_maketx: args.append("maketx") - texture_hash = openpype.api.source_hash(filepath, *args) + texture_hash = source_hash(filepath, *args) # If source has been published before with the same settings, # then don't reprocess but hardlink from the original @@ -484,13 +518,17 @@ class ExtractLook(openpype.api.Extractor): if do_maketx and ext != ".tx": # Produce .tx file in staging if source file is not .tx converted = os.path.join(staging, "resources", fname + ".tx") - + additional_args = [ + "--sattrib", + "sourceHash", + texture_hash + ] if linearize: self.log.info("tx: converting sRGB -> linear") - colorconvert = 
"--colorconvert sRGB linear" - else: - colorconvert = "" + additional_args.extend(["--colorconvert", "sRGB", "linear"]) + config_path = get_ocio_config_path("nuke-default") + additional_args.extend(["--colorconfig", config_path]) # Ensure folder exists if not os.path.exists(os.path.dirname(converted)): os.makedirs(os.path.dirname(converted)) @@ -499,11 +537,8 @@ class ExtractLook(openpype.api.Extractor): maketx( filepath, converted, - # Include `source-hash` as string metadata - "-sattrib", - "sourceHash", - escape_space(texture_hash), - colorconvert, + additional_args, + self.log ) return converted, COPY, texture_hash diff --git a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py index 3a47cdadb5..3769ec3605 100644 --- a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py +++ b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py @@ -4,12 +4,11 @@ import os from maya import cmds -import openpype.api from openpype.hosts.maya.api.lib import maintained_selection -from openpype.pipeline import AVALON_CONTAINER_ID +from openpype.pipeline import AVALON_CONTAINER_ID, publish -class ExtractMayaSceneRaw(openpype.api.Extractor): +class ExtractMayaSceneRaw(publish.Extractor): """Extract as Maya Scene (raw). This will preserve all references, construction history, etc. diff --git a/openpype/hosts/maya/plugins/publish/extract_model.py b/openpype/hosts/maya/plugins/publish/extract_model.py index 0282d1e9c8..7c8c3a2981 100644 --- a/openpype/hosts/maya/plugins/publish/extract_model.py +++ b/openpype/hosts/maya/plugins/publish/extract_model.py @@ -4,11 +4,11 @@ import os from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api import lib -class ExtractModel(openpype.api.Extractor): +class ExtractModel(publish.Extractor): """Extract as Model (Maya Scene). Only extracts contents based on the original "setMembers" data to ensure diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_look.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_look.py new file mode 100644 index 0000000000..92137acb95 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_look.py @@ -0,0 +1,157 @@ +import os + +from maya import cmds + +from openpype.pipeline import publish +from openpype.hosts.maya.api.lib import maintained_selection + + +class ExtractMultiverseLook(publish.Extractor): + """Extractor for Multiverse USD look data. + + This will extract: + + - the shading networks that are assigned in MEOW as Maya material overrides + to a Multiverse Compound + - settings for a Multiverse Write Override operation. + + Relevant settings are visible in the Maya set node created by a Multiverse + USD Look instance creator. + + The input data contained in the set is: + + - a single Multiverse Compound node with any number of Maya material + overrides (typically set in MEOW) + + Upon publish two files will be written: + + - a .usda override file containing material assignment information + - a .ma file containing shading networks + + Note: when layering the material assignment override on a loaded Compound, + remember to set a matching attribute override with the namespace of + the loaded compound in order for the material assignment to resolve. 
+ """ + + label = "Extract Multiverse USD Look" + hosts = ["maya"] + families = ["mvLook"] + scene_type = "usda" + file_formats = ["usda", "usd"] + + @property + def options(self): + """Overridable options for Multiverse USD Export + + Given in the following format + - {NAME: EXPECTED TYPE} + + If the overridden option's type does not match, + the option is not included and a warning is logged. + + """ + + return { + "writeAll": bool, + "writeTransforms": bool, + "writeVisibility": bool, + "writeAttributes": bool, + "writeMaterials": bool, + "writeVariants": bool, + "writeVariantsDefinition": bool, + "writeActiveState": bool, + "writeNamespaces": bool, + "numTimeSamples": int, + "timeSamplesSpan": float + } + + @property + def default_options(self): + """The default options for Multiverse USD extraction.""" + + return { + "writeAll": False, + "writeTransforms": False, + "writeVisibility": False, + "writeAttributes": False, + "writeMaterials": True, + "writeVariants": False, + "writeVariantsDefinition": False, + "writeActiveState": False, + "writeNamespaces": False, + "numTimeSamples": 1, + "timeSamplesSpan": 0.0 + } + + def get_file_format(self, instance): + fileFormat = instance.data["fileFormat"] + if fileFormat in range(len(self.file_formats)): + self.scene_type = self.file_formats[fileFormat] + + def process(self, instance): + # Load plugin first + cmds.loadPlugin("MultiverseForMaya", quiet=True) + + # Define output file path + staging_dir = self.staging_dir(instance) + self.get_file_format(instance) + file_name = "{0}.{1}".format(instance.name, self.scene_type) + file_path = os.path.join(staging_dir, file_name) + file_path = file_path.replace('\\', '/') + + # Parse export options + options = self.default_options + self.log.info("Export options: {0}".format(options)) + + # Perform extraction + self.log.info("Performing extraction ...") + + with maintained_selection(): + members = instance.data("setMembers") + members = cmds.ls(members, + dag=True, + shapes=False, + type="mvUsdCompoundShape", + noIntermediate=True, + long=True) + self.log.info('Collected object {}'.format(members)) + if len(members) > 1: + self.log.error('More than one member: {}'.format(members)) + + import multiverse + + over_write_opts = multiverse.OverridesWriteOptions() + options_discard_keys = { + "numTimeSamples", + "timeSamplesSpan", + "frameStart", + "frameEnd", + "handleStart", + "handleEnd", + "step", + "fps" + } + for key, value in options.items(): + if key in options_discard_keys: + continue + setattr(over_write_opts, key, value) + + for member in members: + # @TODO: Make sure there is only one here. 
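            # One possible resolution of the TODO above (an assumption about
            # intent, not what the extractor currently does):
            #
            #     if len(members) > 1:
            #         raise RuntimeError(
            #             "Expected a single mvUsdCompoundShape member, "
            #             "got: {}".format(members))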
+ + self.log.debug("Writing Override for '{}'".format(member)) + multiverse.WriteOverrides(file_path, member, over_write_opts) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': self.scene_type, + 'ext': self.scene_type, + 'files': file_name, + 'stagingDir': staging_dir + } + instance.data["representations"].append(representation) + + self.log.info("Extracted instance {} to {}".format( + instance.name, file_path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py index 4e4efdc32c..6c352bebe6 100644 --- a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py +++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py @@ -3,16 +3,32 @@ import six from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import maintained_selection -class ExtractMultiverseUsd(openpype.api.Extractor): - """Extractor for USD by Multiverse.""" +class ExtractMultiverseUsd(publish.Extractor): + """Extractor for Multiverse USD Asset data. - label = "Extract Multiverse USD" + This will extract settings for a Multiverse Write Asset operation: + they are visible in the Maya set node created by a Multiverse USD + Asset instance creator. + + The input data contained in the set is: + + - a single hierarchy of Maya nodes. Multiverse supports a variety of Maya + nodes such as transforms, mesh, curves, particles, instances, particle + instancers, pfx, MASH, lights, cameras, joints, connected materials, + shading networks etc. including many of their attributes. + + Upon publish a .usd (or .usdz) asset file will be typically written. + """ + + label = "Extract Multiverse USD Asset" hosts = ["maya"] - families = ["usd"] + families = ["mvUsd"] + scene_type = "usd" + file_formats = ["usd", "usda", "usdz"] @property def options(self): @@ -57,6 +73,7 @@ class ExtractMultiverseUsd(openpype.api.Extractor): "writeShadingNetworks": bool, "writeTransformMatrix": bool, "writeUsdAttributes": bool, + "writeInstancesAsReferences": bool, "timeVaryingTopology": bool, "customMaterialNamespace": str, "numTimeSamples": int, @@ -98,6 +115,7 @@ class ExtractMultiverseUsd(openpype.api.Extractor): "writeShadingNetworks": False, "writeTransformMatrix": True, "writeUsdAttributes": False, + "writeInstancesAsReferences": False, "timeVaryingTopology": False, "customMaterialNamespace": str(), "numTimeSamples": 1, @@ -130,12 +148,15 @@ class ExtractMultiverseUsd(openpype.api.Extractor): return options def process(self, instance): - # Load plugin firstly + # Load plugin first cmds.loadPlugin("MultiverseForMaya", quiet=True) # Define output file path staging_dir = self.staging_dir(instance) - file_name = "{}.usd".format(instance.name) + file_format = instance.data.get("fileFormat", 0) + if file_format in range(len(self.file_formats)): + self.scene_type = self.file_formats[file_format] + file_name = "{0}.{1}".format(instance.name, self.scene_type) file_path = os.path.join(staging_dir, file_name) file_path = file_path.replace('\\', '/') @@ -149,12 +170,6 @@ class ExtractMultiverseUsd(openpype.api.Extractor): with maintained_selection(): members = instance.data("setMembers") - members = cmds.ls(members, - dag=True, - shapes=True, - type=("mesh"), - noIntermediate=True, - long=True) self.log.info('Collected object {}'.format(members)) import multiverse @@ -199,10 +214,10 @@ class ExtractMultiverseUsd(openpype.api.Extractor): 
instance.data["representations"] = [] representation = { - 'name': 'usd', - 'ext': 'usd', + 'name': self.scene_type, + 'ext': self.scene_type, 'files': file_name, - "stagingDir": staging_dir + 'stagingDir': staging_dir } instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py index 8fccc412e6..a62729c198 100644 --- a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py +++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py @@ -2,16 +2,33 @@ import os from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import maintained_selection -class ExtractMultiverseUsdComposition(openpype.api.Extractor): - """Extractor of Multiverse USD Composition.""" +class ExtractMultiverseUsdComposition(publish.Extractor): + """Extractor of Multiverse USD Composition data. + + This will extract settings for a Multiverse Write Composition operation: + they are visible in the Maya set node created by a Multiverse USD + Composition instance creator. + + The input data contained in the set is either: + + - a single hierarchy consisting of several Multiverse Compound nodes, with + any number of layers, and Maya transform nodes + - a single Compound node with more than one layer (in this case the "Write + as Compound Layers" option should be set). + + Upon publish a .usda composition file will be written. + """ label = "Extract Multiverse USD Composition" hosts = ["maya"] - families = ["usdComposition"] + families = ["mvUsdComposition"] + scene_type = "usd" + # Order of `fileFormat` must match create_multiverse_usd_comp.py + file_formats = ["usda", "usd"] @property def options(self): @@ -29,6 +46,7 @@ class ExtractMultiverseUsdComposition(openpype.api.Extractor): "stripNamespaces": bool, "mergeTransformAndShape": bool, "flattenContent": bool, + "writeAsCompoundLayers": bool, "writePendingOverrides": bool, "numTimeSamples": int, "timeSamplesSpan": float @@ -42,6 +60,7 @@ class ExtractMultiverseUsdComposition(openpype.api.Extractor): "stripNamespaces": True, "mergeTransformAndShape": False, "flattenContent": False, + "writeAsCompoundLayers": False, "writePendingOverrides": False, "numTimeSamples": 1, "timeSamplesSpan": 0.0 @@ -71,12 +90,15 @@ class ExtractMultiverseUsdComposition(openpype.api.Extractor): return options def process(self, instance): - # Load plugin firstly + # Load plugin first cmds.loadPlugin("MultiverseForMaya", quiet=True) # Define output file path staging_dir = self.staging_dir(instance) - file_name = "{}.usd".format(instance.name) + file_format = instance.data.get("fileFormat", 0) + if file_format in range(len(self.file_formats)): + self.scene_type = self.file_formats[file_format] + file_name = "{0}.{1}".format(instance.name, self.scene_type) file_path = os.path.join(staging_dir, file_name) file_path = file_path.replace('\\', '/') @@ -90,12 +112,6 @@ class ExtractMultiverseUsdComposition(openpype.api.Extractor): with maintained_selection(): members = instance.data("setMembers") - members = cmds.ls(members, - dag=True, - shapes=True, - type="mvUsdCompoundShape", - noIntermediate=True, - long=True) self.log.info('Collected object {}'.format(members)) import multiverse @@ -119,6 +135,18 @@ class ExtractMultiverseUsdComposition(openpype.api.Extractor): time_opts.framePerSecond = fps comp_write_opts = multiverse.CompositionWriteOptions() + + """ + OP tells MV to write 
to a staging directory, and then moves the
+        file to its final publish directory. By default, MV writes relative
+        paths, but these paths will break when the referencing file moves.
+        This option forces writes to absolute paths, which is OK within OP
+        because all published assets have static paths, and MV can only
+        reference published assets. When a proper UsdAssetResolver is used,
+        this won't be needed.
+        """
+        comp_write_opts.forceAbsolutePaths = True
+
         options_discard_keys = {
             'numTimeSamples',
             'timeSamplesSpan',
@@ -140,10 +168,10 @@ class ExtractMultiverseUsdComposition(openpype.api.Extractor):
             instance.data["representations"] = []
 
         representation = {
-            'name': 'usd',
-            'ext': 'usd',
+            'name': self.scene_type,
+            'ext': self.scene_type,
             'files': file_name,
-            "stagingDir": staging_dir
+            'stagingDir': staging_dir
         }
         instance.data["representations"].append(representation)
diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py
index ce0e8a392a..0628623e88 100644
--- a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py
+++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py
@@ -1,17 +1,32 @@
 import os
 
-import openpype.api
+from openpype.pipeline import publish
 from openpype.hosts.maya.api.lib import maintained_selection
 
 from maya import cmds
 
 
-class ExtractMultiverseUsdOverride(openpype.api.Extractor):
-    """Extractor for USD Override by Multiverse."""
+class ExtractMultiverseUsdOverride(publish.Extractor):
+    """Extractor for Multiverse USD Override data.
+
+    This will extract settings for a Multiverse Write Override operation:
+    they are visible in the Maya set node created by a Multiverse USD
+    Override instance creator.
+
+    The input data contained in the set is:
+
+    - a single Multiverse Compound node with any number of overrides (typically
+      set in MEOW)
+
+    Upon publish a .usda override file will be written.
+ """ label = "Extract Multiverse USD Override" hosts = ["maya"] - families = ["usdOverride"] + families = ["mvUsdOverride"] + scene_type = "usd" + # Order of `fileFormat` must match create_multiverse_usd_over.py + file_formats = ["usda", "usd"] @property def options(self): @@ -58,12 +73,15 @@ class ExtractMultiverseUsdOverride(openpype.api.Extractor): } def process(self, instance): - # Load plugin firstly + # Load plugin first cmds.loadPlugin("MultiverseForMaya", quiet=True) # Define output file path staging_dir = self.staging_dir(instance) - file_name = "{}.usda".format(instance.name) + file_format = instance.data.get("fileFormat", 0) + if file_format in range(len(self.file_formats)): + self.scene_type = self.file_formats[file_format] + file_name = "{0}.{1}".format(instance.name, self.scene_type) file_path = os.path.join(staging_dir, file_name) file_path = file_path.replace("\\", "/") @@ -78,7 +96,7 @@ class ExtractMultiverseUsdOverride(openpype.api.Extractor): members = instance.data("setMembers") members = cmds.ls(members, dag=True, - shapes=True, + shapes=False, type="mvUsdCompoundShape", noIntermediate=True, long=True) @@ -128,10 +146,10 @@ class ExtractMultiverseUsdOverride(openpype.api.Extractor): instance.data["representations"] = [] representation = { - "name": "usd", - "ext": "usd", - "files": file_name, - "stagingDir": staging_dir + 'name': self.scene_type, + 'ext': self.scene_type, + 'files': file_name, + 'stagingDir': staging_dir } instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_obj.py b/openpype/hosts/maya/plugins/publish/extract_obj.py new file mode 100644 index 0000000000..edfe0b9439 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_obj.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +import os + +from maya import cmds +# import maya.mel as mel +import pyblish.api +from openpype.pipeline import publish +from openpype.hosts.maya.api import lib + + +class ExtractObj(publish.Extractor): + """Extract OBJ from Maya. + + This extracts reproducible OBJ exports ignoring any of the settings + set on the local machine in the OBJ export options window. 
+
+    """
+    order = pyblish.api.ExtractorOrder
+    hosts = ["maya"]
+    label = "Extract OBJ"
+    families = ["model"]
+
+    def process(self, instance):
+
+        # Define output path
+
+        staging_dir = self.staging_dir(instance)
+        filename = "{0}.obj".format(instance.name)
+        path = os.path.join(staging_dir, filename)
+
+        # The export requires forward slashes because we need to
+        # format it into a string in a mel expression
+
+        self.log.info("Extracting OBJ to: {0}".format(path))
+
+        members = instance.data("setMembers")
+        members = cmds.ls(members,
+                          dag=True,
+                          shapes=True,
+                          type=("mesh", "nurbsCurve"),
+                          noIntermediate=True,
+                          long=True)
+        self.log.info("Members: {0}".format(members))
+        self.log.info("Instance: {0}".format(instance[:]))
+
+        if not cmds.pluginInfo('objExport', query=True, loaded=True):
+            cmds.loadPlugin('objExport')
+
+        # Export
+        with lib.no_display_layers(instance):
+            with lib.displaySmoothness(members,
+                                       divisionsU=0,
+                                       divisionsV=0,
+                                       pointsWire=4,
+                                       pointsShaded=1,
+                                       polygonObject=1):
+                with lib.shader(members,
+                                shadingEngine="initialShadingGroup"):
+                    with lib.maintained_selection():
+                        cmds.select(members, noExpand=True)
+                        cmds.file(path,
+                                  exportSelected=True,
+                                  type='OBJexport',
+                                  preserveReferences=True,
+                                  force=True)
+
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+
+        representation = {
+            'name': 'obj',
+            'ext': 'obj',
+            'files': filename,
+            "stagingDir": staging_dir,
+        }
+        instance.data["representations"].append(representation)
+
+        self.log.info("Extract OBJ successful to: {0}".format(path))
diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py
index bb1ecf279d..b19d24fad7 100644
--- a/openpype/hosts/maya/plugins/publish/extract_playblast.py
+++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py
@@ -1,17 +1,16 @@
 import os
-import glob
-import contextlib
+
 import clique
 import capture
+from openpype.pipeline import publish
 from openpype.hosts.maya.api import lib
-import openpype.api
 
 from maya import cmds
 import pymel.core as pm
 
 
-class ExtractPlayblast(openpype.api.Extractor):
+class ExtractPlayblast(publish.Extractor):
     """Extract viewport playblast.
Takes review camera and creates review Quicktime video based on viewport @@ -50,12 +49,38 @@ class ExtractPlayblast(openpype.api.Extractor): ['override_viewport_options'] ) preset = lib.load_capture_preset(data=self.capture_preset) - + # Grab capture presets from the project settings + capture_presets = self.capture_preset + # Set resolution variables from capture presets + width_preset = capture_presets["Resolution"]["width"] + height_preset = capture_presets["Resolution"]["height"] + # Set resolution variables from asset values + asset_data = instance.data["assetEntity"]["data"] + asset_width = asset_data.get("resolutionWidth") + asset_height = asset_data.get("resolutionHeight") + review_instance_width = instance.data.get("review_width") + review_instance_height = instance.data.get("review_height") preset['camera'] = camera + + # Tests if project resolution is set, + # if it is a value other than zero, that value is + # used, if not then the asset resolution is + # used + if review_instance_width and review_instance_height: + preset['width'] = review_instance_width + preset['height'] = review_instance_height + elif width_preset and height_preset: + preset['width'] = width_preset + preset['height'] = height_preset + elif asset_width and asset_height: + preset['width'] = asset_width + preset['height'] = asset_height preset['start_frame'] = start preset['end_frame'] = end - camera_option = preset.get("camera_option", {}) - camera_option["depthOfField"] = cmds.getAttr( + + # Enforce persisting camera depth of field + camera_options = preset.setdefault("camera_options", {}) + camera_options["depthOfField"] = cmds.getAttr( "{0}.depthOfField".format(camera)) stagingdir = self.staging_dir(instance) @@ -90,7 +115,7 @@ class ExtractPlayblast(openpype.api.Extractor): else: preset["viewport_options"] = {"imagePlane": image_plane} - with maintained_time(): + with lib.maintained_time(): filename = preset.get("filename", "%TEMP%") # Force viewer to False in call to capture because we have our own @@ -103,15 +128,20 @@ class ExtractPlayblast(openpype.api.Extractor): # Update preset with current panel setting # if override_viewport_options is turned off if not override_viewport_options: + panel = cmds.getPanel(withFocus=True) panel_preset = capture.parse_active_view() preset.update(panel_preset) + cmds.setFocus(panel) - path = capture.capture(**preset) + path = capture.capture(log=self.log, **preset) self.log.debug("playblast path {}".format(path)) collected_files = os.listdir(stagingdir) - collections, remainder = clique.assemble(collected_files) + patterns = [clique.PATTERNS["frames"]] + collections, remainder = clique.assemble(collected_files, + minimum_items=1, + patterns=patterns) self.log.debug("filename {}".format(filename)) frame_collection = None @@ -134,10 +164,15 @@ class ExtractPlayblast(openpype.api.Extractor): # Add camera node name to representation data camera_node_name = pm.ls(camera)[0].getTransform().name() + collected_files = list(frame_collection) + # single frame file shouldn't be in list, only as a string + if len(collected_files) == 1: + collected_files = collected_files[0] + representation = { 'name': 'png', 'ext': 'png', - 'files': list(frame_collection), + 'files': collected_files, "stagingDir": stagingdir, "frameStart": start, "frameEnd": end, @@ -147,12 +182,3 @@ class ExtractPlayblast(openpype.api.Extractor): 'camera_name': camera_node_name } instance.data["representations"].append(representation) - - -@contextlib.contextmanager -def maintained_time(): - ct = 
cmds.currentTime(query=True) - try: - yield - finally: - cmds.currentTime(ct, edit=True) diff --git a/openpype/hosts/maya/plugins/publish/extract_pointcache.py b/openpype/hosts/maya/plugins/publish/extract_pointcache.py index 60502fdde1..7c1c6d5c12 100644 --- a/openpype/hosts/maya/plugins/publish/extract_pointcache.py +++ b/openpype/hosts/maya/plugins/publish/extract_pointcache.py @@ -2,20 +2,23 @@ import os from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import ( extract_alembic, suspended_refresh, - maintained_selection + maintained_selection, + iter_visible_nodes_in_range ) -class ExtractAlembic(openpype.api.Extractor): +class ExtractAlembic(publish.Extractor): """Produce an alembic of just point positions and normals. Positions and normals, uvs, creases are preserved, but nothing more, for plain and predictable point caches. + Plugin can run locally or remotely (on a farm - if instance is marked with + "farm" it will be skipped in local processing, but processed on farm) """ label = "Extract Pointcache (Alembic)" @@ -23,10 +26,14 @@ class ExtractAlembic(openpype.api.Extractor): families = ["pointcache", "model", "vrayproxy"] + targets = ["local", "remote"] def process(self, instance): + if instance.data.get("farm"): + self.log.debug("Should be processed on farm, skipping.") + return - nodes = instance[:] + nodes, roots = self.get_members_and_roots(instance) # Collect the start and end including handles start = float(instance.data.get("frameStartHandle", 1)) @@ -39,10 +46,6 @@ class ExtractAlembic(openpype.api.Extractor): attr_prefixes = instance.data.get("attrPrefix", "").split(";") attr_prefixes = [value for value in attr_prefixes if value.strip()] - # Get extra export arguments - writeColorSets = instance.data.get("writeColorSets", False) - writeFaceSets = instance.data.get("writeFaceSets", False) - self.log.info("Extracting pointcache..") dirname = self.staging_dir(instance) @@ -56,8 +59,8 @@ class ExtractAlembic(openpype.api.Extractor): "attrPrefix": attr_prefixes, "writeVisibility": True, "writeCreases": True, - "writeColorSets": writeColorSets, - "writeFaceSets": writeFaceSets, + "writeColorSets": instance.data.get("writeColorSets", False), + "writeFaceSets": instance.data.get("writeFaceSets", False), "uvWrite": True, "selection": True, "worldSpace": instance.data.get("worldSpace", True) @@ -67,12 +70,22 @@ class ExtractAlembic(openpype.api.Extractor): # Set the root nodes if we don't want to include parents # The roots are to be considered the ones that are the actual # direct members of the set - options["root"] = instance.data.get("setMembers") + options["root"] = roots if int(cmds.about(version=True)) >= 2017: # Since Maya 2017 alembic supports multiple uv sets - write them. options["writeUVSets"] = True + if instance.data.get("visibleOnly", False): + # If we only want to include nodes that are visible in the frame + # range then we need to do our own check. Alembic's `visibleOnly` + # flag does not filter out those that are only hidden on some + # frames as it counts "animated" or "connected" visibilities as + # if it's always visible. 
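+            # Editorial sketch (illustration only, not part of the original
+            # change): logging how many nodes the visibility filter removes
+            # can make farm/local differences easier to debug, e.g.:
+            #
+            #     visible = list(iter_visible_nodes_in_range(nodes,
+            #                                                start=start,
+            #                                                end=end))
+            #     self.log.debug("visibleOnly kept {} of {} nodes".format(
+            #         len(visible), len(nodes)))
+            #     nodes = visible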
+ nodes = list(iter_visible_nodes_in_range(nodes, + start=start, + end=end)) + with suspended_refresh(): with maintained_selection(): cmds.select(nodes, noExpand=True) @@ -92,4 +105,31 @@ class ExtractAlembic(openpype.api.Extractor): } instance.data["representations"].append(representation) + instance.context.data["cleanupFullPaths"].append(path) + self.log.info("Extracted {} to {}".format(instance, dirname)) + + def get_members_and_roots(self, instance): + return instance[:], instance.data.get("setMembers") + + +class ExtractAnimation(ExtractAlembic): + label = "Extract Animation" + families = ["animation"] + + def get_members_and_roots(self, instance): + + # Collect the out set nodes + out_sets = [node for node in instance if node.endswith("out_SET")] + if len(out_sets) != 1: + raise RuntimeError("Couldn't find exactly one out_SET: " + "{0}".format(out_sets)) + out_set = out_sets[0] + roots = cmds.sets(out_set, query=True) + + # Include all descendants + nodes = roots + cmds.listRelatives(roots, + allDescendents=True, + fullPath=True) or [] + + return nodes, roots diff --git a/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py index 23cac9190d..4377275635 100644 --- a/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py +++ b/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py @@ -4,11 +4,11 @@ import os from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import maintained_selection -class ExtractRedshiftProxy(openpype.api.Extractor): +class ExtractRedshiftProxy(publish.Extractor): """Extract the content of the instance to a redshift proxy file.""" label = "Redshift Proxy (.rs)" diff --git a/openpype/hosts/maya/plugins/publish/extract_rendersetup.py b/openpype/hosts/maya/plugins/publish/extract_rendersetup.py index 6bdd5f590e..5970c038a4 100644 --- a/openpype/hosts/maya/plugins/publish/extract_rendersetup.py +++ b/openpype/hosts/maya/plugins/publish/extract_rendersetup.py @@ -1,10 +1,11 @@ -import json import os -import openpype.api +import json + import maya.app.renderSetup.model.renderSetup as renderSetup +from openpype.pipeline import publish -class ExtractRenderSetup(openpype.api.Extractor): +class ExtractRenderSetup(publish.Extractor): """ Produce renderSetup template file diff --git a/openpype/hosts/maya/plugins/publish/extract_rig.py b/openpype/hosts/maya/plugins/publish/extract_rig.py index 53c1eeb671..c71a2f710d 100644 --- a/openpype/hosts/maya/plugins/publish/extract_rig.py +++ b/openpype/hosts/maya/plugins/publish/extract_rig.py @@ -4,11 +4,11 @@ import os from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import maintained_selection -class ExtractRig(openpype.api.Extractor): +class ExtractRig(publish.Extractor): """Extract rig as Maya Scene.""" label = "Extract Rig (Maya Scene)" diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py index c2cefc56f1..712159c2be 100644 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py @@ -1,17 +1,17 @@ import os -import contextlib import glob +import tempfile import capture +from openpype.pipeline import publish from openpype.hosts.maya.api import lib -import openpype.api from maya import cmds import pymel.core as pm -class ExtractThumbnail(openpype.api.Extractor): +class 
ExtractThumbnail(publish.Extractor): """Extract viewport thumbnail. Takes review camera and creates a thumbnail based on viewport @@ -28,7 +28,6 @@ class ExtractThumbnail(openpype.api.Extractor): camera = instance.data['review_camera'] - capture_preset = "" capture_preset = ( instance.context.data["project_settings"]['maya']['publish']['ExtractPlayblast']['capture_preset'] ) @@ -60,10 +59,40 @@ class ExtractThumbnail(openpype.api.Extractor): "overscan": 1.0, "depthOfField": cmds.getAttr("{0}.depthOfField".format(camera)), } + capture_presets = capture_preset + # Set resolution variables from capture presets + width_preset = capture_presets["Resolution"]["width"] + height_preset = capture_presets["Resolution"]["height"] + # Set resolution variables from asset values + asset_data = instance.data["assetEntity"]["data"] + asset_width = asset_data.get("resolutionWidth") + asset_height = asset_data.get("resolutionHeight") + review_instance_width = instance.data.get("review_width") + review_instance_height = instance.data.get("review_height") + # Tests if project resolution is set, + # if it is a value other than zero, that value is + # used, if not then the asset resolution is + # used + if review_instance_width and review_instance_height: + preset['width'] = review_instance_width + preset['height'] = review_instance_height + elif width_preset and height_preset: + preset['width'] = width_preset + preset['height'] = height_preset + elif asset_width and asset_height: + preset['width'] = asset_width + preset['height'] = asset_height - stagingDir = self.staging_dir(instance) + # Create temp directory for thumbnail + # - this is to avoid "override" of source file + dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + self.log.debug( + "Create temp directory {} for thumbnail".format(dst_staging) + ) + # Store new staging to cleanup paths + instance.context.data["cleanupFullPaths"].append(dst_staging) filename = "{0}".format(instance.name) - path = os.path.join(stagingDir, filename) + path = os.path.join(dst_staging, filename) self.log.info("Outputting images to %s" % path) @@ -81,9 +110,14 @@ class ExtractThumbnail(openpype.api.Extractor): if preset.pop("isolate_view", False) and instance.data.get("isolate"): preset["isolate"] = instance.data["setMembers"] - with maintained_time(): - filename = preset.get("filename", "%TEMP%") + # Show or Hide Image Plane + image_plane = instance.data.get("imagePlane", True) + if "viewport_options" in preset: + preset["viewport_options"]["imagePlane"] = image_plane + else: + preset["viewport_options"] = {"imagePlane": image_plane} + with lib.maintained_time(): # Force viewer to False in call to capture because we have our own # viewer opening call to allow a signal to trigger between # playblast and viewer @@ -92,14 +126,17 @@ class ExtractThumbnail(openpype.api.Extractor): # Update preset with current panel setting # if override_viewport_options is turned off if not override_viewport_options: + panel = cmds.getPanel(withFocus=True) panel_preset = capture.parse_active_view() preset.update(panel_preset) + cmds.setFocus(panel) path = capture.capture(**preset) playblast = self._fix_playblast_output_path(path) _, thumbnail = os.path.split(playblast) + self.log.info("file list {}".format(thumbnail)) if "representations" not in instance.data: @@ -109,7 +146,7 @@ class ExtractThumbnail(openpype.api.Extractor): 'name': 'thumbnail', 'ext': 'jpg', 'files': thumbnail, - "stagingDir": stagingDir, + "stagingDir": dst_staging, "thumbnail": True } 
instance.data["representations"].append(representation) @@ -152,12 +189,3 @@ class ExtractThumbnail(openpype.api.Extractor): filepath = max(files, key=os.path.getmtime) return filepath - - -@contextlib.contextmanager -def maintained_time(): - ct = cmds.currentTime(query=True) - try: - yield - finally: - cmds.currentTime(ct, edit=True) diff --git a/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh.py b/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh.py index 7ef7f2f181..258120db2f 100644 --- a/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh.py +++ b/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh.py @@ -6,7 +6,8 @@ from contextlib import contextmanager from maya import cmds # noqa import pyblish.api -import openpype.api + +from openpype.pipeline import publish from openpype.hosts.maya.api import fbx @@ -20,7 +21,7 @@ def renamed(original_name, renamed_name): cmds.rename(renamed_name, original_name) -class ExtractUnrealSkeletalMesh(openpype.api.Extractor): +class ExtractUnrealSkeletalMesh(publish.Extractor): """Extract Unreal Skeletal Mesh as FBX from Maya. """ order = pyblish.api.ExtractorOrder - 0.1 diff --git a/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py b/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py index 69d51f9ff1..44f0615a27 100644 --- a/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py +++ b/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py @@ -5,7 +5,8 @@ import os from maya import cmds # noqa import pyblish.api -import openpype.api + +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import ( parent_nodes, maintained_selection @@ -13,7 +14,7 @@ from openpype.hosts.maya.api.lib import ( from openpype.hosts.maya.api import fbx -class ExtractUnrealStaticMesh(openpype.api.Extractor): +class ExtractUnrealStaticMesh(publish.Extractor): """Extract Unreal Static Mesh as FBX from Maya. 
""" order = pyblish.api.ExtractorOrder - 0.1 diff --git a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py b/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py index 562ca078e1..38bf02245a 100644 --- a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py +++ b/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py @@ -2,11 +2,11 @@ import os from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import maintained_selection -class ExtractVRayProxy(openpype.api.Extractor): +class ExtractVRayProxy(publish.Extractor): """Extract the content of the instance to a vrmesh file Things to pay attention to: diff --git a/openpype/hosts/maya/plugins/publish/extract_vrayscene.py b/openpype/hosts/maya/plugins/publish/extract_vrayscene.py index 5d41697e5f..8442df1611 100644 --- a/openpype/hosts/maya/plugins/publish/extract_vrayscene.py +++ b/openpype/hosts/maya/plugins/publish/extract_vrayscene.py @@ -3,14 +3,14 @@ import os import re -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api.render_setup_tools import export_in_rs_layer from openpype.hosts.maya.api.lib import maintained_selection from maya import cmds -class ExtractVrayscene(openpype.api.Extractor): +class ExtractVrayscene(publish.Extractor): """Extractor for vrscene.""" label = "VRay Scene (.vrscene)" diff --git a/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py b/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py index 5728682abe..77350f343e 100644 --- a/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py +++ b/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py @@ -2,14 +2,14 @@ import os from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api.lib import ( suspended_refresh, maintained_selection ) -class ExtractXgenCache(openpype.api.Extractor): +class ExtractXgenCache(publish.Extractor): """Produce an alembic of just xgen interactive groom """ diff --git a/openpype/hosts/maya/plugins/publish/extract_yeti_cache.py b/openpype/hosts/maya/plugins/publish/extract_yeti_cache.py index 0d85708789..b61f599cab 100644 --- a/openpype/hosts/maya/plugins/publish/extract_yeti_cache.py +++ b/openpype/hosts/maya/plugins/publish/extract_yeti_cache.py @@ -3,10 +3,10 @@ import json from maya import cmds -import openpype.api +from openpype.pipeline import publish -class ExtractYetiCache(openpype.api.Extractor): +class ExtractYetiCache(publish.Extractor): """Producing Yeti cache files using scene time range. This will extract Yeti cache file sequence and fur settings. 
@@ -25,13 +25,10 @@ class ExtractYetiCache(openpype.api.Extractor): # Define extract output file path dirname = self.staging_dir(instance) - # Yeti related staging dirs - data_file = os.path.join(dirname, "yeti.fursettings") - # Collect information for writing cache - start_frame = instance.data.get("frameStartHandle") - end_frame = instance.data.get("frameEndHandle") - preroll = instance.data.get("preroll") + start_frame = instance.data["frameStartHandle"] + end_frame = instance.data["frameEndHandle"] + preroll = instance.data["preroll"] if preroll > 0: start_frame -= preroll @@ -57,32 +54,35 @@ class ExtractYetiCache(openpype.api.Extractor): cache_files = [x for x in os.listdir(dirname) if x.endswith(".fur")] self.log.info("Writing metadata file") - settings = instance.data.get("fursettings", None) - if settings is not None: - with open(data_file, "w") as fp: - json.dump(settings, fp, ensure_ascii=False) + settings = instance.data["fursettings"] + fursettings_path = os.path.join(dirname, "yeti.fursettings") + with open(fursettings_path, "w") as fp: + json.dump(settings, fp, ensure_ascii=False) # build representations if "representations" not in instance.data: instance.data["representations"] = [] self.log.info("cache files: {}".format(cache_files[0])) - instance.data["representations"].append( - { - 'name': 'fur', - 'ext': 'fur', - 'files': cache_files[0] if len(cache_files) == 1 else cache_files, - 'stagingDir': dirname, - 'frameStart': int(start_frame), - 'frameEnd': int(end_frame) - } - ) + + # Workaround: We do not explicitly register these files with the + # representation solely so that we can write multiple sequences + # a single Subset without renaming - it's a bit of a hack + # TODO: Implement better way to manage this sort of integration + if 'transfers' not in instance.data: + instance.data['transfers'] = [] + + publish_dir = instance.data["publishDir"] + for cache_filename in cache_files: + src = os.path.join(dirname, cache_filename) + dst = os.path.join(publish_dir, os.path.basename(cache_filename)) + instance.data['transfers'].append([src, dst]) instance.data["representations"].append( { - 'name': 'fursettings', + 'name': 'fur', 'ext': 'fursettings', - 'files': os.path.basename(data_file), + 'files': os.path.basename(fursettings_path), 'stagingDir': dirname } ) diff --git a/openpype/hosts/maya/plugins/publish/extract_yeti_rig.py b/openpype/hosts/maya/plugins/publish/extract_yeti_rig.py index d12567a55a..1d0c5e88c3 100644 --- a/openpype/hosts/maya/plugins/publish/extract_yeti_rig.py +++ b/openpype/hosts/maya/plugins/publish/extract_yeti_rig.py @@ -7,7 +7,7 @@ import contextlib from maya import cmds -import openpype.api +from openpype.pipeline import publish from openpype.hosts.maya.api import lib @@ -90,7 +90,7 @@ def yetigraph_attribute_values(assumed_destination, resources): pass -class ExtractYetiRig(openpype.api.Extractor): +class ExtractYetiRig(publish.Extractor): """Extract the Yeti rig to a Maya Scene and write the Yeti rig data.""" label = "Extract Yeti Rig" @@ -124,8 +124,8 @@ class ExtractYetiRig(openpype.api.Extractor): settings_path = os.path.join(dirname, "yeti.rigsettings") # Yeti related staging dirs - maya_path = os.path.join( - dirname, "yeti_rig.{}".format(self.scene_type)) + maya_path = os.path.join(dirname, + "yeti_rig.{}".format(self.scene_type)) self.log.info("Writing metadata file") @@ -157,7 +157,7 @@ class ExtractYetiRig(openpype.api.Extractor): input_set = next(i for i in instance if i == "input_SET") # Get all items - set_members = 
cmds.sets(input_set, query=True) + set_members = cmds.sets(input_set, query=True) or [] set_members += cmds.listRelatives(set_members, allDescendents=True, fullPath=True) or [] @@ -167,7 +167,7 @@ class ExtractYetiRig(openpype.api.Extractor): resources = instance.data.get("resources", {}) with disconnect_plugs(settings, members): with yetigraph_attribute_values(resources_dir, resources): - with maya.attribute_values(attr_value): + with lib.attribute_values(attr_value): cmds.select(nodes, noExpand=True) cmds.file(maya_path, force=True, diff --git a/openpype/hosts/maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml b/openpype/hosts/maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml new file mode 100644 index 0000000000..e92320ccdc --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml @@ -0,0 +1,16 @@ + + + +Errors found + +## Publish process has errors + +At least one plugin failed before this plugin, job won't be sent to Deadline for processing before all issues are fixed. + +### How to repair? + +Check all failing plugins (should be highlighted in red) and fix issues if possible. + + + + \ No newline at end of file diff --git a/openpype/hosts/maya/plugins/publish/help/validate_review_subset_uniqueness.xml b/openpype/hosts/maya/plugins/publish/help/validate_review_subset_uniqueness.xml new file mode 100644 index 0000000000..fd1bf4cbaa --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/help/validate_review_subset_uniqueness.xml @@ -0,0 +1,28 @@ + + + + Review subsets not unique + + ## Non unique subset name found + + Non unique subset names: '{non_unique}' + + ### __Detailed Info__ (optional) + + This might happen if you already published for this asset + review subset with legacy name {task}Review. + This legacy name limits possibility of publishing of multiple + reviews from a single workfile. Proper review subset name should + now + contain variant also (as 'Main', 'Default' etc.). That would + result in completely new subset though, so this situation must + be handled manually. + + ### How to repair? + + Legacy subsets must be removed from Openpype DB, please ask admin + to do that. Please provide them asset and subset names. 
+ + + + \ No newline at end of file diff --git a/openpype/hosts/maya/plugins/publish/increment_current_file_deadline.py b/openpype/hosts/maya/plugins/publish/increment_current_file_deadline.py index f9cfac3eb9..b5d5847e9f 100644 --- a/openpype/hosts/maya/plugins/publish/increment_current_file_deadline.py +++ b/openpype/hosts/maya/plugins/publish/increment_current_file_deadline.py @@ -16,12 +16,11 @@ class IncrementCurrentFileDeadline(pyblish.api.ContextPlugin): def process(self, context): - import os from maya import cmds from openpype.lib import version_up - from openpype.action import get_errored_plugins_from_data + from openpype.pipeline.publish import get_errored_plugins_from_context - errored_plugins = get_errored_plugins_from_data(context) + errored_plugins = get_errored_plugins_from_context(context) if any(plugin.__name__ == "MayaSubmitDeadline" for plugin in errored_plugins): raise RuntimeError("Skipping incrementing current file because " diff --git a/openpype/hosts/maya/plugins/publish/save_scene.py b/openpype/hosts/maya/plugins/publish/save_scene.py index 50a2f2112a..45e62e7b44 100644 --- a/openpype/hosts/maya/plugins/publish/save_scene.py +++ b/openpype/hosts/maya/plugins/publish/save_scene.py @@ -1,4 +1,8 @@ import pyblish.api +from openpype.pipeline.workfile.lock_workfile import ( + is_workfile_lock_enabled, + remove_workfile_lock +) class SaveCurrentScene(pyblish.api.ContextPlugin): @@ -22,6 +26,10 @@ class SaveCurrentScene(pyblish.api.ContextPlugin): self.log.debug("Skipping file save as there " "are no modifications..") return - + project_name = context.data["projectName"] + project_settings = context.data["project_settings"] + # remove lockfile before saving + if is_workfile_lock_enabled("maya", project_name, project_settings): + remove_workfile_lock(current) self.log.info("Saving current file..") cmds.file(save=True, force=True) diff --git a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py index c4250a20bd..1a6463fb9d 100644 --- a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py +++ b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py @@ -11,7 +11,7 @@ import pyblish.api from openpype.lib import requests_post from openpype.hosts.maya.api import lib from openpype.pipeline import legacy_io -from openpype.api import get_system_settings +from openpype.settings import get_system_settings # mapping between Maya renderer names and Muster template ids @@ -118,7 +118,7 @@ def preview_fname(folder, scene, layer, padding, ext): """ # Following hardcoded "/_/" - output = "maya/{scene}/{layer}/{layer}.{number}.{ext}".format( + output = "{scene}/{layer}/{layer}.{number}.{ext}".format( scene=scene, layer=layer, number="#" * padding, diff --git a/openpype/hosts/maya/plugins/publish/validate_animation_content.py b/openpype/hosts/maya/plugins/publish/validate_animation_content.py index bcea761a01..9dbb09a046 100644 --- a/openpype/hosts/maya/plugins/publish/validate_animation_content.py +++ b/openpype/hosts/maya/plugins/publish/validate_animation_content.py @@ -1,6 +1,6 @@ import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateAnimationContent(pyblish.api.InstancePlugin): @@ -11,7 +11,7 @@ class ValidateAnimationContent(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["animation"] label = "Animation Content" @@ -30,6 
+30,10 @@ class ValidateAnimationContent(pyblish.api.InstancePlugin): assert 'out_hierarchy' in instance.data, "Missing `out_hierarchy` data" + out_sets = [node for node in instance if node.endswith("out_SET")] + msg = "Couldn't find exactly one out_SET: {0}".format(out_sets) + assert len(out_sets) == 1, msg + # All nodes in the `out_hierarchy` must be among the nodes that are # in the instance. The nodes in the instance are found from the top # group, as such this tests whether all nodes are under that top group. diff --git a/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py b/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py index 05d63f1d56..649913fff6 100644 --- a/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py @@ -1,9 +1,12 @@ import maya.cmds as cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin): @@ -16,13 +19,13 @@ class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ['animation', "pointcache"] hosts = ['maya'] label = 'Animation Out Set Related Node Ids' actions = [ openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction + RepairAction ] def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py b/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py index 5fb9bd98b1..ac6ce4d22d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py +++ b/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py @@ -4,18 +4,20 @@ import types import maya.cmds as cmds import pyblish.api -import openpype.api -import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) class ValidateAssRelativePaths(pyblish.api.InstancePlugin): """Ensure exporting ass file has set relative texture paths""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['ass'] label = "ASS has relative texture paths" - actions = [openpype.api.RepairAction] + actions = [RepairAction] def process(self, instance): # we cannot ask this until user open render settings as diff --git a/openpype/hosts/maya/plugins/publish/validate_assembly_namespaces.py b/openpype/hosts/maya/plugins/publish/validate_assembly_namespaces.py index a9ea5a6d15..229da63c42 100644 --- a/openpype/hosts/maya/plugins/publish/validate_assembly_namespaces.py +++ b/openpype/hosts/maya/plugins/publish/validate_assembly_namespaces.py @@ -1,5 +1,4 @@ import pyblish.api -import openpype.api import openpype.hosts.maya.api.action diff --git a/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py b/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py index dca59b147b..3f2c59b95b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py +++ b/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py @@ -1,9 +1,9 @@ import pyblish.api -import openpype.api from maya import cmds import openpype.hosts.maya.api.action +from openpype.pipeline.publish import RepairAction class 
ValidateAssemblyModelTransforms(pyblish.api.InstancePlugin): @@ -29,7 +29,7 @@ class ValidateAssemblyModelTransforms(pyblish.api.InstancePlugin): label = "Assembly Model Transforms" families = ["assembly"] actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] prompt_message = ("You are about to reset the matrix to the default values." " This can alter the look of your scene. " @@ -47,7 +47,7 @@ class ValidateAssemblyModelTransforms(pyblish.api.InstancePlugin): from openpype.hosts.maya.api import lib # Get all transforms in the loaded containers - container_roots = cmds.listRelatives(instance.data["hierarchy"], + container_roots = cmds.listRelatives(instance.data["nodesHierarchy"], children=True, type="transform", fullPath=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_attributes.py b/openpype/hosts/maya/plugins/publish/validate_attributes.py index e2a22f80b6..136c38bc1d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_attributes.py +++ b/openpype/hosts/maya/plugins/publish/validate_attributes.py @@ -1,7 +1,10 @@ import pymel.core as pm import pyblish.api -import openpype.api +from openpype.pipeline.publish import ( + RepairContextAction, + ValidateContentsOrder, +) class ValidateAttributes(pyblish.api.ContextPlugin): @@ -16,10 +19,10 @@ class ValidateAttributes(pyblish.api.ContextPlugin): } """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Attributes" hosts = ["maya"] - actions = [openpype.api.RepairContextAction] + actions = [RepairContextAction] optional = True attributes = None diff --git a/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py b/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py index e019788aff..bd1529e252 100644 --- a/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py +++ b/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateCameraAttributes(pyblish.api.InstancePlugin): @@ -14,7 +14,7 @@ class ValidateCameraAttributes(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ['camera'] hosts = ['maya'] label = 'Camera Attributes' diff --git a/openpype/hosts/maya/plugins/publish/validate_camera_contents.py b/openpype/hosts/maya/plugins/publish/validate_camera_contents.py index 20af8d2315..1ce8026fc2 100644 --- a/openpype/hosts/maya/plugins/publish/validate_camera_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_camera_contents.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateCameraContents(pyblish.api.InstancePlugin): @@ -15,11 +15,12 @@ class ValidateCameraContents(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ['camera'] hosts = ['maya'] label = 'Camera Contents' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + validate_shapes = True @classmethod def get_invalid(cls, instance): @@ -32,7 +33,7 @@ class ValidateCameraContents(pyblish.api.InstancePlugin): invalid = [] cameras = cmds.ls(shapes, type='camera', long=True) if len(cameras) != 1: - cls.log.warning("Camera instance must have a single camera. 
" + cls.log.error("Camera instance must have a single camera. " "Found {0}: {1}".format(len(cameras), cameras)) invalid.extend(cameras) @@ -49,15 +50,21 @@ class ValidateCameraContents(pyblish.api.InstancePlugin): raise RuntimeError("No cameras found in empty instance.") + if not cls.validate_shapes: + cls.log.info("not validating shapes in the content") + return invalid + # non-camera shapes valid_shapes = cmds.ls(shapes, type=('camera', 'locator'), long=True) shapes = set(shapes) - set(valid_shapes) if shapes: shapes = list(shapes) - cls.log.warning("Camera instance should only contain camera " + cls.log.error("Camera instance should only contain camera " "shapes. Found: {0}".format(shapes)) invalid.extend(shapes) + + invalid = list(set(invalid)) return invalid diff --git a/openpype/hosts/maya/plugins/publish/validate_color_sets.py b/openpype/hosts/maya/plugins/publish/validate_color_sets.py index 45224b0672..905417bafa 100644 --- a/openpype/hosts/maya/plugins/publish/validate_color_sets.py +++ b/openpype/hosts/maya/plugins/publish/validate_color_sets.py @@ -1,8 +1,11 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateMeshOrder, +) class ValidateColorSets(pyblish.api.Validator): @@ -13,13 +16,13 @@ class ValidateColorSets(pyblish.api.Validator): """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] category = 'geometry' label = 'Mesh ColorSets' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] optional = True @staticmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_current_renderlayer_renderable.py b/openpype/hosts/maya/plugins/publish/validate_current_renderlayer_renderable.py index 3c3ea68fc6..f072e5e323 100644 --- a/openpype/hosts/maya/plugins/publish/validate_current_renderlayer_renderable.py +++ b/openpype/hosts/maya/plugins/publish/validate_current_renderlayer_renderable.py @@ -1,7 +1,7 @@ import pyblish.api from maya import cmds -from openpype.plugin import contextplugin_should_run +from openpype.pipeline.publish import context_plugin_should_run class ValidateCurrentRenderLayerIsRenderable(pyblish.api.ContextPlugin): @@ -24,7 +24,7 @@ class ValidateCurrentRenderLayerIsRenderable(pyblish.api.ContextPlugin): def process(self, context): # Workaround bug pyblish-base#250 - if not contextplugin_should_run(self, context): + if not context_plugin_should_run(self, context): return layer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_cycle_error.py b/openpype/hosts/maya/plugins/publish/validate_cycle_error.py index 4dfe0b8add..210ee4127c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_cycle_error.py +++ b/openpype/hosts/maya/plugins/publish/validate_cycle_error.py @@ -2,15 +2,15 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api.lib import maintained_selection +from openpype.pipeline.publish import ValidateContentsOrder class ValidateCycleError(pyblish.api.InstancePlugin): """Validate nodes produce no cycle errors.""" - order = openpype.api.ValidateContentsOrder + 0.05 + order = ValidateContentsOrder + 0.05 label = "Cycle Errors" hosts = ["maya"] families = ["rig"] diff --git a/openpype/hosts/maya/plugins/publish/validate_frame_range.py 
b/openpype/hosts/maya/plugins/publish/validate_frame_range.py index 98b5b4d79b..b467a7c232 100644 --- a/openpype/hosts/maya/plugins/publish/validate_frame_range.py +++ b/openpype/hosts/maya/plugins/publish/validate_frame_range.py @@ -1,7 +1,10 @@ import pyblish.api -import openpype.api from maya import cmds +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) class ValidateFrameRange(pyblish.api.InstancePlugin): @@ -18,7 +21,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): """ label = "Validate Frame Range" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["animation", "pointcache", "camera", @@ -26,7 +29,8 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): "review", "yeticache"] optional = True - actions = [openpype.api.RepairAction] + actions = [RepairAction] + exclude_families = [] def process(self, instance): context = instance.context @@ -56,7 +60,9 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): # compare with data on instance errors = [] - + if [ef for ef in self.exclude_families + if instance.data["family"] in ef]: + return if(inst_start != frame_start_handle): errors.append("Instance start frame [ {} ] doesn't " "match the one set on instance [ {} ]: " diff --git a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py index e04a26e4fd..4870f27bff 100644 --- a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py +++ b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py @@ -1,12 +1,12 @@ import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateInstanceHasMembers(pyblish.api.InstancePlugin): """Validates instance objectSet has *any* members.""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] label = 'Instance has members' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_instance_in_context.py b/openpype/hosts/maya/plugins/publish/validate_instance_in_context.py index 7b8c335062..41bb414829 100644 --- a/openpype/hosts/maya/plugins/publish/validate_instance_in_context.py +++ b/openpype/hosts/maya/plugins/publish/validate_instance_in_context.py @@ -3,7 +3,7 @@ from __future__ import absolute_import import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder from maya import cmds @@ -98,7 +98,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin): Action on this validator will select invalid instances in Outliner. 
""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Instance in same Context" optional = True hosts = ["maya"] diff --git a/openpype/hosts/maya/plugins/publish/validate_instance_subset.py b/openpype/hosts/maya/plugins/publish/validate_instance_subset.py index 539f3f9d3c..bb3dde761c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_instance_subset.py +++ b/openpype/hosts/maya/plugins/publish/validate_instance_subset.py @@ -1,8 +1,8 @@ import pyblish.api -import openpype.api import string import six +from openpype.pipeline.publish import ValidateContentsOrder # Allow only characters, numbers and underscore allowed = set(string.ascii_lowercase + @@ -18,7 +18,7 @@ def validate_name(subset): class ValidateSubsetName(pyblish.api.InstancePlugin): """Validates subset name has only valid characters""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["*"] label = "Subset Name" diff --git a/openpype/hosts/maya/plugins/publish/validate_loaded_plugin.py b/openpype/hosts/maya/plugins/publish/validate_loaded_plugin.py index 9306d8ce15..624074aaf9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_loaded_plugin.py +++ b/openpype/hosts/maya/plugins/publish/validate_loaded_plugin.py @@ -1,7 +1,8 @@ +import os import pyblish.api import maya.cmds as cmds -import openpype.api -import os + +from openpype.pipeline.publish import RepairContextAction class ValidateLoadedPlugin(pyblish.api.ContextPlugin): @@ -10,7 +11,7 @@ class ValidateLoadedPlugin(pyblish.api.ContextPlugin): label = "Loaded Plugin" order = pyblish.api.ValidatorOrder host = ["maya"] - actions = [openpype.api.RepairContextAction] + actions = [RepairContextAction] @classmethod def get_invalid(cls, context): diff --git a/openpype/hosts/maya/plugins/publish/validate_look_contents.py b/openpype/hosts/maya/plugins/publish/validate_look_contents.py index 443a0ad719..53501d11e5 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_contents.py @@ -1,6 +1,6 @@ import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateLookContents(pyblish.api.InstancePlugin): @@ -17,7 +17,7 @@ class ValidateLookContents(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ['look'] hosts = ['maya'] label = 'Look Data Contents' @@ -78,14 +78,13 @@ class ValidateLookContents(pyblish.api.InstancePlugin): # Check if attributes are on a node with an ID, crucial for rebuild! 
for attr_changes in lookdata["attributes"]: - if not attr_changes["uuid"]: + if not attr_changes["uuid"] and not attr_changes["attributes"]: cls.log.error("Node '%s' has no cbId, please set the " "attributes to its children if it has any" % attr_changes["name"]) invalid.add(instance.name) return list(invalid) - @classmethod def validate_looks(cls, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_look_default_shaders_connections.py b/openpype/hosts/maya/plugins/publish/validate_look_default_shaders_connections.py index 262dd10b74..20f561a892 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_default_shaders_connections.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_default_shaders_connections.py @@ -1,7 +1,7 @@ from maya import cmds import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateLookDefaultShadersConnections(pyblish.api.InstancePlugin): @@ -16,7 +16,7 @@ class ValidateLookDefaultShadersConnections(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ['look'] hosts = ['maya'] label = 'Look Default Shader Connections' diff --git a/openpype/hosts/maya/plugins/publish/validate_look_id_reference_edits.py b/openpype/hosts/maya/plugins/publish/validate_look_id_reference_edits.py index 9d074f927b..a266a0fd74 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_id_reference_edits.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_id_reference_edits.py @@ -2,8 +2,11 @@ from collections import defaultdict from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) class ValidateLookIdReferenceEdits(pyblish.api.InstancePlugin): @@ -16,12 +19,12 @@ class ValidateLookIdReferenceEdits(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ['look'] hosts = ['maya'] label = 'Look Id Reference Edits' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] def process(self, instance): invalid = self.get_invalid(instance) diff --git a/openpype/hosts/maya/plugins/publish/validate_look_members_unique.py b/openpype/hosts/maya/plugins/publish/validate_look_members_unique.py index 2367602d05..f81e511ff3 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_members_unique.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_members_unique.py @@ -1,8 +1,8 @@ from collections import defaultdict import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidatePipelineOrder class ValidateUniqueRelationshipMembers(pyblish.api.InstancePlugin): @@ -20,7 +20,7 @@ class ValidateUniqueRelationshipMembers(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidatePipelineOrder + order = ValidatePipelineOrder label = 'Look members unique' hosts = ['maya'] families = ['look'] diff --git a/openpype/hosts/maya/plugins/publish/validate_look_no_default_shaders.py b/openpype/hosts/maya/plugins/publish/validate_look_no_default_shaders.py index 8ba6cde988..db6aadae8d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_no_default_shaders.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_no_default_shaders.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import 
openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): @@ -23,7 +23,7 @@ class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + 0.01 + order = ValidateContentsOrder + 0.01 families = ['look'] hosts = ['maya'] label = 'Look No Default Shaders' diff --git a/openpype/hosts/maya/plugins/publish/validate_look_sets.py b/openpype/hosts/maya/plugins/publish/validate_look_sets.py index 5e737ca876..8434ddde04 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_sets.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_sets.py @@ -1,8 +1,7 @@ +import pyblish.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib - -import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateLookSets(pyblish.api.InstancePlugin): @@ -38,7 +37,7 @@ class ValidateLookSets(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ['look'] hosts = ['maya'] label = 'Look Sets' diff --git a/openpype/hosts/maya/plugins/publish/validate_look_shading_group.py b/openpype/hosts/maya/plugins/publish/validate_look_shading_group.py index e8affac036..9b57b06ee7 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_shading_group.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_shading_group.py @@ -1,8 +1,11 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) class ValidateShadingEngine(pyblish.api.InstancePlugin): @@ -11,12 +14,12 @@ class ValidateShadingEngine(pyblish.api.InstancePlugin): Shading engines should be named "{surface_shader}SG" """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["look"] hosts = ["maya"] label = "Look Shading Engine Naming" actions = [ - openpype.hosts.maya.api.action.SelectInvalidAction, openpype.api.RepairAction + openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction ] # The default connections to check diff --git a/openpype/hosts/maya/plugins/publish/validate_look_single_shader.py b/openpype/hosts/maya/plugins/publish/validate_look_single_shader.py index 2b32ccf492..788e440d12 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_single_shader.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_single_shader.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateSingleShader(pyblish.api.InstancePlugin): @@ -12,7 +12,7 @@ class ValidateSingleShader(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ['look'] hosts = ['maya'] label = 'Look Single Shader Per Shape' diff --git a/openpype/hosts/maya/plugins/publish/validate_maya_units.py b/openpype/hosts/maya/plugins/publish/validate_maya_units.py index d5a8c350d5..5698d795ff 100644 --- a/openpype/hosts/maya/plugins/publish/validate_maya_units.py +++ b/openpype/hosts/maya/plugins/publish/validate_maya_units.py @@ -1,10 +1,14 @@ import maya.cmds as cmds import pyblish.api -import openpype.api -from openpype import lib + import openpype.hosts.maya.api.lib as mayalib +from openpype.pipeline.context_tools import 
get_current_project_asset from math import ceil +from openpype.pipeline.publish import ( + RepairContextAction, + ValidateSceneOrder, +) def float_round(num, places=0, direction=ceil): @@ -14,10 +18,10 @@ def float_round(num, places=0, direction=ceil): class ValidateMayaUnits(pyblish.api.ContextPlugin): """Check if the Maya units are set correct""" - order = openpype.api.ValidateSceneOrder + order = ValidateSceneOrder label = "Maya Units" hosts = ['maya'] - actions = [openpype.api.RepairContextAction] + actions = [RepairContextAction] validate_linear_units = True linear_units = "cm" @@ -41,7 +45,9 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin): # now flooring the value? fps = float_round(context.data.get('fps'), 2, ceil) - asset_fps = lib.get_asset()["data"]["fps"] + # TODO repace query with using 'context.data["assetEntity"]' + asset_doc = get_current_project_asset() + asset_fps = asset_doc["data"]["fps"] self.log.info('Units (linear): {0}'.format(linearunits)) self.log.info('Units (angular): {0}'.format(angularunits)) @@ -91,5 +97,7 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin): cls.log.debug(current_linear) cls.log.info("Setting time unit to match project") - asset_fps = lib.get_asset()["data"]["fps"] + # TODO repace query with using 'context.data["assetEntity"]' + asset_doc = get_current_project_asset() + asset_fps = asset_doc["data"]["fps"] mayalib.set_scene_fps(asset_fps) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py b/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py index 90eb01aa12..c1c0636b9e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py @@ -1,9 +1,12 @@ import pymel.core as pc from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api.lib import maintained_selection +from openpype.pipeline.publish import ( + RepairAction, + ValidateMeshOrder, +) class ValidateMeshArnoldAttributes(pyblish.api.InstancePlugin): @@ -13,14 +16,14 @@ class ValidateMeshArnoldAttributes(pyblish.api.InstancePlugin): later published looks can discover non-default Arnold attributes. """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ["maya"] families = ["model"] category = "geometry" label = "Mesh Arnold Attributes" actions = [ openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction + RepairAction ] optional = True if cmds.getAttr( diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py index 8f9b5d1c4e..36a0da7a59 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py @@ -3,8 +3,8 @@ import re from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateMeshOrder def len_flattened(components): @@ -45,7 +45,7 @@ class ValidateMeshHasUVs(pyblish.api.InstancePlugin): UVs for every face. 
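The Maya units check above rounds the scene fps with `float_round` before comparing it to the asset fps. The helper's body falls outside the hunk context shown here; the following is a sketch of how such a ceil-based rounding helper is typically written, an assumption rather than the verbatim implementation:

from math import ceil

def float_round(num, places=0, direction=ceil):
    # Round "num" at "places" decimals using the supplied rounding function.
    return direction(num * (10 ** places)) / float(10 ** places)

print(float_round(23.976023976, 2, ceil))  # 23.98
print(float_round(25.0, 2, ceil))          # 25.0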
""" - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] category = 'geometry' diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py b/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py index 8fa1f3cf3b..4427c6eece 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateMeshOrder class ValidateMeshLaminaFaces(pyblish.api.InstancePlugin): @@ -12,7 +12,7 @@ class ValidateMeshLaminaFaces(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] category = 'geometry' diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_ngons.py b/openpype/hosts/maya/plugins/publish/validate_mesh_ngons.py index ab0beb2a9c..5b67db3307 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_ngons.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_ngons.py @@ -1,9 +1,9 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ValidateContentsOrder class ValidateMeshNgons(pyblish.api.Validator): @@ -16,7 +16,7 @@ class ValidateMeshNgons(pyblish.api.Validator): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["model"] label = "Mesh ngons" diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_no_negative_scale.py b/openpype/hosts/maya/plugins/publish/validate_mesh_no_negative_scale.py index 5ccfa7377a..664e2b5772 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_no_negative_scale.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_no_negative_scale.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateMeshOrder class ValidateMeshNoNegativeScale(pyblish.api.Validator): @@ -17,7 +17,7 @@ class ValidateMeshNoNegativeScale(pyblish.api.Validator): """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] label = 'Mesh No Negative Scale' diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_non_manifold.py b/openpype/hosts/maya/plugins/publish/validate_mesh_non_manifold.py index 9bd584bbbf..d7711da722 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_non_manifold.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_non_manifold.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateMeshOrder class ValidateMeshNonManifold(pyblish.api.Validator): @@ -13,7 +13,7 @@ class ValidateMeshNonManifold(pyblish.api.Validator): """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] label = 'Mesh Non-Manifold Vertices/Edges' diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py b/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py index 5e6f24cf79..0ef2716559 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py +++ 
b/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py @@ -1,9 +1,9 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ValidateMeshOrder class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin): @@ -16,7 +16,7 @@ class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder families = ['model'] hosts = ['maya'] category = 'geometry' diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py b/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py index 750932df54..c8892a8e59 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py @@ -2,8 +2,11 @@ from maya import cmds import maya.api.OpenMaya as om2 import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateMeshOrder, +) class ValidateMeshNormalsUnlocked(pyblish.api.Validator): @@ -14,14 +17,14 @@ class ValidateMeshNormalsUnlocked(pyblish.api.Validator): """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] category = 'geometry' version = (0, 1, 0) label = 'Mesh Normals Unlocked' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] optional = True @staticmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py index bf95d8ba09..be7324a68f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py @@ -1,11 +1,11 @@ import pyblish.api -import openpype.api import openpype.hosts.maya.api.action import math import maya.api.OpenMaya as om import pymel.core as pm from six.moves import xrange +from openpype.pipeline.publish import ValidateMeshOrder class GetOverlappingUVs(object): @@ -232,7 +232,7 @@ class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin): It is optional to warn publisher about it. """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] category = 'geometry' diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_shader_connections.py b/openpype/hosts/maya/plugins/publish/validate_mesh_shader_connections.py index 0969573a90..2a0abe975c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_shader_connections.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_shader_connections.py @@ -1,8 +1,11 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateMeshOrder, +) def pairs(iterable): @@ -12,28 +15,41 @@ def pairs(iterable): yield i, y -def get_invalid_sets(shape): - """Get sets that are considered related but do not contain the shape. +def get_invalid_sets(shapes): + """Return invalid sets for the given shapes. - In some scenarios Maya keeps connections to multiple shaders - even if just a single one is assigned on the full object. + This takes a list of shape nodes to cache the set members for overlapping + sets in the queries. This avoids many Maya set member queries. 
- These are related sets returned by `maya.cmds.listSets` that don't - actually have the shape as member. + Returns: + dict: Dictionary of shapes and their invalid sets, e.g. + {"pCubeShape": ["set1", "set2"]} """ - invalid = [] - sets = cmds.listSets(object=shape, t=1, extendToShape=False) or [] - for s in sets: - members = cmds.sets(s, query=True, nodesOnly=True) - if not members: - invalid.append(s) - continue + cache = dict() + invalid = dict() - members = set(cmds.ls(members, long=True)) - if shape not in members: - invalid.append(s) + # Collect the sets from the shape + for shape in shapes: + invalid_sets = [] + sets = cmds.listSets(object=shape, t=1, extendToShape=False) or [] + for set_ in sets: + + members = cache.get(set_, None) + if members is None: + members = set(cmds.ls(cmds.sets(set_, + query=True, + nodesOnly=True), long=True)) + cache[set_] = members + + # If the shape is not actually present as a member of the set + # consider it invalid + if shape not in members: + invalid_sets.append(set_) + + if invalid_sets: + invalid[shape] = invalid_sets return invalid @@ -73,12 +89,12 @@ class ValidateMeshShaderConnections(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] label = "Mesh Shader Connections" actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] def process(self, instance): """Process all the nodes in the instance 'objectSet'""" @@ -92,15 +108,9 @@ class ValidateMeshShaderConnections(pyblish.api.InstancePlugin): @staticmethod def get_invalid(instance): - shapes = cmds.ls(instance[:], dag=1, leaf=1, shapes=1, long=True) - - # todo: allow to check anything that can have a shader - shapes = cmds.ls(shapes, noIntermediate=True, long=True, type="mesh") - - invalid = [] - for shape in shapes: - if get_invalid_sets(shape): - invalid.append(shape) + nodes = instance[:] + shapes = cmds.ls(nodes, noIntermediate=True, long=True, type="mesh") + invalid = get_invalid_sets(shapes).keys() return invalid @@ -108,7 +118,7 @@ class ValidateMeshShaderConnections(pyblish.api.InstancePlugin): def repair(cls, instance): shapes = cls.get_invalid(instance) - for shape in shapes: - invalid_sets = get_invalid_sets(shape) + invalid = get_invalid_sets(shapes) + for shape, invalid_sets in invalid.items(): for set_node in invalid_sets: disconnect(shape, set_node) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py b/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py index 9d2aeb7d99..6ca8c06ba5 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py @@ -1,9 +1,12 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ( + RepairAction, + ValidateMeshOrder, +) class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin): @@ -15,7 +18,7 @@ class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model', 'pointcache'] category = 'uv' @@ -23,7 +26,7 @@ class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin): version = (0, 1, 0) label = "Mesh Single UV Set" actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] @staticmethod def 
get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py b/openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py index 52c45d3b0c..40ddb916ca 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py @@ -1,8 +1,11 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateMeshOrder, +) class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin): @@ -15,13 +18,13 @@ class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] optional = True label = "Mesh has map1 UV Set" actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] @staticmethod def get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py b/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py index 463c3c4c50..1e6d290ae7 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py @@ -3,8 +3,11 @@ import re from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateMeshOrder, +) def len_flattened(components): @@ -57,13 +60,13 @@ class ValidateMeshVerticesHaveEdges(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] category = 'geometry' label = 'Mesh Vertices Have Edges' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] @classmethod def repair(cls, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_model_content.py b/openpype/hosts/maya/plugins/publish/validate_model_content.py index aee0ea52f0..723346a285 100644 --- a/openpype/hosts/maya/plugins/publish/validate_model_content.py +++ b/openpype/hosts/maya/plugins/publish/validate_model_content.py @@ -1,9 +1,9 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ValidateContentsOrder class ValidateModelContent(pyblish.api.InstancePlugin): @@ -14,7 +14,7 @@ class ValidateModelContent(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["model"] label = "Model Content" diff --git a/openpype/hosts/maya/plugins/publish/validate_model_name.py b/openpype/hosts/maya/plugins/publish/validate_model_name.py index 50acf2b8b7..2dec9ba267 100644 --- a/openpype/hosts/maya/plugins/publish/validate_model_name.py +++ b/openpype/hosts/maya/plugins/publish/validate_model_name.py @@ -5,12 +5,12 @@ import re from maya import cmds import pyblish.api -import openpype.api from openpype.pipeline import legacy_io +from openpype.pipeline.publish import ValidateContentsOrder import openpype.hosts.maya.api.action from openpype.hosts.maya.api.shader_definition_editor import ( DEFINITION_FILENAME) -from openpype.lib.mongo import OpenPypeMongoConnection +from openpype.client.mongo import OpenPypeMongoConnection import gridfs @@ -23,7 +23,7 @@ class 
ValidateModelName(pyblish.api.InstancePlugin): """ optional = True - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["model"] label = "Model Name" diff --git a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py b/openpype/hosts/maya/plugins/publish/validate_muster_connection.py index 6dc7bd3bc4..c31ccf405c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py +++ b/openpype/hosts/maya/plugins/publish/validate_muster_connection.py @@ -5,8 +5,10 @@ import appdirs import pyblish.api from openpype.lib import requests_get -from openpype.plugin import contextplugin_should_run -import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + context_plugin_should_run, + RepairAction, +) class ValidateMusterConnection(pyblish.api.ContextPlugin): @@ -21,12 +23,12 @@ class ValidateMusterConnection(pyblish.api.ContextPlugin): token = None if not os.environ.get("MUSTER_REST_URL"): active = False - actions = [openpype.api.RepairAction] + actions = [RepairAction] def process(self, context): # Workaround bug pyblish-base#250 - if not contextplugin_should_run(self, context): + if not context_plugin_should_run(self, context): return # test if we have environment set (redundant as this plugin shouldn' diff --git a/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py b/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py new file mode 100644 index 0000000000..67fc1616c2 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py @@ -0,0 +1,92 @@ +import os +import pyblish.api +import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder + + +COLOUR_SPACES = ['sRGB', 'linear', 'auto'] +MIPMAP_EXTENSIONS = ['tdl'] + + +class ValidateMvLookContents(pyblish.api.InstancePlugin): + order = ValidateContentsOrder + families = ['mvLook'] + hosts = ['maya'] + label = 'Validate mvLook Data' + actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + + # Allow this validation step to be skipped when you just need to + # get things pushed through. + optional = True + + # These intents get enforced checks, other ones get warnings. 
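The COLOUR_SPACES and MIPMAP_EXTENSIONS constants above drive the mipmap lookup implemented further down in is_or_has_mipmap: when publishMipMap is enabled, a texture passes either because it is a .tdl mipmap itself or because a companion file named `<file>.<colour space>.tdl` is present among the resource files. A small sketch with a made-up file name:

import os

COLOUR_SPACES = ['sRGB', 'linear', 'auto']
MIPMAP_EXTENSIONS = ['tdl']

def candidate_mipmap_names(fname):
    # Names that would satisfy the mipmap requirement for "fname".
    if os.path.splitext(fname)[1][1:] in MIPMAP_EXTENSIONS:
        return [fname]
    return ['.'.join([fname, colour_space, ext])
            for colour_space in COLOUR_SPACES
            for ext in MIPMAP_EXTENSIONS]

print(candidate_mipmap_names("diffuse_v001.png"))
# ['diffuse_v001.png.sRGB.tdl', 'diffuse_v001.png.linear.tdl', 'diffuse_v001.png.auto.tdl']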
+ enforced_intents = ['-', 'Final'] + + def process(self, instance): + intent = instance.context.data['intent']['value'] + publishMipMap = instance.data["publishMipMap"] + enforced = True + if intent in self.enforced_intents: + self.log.info("This validation will be enforced: '{}'" + .format(intent)) + else: + enforced = False + self.log.info("This validation will NOT be enforced: '{}'" + .format(intent)) + + if not instance[:]: + raise RuntimeError("Instance is empty") + + invalid = set() + + resources = instance.data.get("resources", []) + for resource in resources: + files = resource["files"] + self.log.debug("Resouce '{}', files: [{}]".format(resource, files)) + node = resource["node"] + if len(files) == 0: + self.log.error("File node '{}' uses no or non-existing " + "files".format(node)) + invalid.add(node) + continue + for fname in files: + if not self.valid_file(fname): + self.log.error("File node '{}'/'{}' is not valid" + .format(node, fname)) + invalid.add(node) + + if publishMipMap and not self.is_or_has_mipmap(fname, files): + msg = "File node '{}'/'{}' does not have a mipmap".format( + node, fname) + if enforced: + invalid.add(node) + self.log.error(msg) + raise RuntimeError(msg) + else: + self.log.warning(msg) + + if invalid: + raise RuntimeError("'{}' has invalid look " + "content".format(instance.name)) + + def valid_file(self, fname): + self.log.debug("Checking validity of '{}'".format(fname)) + if not os.path.exists(fname): + return False + if os.path.getsize(fname) == 0: + return False + return True + + def is_or_has_mipmap(self, fname, files): + ext = os.path.splitext(fname)[1][1:] + if ext in MIPMAP_EXTENSIONS: + self.log.debug("Is a mipmap '{}'".format(fname)) + return True + + for colour_space in COLOUR_SPACES: + for mipmap_ext in MIPMAP_EXTENSIONS: + mipmap_fname = '.'.join([fname, colour_space, mipmap_ext]) + if mipmap_fname in files: + self.log.debug("Has a mipmap '{}'".format(fname)) + return True + return False diff --git a/openpype/hosts/maya/plugins/publish/validate_no_animation.py b/openpype/hosts/maya/plugins/publish/validate_no_animation.py index 6621e452f0..2e7cafe4ab 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_animation.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_animation.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateNoAnimation(pyblish.api.Validator): @@ -14,7 +14,7 @@ class ValidateNoAnimation(pyblish.api.Validator): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "No Animation" hosts = ["maya"] families = ["model"] diff --git a/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py b/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py index c3f6f3c38e..1a5773e6a7 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateNoDefaultCameras(pyblish.api.InstancePlugin): @@ -13,7 +13,7 @@ class ValidateNoDefaultCameras(pyblish.api.InstancePlugin): settings when being loaded and sometimes being skipped. 
""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['camera'] version = (0, 1, 0) diff --git a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py b/openpype/hosts/maya/plugins/publish/validate_no_namespace.py index 5b3d6bc9c4..01c77e5b2e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_namespace.py @@ -2,7 +2,11 @@ import pymel.core as pm import maya.cmds as cmds import pyblish.api -import openpype.api +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) + import openpype.hosts.maya.api.action @@ -16,14 +20,14 @@ def get_namespace(node_name): class ValidateNoNamespace(pyblish.api.InstancePlugin): """Ensure the nodes don't have a namespace""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['model'] category = 'cleanup' version = (0, 1, 0) label = 'No Namespaces' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] @staticmethod def get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py b/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py index 36d61b03e8..b430c2b63c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py @@ -1,8 +1,11 @@ import maya.cmds as cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) def has_shape_children(node): @@ -37,13 +40,13 @@ class ValidateNoNullTransforms(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['model'] category = 'cleanup' version = (0, 1, 0) label = 'No Empty/Null Transforms' - actions = [openpype.api.RepairAction, + actions = [RepairAction, openpype.hosts.maya.api.action.SelectInvalidAction] @staticmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py b/openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py index d140a1f24a..2cfdc28128 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateNoUnknownNodes(pyblish.api.InstancePlugin): @@ -16,7 +16,7 @@ class ValidateNoUnknownNodes(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['model', 'rig'] optional = True diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids.py b/openpype/hosts/maya/plugins/publish/validate_node_ids.py index d17d34117f..796f4c8d76 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids.py @@ -1,7 +1,7 @@ import pyblish.api -import openpype.api -import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidatePipelineOrder +import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib @@ -14,7 +14,7 @@ class ValidateNodeIDs(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidatePipelineOrder + 
order = ValidatePipelineOrder label = 'Instance Nodes Have ID' hosts = ['maya'] families = ["model", diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py index 0324be9fc9..68c47f3a96 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py @@ -1,9 +1,12 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) class ValidateNodeIdsDeformedShape(pyblish.api.InstancePlugin): @@ -16,13 +19,13 @@ class ValidateNodeIdsDeformedShape(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ['look'] hosts = ['maya'] label = 'Deformed shape ids' actions = [ openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction + RepairAction ] def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py index 068d6b38a1..b2f28fd4e5 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py @@ -1,7 +1,8 @@ import pyblish.api -import openpype.api +from openpype.client import get_assets from openpype.pipeline import legacy_io +from openpype.pipeline.publish import ValidatePipelineOrder import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib @@ -17,7 +18,7 @@ class ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidatePipelineOrder + order = ValidatePipelineOrder label = 'Node Ids in Database' hosts = ['maya'] families = ["*"] @@ -42,8 +43,12 @@ class ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin): nodes=instance[:]) # check ids against database ids - db_asset_ids = legacy_io.find({"type": "asset"}).distinct("_id") - db_asset_ids = set(str(i) for i in db_asset_ids) + project_name = legacy_io.active_project() + asset_docs = get_assets(project_name, fields=["_id"]) + db_asset_ids = { + str(asset_doc["_id"]) + for asset_doc in asset_docs + } # Get all asset IDs for node in id_required_nodes: diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py index 38407e4176..f901dc58c4 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py @@ -1,7 +1,6 @@ import pyblish.api -import openpype.api -from openpype.pipeline import legacy_io +from openpype.pipeline.publish import ValidatePipelineOrder import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib @@ -11,7 +10,7 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidatePipelineOrder + order = ValidatePipelineOrder label = 'Node Ids Related (ID)' hosts = ['maya'] families = ["model", @@ -36,15 +35,7 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin): """Return the member nodes that are invalid""" invalid = list() - asset = instance.data['asset'] - asset_data = legacy_io.find_one( - { - "name": asset, - "type": "asset" - }, - projection={"_id": True} - ) - asset_id = str(asset_data['_id']) 
+ asset_id = str(instance.data['assetEntity']["_id"]) # We do want to check the referenced nodes as we it might be # part of the end product diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py index ed9ef526d6..f7a5e6e292 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py @@ -1,7 +1,7 @@ from collections import defaultdict import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidatePipelineOrder import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib @@ -12,7 +12,7 @@ class ValidateNodeIdsUnique(pyblish.api.InstancePlugin): Here we ensure that what has been added to the instance is unique """ - order = openpype.api.ValidatePipelineOrder + order = ValidatePipelineOrder label = 'Non Duplicate Instance Members (ID)' hosts = ['maya'] families = ["model", diff --git a/openpype/hosts/maya/plugins/publish/validate_node_no_ghosting.py b/openpype/hosts/maya/plugins/publish/validate_node_no_ghosting.py index 38f3ab1e68..0f608dab2c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_no_ghosting.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_no_ghosting.py @@ -1,8 +1,9 @@ from maya import cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateNodeNoGhosting(pyblish.api.InstancePlugin): @@ -17,7 +18,7 @@ class ValidateNodeNoGhosting(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['model', 'rig'] label = "No Ghosting" diff --git a/openpype/hosts/maya/plugins/publish/validate_render_image_rule.py b/openpype/hosts/maya/plugins/publish/validate_render_image_rule.py index 642ca9e25d..78bb022785 100644 --- a/openpype/hosts/maya/plugins/publish/validate_render_image_rule.py +++ b/openpype/hosts/maya/plugins/publish/validate_render_image_rule.py @@ -1,46 +1,52 @@ -import maya.mel as mel -import pymel.core as pm +from maya import cmds import pyblish.api -import openpype.api - - -def get_file_rule(rule): - """Workaround for a bug in python with cmds.workspace""" - return mel.eval('workspace -query -fileRuleEntry "{}"'.format(rule)) +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) class ValidateRenderImageRule(pyblish.api.InstancePlugin): - """Validates "images" file rule is set to "renders/" + """Validates Maya Workpace "images" file rule matches project settings. + + This validates against the configured default render image folder: + Studio Settings > Project > Maya > + Render Settings > Default render image folder. """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Images File Rule (Workspace)" hosts = ["maya"] families = ["renderlayer"] - actions = [openpype.api.RepairAction] + actions = [RepairAction] def process(self, instance): - default_render_file = self.get_default_render_image_folder(instance) + required_images_rule = self.get_default_render_image_folder(instance) + current_images_rule = cmds.workspace(fileRuleEntry="images") - assert get_file_rule("images") == default_render_file, ( - "Workspace's `images` file rule must be set to: {}".format( - default_render_file + assert current_images_rule == required_images_rule, ( + "Invalid workspace `images` file rule value: '{}'. 
" + "Must be set to: '{}'".format( + current_images_rule, required_images_rule ) ) @classmethod def repair(cls, instance): - default = cls.get_default_render_image_folder(instance) - pm.workspace.fileRules["images"] = default - pm.system.Workspace.save() + + required_images_rule = cls.get_default_render_image_folder(instance) + current_images_rule = cmds.workspace(fileRuleEntry="images") + + if current_images_rule != required_images_rule: + cmds.workspace(fileRule=("images", required_images_rule)) + cmds.workspace(saveWorkspace=True) @staticmethod def get_default_render_image_folder(instance): return instance.context.data.get('project_settings')\ .get('maya') \ - .get('create') \ - .get('CreateRender') \ + .get('RenderSettings') \ .get('default_render_image_folder') diff --git a/openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py b/openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py index 044cc7c6a2..67ece75af8 100644 --- a/openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py +++ b/openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py @@ -1,14 +1,15 @@ from maya import cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateRenderNoDefaultCameras(pyblish.api.InstancePlugin): """Ensure no default (startup) cameras are to be rendered.""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['renderlayer'] label = "No Default Cameras Renderable" diff --git a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py b/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py index e6c6ef6c9e..77322fefd5 100644 --- a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py +++ b/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py @@ -1,20 +1,11 @@ import re import pyblish.api -import openpype.api -import openpype.hosts.maya.api.action - from maya import cmds - -ImagePrefixes = { - 'mentalray': 'defaultRenderGlobals.imageFilePrefix', - 'vray': 'vraySettings.fileNamePrefix', - 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'defaultRenderGlobals.imageFilePrefix', - 'redshift': 'defaultRenderGlobals.imageFilePrefix', - 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', -} +import openpype.hosts.maya.api.action +from openpype.hosts.maya.api.lib_rendersettings import RenderSettings +from openpype.pipeline.publish import ValidateContentsOrder class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): @@ -24,7 +15,7 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): prefix must contain token. 
""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Render Single Camera" hosts = ['maya'] families = ["renderlayer", @@ -47,7 +38,11 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): # handle various renderman names if renderer.startswith('renderman'): renderer = 'renderman' - file_prefix = cmds.getAttr(ImagePrefixes[renderer]) + + file_prefix = cmds.getAttr( + RenderSettings.get_image_prefix_attr(renderer) + ) + if len(cameras) > 1: if re.search(cls.R_CAMERA_TOKEN, file_prefix): diff --git a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py b/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py index e65150eb0f..6b6fb03eec 100644 --- a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py +++ b/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py @@ -1,8 +1,8 @@ import pyblish.api +from openpype.client import get_subset_by_name import openpype.hosts.maya.api.action from openpype.pipeline import legacy_io -import openpype.api class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin): @@ -33,26 +33,23 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin): raise RuntimeError("Found unregistered subsets: {}".format(invalid)) def get_invalid(self, instance): - invalid = [] - asset_name = instance.data["asset"] + project_name = legacy_io.active_project() + asset_doc = instance.data["assetEntity"] render_passses = instance.data.get("renderPasses", []) for render_pass in render_passses: - is_valid = self.validate_subset_registered(asset_name, render_pass) + is_valid = self.validate_subset_registered( + project_name, asset_doc, render_pass + ) if not is_valid: invalid.append(render_pass) return invalid - def validate_subset_registered(self, asset_name, subset_name): + def validate_subset_registered(self, project_name, asset_doc, subset_name): """Check if subset is registered in the database under the asset""" - asset = legacy_io.find_one({"type": "asset", "name": asset_name}) - is_valid = legacy_io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset["_id"] - }) - - return is_valid + return get_subset_by_name( + project_name, subset_name, asset_doc["_id"], fields=["_id"] + ) diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index ba6c1397ab..94e2633593 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -6,17 +6,26 @@ from collections import OrderedDict from maya import cmds, mel import pyblish.api -import openpype.api +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) from openpype.hosts.maya.api import lib +def get_redshift_image_format_labels(): + """Return nice labels for Redshift image formats.""" + var = "$g_redshiftImageFormatLabels" + return mel.eval("{0}={0}".format(var)) + + class ValidateRenderSettings(pyblish.api.InstancePlugin): """Validates the global render settings - * File Name Prefix must start with: `maya/` + * File Name Prefix must start with: `` all other token are customizable but sane values for Arnold are: - `maya///_` + `//_` token is supported also, useful for multiple renderable cameras per render layer. 
@@ -39,11 +48,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Render Settings" hosts = ["maya"] families = ["renderlayer"] - actions = [openpype.api.RepairAction] + actions = [RepairAction] ImagePrefixes = { 'mentalray': 'defaultRenderGlobals.imageFilePrefix', @@ -55,12 +64,12 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): } ImagePrefixTokens = { - 'mentalray': 'maya///{aov_separator}', # noqa: E501 - 'arnold': 'maya///{aov_separator}', # noqa: E501 - 'redshift': 'maya///', - 'vray': 'maya///', + 'mentalray': '//{aov_separator}', # noqa: E501 + 'arnold': '//{aov_separator}', # noqa: E501 + 'redshift': '//', + 'vray': '//', 'renderman': '{aov_separator}..', - 'mayahardware2': 'maya///', + 'mayahardware2': '//', } _aov_chars = { @@ -71,7 +80,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): redshift_AOV_prefix = "/{aov_separator}" # noqa: E501 - renderman_dir_prefix = "maya//" + renderman_dir_prefix = "/" R_AOV_TOKEN = re.compile( r'%a||', re.IGNORECASE) @@ -81,8 +90,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): R_SCENE_TOKEN = re.compile(r'%s|', re.IGNORECASE) DEFAULT_PADDING = 4 - VRAY_PREFIX = "maya///" - DEFAULT_PREFIX = "maya///_" + VRAY_PREFIX = "//" + DEFAULT_PREFIX = "//_" def process(self, instance): @@ -94,6 +103,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): def get_invalid(cls, instance): invalid = False + multipart = False renderer = instance.data['renderer'] layer = instance.data['setMembers'] @@ -101,8 +111,9 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): # Get the node attributes for current renderer attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS['default']) + # Prefix attribute can return None when a value was never set prefix = lib.get_attr_in_layer(cls.ImagePrefixes[renderer], - layer=layer) + layer=layer) or "" padding = lib.get_attr_in_layer("{node}.{padding}".format(**attrs), layer=layer) @@ -112,22 +123,13 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): prefix = prefix.replace( "{aov_separator}", instance.data.get("aovSeparator", "_")) - required_prefix = "maya/" + default_prefix = cls.ImagePrefixTokens[renderer] if not anim_override: invalid = True cls.log.error("Animation needs to be enabled. Use the same " "frame for start and end to render single frame") - if renderer != "renderman" and not prefix.lower().startswith( - required_prefix): - invalid = True - cls.log.error( - ("Wrong image prefix [ {} ] " - " - doesn't start with: '{}'").format( - prefix, required_prefix) - ) - if not re.search(cls.R_LAYER_TOKEN, prefix): invalid = True cls.log.error("Wrong image prefix [ {} ] - " @@ -178,18 +180,22 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): redshift_AOV_prefix )) invalid = True - # get aov format - aov_ext = cmds.getAttr( - "{}.fileFormat".format(aov), asString=True) - default_ext = cmds.getAttr( - "redshiftOptions.imageFormat", asString=True) + # check aov file format + aov_ext = cmds.getAttr("{}.fileFormat".format(aov)) + default_ext = cmds.getAttr("redshiftOptions.imageFormat") + aov_type = cmds.getAttr("{}.aovType".format(aov)) + if aov_type == "Cryptomatte": + # redshift Cryptomatte AOV always uses "Cryptomatte (EXR)" + # so we ignore validating file format for it. 
+ pass - if default_ext != aov_ext: - cls.log.error(("AOV file format is not the same " - "as the one set globally " - "{} != {}").format(default_ext, - aov_ext)) + elif default_ext != aov_ext: + labels = get_redshift_image_format_labels() + cls.log.error( + "AOV file format {} does not match global file format " + "{}".format(labels[aov_ext], labels[default_ext]) + ) invalid = True if renderer == "renderman": @@ -213,14 +219,16 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): cls.log.error("Wrong image prefix [ {} ] - " "You can't use '' token " "with merge AOVs turned on".format(prefix)) + default_prefix = re.sub( + cls.R_AOV_TOKEN, "", default_prefix) + # remove aov token from prefix to pass validation + default_prefix = default_prefix.split("{aov_separator}")[0] elif not re.search(cls.R_AOV_TOKEN, prefix): invalid = True cls.log.error("Wrong image prefix [ {} ] - " "doesn't have: '' or " "token".format(prefix)) - # prefix check - default_prefix = cls.ImagePrefixTokens[renderer] default_prefix = default_prefix.replace( "{aov_separator}", instance.data.get("aovSeparator", "_")) if prefix.lower() != default_prefix.lower(): @@ -238,18 +246,32 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): instance.context.data["project_settings"]["maya"]["publish"]["ValidateRenderSettings"].get( # noqa: E501 "{}_render_attributes".format(renderer)) or [] ) + settings_lights_flag = instance.context.data["project_settings"].get( + "maya", {}).get( + "RenderSettings", {}).get( + "enable_all_lights", False) + + instance_lights_flag = instance.data.get("renderSetupIncludeLights") + if settings_lights_flag != instance_lights_flag: + cls.log.warning('Instance flag for "Render Setup Include Lights" is set to {0} and Settings flag is set to {1}'.format(instance_lights_flag, settings_lights_flag)) # noqa # go through definitions and test if such node.attribute exists. # if so, compare its value from the one required. for attr, value in OrderedDict(validation_settings).items(): - # first get node of that type cls.log.debug("{}: {}".format(attr, value)) - node_type = attr.split(".")[0] - attribute_name = ".".join(attr.split(".")[1:]) + if "." 
not in attr: + cls.log.warning("Skipping invalid attribute defined in " + "validation settings: '{}'".format(attr)) + continue + + node_type, attribute_name = attr.split(".", 1) + + # first get node of that type nodes = cmds.ls(type=node_type) - if not isinstance(nodes, list): - cls.log.warning("No nodes of '{}' found.".format(node_type)) + if not nodes: + cls.log.warning( + "No nodes of type '{}' found.".format(node_type)) continue for node in nodes: @@ -287,6 +309,9 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): default = lib.RENDER_ATTRS['default'] render_attrs = lib.RENDER_ATTRS.get(renderer, default) + # Repair animation must be enabled + cmds.setAttr("defaultRenderGlobals.animation", True) + # Repair prefix if renderer != "renderman": node = render_attrs["node"] @@ -319,8 +344,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): cmds.optionMenuGrp("vrayRenderElementSeparator", v=instance.data.get("aovSeparator", "_")) cmds.setAttr( - "{}.fileNameRenderElementSeparator".format( - node), + "{}.fileNameRenderElementSeparator".format(node), instance.data.get("aovSeparator", "_"), type="string" ) diff --git a/openpype/hosts/maya/plugins/publish/validate_resources.py b/openpype/hosts/maya/plugins/publish/validate_resources.py index 08f0f5467c..b7bd47ad0a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_resources.py +++ b/openpype/hosts/maya/plugins/publish/validate_resources.py @@ -2,7 +2,7 @@ import os from collections import defaultdict import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateResources(pyblish.api.InstancePlugin): @@ -17,7 +17,7 @@ class ValidateResources(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Resources Unique" def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py b/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py new file mode 100644 index 0000000000..361c594013 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +import collections +import pyblish.api +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) + + +class ValidateReviewSubsetUniqueness(pyblish.api.ContextPlugin): + """Validates that review subset has unique name.""" + + order = ValidateContentsOrder + hosts = ["maya"] + families = ["review"] + label = "Validate Review Subset Unique" + + def process(self, context): + subset_names = [] + + for instance in context: + self.log.debug("Instance: {}".format(instance.data)) + if instance.data.get('publish'): + subset_names.append(instance.data.get('subset')) + + non_unique = \ + [item + for item, count in collections.Counter(subset_names).items() + if count > 1] + msg = ("Instance subset names {} are not unique. 
".format(non_unique) + + "Ask admin to remove subset from DB for multiple reviews.") + formatting_data = { + "non_unique": ",".join(non_unique) + } + + if non_unique: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_contents.py b/openpype/hosts/maya/plugins/publish/validate_rig_contents.py index 6fe51d7b51..1096c95486 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_contents.py @@ -1,7 +1,7 @@ from maya import cmds import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateRigContents(pyblish.api.InstancePlugin): @@ -13,7 +13,7 @@ class ValidateRigContents(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Rig Contents" hosts = ["maya"] families = ["rig"] diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_controllers.py b/openpype/hosts/maya/plugins/publish/validate_rig_controllers.py index d5a1fd3529..1e42abdcd9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_controllers.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_controllers.py @@ -2,7 +2,10 @@ from maya import cmds import pyblish.api -import openpype.api +from openpype.pipeline.publish import ( + ValidateContentsOrder, + RepairAction, +) import openpype.hosts.maya.api.action from openpype.hosts.maya.api.lib import undo_chunk @@ -25,11 +28,11 @@ class ValidateRigControllers(pyblish.api.InstancePlugin): - Break all incoming connections to keyable attributes """ - order = openpype.api.ValidateContentsOrder + 0.05 + order = ValidateContentsOrder + 0.05 label = "Rig Controllers" hosts = ["maya"] families = ["rig"] - actions = [openpype.api.RepairAction, + actions = [RepairAction, openpype.hosts.maya.api.action.SelectInvalidAction] # Default controller values diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_controllers_arnold_attributes.py b/openpype/hosts/maya/plugins/publish/validate_rig_controllers_arnold_attributes.py index 1f1db9156b..55b2ebd6d8 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_controllers_arnold_attributes.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_controllers_arnold_attributes.py @@ -1,8 +1,11 @@ from maya import cmds import pyblish.api -import openpype.api +from openpype.pipeline.publish import ( + ValidateContentsOrder, + RepairAction, +) from openpype.hosts.maya.api import lib import openpype.hosts.maya.api.action @@ -26,11 +29,11 @@ class ValidateRigControllersArnoldAttributes(pyblish.api.InstancePlugin): This validator will ensure they are hidden or unkeyable attributes. 
""" - order = openpype.api.ValidateContentsOrder + 0.05 + order = ValidateContentsOrder + 0.05 label = "Rig Controllers (Arnold Attributes)" hosts = ["maya"] families = ["rig"] - actions = [openpype.api.RepairAction, + actions = [RepairAction, openpype.hosts.maya.api.action.SelectInvalidAction] attributes = [ diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py b/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py index 5df754fff4..d5bf7fd1cf 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py @@ -1,9 +1,13 @@ from maya import cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) class ValidateRigJointsHidden(pyblish.api.InstancePlugin): @@ -17,13 +21,13 @@ class ValidateRigJointsHidden(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['rig'] version = (0, 1, 0) label = "Joints Hidden" actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] @staticmethod def get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py b/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py index cc3723a6e1..03ba381f8d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py @@ -1,9 +1,13 @@ import maya.cmds as cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): @@ -16,13 +20,13 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["rig"] hosts = ['maya'] label = 'Rig Out Set Node Ids' actions = [ openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction + RepairAction ] allow_history_only = False diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py index 7c5c540c60..f3ed1a36ef 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py @@ -2,8 +2,11 @@ import pymel.core as pc import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, +) class ValidateRigOutputIds(pyblish.api.InstancePlugin): @@ -13,11 +16,11 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin): to ensure the id from the model is preserved through animation. 
""" - order = openpype.api.ValidateContentsOrder + 0.05 + order = ValidateContentsOrder + 0.05 label = "Rig Output Ids" hosts = ["maya"] families = ["rig"] - actions = [openpype.api.RepairAction, + actions = [RepairAction, openpype.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py b/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py index 174bc44a6f..ec2bea220d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py +++ b/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py @@ -3,7 +3,8 @@ import os import maya.cmds as cmds import pyblish.api -import openpype.api + +from openpype.pipeline.publish import ValidatePipelineOrder def is_subdir(path, root_dir): @@ -28,7 +29,7 @@ def is_subdir(path, root_dir): class ValidateSceneSetWorkspace(pyblish.api.ContextPlugin): """Validate the scene is inside the currently set Maya workspace""" - order = openpype.api.ValidatePipelineOrder + order = ValidatePipelineOrder hosts = ['maya'] category = 'scene' version = (0, 1, 0) diff --git a/openpype/hosts/maya/plugins/publish/validate_setdress_root.py b/openpype/hosts/maya/plugins/publish/validate_setdress_root.py index 0b4842d208..5fd971f8c4 100644 --- a/openpype/hosts/maya/plugins/publish/validate_setdress_root.py +++ b/openpype/hosts/maya/plugins/publish/validate_setdress_root.py @@ -1,13 +1,11 @@ - import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateSetdressRoot(pyblish.api.InstancePlugin): - """ - """ + """Validate if set dress top root node is published.""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "SetDress Root" hosts = ["maya"] families = ["setdress"] diff --git a/openpype/hosts/maya/plugins/publish/validate_shader_name.py b/openpype/hosts/maya/plugins/publish/validate_shader_name.py index 24111f0ad4..b3e51f011d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shader_name.py +++ b/openpype/hosts/maya/plugins/publish/validate_shader_name.py @@ -1,9 +1,10 @@ +import re from maya import cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action -import re +from openpype.pipeline.publish import ValidateContentsOrder class ValidateShaderName(pyblish.api.InstancePlugin): @@ -13,7 +14,7 @@ class ValidateShaderName(pyblish.api.InstancePlugin): """ optional = True - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["look"] hosts = ['maya'] label = 'Validate Shaders Name' diff --git a/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py b/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py index e08e06b50e..651c6bcec9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py +++ b/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py @@ -3,8 +3,12 @@ import re from maya import cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + ValidateContentsOrder, + RepairAction, +) def short_name(node): @@ -31,7 +35,7 @@ class ValidateShapeDefaultNames(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['model'] category = 'cleanup' @@ -39,7 +43,7 @@ class ValidateShapeDefaultNames(pyblish.api.InstancePlugin): version = (0, 1, 0) label = "Shape Default Naming" 
actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] @staticmethod def _define_default_name(shape): diff --git a/openpype/hosts/maya/plugins/publish/validate_shape_render_stats.py b/openpype/hosts/maya/plugins/publish/validate_shape_render_stats.py index 714451bb98..f58c0aaf81 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shape_render_stats.py +++ b/openpype/hosts/maya/plugins/publish/validate_shape_render_stats.py @@ -1,20 +1,23 @@ import pyblish.api -import openpype.api from maya import cmds import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateMeshOrder, +) class ValidateShapeRenderStats(pyblish.api.Validator): """Ensure all render stats are set to the default values.""" - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ['maya'] families = ['model'] label = 'Shape Default Render Stats' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction] + RepairAction] defaults = {'castsShadows': 1, 'receiveShadows': 1, diff --git a/openpype/hosts/maya/plugins/publish/validate_shape_zero.py b/openpype/hosts/maya/plugins/publish/validate_shape_zero.py index 343eaccb7d..7a7e9a0aee 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shape_zero.py +++ b/openpype/hosts/maya/plugins/publish/validate_shape_zero.py @@ -1,9 +1,13 @@ from maya import cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ( + ValidateContentsOrder, + RepairAction, +) class ValidateShapeZero(pyblish.api.Validator): @@ -13,13 +17,13 @@ class ValidateShapeZero(pyblish.api.Validator): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["model"] label = "Shape Zero (Freeze)" actions = [ openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.api.RepairAction + RepairAction ] @staticmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_single_assembly.py b/openpype/hosts/maya/plugins/publish/validate_single_assembly.py index 9fb3a47e6d..8771ca58d1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_single_assembly.py +++ b/openpype/hosts/maya/plugins/publish/validate_single_assembly.py @@ -1,5 +1,5 @@ import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateSingleAssembly(pyblish.api.InstancePlugin): @@ -17,7 +17,7 @@ class ValidateSingleAssembly(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['rig', 'animation'] label = 'Single Assembly' diff --git a/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py b/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py index 54a86d27cf..8221c18b17 100644 --- a/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py +++ b/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py @@ -1,7 +1,10 @@ # -*- coding: utf-8 -*- import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError + +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) from maya import cmds @@ -9,7 +12,7 @@ from maya import cmds class ValidateSkeletalMeshHierarchy(pyblish.api.InstancePlugin): """Validates that nodes has common root.""" - order = 
openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["skeletalMesh"] label = "Skeletal Mesh Top Node" diff --git a/openpype/hosts/maya/plugins/publish/validate_skinCluster_deformer_set.py b/openpype/hosts/maya/plugins/publish/validate_skinCluster_deformer_set.py index 8c804786f3..b45d2b120a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_skinCluster_deformer_set.py +++ b/openpype/hosts/maya/plugins/publish/validate_skinCluster_deformer_set.py @@ -1,8 +1,9 @@ from maya import cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateSkinclusterDeformerSet(pyblish.api.InstancePlugin): @@ -14,7 +15,7 @@ class ValidateSkinclusterDeformerSet(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['fbx'] label = "Skincluster Deformer Relationships" diff --git a/openpype/hosts/maya/plugins/publish/validate_step_size.py b/openpype/hosts/maya/plugins/publish/validate_step_size.py index 172ac5f26e..294458f63c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_step_size.py +++ b/openpype/hosts/maya/plugins/publish/validate_step_size.py @@ -1,6 +1,7 @@ import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateStepSize(pyblish.api.InstancePlugin): @@ -10,7 +11,7 @@ class ValidateStepSize(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = 'Step size' families = ['camera', 'pointcache', diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py index 6f5ff24b9c..4615e2ec07 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py +++ b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py @@ -3,8 +3,9 @@ from maya import cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin): @@ -27,7 +28,7 @@ class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ['maya'] families = ['model'] category = 'cleanup' diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_zero.py b/openpype/hosts/maya/plugins/publish/validate_transform_zero.py index fdd09658d1..da569195e8 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_zero.py +++ b/openpype/hosts/maya/plugins/publish/validate_transform_zero.py @@ -1,8 +1,9 @@ from maya import cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateTransformZero(pyblish.api.Validator): @@ -14,7 +15,7 @@ class ValidateTransformZero(pyblish.api.Validator): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["model"] category = "geometry" diff --git a/openpype/plugins/publish/validate_unique_names.py b/openpype/hosts/maya/plugins/publish/validate_unique_names.py similarity index 86% rename from openpype/plugins/publish/validate_unique_names.py rename to 
openpype/hosts/maya/plugins/publish/validate_unique_names.py index 459c90e6c1..05776ee0f3 100644 --- a/openpype/plugins/publish/validate_unique_names.py +++ b/openpype/hosts/maya/plugins/publish/validate_unique_names.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.api import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateUniqueNames(pyblish.api.Validator): @@ -12,7 +12,7 @@ class ValidateUniqueNames(pyblish.api.Validator): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["model"] label = "Unique transform name" @@ -23,7 +23,7 @@ class ValidateUniqueNames(pyblish.api.Validator): """Returns the invalid transforms in the instance. Returns: - list: Non unique name transforms + list: Non-unique name transforms. """ diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py b/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py index c05121a1b0..4211e76a73 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py @@ -2,13 +2,15 @@ from maya import cmds import pyblish.api -import openpype.api + +from openpype.pipeline.publish import ValidateMeshOrder +import openpype.hosts.maya.api.action class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin): """Validate if mesh is made of triangles for Unreal Engine""" - order = openpype.api.ValidateMeshOrder + order = ValidateMeshOrder hosts = ["maya"] families = ["staticMesh"] category = "geometry" diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py index 33788d1835..1425190b82 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py @@ -3,10 +3,11 @@ import re import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action from openpype.pipeline import legacy_io -from openpype.api import get_project_settings +from openpype.settings import get_project_settings +from openpype.pipeline.publish import ValidateContentsOrder class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): @@ -50,7 +51,7 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): """ optional = True - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["staticMesh"] label = "Unreal Static Mesh Name" diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py b/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py index 5e1b04889f..dd699735d9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py @@ -2,7 +2,11 @@ from maya import cmds import pyblish.api -import openpype.api + +from openpype.pipeline.publish import ( + ValidateContentsOrder, + RepairAction, +) class ValidateUnrealUpAxis(pyblish.api.ContextPlugin): @@ -10,11 +14,11 @@ class ValidateUnrealUpAxis(pyblish.api.ContextPlugin): optional = True active = False - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["staticMesh"] label = "Unreal Up-Axis check" - actions = [openpype.api.RepairAction] + actions = [RepairAction] def process(self, context): assert 
cmds.upAxis(q=True, axis=True) == "z", ( diff --git a/openpype/hosts/maya/plugins/publish/validate_visible_only.py b/openpype/hosts/maya/plugins/publish/validate_visible_only.py new file mode 100644 index 0000000000..faf634f258 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_visible_only.py @@ -0,0 +1,51 @@ +import pyblish.api + +from openpype.hosts.maya.api.lib import iter_visible_nodes_in_range +import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder + + +class ValidateAlembicVisibleOnly(pyblish.api.InstancePlugin): + """Validates at least a single node is visible in frame range. + + This validation only validates if the `visibleOnly` flag is enabled + on the instance - otherwise the validation is skipped. + + """ + order = ValidateContentsOrder + 0.05 + label = "Alembic Visible Only" + hosts = ["maya"] + families = ["pointcache", "animation"] + actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + + def process(self, instance): + + if not instance.data.get("visibleOnly", False): + self.log.debug("Visible only is disabled. Validation skipped..") + return + + invalid = self.get_invalid(instance) + if invalid: + start, end = self.get_frame_range(instance) + raise RuntimeError("No visible nodes found in " + "frame range {}-{}.".format(start, end)) + + @classmethod + def get_invalid(cls, instance): + + if instance.data["family"] == "animation": + # Special behavior to use the nodes in out_SET + nodes = instance.data["out_hierarchy"] + else: + nodes = instance[:] + + start, end = cls.get_frame_range(instance) + if not any(iter_visible_nodes_in_range(nodes, start, end)): + # Return the nodes we have considered so the user can identify + # them with the select invalid action + return nodes + + @staticmethod + def get_frame_range(instance): + data = instance.data + return data["frameStartHandle"], data["frameEndHandle"] diff --git a/openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py b/openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py index 5e35565383..366f3bd10e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py +++ b/openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py @@ -1,6 +1,9 @@ import pyblish.api -import openpype.api from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ( + ValidateContentsOrder, + RepairAction, +) from maya import cmds @@ -15,10 +18,10 @@ class ValidateVRayDistributedRendering(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "VRay Distributed Rendering" families = ["renderlayer"] - actions = [openpype.api.RepairAction] + actions = [RepairAction] # V-Ray attribute names enabled_attr = "vraySettings.sys_distributed_rendering_on" diff --git a/openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py b/openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py index 7a48c29b7d..39c721e717 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py +++ b/openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py @@ -4,7 +4,7 @@ import pyblish.api import types from maya import cmds -import openpype.hosts.maya.api.action +from openpype.pipeline.publish import RepairContextAction class ValidateVrayReferencedAOVs(pyblish.api.InstancePlugin): @@ -20,7 +20,7 @@ class ValidateVrayReferencedAOVs(pyblish.api.InstancePlugin): label = 'VRay Referenced AOVs' hosts = 
['maya'] families = ['renderlayer'] - actions = [openpype.api.RepairContextAction] + actions = [RepairContextAction] def process(self, instance): """Plugin main entry point.""" diff --git a/openpype/hosts/maya/plugins/publish/validate_vray_translator_settings.py b/openpype/hosts/maya/plugins/publish/validate_vray_translator_settings.py index 1deabde4a2..f49811c2c0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vray_translator_settings.py +++ b/openpype/hosts/maya/plugins/publish/validate_vray_translator_settings.py @@ -1,8 +1,11 @@ # -*- coding: utf-8 -*- """Validate VRay Translator settings.""" import pyblish.api -import openpype.api -from openpype.plugin import contextplugin_should_run +from openpype.pipeline.publish import ( + context_plugin_should_run, + RepairContextAction, + ValidateContentsOrder, +) from maya import cmds @@ -10,15 +13,15 @@ from maya import cmds class ValidateVRayTranslatorEnabled(pyblish.api.ContextPlugin): """Validate VRay Translator settings for extracting vrscenes.""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "VRay Translator Settings" families = ["vrayscene_layer"] - actions = [openpype.api.RepairContextAction] + actions = [RepairContextAction] def process(self, context): """Plugin entry point.""" # Workaround bug pyblish-base#250 - if not contextplugin_should_run(self, context): + if not context_plugin_should_run(self, context): return invalid = self.get_invalid(context) diff --git a/openpype/hosts/maya/plugins/publish/validate_vrayproxy_members.py b/openpype/hosts/maya/plugins/publish/validate_vrayproxy_members.py index b94e5cbbed..855a96e6b9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vrayproxy_members.py +++ b/openpype/hosts/maya/plugins/publish/validate_vrayproxy_members.py @@ -1,5 +1,4 @@ import pyblish.api -import openpype.api from maya import cmds diff --git a/openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py b/openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py index 79cd09315e..a864a18cee 100644 --- a/openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py +++ b/openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py @@ -1,7 +1,7 @@ from maya import cmds import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin): @@ -20,7 +20,7 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Yeti Render Script Callbacks" hosts = ["maya"] families = ["renderlayer"] diff --git a/openpype/hosts/maya/plugins/publish/validate_yeti_rig_cache_state.py b/openpype/hosts/maya/plugins/publish/validate_yeti_rig_cache_state.py index 5610733577..4842134b12 100644 --- a/openpype/hosts/maya/plugins/publish/validate_yeti_rig_cache_state.py +++ b/openpype/hosts/maya/plugins/publish/validate_yeti_rig_cache_state.py @@ -1,7 +1,7 @@ import pyblish.api -import openpype.action import maya.cmds as cmds import openpype.hosts.maya.api.action +from openpype.pipeline.publish import RepairAction class ValidateYetiRigCacheState(pyblish.api.InstancePlugin): @@ -17,7 +17,7 @@ class ValidateYetiRigCacheState(pyblish.api.InstancePlugin): label = "Yeti Rig Cache State" hosts = ["maya"] families = ["yetiRig"] - actions = [openpype.action.RepairAction, + actions = [RepairAction, openpype.hosts.maya.api.action.SelectInvalidAction] 
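Both actions attached throughout these plugins operate on class-level hooks rather than on any new API: SelectInvalidAction works from whatever the plugin's get_invalid() returns (the new ValidateAlembicVisibleOnly added earlier in this patch returns its candidate nodes for exactly that reason), and RepairAction invokes the plugin's repair hook. A hedged sketch of that convention; the class, attribute and node names are hypothetical, and the repair(cls, instance) signature is assumed from the repair code visible elsewhere in this patch:

from maya import cmds

import pyblish.api
import openpype.hosts.maya.api.action
from openpype.pipeline.publish import (
    ValidateContentsOrder,
    RepairAction,
)


class ValidateExampleJointsHidden(pyblish.api.InstancePlugin):
    """Hypothetical plugin showing the get_invalid()/repair() hooks."""

    order = ValidateContentsOrder
    hosts = ["maya"]
    families = ["rig"]
    label = "Example Joints Hidden"
    actions = [openpype.hosts.maya.api.action.SelectInvalidAction,
               RepairAction]

    @classmethod
    def get_invalid(cls, instance):
        # SelectInvalidAction selects whatever is returned here.
        joints = cmds.ls(instance[:], type="joint", long=True) or []
        return [joint for joint in joints
                if cmds.getAttr(joint + ".visibility")]

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError(
                "Joints should be hidden: {}".format(invalid))

    @classmethod
    def repair(cls, instance):
        # Assumed hook: RepairAction calls this for errored instances.
        for joint in cls.get_invalid(instance):
            cmds.setAttr(joint + ".visibility", False)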
def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py b/openpype/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py index 651c8da849..ebef44774d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py +++ b/openpype/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py @@ -1,14 +1,15 @@ from maya import cmds import pyblish.api -import openpype.api + import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ValidateContentsOrder class ValidateYetiRigInputShapesInInstance(pyblish.api.Validator): """Validate if all input nodes are part of the instance's hierarchy""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["maya"] families = ["yetiRig"] label = "Yeti Rig Input Shapes In Instance" diff --git a/openpype/hosts/maya/resources/workspace.mel b/openpype/hosts/maya/resources/workspace.mel deleted file mode 100644 index f7213fa4f6..0000000000 --- a/openpype/hosts/maya/resources/workspace.mel +++ /dev/null @@ -1,11 +0,0 @@ -//Maya 2018 Project Definition - -workspace -fr "shaders" "renderData/shaders"; -workspace -fr "alembicCache" "cache/alembic"; -workspace -fr "mayaAscii" ""; -workspace -fr "mayaBinary" ""; -workspace -fr "renderData" "renderData"; -workspace -fr "fileCache" "cache/nCache"; -workspace -fr "scene" ""; -workspace -fr "sourceImages" "sourceimages"; -workspace -fr "images" "renders"; diff --git a/openpype/hosts/maya/startup/userSetup.py b/openpype/hosts/maya/startup/userSetup.py index a3ab483add..40cd51f2d8 100644 --- a/openpype/hosts/maya/startup/userSetup.py +++ b/openpype/hosts/maya/startup/userSetup.py @@ -1,10 +1,11 @@ import os -from openpype.api import get_project_settings +from openpype.settings import get_project_settings from openpype.pipeline import install_host -from openpype.hosts.maya import api +from openpype.hosts.maya.api import MayaHost from maya import cmds -install_host(api) +host = MayaHost() +install_host(host) print("starting OpenPype usersetup") diff --git a/openpype/hosts/nuke/__init__.py b/openpype/hosts/nuke/__init__.py index 134a6621c4..8ab565939b 100644 --- a/openpype/hosts/nuke/__init__.py +++ b/openpype/hosts/nuke/__init__.py @@ -1,41 +1,10 @@ -import os -import platform +from .addon import ( + NUKE_ROOT_DIR, + NukeAddon, +) -def add_implementation_envs(env, _app): - # Add requirements to NUKE_PATH - pype_root = os.environ["OPENPYPE_REPOS_ROOT"] - new_nuke_paths = [ - os.path.join(pype_root, "openpype", "hosts", "nuke", "startup") - ] - old_nuke_path = env.get("NUKE_PATH") or "" - for path in old_nuke_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if norm_path not in new_nuke_paths: - new_nuke_paths.append(norm_path) - - env["NUKE_PATH"] = os.pathsep.join(new_nuke_paths) - env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) - - # Try to add QuickTime to PATH - quick_time_path = "C:/Program Files (x86)/QuickTime/QTSystem" - if platform.system() == "windows" and os.path.exists(quick_time_path): - path_value = env.get("PATH") or "" - path_paths = [ - path - for path in path_value.split(os.pathsep) - if path - ] - path_paths.append(quick_time_path) - env["PATH"] = os.pathsep.join(path_paths) - - # Set default values if are not already set via settings - defaults = { - "LOGLEVEL": "DEBUG" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value +__all__ = ( + "NUKE_ROOT_DIR", + "NukeAddon", +) diff --git 
a/openpype/hosts/nuke/addon.py b/openpype/hosts/nuke/addon.py new file mode 100644 index 0000000000..1c5d5c4005 --- /dev/null +++ b/openpype/hosts/nuke/addon.py @@ -0,0 +1,62 @@ +import os +import platform +from openpype.modules import OpenPypeModule, IHostAddon + +NUKE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class NukeAddon(OpenPypeModule, IHostAddon): + name = "nuke" + host_name = "nuke" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Add requirements to NUKE_PATH + new_nuke_paths = [ + os.path.join(NUKE_ROOT_DIR, "startup") + ] + old_nuke_path = env.get("NUKE_PATH") or "" + for path in old_nuke_path.split(os.pathsep): + if not path: + continue + + norm_path = os.path.normpath(path) + if norm_path not in new_nuke_paths: + new_nuke_paths.append(norm_path) + + env["NUKE_PATH"] = os.pathsep.join(new_nuke_paths) + env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) + + # Add vendor to PYTHONPATH + python_path = env["PYTHONPATH"] + python_path_parts = [] + if python_path: + python_path_parts = python_path.split(os.pathsep) + vendor_path = os.path.join(NUKE_ROOT_DIR, "vendor") + python_path_parts.insert(0, vendor_path) + env["PYTHONPATH"] = os.pathsep.join(python_path_parts) + + # Set default values if are not already set via settings + defaults = { + "LOGLEVEL": "DEBUG" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + # Try to add QuickTime to PATH + quick_time_path = "C:/Program Files (x86)/QuickTime/QTSystem" + if platform.system() == "windows" and os.path.exists(quick_time_path): + path_value = env.get("PATH") or "" + path_paths = [ + path + for path in path_value.split(os.pathsep) + if path + ] + path_paths.append(quick_time_path) + env["PATH"] = os.pathsep.join(path_paths) + + def get_workfile_extensions(self): + return [".nk"] diff --git a/openpype/hosts/nuke/api/__init__.py b/openpype/hosts/nuke/api/__init__.py index b571c4098c..c65058874b 100644 --- a/openpype/hosts/nuke/api/__init__.py +++ b/openpype/hosts/nuke/api/__init__.py @@ -8,9 +8,6 @@ from .workio import ( ) from .command import ( - reset_frame_range, - get_handles, - reset_resolution, viewer_update_and_undo_stop ) @@ -24,9 +21,15 @@ from .pipeline import ( containerise, parse_container, update_container, + + get_workfile_build_placeholder_plugins, ) from .lib import ( - maintained_selection + maintained_selection, + reset_selection, + get_view_process_node, + duplicate_node, + convert_knob_value_to_correct_type ) from .utils import ( @@ -42,9 +45,6 @@ __all__ = ( "current_file", "work_root", - "reset_frame_range", - "get_handles", - "reset_resolution", "viewer_update_and_undo_stop", "OpenPypeCreator", @@ -57,7 +57,13 @@ __all__ = ( "parse_container", "update_container", + "get_workfile_build_placeholder_plugins", + "maintained_selection", + "reset_selection", + "get_view_process_node", + "duplicate_node", + "convert_knob_value_to_correct_type", "colorspace_exists_on_node", "get_colorspace_list" diff --git a/openpype/hosts/nuke/api/actions.py b/openpype/hosts/nuke/api/actions.py index c4a6f0fb84..92b83560da 100644 --- a/openpype/hosts/nuke/api/actions.py +++ b/openpype/hosts/nuke/api/actions.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.api import get_errored_instances_from_context +from openpype.pipeline.publish import get_errored_instances_from_context from .lib import ( reset_selection, select_nodes diff --git a/openpype/hosts/nuke/api/command.py b/openpype/hosts/nuke/api/command.py index 
c756c48a12..2f772469d8 100644 --- a/openpype/hosts/nuke/api/command.py +++ b/openpype/hosts/nuke/api/command.py @@ -1,124 +1,10 @@ import logging import contextlib import nuke -from bson.objectid import ObjectId - -from openpype.pipeline import legacy_io log = logging.getLogger(__name__) -def reset_frame_range(): - """ Set frame range to current asset - Also it will set a Viewer range with - displayed handles - """ - - fps = float(legacy_io.Session.get("AVALON_FPS", 25)) - - nuke.root()["fps"].setValue(fps) - name = legacy_io.Session["AVALON_ASSET"] - asset = legacy_io.find_one({"name": name, "type": "asset"}) - asset_data = asset["data"] - - handles = get_handles(asset) - - frame_start = int(asset_data.get( - "frameStart", - asset_data.get("edit_in"))) - - frame_end = int(asset_data.get( - "frameEnd", - asset_data.get("edit_out"))) - - if not all([frame_start, frame_end]): - missing = ", ".join(["frame_start", "frame_end"]) - msg = "'{}' are not set for asset '{}'!".format(missing, name) - log.warning(msg) - nuke.message(msg) - return - - frame_start -= handles - frame_end += handles - - nuke.root()["first_frame"].setValue(frame_start) - nuke.root()["last_frame"].setValue(frame_end) - - # setting active viewers - vv = nuke.activeViewer().node() - vv["frame_range_lock"].setValue(True) - vv["frame_range"].setValue("{0}-{1}".format( - int(asset_data["frameStart"]), - int(asset_data["frameEnd"])) - ) - - -def get_handles(asset): - """ Gets handles data - - Arguments: - asset (dict): avalon asset entity - - Returns: - handles (int) - """ - data = asset["data"] - if "handles" in data and data["handles"] is not None: - return int(data["handles"]) - - parent_asset = None - if "visualParent" in data: - vp = data["visualParent"] - if vp is not None: - parent_asset = legacy_io.find_one({"_id": ObjectId(vp)}) - - if parent_asset is None: - parent_asset = legacy_io.find_one({"_id": ObjectId(asset["parent"])}) - - if parent_asset is not None: - return get_handles(parent_asset) - else: - return 0 - - -def reset_resolution(): - """Set resolution to project resolution.""" - project = legacy_io.find_one({"type": "project"}) - p_data = project["data"] - - width = p_data.get("resolution_width", - p_data.get("resolutionWidth")) - height = p_data.get("resolution_height", - p_data.get("resolutionHeight")) - - if not all([width, height]): - missing = ", ".join(["width", "height"]) - msg = "No resolution information `{0}` found for '{1}'.".format( - missing, - project["name"]) - log.warning(msg) - nuke.message(msg) - return - - current_width = nuke.root()["format"].value().width() - current_height = nuke.root()["format"].value().height() - - if width != current_width or height != current_height: - - fmt = None - for f in nuke.formats(): - if f.width() == width and f.height() == height: - fmt = f.name() - - if not fmt: - nuke.addFormat( - "{0} {1} {2}".format(int(width), int(height), project["name"]) - ) - fmt = project["name"] - - nuke.root()["format"].setValue(fmt) - - @contextlib.contextmanager def viewer_update_and_undo_stop(): """Lock viewer from updating and stop recording undo steps""" diff --git a/openpype/hosts/nuke/api/gizmo_menu.py b/openpype/hosts/nuke/api/gizmo_menu.py new file mode 100644 index 0000000000..9edfc62e3b --- /dev/null +++ b/openpype/hosts/nuke/api/gizmo_menu.py @@ -0,0 +1,86 @@ +import os +import re +import nuke + +from openpype.lib import Logger + +log = Logger.get_logger(__name__) + + +class GizmoMenu(): + def __init__(self, title, icon=None): + + self.toolbar = 
self._create_toolbar_menu( + title, + icon=icon + ) + + self._script_actions = [] + + def _create_toolbar_menu(self, name, icon=None): + nuke_node_menu = nuke.menu("Nodes") + return nuke_node_menu.addMenu( + name, + icon=icon + ) + + def _make_menu_path(self, path, icon=None): + parent = self.toolbar + for folder in re.split(r"/|\\", path): + if not folder: + continue + existing_menu = parent.findItem(folder) + if existing_menu: + parent = existing_menu + else: + parent = parent.addMenu(folder, icon=icon) + + return parent + + def build_from_configuration(self, configuration): + for menu in configuration: + # Construct parent path else parent is toolbar + parent = self.toolbar + gizmo_toolbar_path = menu.get("gizmo_toolbar_path") + if gizmo_toolbar_path: + parent = self._make_menu_path(gizmo_toolbar_path) + + for item in menu["sub_gizmo_list"]: + assert isinstance(item, dict), "Configuration is wrong!" + + if not item.get("title"): + continue + + item_type = item.get("sourcetype") + + if item_type == ("python" or "file"): + parent.addCommand( + item["title"], + command=str(item["command"]), + icon=item.get("icon"), + shortcut=item.get("hotkey") + ) + + # add separator + # Special behavior for separators + elif item_type == "separator": + parent.addSeparator() + + # add submenu + # items should hold a collection of submenu items (dict) + elif item_type == "menu": + # assert "items" in item, "Menu is missing 'items' key" + parent.addMenu( + item['title'], + icon=item.get('icon') + ) + + def add_gizmo_path(self, gizmo_paths): + for gizmo_path in gizmo_paths: + if os.path.isdir(gizmo_path): + for folder in os.listdir(gizmo_path): + if os.path.isdir(os.path.join(gizmo_path, folder)): + nuke.pluginAddPath(os.path.join(gizmo_path, folder)) + nuke.pluginAddPath(gizmo_path) + else: + log.warning("This path doesn't exist: {}".format(gizmo_path)) diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index ba8aa7a8db..2691b7447a 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -3,32 +3,50 @@ from pprint import pformat import re import six import platform +import tempfile import contextlib from collections import OrderedDict import clique -from bson.objectid import ObjectId import nuke +from Qt import QtCore, QtWidgets -from openpype.api import ( +from openpype.client import ( + get_project, + get_asset_by_name, + get_versions, + get_last_versions, + get_representations, +) + +from openpype.host import HostDirmap +from openpype.tools.utils import host_tools +from openpype.lib import ( + env_value_to_bool, Logger, - Anatomy, - BuildWorkfile, get_version_from_path, +) + +from openpype.settings import ( + get_project_settings, get_anatomy_settings, - get_workdir_data, - get_asset, get_current_project_settings, ) -from openpype.tools.utils import host_tools -from openpype.lib.path_tools import HostDirmap -from openpype.settings import get_project_settings from openpype.modules import ModulesManager +from openpype.pipeline.template_data import get_template_data_with_names from openpype.pipeline import ( discover_legacy_creator_plugins, legacy_io, + Anatomy, ) +from openpype.pipeline.context_tools import ( + get_current_project_asset, + get_custom_workfile_template_from_session +) +from openpype.pipeline.workfile import BuildWorkfile + +from . 
import gizmo_menu from .workio import ( save_file, @@ -52,11 +70,31 @@ class Context: main_window = None context_label = None project_name = os.getenv("AVALON_PROJECT") + # Workfile related code workfiles_launched = False + workfiles_tool_timer = None + # Seems unused _project_doc = None +def get_main_window(): + """Acquire Nuke's main window""" + if Context.main_window is None: + from Qt import QtWidgets + + top_widgets = QtWidgets.QApplication.topLevelWidgets() + name = "Foundry::UI::DockMainWindow" + for widget in top_widgets: + if ( + widget.inherits("QMainWindow") + and widget.metaObject().className() == name + ): + Context.main_window = widget + break + return Context.main_window + + class Knobby(object): """For creating knob which it's type isn't mapped in `create_knobs` @@ -373,7 +411,7 @@ def add_write_node_legacy(name, **kwarg): Returns: node (obj): nuke write node """ - frame_range = kwarg.get("use_range_limit", None) + use_range_limit = kwarg.get("use_range_limit", None) w = nuke.createNode( "Write", @@ -391,10 +429,10 @@ def add_write_node_legacy(name, **kwarg): log.debug(e) continue - if frame_range: + if use_range_limit: w["use_limit"].setValue(True) - w["first"].setValue(frame_range[0]) - w["last"].setValue(frame_range[1]) + w["first"].setValue(kwarg["frame_range"][0]) + w["last"].setValue(kwarg["frame_range"][1]) return w @@ -409,7 +447,7 @@ def add_write_node(name, file_path, knobs, **kwarg): Returns: node (obj): nuke write node """ - frame_range = kwarg.get("use_range_limit", None) + use_range_limit = kwarg.get("use_range_limit", None) w = nuke.createNode( "Write", @@ -420,10 +458,10 @@ def add_write_node(name, file_path, knobs, **kwarg): # finally add knob overrides set_node_knobs_from_settings(w, knobs, **kwarg) - if frame_range: + if use_range_limit: w["use_limit"].setValue(True) - w["first"].setValue(frame_range[0]) - w["last"].setValue(frame_range[1]) + w["first"].setValue(kwarg["frame_range"][0]) + w["last"].setValue(kwarg["frame_range"][1]) return w @@ -525,7 +563,15 @@ def get_node_path(path, padding=4): def get_nuke_imageio_settings(): - return get_anatomy_settings(Context.project_name)["imageio"]["nuke"] + project_imageio = get_project_settings( + Context.project_name)["nuke"]["imageio"] + + # backward compatibility for project started before 3.10 + # those are still having `__legacy__` knob types + if not project_imageio["enabled"]: + return get_anatomy_settings(Context.project_name)["imageio"]["nuke"] + + return get_project_settings(Context.project_name)["nuke"]["imageio"] def get_created_node_imageio_setting_legacy(nodeclass, creator, subset): @@ -537,7 +583,9 @@ def get_created_node_imageio_setting_legacy(nodeclass, creator, subset): imageio_nodes = get_nuke_imageio_settings()["nodes"] required_nodes = imageio_nodes["requiredNodes"] - override_nodes = imageio_nodes["overrideNodes"] + + # HACK: for backward compatibility this needs to be optional + override_nodes = imageio_nodes.get("overrideNodes", []) imageio_node = None for node in required_nodes: @@ -707,6 +755,20 @@ def get_imageio_input_colorspace(filename): return preset_clrsp +def get_view_process_node(): + reset_selection() + + ipn_orig = None + for v in nuke.allNodes(filter="Viewer"): + ipn = v['input_process_node'].getValue() + if "VIEWER_INPUT" not in ipn: + ipn_orig = nuke.toNode(ipn) + ipn_orig.setSelected(True) + + if ipn_orig: + return duplicate_node(ipn_orig) + + def on_script_load(): ''' Callback for ffmpeg support ''' @@ -730,47 +792,84 @@ def check_inventory_versions(): from .pipeline 
import parse_container # get all Loader nodes by avalon attribute metadata - for each in nuke.allNodes(): - container = parse_container(each) + node_with_repre_id = [] + repre_ids = set() + # Find all containers and collect it's node and representation ids + for node in nuke.allNodes(): + container = parse_container(node) if container: node = nuke.toNode(container["objectName"]) avalon_knob_data = read_avalon_data(node) + repre_id = avalon_knob_data["representation"] - # get representation from io - representation = legacy_io.find_one({ - "type": "representation", - "_id": ObjectId(avalon_knob_data["representation"]) - }) + repre_ids.add(repre_id) + node_with_repre_id.append((node, repre_id)) - # Failsafe for not finding the representation. - if not representation: - log.warning( - "Could not find the representation on " - "node \"{}\"".format(node.name()) - ) - continue + # Skip if nothing was found + if not repre_ids: + return - # Get start frame from version data - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + # Find representations based on found containers + repre_docs = get_representations( + project_name, + representation_ids=repre_ids, + fields=["_id", "parent"] + ) + # Store representations by id and collect version ids + repre_docs_by_id = {} + version_ids = set() + for repre_doc in repre_docs: + # Use stringed representation id to match value in containers + repre_id = str(repre_doc["_id"]) + repre_docs_by_id[repre_id] = repre_doc + version_ids.add(repre_doc["parent"]) - # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct("name") + version_docs = get_versions( + project_name, version_ids, fields=["_id", "name", "parent"] + ) + # Store versions by id and collect subset ids + version_docs_by_id = {} + subset_ids = set() + for version_doc in version_docs: + version_docs_by_id[version_doc["_id"]] = version_doc + subset_ids.add(version_doc["parent"]) - max_version = max(versions) + # Query last versions based on subset ids + last_versions_by_subset_id = get_last_versions( + project_name, subset_ids=subset_ids, fields=["_id", "parent"] + ) - # check the available version and do match - # change color of node if not max version - if version.get("name") not in [max_version]: - node["tile_color"].setValue(int("0xd84f20ff", 16)) - else: - node["tile_color"].setValue(int("0x4ecd25ff", 16)) + # Loop through collected container nodes and their representation ids + for item in node_with_repre_id: + # Some python versions of nuke can't unfold tuple in for loop + node, repre_id = item + repre_doc = repre_docs_by_id.get(repre_id) + # Failsafe for not finding the representation. 
+ if not repre_doc: + log.warning(( + "Could not find the representation on node \"{}\"" + ).format(node.name())) + continue + + version_id = repre_doc["parent"] + version_doc = version_docs_by_id.get(version_id) + if not version_doc: + log.warning(( + "Could not find the version on node \"{}\"" + ).format(node.name())) + continue + + # Get last version based on subset id + subset_id = version_doc["parent"] + last_version = last_versions_by_subset_id[subset_id] + # Check if last version is same as current version + if last_version["_id"] == version_doc["_id"]: + color_value = "0x4ecd25ff" + else: + color_value = "0xd84f20ff" + node["tile_color"].setValue(int(color_value, 16)) def writes_version_sync(): @@ -841,19 +940,17 @@ def get_render_path(node): ''' Generate Render path from presets regarding avalon knob data ''' avalon_knob_data = read_avalon_data(node) - data = {'avalon': avalon_knob_data} nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["family"], + node_class=avalon_knob_data["families"], plugin_name=avalon_knob_data["creator"], subset=avalon_knob_data["subset"] ) - host_name = os.environ.get("AVALON_APP") - data.update({ - "app": host_name, + data = { + "avalon": avalon_knob_data, "nuke_imageio_writes": nuke_imageio_writes - }) + } anatomy_filled = format_anatomy(data) return anatomy_filled["render"]["path"].replace("\\", "/") @@ -895,15 +992,12 @@ def format_anatomy(data): file = script_name() data["version"] = get_version_from_path(file) - project_doc = legacy_io.find_one({"type": "project"}) - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": data["avalon"]["asset"] - }) + project_name = anatomy.project_name + asset_name = data["avalon"]["asset"] task_name = os.environ["AVALON_TASK"] host_name = os.environ["AVALON_APP"] - context_data = get_workdir_data( - project_doc, asset_doc, task_name, host_name + context_data = get_template_data_with_names( + project_name, asset_name, task_name, host_name ) data.update(context_data) data.update({ @@ -1061,10 +1155,8 @@ def create_write_node( if knob["name"] == "file_type": representation = knob["value"] - host_name = os.environ.get("AVALON_APP") try: data.update({ - "app": host_name, "imageio_writes": imageio_writes, "representation": representation, }) @@ -1528,28 +1620,35 @@ def set_node_knobs_from_settings(node, knob_settings, **kwargs): if not knob_value: continue - # first convert string types to string - # just to ditch unicode - if isinstance(knob_value, six.text_type): - knob_value = str(knob_value) - - # set correctly knob types - if knob_type == "bool": - knob_value = bool(knob_value) - elif knob_type == "decimal_number": - knob_value = float(knob_value) - elif knob_type == "number": - knob_value = int(knob_value) - elif knob_type == "text": - knob_value = knob_value - elif knob_type == "color_gui": - knob_value = color_gui_to_int(knob_value) - elif knob_type in ["2d_vector", "3d_vector", "color"]: - knob_value = [float(v) for v in knob_value] + knob_value = convert_knob_value_to_correct_type( + knob_type, knob_value) node[knob_name].setValue(knob_value) +def convert_knob_value_to_correct_type(knob_type, knob_value): + # first convert string types to string + # just to ditch unicode + if isinstance(knob_value, six.text_type): + knob_value = str(knob_value) + + # set correctly knob types + if knob_type == "bool": + knob_value = bool(knob_value) + elif knob_type == "decimal_number": + knob_value = float(knob_value) + elif knob_type == "number": + knob_value = int(knob_value) + elif 
knob_type == "text": + knob_value = knob_value + elif knob_type == "color_gui": + knob_value = color_gui_to_int(knob_value) + elif knob_type in ["2d_vector", "3d_vector", "color"]: + knob_value = [float(v) for v in knob_value] + + return knob_value + + def color_gui_to_int(color_gui): hex_value = ( "0x{0:0>2x}{1:0>2x}{2:0>2x}{3:0>2x}").format(*color_gui) @@ -1688,17 +1787,18 @@ class WorkfileSettings(object): """ - def __init__(self, - root_node=None, - nodes=None, - **kwargs): - Context._project_doc = kwargs.get( - "project") or legacy_io.find_one({"type": "project"}) + def __init__(self, root_node=None, nodes=None, **kwargs): + project_doc = kwargs.get("project") + if project_doc is None: + project_name = legacy_io.active_project() + project_doc = get_project(project_name) + + Context._project_doc = project_doc self._asset = ( kwargs.get("asset_name") or legacy_io.Session["AVALON_ASSET"] ) - self._asset_entity = get_asset(self._asset) + self._asset_entity = get_current_project_asset(self._asset) self._root_node = root_node or nuke.root() self._nodes = self.get_nodes(nodes=nodes) @@ -1857,7 +1957,7 @@ class WorkfileSettings(object): families.append(avalon_knob_data.get("families")) nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["family"], + node_class=avalon_knob_data["families"], plugin_name=avalon_knob_data["creator"], subset=avalon_knob_data["subset"] ) @@ -1879,15 +1979,25 @@ class WorkfileSettings(object): if not write_node: return - # write all knobs to node - for knob in nuke_imageio_writes["knobs"]: - value = knob["value"] - if isinstance(value, six.text_type): - value = str(value) - if str(value).startswith("0x"): - value = int(value, 16) + try: + # write all knobs to node + for knob in nuke_imageio_writes["knobs"]: + value = knob["value"] + if isinstance(value, six.text_type): + value = str(value) + if str(value).startswith("0x"): + value = int(value, 16) - write_node[knob["name"]].setValue(value) + log.debug("knob: {}| value: {}".format( + knob["name"], value + )) + write_node[knob["name"]].setValue(value) + except TypeError: + log.warning( + "Legacy workflow didnt work, switching to current") + + set_node_knobs_from_settings( + write_node, nuke_imageio_writes["knobs"]) def set_reads_colorspace(self, read_clrs_inputs): """ Setting colorspace to Read nodes @@ -1944,12 +2054,14 @@ class WorkfileSettings(object): # get imageio nuke_colorspace = get_nuke_imageio_settings() + log.info("Setting colorspace to workfile...") try: self.set_root_colorspace(nuke_colorspace["workfile"]) except AttributeError: msg = "set_colorspace(): missing `workfile` settings in template" nuke.message(msg) + log.info("Setting colorspace to viewers...") try: self.set_viewers_colorspace(nuke_colorspace["viewer"]) except AttributeError: @@ -1957,24 +2069,18 @@ class WorkfileSettings(object): nuke.message(msg) log.error(msg) + log.info("Setting colorspace to write nodes...") try: self.set_writes_colorspace() except AttributeError as _error: nuke.message(_error) log.error(_error) + log.info("Setting colorspace to read nodes...") read_clrs_inputs = nuke_colorspace["regexInputs"].get("inputs", []) if read_clrs_inputs: self.set_reads_colorspace(read_clrs_inputs) - try: - for key in nuke_colorspace: - log.debug("Preset's colorspace key: {}".format(key)) - except TypeError: - msg = "Nuke is not in templates! Contact your supervisor!" 
- nuke.message(msg) - log.error(msg) - def reset_frame_range_handles(self): """Set frame range to current asset""" @@ -2043,9 +2149,10 @@ class WorkfileSettings(object): def reset_resolution(self): """Set resolution to project resolution.""" log.info("Resetting resolution") - project = legacy_io.find_one({"type": "project"}) - asset = legacy_io.Session["AVALON_ASSET"] - asset = legacy_io.find_one({"name": asset, "type": "asset"}) + project_name = legacy_io.active_project() + project = get_project(project_name) + asset_name = legacy_io.Session["AVALON_ASSET"] + asset = get_asset_by_name(project_name, asset_name) asset_data = asset.get('data', {}) data = { @@ -2147,29 +2254,6 @@ class WorkfileSettings(object): set_context_favorites(favorite_items) -def get_hierarchical_attr(entity, attr, default=None): - attr_parts = attr.split('.') - value = entity - for part in attr_parts: - value = value.get(part) - if not value: - break - - if value or entity["type"].lower() == "project": - return value - - parent_id = entity["parent"] - if ( - entity["type"].lower() == "asset" - and entity.get("data", {}).get("visualParent") - ): - parent_id = entity["data"]["visualParent"] - - parent = legacy_io.find_one({"_id": parent_id}) - - return get_hierarchical_attr(parent, attr) - - def get_write_node_template_attr(node): ''' Gets all defined data from presets @@ -2178,15 +2262,14 @@ def get_write_node_template_attr(node): avalon_knob_data = read_avalon_data(node) # get template data nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["family"], + node_class=avalon_knob_data["families"], plugin_name=avalon_knob_data["creator"], subset=avalon_knob_data["subset"] ) + # collecting correct data - correct_data = OrderedDict({ - "file": get_render_path(node) - }) + correct_data = OrderedDict() # adding imageio knob presets for k, v in nuke_imageio_writes.items(): @@ -2343,12 +2426,19 @@ def select_nodes(nodes): def launch_workfiles_app(): - '''Function letting start workfiles after start of host - ''' - from openpype.lib import ( - env_value_to_bool - ) - from .pipeline import get_main_window + """Show workfiles tool on nuke launch. + + Trigger to show workfiles tool on application launch. Can be executed only + once all other calls are ignored. + + Workfiles tool show is deffered after application initialization using + QTimer. + """ + + if Context.workfiles_launched: + return + + Context.workfiles_launched = True # get all imortant settings open_at_start = env_value_to_bool( @@ -2359,20 +2449,49 @@ def launch_workfiles_app(): if not open_at_start: return - if not Context.workfiles_launched: - Context.workfiles_launched = True - main_window = get_main_window() - host_tools.show_workfiles(parent=main_window) + # Show workfiles tool using timer + # - this will be probably triggered during initialization in that case + # the application is not be able to show uis so it must be + # deffered using timer + # - timer should be processed when initialization ends + # When applications starts to process events. + timer = QtCore.QTimer() + timer.timeout.connect(_launch_workfile_app) + timer.setInterval(100) + Context.workfiles_tool_timer = timer + timer.start() + + +def _launch_workfile_app(): + # Safeguard to not show window when application is still starting up + # or is already closing down. 
+ closing_down = QtWidgets.QApplication.closingDown() + starting_up = QtWidgets.QApplication.startingUp() + + # Stop the timer if application finished start up of is closing down + if closing_down or not starting_up: + Context.workfiles_tool_timer.stop() + Context.workfiles_tool_timer = None + + # Skip if application is starting up or closing down + if starting_up or closing_down: + return + + # Make sure on top is enabled on first show so the window is not hidden + # under main nuke window + # - this happened on Centos 7 and it is because the focus of nuke + # changes to the main window after showing because of initialization + # which moves workfiles tool under it + host_tools.show_workfiles(parent=None, on_top=True) def process_workfile_builder(): - from openpype.lib import ( - env_value_to_bool, - get_custom_workfile_template - ) + # to avoid looping of the callback, remove it! + nuke.removeOnCreate(process_workfile_builder, nodeClass="Root") # get state from settings - workfile_builder = get_current_project_settings()["nuke"].get( + project_settings = get_current_project_settings() + workfile_builder = project_settings["nuke"].get( "workfile_builder", {}) # get all imortant settings @@ -2382,7 +2501,6 @@ def process_workfile_builder(): # get settings createfv_on = workfile_builder.get("create_first_version") or None - custom_templates = workfile_builder.get("custom_templates") or None builder_on = workfile_builder.get("builder_on_start") or None last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE") @@ -2390,8 +2508,8 @@ def process_workfile_builder(): # generate first version in file not existing and feature is enabled if createfv_on and not os.path.exists(last_workfile_path): # get custom template path if any - custom_template_path = get_custom_workfile_template( - custom_templates + custom_template_path = get_custom_workfile_template_from_session( + project_settings=project_settings ) # if custom template is defined @@ -2425,9 +2543,6 @@ def process_workfile_builder(): if not openlv_on or not os.path.exists(last_workfile_path): return - # to avoid looping of the callback, remove it! 
- nuke.removeOnCreate(process_workfile_builder, nodeClass="Root") - log.info("Opening last workfile...") # open workfile open_file(last_workfile_path) @@ -2498,21 +2613,81 @@ def recreate_instance(origin_node, avalon_data=None): return new_node -class NukeDirmap(HostDirmap): - def __init__(self, host_name, project_settings, sync_module, file_name): - """ - Args: - host_name (str): Nuke - project_settings (dict): settings of current project - sync_module (SyncServerModule): to limit reinitialization - file_name (str): full path of referenced file from workfiles - """ - self.host_name = host_name - self.project_settings = project_settings - self.file_name = file_name - self.sync_module = sync_module +def add_scripts_gizmo(): - self._mapping = None # cache mapping + # load configuration of custom menu + project_settings = get_project_settings(os.getenv("AVALON_PROJECT")) + platform_name = platform.system().lower() + + for gizmo_settings in project_settings["nuke"]["gizmo"]: + gizmo_list_definition = gizmo_settings["gizmo_definition"] + toolbar_name = gizmo_settings["toolbar_menu_name"] + # gizmo_toolbar_path = gizmo_settings["gizmo_toolbar_path"] + gizmo_source_dir = gizmo_settings.get( + "gizmo_source_dir", {}).get(platform_name) + toolbar_icon_path = gizmo_settings.get( + "toolbar_icon_path", {}).get(platform_name) + + if not gizmo_source_dir: + log.debug("Skipping studio gizmo `{}`, " + "no gizmo path found.".format(toolbar_name) + ) + return + + if not gizmo_list_definition: + log.debug("Skipping studio gizmo `{}`, " + "no definition found.".format(toolbar_name) + ) + return + + if toolbar_icon_path: + try: + toolbar_icon_path = toolbar_icon_path.format(**os.environ) + except KeyError as e: + log.error( + "This environment variable doesn't exist: {}".format(e) + ) + + existing_gizmo_path = [] + for source_dir in gizmo_source_dir: + try: + resolve_source_dir = source_dir.format(**os.environ) + except KeyError as e: + log.error( + "This environment variable doesn't exist: {}".format(e) + ) + continue + if not os.path.exists(resolve_source_dir): + log.warning( + "The source of gizmo `{}` does not exists".format( + resolve_source_dir + ) + ) + continue + existing_gizmo_path.append(resolve_source_dir) + + # run the launcher for Nuke toolbar + toolbar_menu = gizmo_menu.GizmoMenu( + title=toolbar_name, + icon=toolbar_icon_path + ) + + # apply configuration + toolbar_menu.add_gizmo_path(existing_gizmo_path) + toolbar_menu.build_from_configuration(gizmo_list_definition) + + +class NukeDirmap(HostDirmap): + def __init__(self, file_name, *args, **kwargs): + """ + Args: + file_name (str): full path of referenced file from workfiles + *args (tuple): Positional arguments for 'HostDirmap' class + **kwargs (dict): Keyword arguments for 'HostDirmap' class + """ + + self.file_name = file_name + super(NukeDirmap, self).__init__(*args, **kwargs) def on_enable_dirmap(self): pass @@ -2532,14 +2707,20 @@ class NukeDirmap(HostDirmap): class DirmapCache: """Caching class to get settings and sync_module easily and only once.""" + _project_name = None _project_settings = None _sync_module = None + @classmethod + def project_name(cls): + if cls._project_name is None: + cls._project_name = os.getenv("AVALON_PROJECT") + return cls._project_name + @classmethod def project_settings(cls): if cls._project_settings is None: - cls._project_settings = get_project_settings( - os.getenv("AVALON_PROJECT")) + cls._project_settings = get_project_settings(cls.project_name()) return cls._project_settings @classmethod @@ -2549,15 
+2730,63 @@ class DirmapCache: return cls._sync_module +@contextlib.contextmanager +def node_tempfile(): + """Create a temp file where node is pasted during duplication. + + This is to avoid using clipboard for node duplication. + """ + + tmp_file = tempfile.NamedTemporaryFile( + mode="w", prefix="openpype_nuke_temp_", suffix=".nk", delete=False + ) + tmp_file.close() + node_tempfile_path = tmp_file.name + + try: + # Yield the path where node can be copied + yield node_tempfile_path + + finally: + # Remove the file at the end + os.remove(node_tempfile_path) + + +def duplicate_node(node): + reset_selection() + + # select required node for duplication + node.setSelected(True) + + with node_tempfile() as filepath: + # copy selected to temp filepath + nuke.nodeCopy(filepath) + + # reset selection + reset_selection() + + # paste node and selection is on it only + dupli_node = nuke.nodePaste(filepath) + + # reset selection + reset_selection() + + return dupli_node + + def dirmap_file_name_filter(file_name): """Nuke callback function with single full path argument. Checks project settings for potential mapping from source to dest. """ - dirmap_processor = NukeDirmap("nuke", - DirmapCache.project_settings(), - DirmapCache.sync_module(), - file_name) + + dirmap_processor = NukeDirmap( + file_name, + "nuke", + DirmapCache.project_name(), + DirmapCache.project_settings(), + DirmapCache.sync_module(), + ) dirmap_processor.process_dirmap() if os.path.exists(dirmap_processor.file_name): return dirmap_processor.file_name @@ -2604,3 +2833,144 @@ def ls_img_sequence(path): } return False + + +def get_group_io_nodes(nodes): + """Get the input and the output of a group of nodes.""" + + if not nodes: + raise ValueError("there is no nodes in the list") + + input_node = None + output_node = None + + if len(nodes) == 1: + input_node = output_node = nodes[0] + + else: + for node in nodes: + if "Input" in node.name(): + input_node = node + + if "Output" in node.name(): + output_node = node + + if input_node is not None and output_node is not None: + break + + if input_node is None: + raise ValueError("No Input found") + + if output_node is None: + raise ValueError("No Output found") + return input_node, output_node + + +def get_extreme_positions(nodes): + """Get the 4 numbers that represent the box of a group of nodes.""" + + if not nodes: + raise ValueError("there is no nodes in the list") + + nodes_xpos = [n.xpos() for n in nodes] + \ + [n.xpos() + n.screenWidth() for n in nodes] + + nodes_ypos = [n.ypos() for n in nodes] + \ + [n.ypos() + n.screenHeight() for n in nodes] + + min_x, min_y = (min(nodes_xpos), min(nodes_ypos)) + max_x, max_y = (max(nodes_xpos), max(nodes_ypos)) + return min_x, min_y, max_x, max_y + + +def refresh_node(node): + """Correct a bug caused by the multi-threading of nuke. + + Refresh the node to make sure that it takes the desired attributes. + """ + + x = node.xpos() + y = node.ypos() + nuke.autoplaceSnap(node) + node.setXYpos(x, y) + + +def refresh_nodes(nodes): + for node in nodes: + refresh_node(node) + + +def get_names_from_nodes(nodes): + """Get list of nodes names. + + Args: + nodes(List[nuke.Node]): List of nodes to convert into names. + + Returns: + List[str]: Name of passed nodes. + """ + + return [ + node.name() + for node in nodes + ] + + +def get_nodes_by_names(names): + """Get list of nuke nodes based on their names. + + Args: + names (List[str]): List of node names to be found. + + Returns: + List[nuke.Node]: List of nodes found by name. 
+ """ + + return [ + nuke.toNode(name) + for name in names + ] + + +def get_viewer_config_from_string(input_string): + """Convert string to display and viewer string + + Args: + input_string (str): string with viewer + + Raises: + IndexError: if more then one slash in input string + IndexError: if missing closing bracket + + Returns: + tuple[str]: display, viewer + """ + display = None + viewer = input_string + # check if () or / or \ in name + if "/" in viewer: + split = viewer.split("/") + + # rise if more then one column + if len(split) > 2: + raise IndexError(( + "Viewer Input string is not correct. " + "more then two `/` slashes! {}" + ).format(input_string)) + + viewer = split[1] + display = split[0] + elif "(" in viewer: + pattern = r"([\w\d\s]+).*[(](.*)[)]" + result = re.findall(pattern, viewer) + try: + result = result.pop() + display = str(result[1]).rstrip() + viewer = str(result[0]).rstrip() + except IndexError: + raise IndexError(( + "Viewer Input string is not correct. " + "Missing bracket! {}" + ).format(input_string)) + + return (display, viewer) diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py index 2785eb65cd..fb707ca44c 100644 --- a/openpype/hosts/nuke/api/pipeline.py +++ b/openpype/hosts/nuke/api/pipeline.py @@ -7,12 +7,8 @@ import nuke import pyblish.api import openpype -from openpype.api import ( - Logger, - BuildWorkfile, - get_current_project_settings -) -from openpype.lib import register_event_callback +from openpype.settings import get_current_project_settings +from openpype.lib import register_event_callback, Logger from openpype.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, @@ -22,10 +18,13 @@ from openpype.pipeline import ( deregister_inventory_action_path, AVALON_CONTAINER_ID, ) +from openpype.pipeline.workfile import BuildWorkfile from openpype.tools.utils import host_tools from .command import viewer_update_and_undo_stop from .lib import ( + Context, + get_main_window, add_publish_knob, WorkfileSettings, process_workfile_builder, @@ -33,7 +32,13 @@ from .lib import ( check_inventory_versions, set_avalon_knob_data, read_avalon_data, - Context +) +from .workfile_template_builder import ( + NukePlaceholderLoadPlugin, + build_workfile_template, + update_workfile_template, + create_placeholder, + update_placeholder, ) log = Logger.get_logger(__name__) @@ -53,23 +58,6 @@ if os.getenv("PYBLISH_GUI", None): pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None)) -def get_main_window(): - """Acquire Nuke's main window""" - if Context.main_window is None: - from Qt import QtWidgets - - top_widgets = QtWidgets.QApplication.topLevelWidgets() - name = "Foundry::UI::DockMainWindow" - for widget in top_widgets: - if ( - widget.inherits("QMainWindow") - and widget.metaObject().className() == name - ): - Context.main_window = widget - break - return Context.main_window - - def reload_config(): """Attempt to reload pipeline at run-time. 
@@ -78,7 +66,6 @@ def reload_config(): """ for module in ( - "openpype.api", "openpype.hosts.nuke.api.actions", "openpype.hosts.nuke.api.menu", "openpype.hosts.nuke.api.plugin", @@ -120,8 +107,9 @@ def install(): nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root") nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root") nuke.addOnCreate(process_workfile_builder, nodeClass="Root") - nuke.addOnCreate(launch_workfiles_app, nodeClass="Root") + _install_menu() + launch_workfiles_app() def uninstall(): @@ -141,6 +129,20 @@ def uninstall(): _uninstall_menu() +def _show_workfiles(): + # Make sure parent is not set + # - this makes Workfiles tool as separated window which + # avoid issues with reopening + # - it is possible to explicitly change on top flag of the tool + host_tools.show_workfiles(parent=None, on_top=False) + + +def get_workfile_build_placeholder_plugins(): + return [ + NukePlaceholderLoadPlugin + ] + + def _install_menu(): # uninstall original avalon menu main_window = get_main_window() @@ -157,7 +159,7 @@ def _install_menu(): menu.addSeparator() menu.addCommand( "Work Files...", - lambda: host_tools.show_workfiles(parent=main_window) + _show_workfiles ) menu.addSeparator() @@ -210,6 +212,24 @@ def _install_menu(): lambda: BuildWorkfile().process() ) + menu_template = menu.addMenu("Template Builder") # creating template menu + menu_template.addCommand( + "Build Workfile from template", + lambda: build_workfile_template() + ) + menu_template.addCommand( + "Update Workfile", + lambda: update_workfile_template() + ) + menu_template.addSeparator() + menu_template.addCommand( + "Create Place Holder", + lambda: create_placeholder() + ) + menu_template.addCommand( + "Update Place Holder", + lambda: update_placeholder() + ) menu.addSeparator() menu.addCommand( "Experimental tools...", @@ -344,6 +364,9 @@ def containerise(node, set_avalon_knob_data(node, data) + # set tab to first native + node.setTab(0) + return node diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index 2bad6f2c78..5981a8b386 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -6,7 +6,7 @@ from abc import abstractmethod import nuke -from openpype.api import get_current_project_settings +from openpype.settings import get_current_project_settings from openpype.pipeline import ( LegacyCreator, LoaderPlugin, @@ -14,11 +14,13 @@ from openpype.pipeline import ( from .lib import ( Knobby, check_subsetname_exists, - reset_selection, maintained_selection, set_avalon_knob_data, add_publish_knob, - get_nuke_imageio_settings + get_nuke_imageio_settings, + set_node_knobs_from_settings, + get_view_process_node, + get_viewer_config_from_string ) @@ -180,8 +182,6 @@ class ExporterReview(object): # get first and last frame self.first_frame = min(self.collection.indexes) self.last_frame = max(self.collection.indexes) - if "slate" in self.instance.data["families"]: - self.first_frame += 1 else: self.fname = os.path.basename(self.path_in) self.fhead = os.path.splitext(self.fname)[0] + "." @@ -191,7 +191,20 @@ class ExporterReview(object): if "#" in self.fhead: self.fhead = self.fhead.replace("#", "")[:-1] - def get_representation_data(self, tags=None, range=False): + def get_representation_data( + self, tags=None, range=False, + custom_tags=None + ): + """ Add representation data to self.data + + Args: + tags (list[str], optional): list of defined tags. + Defaults to None. + range (bool, optional): flag for adding ranges. 
+ Defaults to False. + custom_tags (list[str], optional): user inputed custom tags. + Defaults to None. + """ add_tags = tags or [] repre = { "name": self.name, @@ -201,6 +214,9 @@ class ExporterReview(object): "tags": [self.name.replace("_", "-")] + add_tags } + if custom_tags: + repre["custom_tags"] = custom_tags + if range: repre.update({ "frameStart": self.first_frame, @@ -215,37 +231,6 @@ class ExporterReview(object): self.data["representations"].append(repre) - def get_view_input_process_node(self): - """ - Will get any active view process. - - Arguments: - self (class): in object definition - - Returns: - nuke.Node: copy node of Input Process node - """ - reset_selection() - ipn_orig = None - for v in nuke.allNodes(filter="Viewer"): - ip = v["input_process"].getValue() - ipn = v["input_process_node"].getValue() - if "VIEWER_INPUT" not in ipn and ip: - ipn_orig = nuke.toNode(ipn) - ipn_orig.setSelected(True) - - if ipn_orig: - # copy selected to clipboard - nuke.nodeCopy("%clipboard%") - # reset selection - reset_selection() - # paste node and selection is on it only - nuke.nodePaste("%clipboard%") - # assign to variable - ipn = nuke.selectedNode() - - return ipn - def get_imageio_baking_profile(self): from . import lib as opnlib nuke_imageio = opnlib.get_nuke_imageio_settings() @@ -310,7 +295,7 @@ class ExporterReviewLut(ExporterReview): self._temp_nodes = [] self.log.info("Deleted nodes...") - def generate_lut(self): + def generate_lut(self, **kwargs): bake_viewer_process = kwargs["bake_viewer_process"] bake_viewer_input_process_node = kwargs[ "bake_viewer_input_process"] @@ -328,7 +313,7 @@ class ExporterReviewLut(ExporterReview): if bake_viewer_process: # Node View Process if bake_viewer_input_process_node: - ipn = self.get_view_input_process_node() + ipn = get_view_process_node() if ipn is not None: # connect ipn.setInput(0, self.previous_node) @@ -344,7 +329,8 @@ class ExporterReviewLut(ExporterReview): dag_node.setInput(0, self.previous_node) self._temp_nodes.append(dag_node) self.previous_node = dag_node - self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes)) + self.log.debug( + "OCIODisplay... 
`{}`".format(self._temp_nodes)) # GenerateLUT gen_lut_node = nuke.createNode("GenerateLUT") @@ -447,6 +433,7 @@ class ExporterReviewMov(ExporterReview): return path def generate_mov(self, farm=False, **kwargs): + add_tags = [] self.publish_on_farm = farm read_raw = kwargs["read_raw"] reformat_node_add = kwargs["reformat_node_add"] @@ -465,10 +452,10 @@ class ExporterReviewMov(ExporterReview): self.log.debug(">> baking_view_profile `{}`".format( baking_view_profile)) - add_tags = kwargs.get("add_tags", []) + add_custom_tags = kwargs.get("add_custom_tags", []) self.log.info( - "__ add_tags: `{0}`".format(add_tags)) + "__ add_custom_tags: `{0}`".format(add_custom_tags)) subset = self.instance.data["subset"] self._temp_nodes[subset] = [] @@ -497,16 +484,7 @@ class ExporterReviewMov(ExporterReview): add_tags.append("reformated") rf_node = nuke.createNode("Reformat") - for kn_conf in reformat_node_config: - _type = kn_conf["type"] - k_name = str(kn_conf["name"]) - k_value = kn_conf["value"] - - # to remove unicode as nuke doesn't like it - if _type == "string": - k_value = str(kn_conf["value"]) - - rf_node[k_name].setValue(k_value) + set_node_knobs_from_settings(rf_node, reformat_node_config) # connect rf_node.setInput(0, self.previous_node) @@ -519,7 +497,7 @@ class ExporterReviewMov(ExporterReview): if bake_viewer_process: if bake_viewer_input_process_node: # View Process node - ipn = self.get_view_input_process_node() + ipn = get_view_process_node() if ipn is not None: # connect ipn.setInput(0, self.previous_node) @@ -532,7 +510,15 @@ class ExporterReviewMov(ExporterReview): if not self.viewer_lut_raw: # OCIODisplay dag_node = nuke.createNode("OCIODisplay") - dag_node["view"].setValue(str(baking_view_profile)) + + display, viewer = get_viewer_config_from_string( + str(baking_view_profile) + ) + if display: + dag_node["display"].setValue(display) + + # assign viewer + dag_node["view"].setValue(viewer) # connect dag_node.setInput(0, self.previous_node) @@ -583,6 +569,7 @@ class ExporterReviewMov(ExporterReview): # ---------- generate representation data self.get_representation_data( tags=["review", "delete"] + add_tags, + custom_tags=add_custom_tags, range=True ) diff --git a/openpype/hosts/nuke/api/utils.py b/openpype/hosts/nuke/api/utils.py index 5b0c607292..6bcb752dd1 100644 --- a/openpype/hosts/nuke/api/utils.py +++ b/openpype/hosts/nuke/api/utils.py @@ -1,7 +1,7 @@ import os import nuke -from openpype.api import resources +from openpype import resources from .lib import maintained_selection diff --git a/openpype/hosts/nuke/api/workfile_template_builder.py b/openpype/hosts/nuke/api/workfile_template_builder.py new file mode 100644 index 0000000000..7a2e442e32 --- /dev/null +++ b/openpype/hosts/nuke/api/workfile_template_builder.py @@ -0,0 +1,578 @@ +import collections + +import nuke + +from openpype.pipeline import registered_host +from openpype.pipeline.workfile.workfile_template_builder import ( + AbstractTemplateBuilder, + PlaceholderPlugin, + LoadPlaceholderItem, + PlaceholderLoadMixin, +) +from openpype.tools.workfile_template_build import ( + WorkfileBuildPlaceholderDialog, +) + +from .lib import ( + find_free_space_to_paste_nodes, + get_extreme_positions, + get_group_io_nodes, + imprint, + refresh_node, + refresh_nodes, + reset_selection, + get_names_from_nodes, + get_nodes_by_names, + select_nodes, + duplicate_node, + node_tempfile, +) + +PLACEHOLDER_SET = "PLACEHOLDERS_SET" + + +class NukeTemplateBuilder(AbstractTemplateBuilder): + """Concrete implementation of 
AbstractTemplateBuilder for Nuke""" + + def import_template(self, path): + """Import template into current scene. + Block if a template is already loaded. + + Args: + path (str): A path to current template (usually given by + get_template_path implementation) + + Returns: + bool: Whether the template was successfully imported or not + """ + + # TODO check if the template is already imported + + nuke.nodePaste(path) + reset_selection() + + return True + + +class NukePlaceholderPlugin(PlaceholderPlugin): + node_color = 4278190335 + + def _collect_scene_placeholders(self): + # Cache placeholder data to shared data + placeholder_nodes = self.builder.get_shared_populate_data( + "placeholder_nodes" + ) + if placeholder_nodes is None: + placeholder_nodes = {} + all_groups = collections.deque() + all_groups.append(nuke.thisGroup()) + while all_groups: + group = all_groups.popleft() + for node in group.nodes(): + if isinstance(node, nuke.Group): + all_groups.append(node) + + node_knobs = node.knobs() + if ( + "builder_type" not in node_knobs + or "is_placeholder" not in node_knobs + or not node.knob("is_placeholder").value() + ): + continue + + if "empty" in node_knobs and node.knob("empty").value(): + continue + + placeholder_nodes[node.fullName()] = node + + self.builder.set_shared_populate_data( + "placeholder_nodes", placeholder_nodes + ) + return placeholder_nodes + + def create_placeholder(self, placeholder_data): + placeholder_data["plugin_identifier"] = self.identifier + + placeholder = nuke.nodes.NoOp() + placeholder.setName("PLACEHOLDER") + placeholder.knob("tile_color").setValue(self.node_color) + + imprint(placeholder, placeholder_data) + imprint(placeholder, {"is_placeholder": True}) + placeholder.knob("is_placeholder").setVisible(False) + + def update_placeholder(self, placeholder_item, placeholder_data): + node = nuke.toNode(placeholder_item.scene_identifier) + imprint(node, placeholder_data) + + def _parse_placeholder_node_data(self, node): + placeholder_data = {} + for key in self.get_placeholder_keys(): + knob = node.knob(key) + value = None + if knob is not None: + value = knob.getValue() + placeholder_data[key] = value + return placeholder_data + + +class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin): + identifier = "nuke.load" + label = "Nuke load" + + def _parse_placeholder_node_data(self, node): + placeholder_data = super( + NukePlaceholderLoadPlugin, self + )._parse_placeholder_node_data(node) + + node_knobs = node.knobs() + nb_children = 0 + if "nb_children" in node_knobs: + nb_children = int(node_knobs["nb_children"].getValue()) + placeholder_data["nb_children"] = nb_children + + siblings = [] + if "siblings" in node_knobs: + siblings = node_knobs["siblings"].values() + placeholder_data["siblings"] = siblings + + node_full_name = node.fullName() + placeholder_data["group_name"] = node_full_name.rpartition(".")[0] + placeholder_data["last_loaded"] = [] + placeholder_data["delete"] = False + return placeholder_data + + def _get_loaded_repre_ids(self): + loaded_representation_ids = self.builder.get_shared_populate_data( + "loaded_representation_ids" + ) + if loaded_representation_ids is None: + loaded_representation_ids = set() + for node in nuke.allNodes(): + if "repre_id" in node.knobs(): + loaded_representation_ids.add( + node.knob("repre_id").getValue() + ) + + self.builder.set_shared_populate_data( + "loaded_representation_ids", loaded_representation_ids + ) + return loaded_representation_ids + + def _before_repre_load(self, placeholder,
representation): + placeholder.data["nodes_init"] = nuke.allNodes() + placeholder.data["last_repre_id"] = str(representation["_id"]) + + def collect_placeholders(self): + output = [] + scene_placeholders = self._collect_scene_placeholders() + for node_name, node in scene_placeholders.items(): + plugin_identifier_knob = node.knob("plugin_identifier") + if ( + plugin_identifier_knob is None + or plugin_identifier_knob.getValue() != self.identifier + ): + continue + + placeholder_data = self._parse_placeholder_node_data(node) + # TODO do data validations and maybe upgrades if they are invalid + output.append( + LoadPlaceholderItem(node_name, placeholder_data, self) + ) + + return output + + def populate_placeholder(self, placeholder): + self.populate_load_placeholder(placeholder) + + def repopulate_placeholder(self, placeholder): + repre_ids = self._get_loaded_repre_ids() + self.populate_load_placeholder(placeholder, repre_ids) + + def get_placeholder_options(self, options=None): + return self.get_load_plugin_options(options) + + def cleanup_placeholder(self, placeholder, failed): + # deselect all selected nodes + placeholder_node = nuke.toNode(placeholder.scene_identifier) + + # getting the latest nodes added + # TODO get from shared populate data! + nodes_init = placeholder.data["nodes_init"] + nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init)) + self.log.debug("Loaded nodes: {}".format(nodes_loaded)) + if not nodes_loaded: + return + + placeholder.data["delete"] = True + + nodes_loaded = self._move_to_placeholder_group( + placeholder, nodes_loaded + ) + placeholder.data["last_loaded"] = nodes_loaded + refresh_nodes(nodes_loaded) + + # positioning of the loaded nodes + min_x, min_y, _, _ = get_extreme_positions(nodes_loaded) + for node in nodes_loaded: + xpos = (node.xpos() - min_x) + placeholder_node.xpos() + ypos = (node.ypos() - min_y) + placeholder_node.ypos() + node.setXYpos(xpos, ypos) + refresh_nodes(nodes_loaded) + + # fix the problem of z_order for backdrops + self._fix_z_order(placeholder) + self._imprint_siblings(placeholder) + + if placeholder.data["nb_children"] == 0: + # save initial nodes positions and dimensions, update them + # and set inputs and outputs of loaded nodes + + self._imprint_inits() + self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded) + self._set_loaded_connections(placeholder) + + elif placeholder.data["siblings"]: + # create copies of placeholder siblings for the new loaded nodes, + # set their inputs and outputs and update all nodes positions and + # dimensions and siblings names + + siblings = get_nodes_by_names(placeholder.data["siblings"]) + refresh_nodes(siblings) + copies = self._create_sib_copies(placeholder) + new_nodes = list(copies.values()) # copied nodes + self._update_nodes(new_nodes, nodes_loaded) + placeholder_node.removeKnob(placeholder_node.knob("siblings")) + new_nodes_name = get_names_from_nodes(new_nodes) + imprint(placeholder_node, {"siblings": new_nodes_name}) + self._set_copies_connections(placeholder, copies) + + self._update_nodes( + nuke.allNodes(), + new_nodes + nodes_loaded, + 20 + ) + + new_siblings = get_names_from_nodes(new_nodes) + placeholder.data["siblings"] = new_siblings + + else: + # if the placeholder doesn't have siblings, the loaded + # nodes will be placed in a free space + + xpointer, ypointer = find_free_space_to_paste_nodes( + nodes_loaded, direction="bottom", offset=200 + ) + node = nuke.createNode("NoOp") + reset_selection() + nuke.delete(node) + for node in nodes_loaded: + xpos = (node.xpos() -
min_x) + xpointer + ypos = (node.ypos() - min_y) + ypointer + node.setXYpos(xpos, ypos) + + placeholder.data["nb_children"] += 1 + reset_selection() + # go back to root group + nuke.root().begin() + + def _move_to_placeholder_group(self, placeholder, nodes_loaded): + """ + opening the placeholder's group and copying loaded nodes in it. + + Returns : + nodes_loaded (list): the new list of pasted nodes + """ + + groups_name = placeholder.data["group_name"] + reset_selection() + select_nodes(nodes_loaded) + if groups_name: + with node_tempfile() as filepath: + nuke.nodeCopy(filepath) + for node in nuke.selectedNodes(): + nuke.delete(node) + group = nuke.toNode(groups_name) + group.begin() + nuke.nodePaste(filepath) + nodes_loaded = nuke.selectedNodes() + return nodes_loaded + + def _fix_z_order(self, placeholder): + """Fix the problem of z_order when a backdrop is loaded.""" + + nodes_loaded = placeholder.data["last_loaded"] + loaded_backdrops = [] + bd_orders = set() + for node in nodes_loaded: + if isinstance(node, nuke.BackdropNode): + loaded_backdrops.append(node) + bd_orders.add(node.knob("z_order").getValue()) + + if not bd_orders: + return + + sib_orders = set() + for node_name in placeholder.data["siblings"]: + node = nuke.toNode(node_name) + if isinstance(node, nuke.BackdropNode): + sib_orders.add(node.knob("z_order").getValue()) + + if not sib_orders: + return + + min_order = min(bd_orders) + max_order = max(sib_orders) + for backdrop_node in loaded_backdrops: + z_order = backdrop_node.knob("z_order").getValue() + backdrop_node.knob("z_order").setValue( + z_order + max_order - min_order + 1) + + def _imprint_siblings(self, placeholder): + """ + - add siblings names to placeholder attributes (nodes loaded with it) + - add Id to the attributes of all the other nodes + """ + + loaded_nodes = placeholder.data["last_loaded"] + loaded_nodes_set = set(loaded_nodes) + data = {"repre_id": str(placeholder.data["last_repre_id"])} + + for node in loaded_nodes: + node_knobs = node.knobs() + if "builder_type" not in node_knobs: + # save the id of representation for all imported nodes + imprint(node, data) + node.knob("repre_id").setVisible(False) + refresh_node(node) + continue + + if ( + "is_placeholder" not in node_knobs + or ( + "is_placeholder" in node_knobs + and node.knob("is_placeholder").value() + ) + ): + siblings = list(loaded_nodes_set - {node}) + siblings_name = get_names_from_nodes(siblings) + siblings = {"siblings": siblings_name} + imprint(node, siblings) + + def _imprint_inits(self): + """Add initial positions and dimensions to the attributes""" + + for node in nuke.allNodes(): + refresh_node(node) + imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()}) + node.knob("x_init").setVisible(False) + node.knob("y_init").setVisible(False) + width = node.screenWidth() + height = node.screenHeight() + if "bdwidth" in node.knobs(): + imprint(node, {"w_init": width, "h_init": height}) + node.knob("w_init").setVisible(False) + node.knob("h_init").setVisible(False) + refresh_node(node) + + def _update_nodes( + self, placeholder, nodes, considered_nodes, offset_y=None + ): + """Adjust backdrop nodes dimensions and positions. + + Considering some nodes sizes. 
+ + Args: + nodes (list): list of nodes to update + considered_nodes (list): list of nodes to consider while updating + positions and dimensions + offset (int): distance between copies + """ + + placeholder_node = nuke.toNode(placeholder.scene_identifier) + + min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes) + + diff_x = diff_y = 0 + contained_nodes = [] # for backdrops + + if offset_y is None: + width_ph = placeholder_node.screenWidth() + height_ph = placeholder_node.screenHeight() + diff_y = max_y - min_y - height_ph + diff_x = max_x - min_x - width_ph + contained_nodes = [placeholder_node] + min_x = placeholder_node.xpos() + min_y = placeholder_node.ypos() + else: + siblings = get_nodes_by_names(placeholder.data["siblings"]) + minX, _, maxX, _ = get_extreme_positions(siblings) + diff_y = max_y - min_y + 20 + diff_x = abs(max_x - min_x - maxX + minX) + contained_nodes = considered_nodes + + if diff_y <= 0 and diff_x <= 0: + return + + for node in nodes: + refresh_node(node) + + if ( + node == placeholder_node + or node in considered_nodes + ): + continue + + if ( + not isinstance(node, nuke.BackdropNode) + or ( + isinstance(node, nuke.BackdropNode) + and not set(contained_nodes) <= set(node.getNodes()) + ) + ): + if offset_y is None and node.xpos() >= min_x: + node.setXpos(node.xpos() + diff_x) + + if node.ypos() >= min_y: + node.setYpos(node.ypos() + diff_y) + + else: + width = node.screenWidth() + height = node.screenHeight() + node.knob("bdwidth").setValue(width + diff_x) + node.knob("bdheight").setValue(height + diff_y) + + refresh_node(node) + + def _set_loaded_connections(self, placeholder): + """ + set inputs and outputs of loaded nodes""" + + placeholder_node = nuke.toNode(placeholder.scene_identifier) + input_node, output_node = get_group_io_nodes( + placeholder.data["last_loaded"] + ) + for node in placeholder_node.dependent(): + for idx in range(node.inputs()): + if node.input(idx) == placeholder_node: + node.setInput(idx, output_node) + + for node in placeholder_node.dependencies(): + for idx in range(placeholder_node.inputs()): + if placeholder_node.input(idx) == node: + input_node.setInput(0, node) + + def _create_sib_copies(self, placeholder): + """ creating copies of the palce_holder siblings (the ones who were + loaded with it) for the new nodes added + + Returns : + copies (dict) : with copied nodes names and their copies + """ + + copies = {} + siblings = get_nodes_by_names(placeholder.data["siblings"]) + for node in siblings: + new_node = duplicate_node(node) + + x_init = int(new_node.knob("x_init").getValue()) + y_init = int(new_node.knob("y_init").getValue()) + new_node.setXYpos(x_init, y_init) + if isinstance(new_node, nuke.BackdropNode): + w_init = new_node.knob("w_init").getValue() + h_init = new_node.knob("h_init").getValue() + new_node.knob("bdwidth").setValue(w_init) + new_node.knob("bdheight").setValue(h_init) + refresh_node(node) + + if "repre_id" in node.knobs().keys(): + node.removeKnob(node.knob("repre_id")) + copies[node.name()] = new_node + return copies + + def _set_copies_connections(self, placeholder, copies): + """Set inputs and outputs of the copies. + + Args: + copies (dict): Copied nodes by their names. 
+ """ + + last_input, last_output = get_group_io_nodes( + placeholder.data["last_loaded"] + ) + siblings = get_nodes_by_names(placeholder.data["siblings"]) + siblings_input, siblings_output = get_group_io_nodes(siblings) + copy_input = copies[siblings_input.name()] + copy_output = copies[siblings_output.name()] + + for node_init in siblings: + if node_init == siblings_output: + continue + + node_copy = copies[node_init.name()] + for node in node_init.dependent(): + for idx in range(node.inputs()): + if node.input(idx) != node_init: + continue + + if node in siblings: + copies[node.name()].setInput(idx, node_copy) + else: + last_input.setInput(0, node_copy) + + for node in node_init.dependencies(): + for idx in range(node_init.inputs()): + if node_init.input(idx) != node: + continue + + if node_init == siblings_input: + copy_input.setInput(idx, node) + elif node in siblings: + node_copy.setInput(idx, copies[node.name()]) + else: + node_copy.setInput(idx, last_output) + + siblings_input.setInput(0, copy_output) + + +def build_workfile_template(*args): + builder = NukeTemplateBuilder(registered_host()) + builder.build_template() + + +def update_workfile_template(*args): + builder = NukeTemplateBuilder(registered_host()) + builder.rebuild_template() + + +def create_placeholder(*args): + host = registered_host() + builder = NukeTemplateBuilder(host) + window = WorkfileBuildPlaceholderDialog(host, builder) + window.exec_() + + +def update_placeholder(*args): + host = registered_host() + builder = NukeTemplateBuilder(host) + placeholder_items_by_id = { + placeholder_item.scene_identifier: placeholder_item + for placeholder_item in builder.get_placeholders() + } + placeholder_items = [] + for node in nuke.selectedNodes(): + node_name = node.fullName() + if node_name in placeholder_items_by_id: + placeholder_items.append(placeholder_items_by_id[node_name]) + + # TODO show UI at least + if len(placeholder_items) == 0: + raise ValueError("No node selected") + + if len(placeholder_items) > 1: + raise ValueError("Too many selected nodes") + + placeholder_item = placeholder_items[0] + window = WorkfileBuildPlaceholderDialog(host, builder) + window.set_update_mode(placeholder_item) + window.exec_() diff --git a/openpype/hosts/nuke/api/workio.py b/openpype/hosts/nuke/api/workio.py index 68fcb0927f..65b86bf01b 100644 --- a/openpype/hosts/nuke/api/workio.py +++ b/openpype/hosts/nuke/api/workio.py @@ -2,11 +2,9 @@ import os import nuke -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS - def file_extensions(): - return HOST_WORKFILE_EXTENSIONS["nuke"] + return [".nk"] def has_unsaved_changes(): diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/openpype/hosts/nuke/plugins/create/create_write_prerender.py index 32ee1fd86f..fec97167fb 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py +++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py @@ -27,6 +27,10 @@ class CreateWritePrerender(plugin.AbstractWriteRender): # add fpath_template write_data["fpath_template"] = self.fpath_template write_data["use_range_limit"] = self.use_range_limit + write_data["frame_range"] = ( + nuke.root()["first_frame"].value(), + nuke.root()["last_frame"].value() + ) if not self.is_legacy(): return create_write_node( diff --git a/openpype/hosts/nuke/plugins/create/create_write_still.py b/openpype/hosts/nuke/plugins/create/create_write_still.py index 4007ccf51e..bb08e8c2c6 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_still.py +++ 
b/openpype/hosts/nuke/plugins/create/create_write_still.py @@ -2,7 +2,20 @@ import nuke from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api.lib import ( - create_write_node, create_write_node_legacy) + create_write_node, + create_write_node_legacy, + get_created_node_imageio_setting_legacy +) + +# HACK: just to disable still image on projects which +# are not having anatomy imageio preset for CreateWriteStill +# TODO: remove this code as soon as it will be obsolete +imageio_writes = get_created_node_imageio_setting_legacy( + "Write", + "CreateWriteStill", + "stillMain" +) +print(imageio_writes["knobs"]) class CreateWriteStill(plugin.AbstractWriteRender): diff --git a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py index c04c939a8d..764499ff0c 100644 --- a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py +++ b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py @@ -1,4 +1,4 @@ -from openpype.api import Logger +from openpype.lib import Logger from openpype.pipeline import InventoryAction from openpype.hosts.nuke.api.lib import set_avalon_knob_data diff --git a/openpype/hosts/nuke/plugins/load/actions.py b/openpype/hosts/nuke/plugins/load/actions.py index d364a4f3a1..69f56c7305 100644 --- a/openpype/hosts/nuke/plugins/load/actions.py +++ b/openpype/hosts/nuke/plugins/load/actions.py @@ -2,10 +2,10 @@ """ -from openpype.api import Logger +from openpype.lib import Logger from openpype.pipeline import load -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) class SetFrameRangeLoader(load.LoaderPlugin): diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index 143fdf1f30..164ab6f9f4 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -1,6 +1,10 @@ import nuke import nukescripts +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) from openpype.pipeline import ( legacy_io, load, @@ -188,18 +192,17 @@ class LoadBackdropNodes(load.LoaderPlugin): # get main variables # Get version from io - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + # get corresponding node GN = nuke.toNode(container['objectName']) file = get_representation_path(representation).replace("\\", "/") - context = representation["context"] + name = container['name'] - version_data = version.get("data", {}) - vname = version.get("name", None) + version_data = version_doc.get("data", {}) + vname = version_doc.get("name", None) first = version_data.get("frameStart", None) last = version_data.get("frameEnd", None) namespace = container['namespace'] @@ -237,20 +240,18 @@ class LoadBackdropNodes(load.LoaderPlugin): GN["name"].setValue(object_name) # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) # change color of node - if version.get("name") not in [max_version]: - GN["tile_color"].setValue(int("0xd88467ff", 16)) + if version_doc["_id"] == last_version_doc["_id"]: + color_value = self.node_color else: - GN["tile_color"].setValue(int(self.node_color, 16)) + 
color_value = "0xd88467ff" + GN["tile_color"].setValue(int(color_value, 16)) - self.log.info("updated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version_doc.get("name"))) return update_container(GN, data_imprint) diff --git a/openpype/hosts/nuke/plugins/load/load_camera_abc.py b/openpype/hosts/nuke/plugins/load/load_camera_abc.py index 964ca5ec90..9fef7424c8 100644 --- a/openpype/hosts/nuke/plugins/load/load_camera_abc.py +++ b/openpype/hosts/nuke/plugins/load/load_camera_abc.py @@ -1,5 +1,9 @@ import nuke +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id +) from openpype.pipeline import ( legacy_io, load, @@ -61,6 +65,9 @@ class AlembicCameraLoader(load.LoaderPlugin): object_name, file), inpanel=False ) + # hide property panel + camera_node.hideControlPanel() + camera_node.forceValidate() camera_node["frame_rate"].setValue(float(fps)) @@ -102,17 +109,16 @@ class AlembicCameraLoader(load.LoaderPlugin): None """ # Get version from io - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + object_name = container['objectName'] # get corresponding node camera_node = nuke.toNode(object_name) # get main variables - version_data = version.get("data", {}) - vname = version.get("name", None) + version_data = version_doc.get("data", {}) + vname = version_doc.get("name", None) first = version_data.get("frameStart", None) last = version_data.get("frameEnd", None) fps = version_data.get("fps") or nuke.root()["fps"].getValue() @@ -165,28 +171,27 @@ class AlembicCameraLoader(load.LoaderPlugin): d.setInput(index, camera_node) # color node by correct color by actual version - self.node_version_color(version, camera_node) + self.node_version_color(version_doc, camera_node) - self.log.info("updated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version_doc.get("name"))) return update_container(camera_node, data_imprint) - def node_version_color(self, version, node): + def node_version_color(self, version_doc, node): """ Coloring a node by correct color by actual version """ # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) + project_name = legacy_io.active_project() + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) # change color of node - if version.get("name") not in [max_version]: - node["tile_color"].setValue(int("0xd88467ff", 16)) + if version_doc["_id"] == last_version_doc["_id"]: + color_value = self.node_color else: - node["tile_color"].setValue(int(self.node_color, 16)) + color_value = "0xd88467ff" + node["tile_color"].setValue(int(color_value, 16)) def switch(self, container, representation): self.update(container, representation) diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py index 681561e303..565d777811 100644 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ b/openpype/hosts/nuke/plugins/load/load_clip.py @@ -1,6 +1,12 @@ import nuke import qargparse - +from pprint import pformat +from copy import deepcopy +from openpype.lib import Logger +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) from openpype.pipeline import ( legacy_io, 
get_representation_path, @@ -23,6 +29,7 @@ class LoadClip(plugin.NukeLoader): Either it is image sequence or video file. """ + log = Logger.get_logger(__name__) families = [ "source", @@ -50,20 +57,28 @@ class LoadClip(plugin.NukeLoader): script_start = int(nuke.root()["first_frame"].value()) # option gui - defaults = { - "start_at_workfile": True + options_defaults = { + "start_at_workfile": True, + "add_retime": True } - options = [ - qargparse.Boolean( - "start_at_workfile", - help="Load at workfile start frame", - default=True - ) - ] - node_name_template = "{class_name}_{ext}" + @classmethod + def get_options(cls, *args): + return [ + qargparse.Boolean( + "start_at_workfile", + help="Load at workfile start frame", + default=cls.options_defaults["start_at_workfile"] + ), + qargparse.Boolean( + "add_retime", + help="Load with retime", + default=cls.options_defaults["add_retime"] + ) + ] + @classmethod def get_representations(cls): return ( @@ -73,24 +88,31 @@ class LoadClip(plugin.NukeLoader): ) def load(self, context, name, namespace, options): - repre = context["representation"] + representation = context["representation"] # reste container id so it is always unique for each instance self.reset_container_id() - is_sequence = len(repre["files"]) > 1 + is_sequence = len(representation["files"]) > 1 - file = self.fname.replace("\\", "/") + if is_sequence: + representation = self._representation_with_hash_in_frame( + representation + ) + filepath = get_representation_path(representation).replace("\\", "/") + self.log.debug("_ filepath: {}".format(filepath)) start_at_workfile = options.get( - "start_at_workfile", self.defaults["start_at_workfile"]) + "start_at_workfile", self.options_defaults["start_at_workfile"]) + + add_retime = options.get( + "add_retime", self.options_defaults["add_retime"]) version = context['version'] version_data = version.get("data", {}) - repre_id = repre["_id"] + repre_id = representation["_id"] - repre_cont = repre["context"] - - self.log.info("version_data: {}\n".format(version_data)) + self.log.debug("_ version_data: {}\n".format( + pformat(version_data))) self.log.debug( "Representation id `{}` ".format(repre_id)) @@ -106,36 +128,33 @@ class LoadClip(plugin.NukeLoader): duration = last - first first = 1 last = first + duration - elif "#" not in file: - frame = repre_cont.get("frame") - assert frame, "Representation is not sequence" - - padding = len(frame) - file = file.replace(frame, "#" * padding) # Fallback to asset name when namespace is None if namespace is None: namespace = context['asset']['name'] - if not file: + if not filepath: self.log.warning( "Representation id `{}` is failing to load".format(repre_id)) return - read_name = self._get_node_name(repre) + read_name = self._get_node_name(representation) # Create the Loader with the filename path set read_node = nuke.createNode( "Read", "name {}".format(read_name)) + # hide property panel + read_node.hideControlPanel() + # to avoid multiple undo steps for rest of process # we will switch off undo-ing with viewer_update_and_undo_stop(): - read_node["file"].setValue(file) + read_node["file"].setValue(filepath) used_colorspace = self._set_colorspace( - read_node, version_data, repre["data"]) + read_node, version_data, representation["data"]) self._set_range_to_node(read_node, first, last, start_at_workfile) @@ -147,18 +166,29 @@ class LoadClip(plugin.NukeLoader): data_imprint = {} for k in add_keys: if k == 'version': - data_imprint.update({k: context["version"]['name']}) + version_doc = 
context["version"] + if version_doc["type"] == "hero_version": + version = "hero" + else: + version = version_doc.get("name") + + if version: + data_imprint[k] = version + elif k == 'colorspace': - colorspace = repre["data"].get(k) + colorspace = representation["data"].get(k) colorspace = colorspace or version_data.get(k) data_imprint["db_colorspace"] = colorspace if used_colorspace: data_imprint["used_colorspace"] = used_colorspace else: - data_imprint.update( - {k: context["version"]['data'].get(k, str(None))}) + data_imprint[k] = context["version"]['data'].get( + k, str(None)) - data_imprint.update({"objectName": read_name}) + data_imprint["objectName"] = read_name + + if add_retime and version_data.get("retime", None): + data_imprint["addRetime"] = True read_node["tile_color"].setValue(int("0x4ecd25ff", 16)) @@ -170,7 +200,7 @@ class LoadClip(plugin.NukeLoader): loader=self.__class__.__name__, data=data_imprint) - if version_data.get("retime", None): + if add_retime and version_data.get("retime", None): self._make_retimes(read_node, version_data) self.set_as_member(read_node) @@ -180,6 +210,20 @@ class LoadClip(plugin.NukeLoader): def switch(self, container, representation): self.update(container, representation) + def _representation_with_hash_in_frame(self, representation): + """Convert frame key value to padded hash + + Args: + representation (dict): representation data + + Returns: + dict: altered representation data + """ + representation = deepcopy(representation) + frame = representation["context"]["frame"] + representation["context"]["frame"] = "#" * len(str(frame)) + return representation + def update(self, container, representation): """Update the Loader's path @@ -192,19 +236,27 @@ class LoadClip(plugin.NukeLoader): is_sequence = len(representation["files"]) > 1 read_node = nuke.toNode(container['objectName']) - file = get_representation_path(representation).replace("\\", "/") - start_at_workfile = bool("start at" in read_node['frame_mode'].value()) + if is_sequence: + representation = self._representation_with_hash_in_frame( + representation + ) + filepath = get_representation_path(representation).replace("\\", "/") + self.log.debug("_ filepath: {}".format(filepath)) - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) - version_data = version.get("data", {}) + start_at_workfile = "start at" in read_node['frame_mode'].value() + + add_retime = [ + key for key in read_node.knobs().keys() + if "addRetime" in key + ] + + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + + version_data = version_doc.get("data", {}) repre_id = representation["_id"] - repre_cont = representation["context"] - # colorspace profile colorspace = representation["data"].get("colorspace") colorspace = colorspace or version_data.get("colorspace") @@ -221,14 +273,8 @@ class LoadClip(plugin.NukeLoader): duration = last - first first = 1 last = first + duration - elif "#" not in file: - frame = repre_cont.get("frame") - assert frame, "Representation is not sequence" - padding = len(frame) - file = file.replace(frame, "#" * padding) - - if not file: + if not filepath: self.log.warning( "Representation id `{}` is failing to load".format(repre_id)) return @@ -236,14 +282,14 @@ class LoadClip(plugin.NukeLoader): read_name = self._get_node_name(representation) read_node["name"].setValue(read_name) - read_node["file"].setValue(file) + read_node["file"].setValue(filepath) # to avoid multiple undo steps for 
rest of process # we will switch off undo-ing with viewer_update_and_undo_stop(): used_colorspace = self._set_colorspace( read_node, version_data, representation["data"], - path=file) + path=filepath) self._set_range_to_node(read_node, first, last, start_at_workfile) @@ -251,7 +297,7 @@ class LoadClip(plugin.NukeLoader): "representation": str(representation["_id"]), "frameStart": str(first), "frameEnd": str(last), - "version": str(version.get("name")), + "version": str(version_doc.get("name")), "db_colorspace": colorspace, "source": version_data.get("source"), "handleStart": str(self.handle_start), @@ -264,28 +310,26 @@ class LoadClip(plugin.NukeLoader): if used_colorspace: updated_dict["used_colorspace"] = used_colorspace + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) # change color of read_node - # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) - - if version.get("name") not in [max_version]: - read_node["tile_color"].setValue(int("0xd84f20ff", 16)) + if version_doc["_id"] == last_version_doc["_id"]: + color_value = "0x4ecd25ff" else: - read_node["tile_color"].setValue(int("0x4ecd25ff", 16)) + color_value = "0xd84f20ff" + read_node["tile_color"].setValue(int(color_value, 16)) # Update the imprinted representation update_container( read_node, updated_dict ) - self.log.info("updated to version: {}".format(version.get("name"))) + self.log.info( + "updated to version: {}".format(version_doc.get("name")) + ) - if version_data.get("retime", None): + if add_retime and version_data.get("retime", None): self._make_retimes(read_node, version_data) else: self.clear_members(read_node) @@ -317,8 +361,10 @@ class LoadClip(plugin.NukeLoader): time_warp_nodes = version_data.get('timewarps', []) last_node = None source_id = self.get_container_id(parent_node) - self.log.info("__ source_id: {}".format(source_id)) - self.log.info("__ members: {}".format(self.get_members(parent_node))) + self.log.debug("__ source_id: {}".format(source_id)) + self.log.debug("__ members: {}".format( + self.get_members(parent_node))) + dependent_nodes = self.clear_members(parent_node) with maintained_selection(): @@ -397,7 +443,7 @@ class LoadClip(plugin.NukeLoader): colorspace = repre_data.get("colorspace") colorspace = colorspace or version_data.get("colorspace") - # colorspace from `project_anatomy/imageio/nuke/regexInputs` + # colorspace from `project_settings/nuke/imageio/regexInputs` iio_colorspace = get_imageio_input_colorspace(path) # Set colorspace defined in version data diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py index 6a30330ed0..cef4b0a5fc 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects.py +++ b/openpype/hosts/nuke/plugins/load/load_effects.py @@ -3,6 +3,10 @@ from collections import OrderedDict import nuke import six +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) from openpype.pipeline import ( legacy_io, load, @@ -85,6 +89,9 @@ class LoadEffects(load.LoaderPlugin): "Group", "name {}_1".format(object_name)) + # hide property panel + GN.hideControlPanel() + # adding content to the group node with GN: pre_node = nuke.createNode("Input") @@ -148,17 +155,16 @@ class LoadEffects(load.LoaderPlugin): """ # get main variables # Get version from io - version = legacy_io.find_one({ - "type": "version", - "_id": 
representation["parent"] - }) + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + # get corresponding node GN = nuke.toNode(container['objectName']) file = get_representation_path(representation).replace("\\", "/") name = container['name'] - version_data = version.get("data", {}) - vname = version.get("name", None) + version_data = version_doc.get("data", {}) + vname = version_doc.get("name", None) first = version_data.get("frameStart", None) last = version_data.get("frameEnd", None) workfile_first_frame = int(nuke.root()["first_frame"].getValue()) @@ -243,21 +249,19 @@ class LoadEffects(load.LoaderPlugin): # try to find parent read node self.connect_read_node(GN, namespace, json_f["assignTo"]) - # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) # change color of node - if version.get("name") not in [max_version]: - GN["tile_color"].setValue(int("0xd84f20ff", 16)) + if version_doc["_id"] == last_version_doc["_id"]: + color_value = "0x3469ffff" else: - GN["tile_color"].setValue(int("0x3469ffff", 16)) + color_value = "0xd84f20ff" - self.log.info("updated to version: {}".format(version.get("name"))) + GN["tile_color"].setValue(int(color_value, 16)) + + self.log.info("updated to version: {}".format(version_doc.get("name"))) def connect_read_node(self, group_node, asset, subset): """ diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py index eaf151b3b8..9bd40be816 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py @@ -3,6 +3,10 @@ from collections import OrderedDict import six import nuke +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) from openpype.pipeline import ( legacy_io, load, @@ -86,6 +90,9 @@ class LoadEffectsInputProcess(load.LoaderPlugin): "Group", "name {}_1".format(object_name)) + # hide property panel + GN.hideControlPanel() + # adding content to the group node with GN: pre_node = nuke.createNode("Input") @@ -153,17 +160,16 @@ class LoadEffectsInputProcess(load.LoaderPlugin): # get main variables # Get version from io - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + # get corresponding node GN = nuke.toNode(container['objectName']) file = get_representation_path(representation).replace("\\", "/") name = container['name'] - version_data = version.get("data", {}) - vname = version.get("name", None) + version_data = version_doc.get("data", {}) + vname = version_doc.get("name", None) first = version_data.get("frameStart", None) last = version_data.get("frameEnd", None) workfile_first_frame = int(nuke.root()["first_frame"].getValue()) @@ -251,20 +257,18 @@ class LoadEffectsInputProcess(load.LoaderPlugin): # return # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) # change color of node - if version.get("name") not in [max_version]: - 
GN["tile_color"].setValue(int("0xd84f20ff", 16)) + if version_doc["_id"] == last_version_doc["_id"]: + color_value = "0x3469ffff" else: - GN["tile_color"].setValue(int("0x3469ffff", 16)) + color_value = "0xd84f20ff" + GN["tile_color"].setValue(int(color_value, 16)) - self.log.info("updated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version_doc.get("name"))) def connect_active_viewer(self, group_node): """ diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo.py b/openpype/hosts/nuke/plugins/load/load_gizmo.py index 4ea9d64d7d..9a18eeef5c 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo.py @@ -1,5 +1,9 @@ import nuke +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) from openpype.pipeline import ( legacy_io, load, @@ -101,17 +105,16 @@ class LoadGizmo(load.LoaderPlugin): # get main variables # Get version from io - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + # get corresponding node GN = nuke.toNode(container['objectName']) file = get_representation_path(representation).replace("\\", "/") name = container['name'] - version_data = version.get("data", {}) - vname = version.get("name", None) + version_data = version_doc.get("data", {}) + vname = version_doc.get("name", None) first = version_data.get("frameStart", None) last = version_data.get("frameEnd", None) namespace = container['namespace'] @@ -148,21 +151,18 @@ class LoadGizmo(load.LoaderPlugin): GN.setXYpos(xpos, ypos) GN["name"].setValue(object_name) - # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) # change color of node - if version.get("name") not in [max_version]: - GN["tile_color"].setValue(int("0xd88467ff", 16)) + if version_doc["_id"] == last_version_doc["_id"]: + color_value = self.node_color else: - GN["tile_color"].setValue(int(self.node_color, 16)) + color_value = "0xd88467ff" + GN["tile_color"].setValue(int(color_value, 16)) - self.log.info("updated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version_doc.get("name"))) return update_container(GN, data_imprint) diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py index 38dd70935e..2890dbfd2c 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -1,6 +1,10 @@ import nuke import six +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) from openpype.pipeline import ( legacy_io, load, @@ -108,17 +112,16 @@ class LoadGizmoInputProcess(load.LoaderPlugin): # get main variables # Get version from io - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + # get corresponding node GN = nuke.toNode(container['objectName']) file = get_representation_path(representation).replace("\\", "/") name = container['name'] - version_data = version.get("data", {}) - vname = version.get("name", None) + 
version_data = version_doc.get("data", {}) + vname = version_doc.get("name", None) first = version_data.get("frameStart", None) last = version_data.get("frameEnd", None) namespace = container['namespace'] @@ -155,21 +158,18 @@ class LoadGizmoInputProcess(load.LoaderPlugin): GN.setXYpos(xpos, ypos) GN["name"].setValue(object_name) - # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) # change color of node - if version.get("name") not in [max_version]: - GN["tile_color"].setValue(int("0xd88467ff", 16)) + if version_doc["_id"] == last_version_doc["_id"]: + color_value = self.node_color else: - GN["tile_color"].setValue(int(self.node_color, 16)) + color_value = "0xd88467ff" + GN["tile_color"].setValue(int(color_value, 16)) - self.log.info("updated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version_doc.get("name"))) return update_container(GN, data_imprint) diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py index 6df286a4f7..49dc12f588 100644 --- a/openpype/hosts/nuke/plugins/load/load_image.py +++ b/openpype/hosts/nuke/plugins/load/load_image.py @@ -2,6 +2,10 @@ import nuke import qargparse +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) from openpype.pipeline import ( legacy_io, load, @@ -58,7 +62,9 @@ class LoadImage(load.LoaderPlugin): def load(self, context, name, namespace, options): self.log.info("__ options: `{}`".format(options)) - frame_number = options.get("frame_number", 1) + frame_number = options.get( + "frame_number", int(nuke.root()["first_frame"].getValue()) + ) version = context['version'] version_data = version.get("data", {}) @@ -108,6 +114,10 @@ class LoadImage(load.LoaderPlugin): r = nuke.createNode( "Read", "name {}".format(read_name)) + + # hide property panel + r.hideControlPanel() + r["file"].setValue(file) # Set colorspace defined in version data @@ -186,20 +196,13 @@ class LoadImage(load.LoaderPlugin): format(frame_number, "0{}".format(padding))) # Get start frame from version data - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) - # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) - - version_data = version.get("data", {}) + version_data = version_doc.get("data", {}) last = first = int(frame_number) @@ -215,7 +218,7 @@ class LoadImage(load.LoaderPlugin): "representation": str(representation["_id"]), "frameStart": str(first), "frameEnd": str(last), - "version": str(version.get("name")), + "version": str(version_doc.get("name")), "colorspace": version_data.get("colorspace"), "source": version_data.get("source"), "fps": str(version_data.get("fps")), @@ -223,17 +226,18 @@ class LoadImage(load.LoaderPlugin): }) # change color of node - if version.get("name") not in [max_version]: - node["tile_color"].setValue(int("0xd84f20ff", 16)) + if version_doc["_id"] == last_version_doc["_id"]: + color_value = "0x4ecd25ff" else: - 
node["tile_color"].setValue(int("0x4ecd25ff", 16)) + color_value = "0xd84f20ff" + node["tile_color"].setValue(int(color_value, 16)) # Update the imprinted representation update_container( node, updated_dict ) - self.log.info("updated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version_doc.get("name"))) def remove(self, container): node = nuke.toNode(container['objectName']) diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py index 9788bb25d2..ad985e83c6 100644 --- a/openpype/hosts/nuke/plugins/load/load_model.py +++ b/openpype/hosts/nuke/plugins/load/load_model.py @@ -1,5 +1,9 @@ import nuke +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) from openpype.pipeline import ( legacy_io, load, @@ -15,13 +19,13 @@ from openpype.hosts.nuke.api import ( class AlembicModelLoader(load.LoaderPlugin): """ - This will load alembic model into script. + This will load alembic model or anim into script. """ - families = ["model"] + families = ["model", "pointcache", "animation"] representations = ["abc"] - label = "Load Alembic Model" + label = "Load Alembic" icon = "cube" color = "orange" node_color = "0x4ecd91ff" @@ -59,7 +63,17 @@ class AlembicModelLoader(load.LoaderPlugin): object_name, file), inpanel=False ) + + # hide property panel + model_node.hideControlPanel() + model_node.forceValidate() + + # Ensure all items are imported and selected. + scene_view = model_node.knob('scene_view') + scene_view.setImportedItems(scene_view.getAllItems()) + scene_view.setSelectedItems(scene_view.getAllItems()) + model_node["frame_rate"].setValue(float(fps)) # workaround because nuke's bug is not adding @@ -100,17 +114,15 @@ class AlembicModelLoader(load.LoaderPlugin): None """ # Get version from io - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) object_name = container['objectName'] # get corresponding node model_node = nuke.toNode(object_name) # get main variables - version_data = version.get("data", {}) - vname = version.get("name", None) + version_data = version_doc.get("data", {}) + vname = version_doc.get("name", None) first = version_data.get("frameStart", None) last = version_data.get("frameEnd", None) fps = version_data.get("fps") or nuke.root()["fps"].getValue() @@ -142,6 +154,11 @@ class AlembicModelLoader(load.LoaderPlugin): model_node["frame_rate"].setValue(float(fps)) model_node["file"].setValue(file) + # Ensure all items are imported and selected. 
+ scene_view = model_node.knob('scene_view') + scene_view.setImportedItems(scene_view.getAllItems()) + scene_view.setSelectedItems(scene_view.getAllItems()) + # workaround because nuke's bug is # not adding animation keys properly xpos = model_node.xpos() @@ -163,28 +180,26 @@ class AlembicModelLoader(load.LoaderPlugin): d.setInput(index, model_node) # color node by correct color by actual version - self.node_version_color(version, model_node) + self.node_version_color(version_doc, model_node) - self.log.info("updated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version_doc.get("name"))) return update_container(model_node, data_imprint) def node_version_color(self, version, node): - """ Coloring a node by correct color by actual version - """ - # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') + """ Coloring a node by correct color by actual version""" - max_version = max(versions) + project_name = legacy_io.active_project() + last_version_doc = get_last_version_by_subset_id( + project_name, version["parent"], fields=["_id"] + ) # change color of node - if version.get("name") not in [max_version]: - node["tile_color"].setValue(int("0xd88467ff", 16)) + if version["_id"] == last_version_doc["_id"]: + color_value = self.node_color else: - node["tile_color"].setValue(int(self.node_color, 16)) + color_value = "0xd88467ff" + node["tile_color"].setValue(int(color_value, 16)) def switch(self, container, representation): self.update(container, representation) diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py index bd351ad785..f0972f85d2 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py @@ -1,5 +1,9 @@ import nuke +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) from openpype.pipeline import ( legacy_io, load, @@ -67,6 +71,9 @@ class LinkAsGroup(load.LoaderPlugin): "Precomp", "file {}".format(file)) + # hide property panel + P.hideControlPanel() + # Set colorspace defined in version data colorspace = context["version"]["data"].get("colorspace", None) self.log.info("colorspace: {}\n".format(colorspace)) @@ -116,29 +123,23 @@ class LinkAsGroup(load.LoaderPlugin): root = get_representation_path(representation).replace("\\", "/") # Get start frame from version data - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) - - # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) + project_name = legacy_io.active_project() + version_doc = get_version_by_id(project_name, representation["parent"]) + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) updated_dict = {} + version_data = version_doc["data"] updated_dict.update({ "representation": str(representation["_id"]), - "frameEnd": version["data"].get("frameEnd"), - "version": version.get("name"), - "colorspace": version["data"].get("colorspace"), - "source": version["data"].get("source"), - "handles": version["data"].get("handles"), - "fps": version["data"].get("fps"), - "author": version["data"].get("author") + "frameEnd": version_data.get("frameEnd"), + "version": version_doc.get("name"), + "colorspace": version_data.get("colorspace"), 
+ "source": version_data.get("source"), + "handles": version_data.get("handles"), + "fps": version_data.get("fps"), + "author": version_data.get("author") }) # Update the imprinted representation @@ -150,12 +151,13 @@ class LinkAsGroup(load.LoaderPlugin): node["file"].setValue(root) # change color of node - if version.get("name") not in [max_version]: - node["tile_color"].setValue(int("0xd84f20ff", 16)) + if version_doc["_id"] == last_version_doc["_id"]: + color_value = "0xff0ff0ff" else: - node["tile_color"].setValue(int("0xff0ff0ff", 16)) + color_value = "0xd84f20ff" + node["tile_color"].setValue(int(color_value, 16)) - self.log.info("updated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version_doc.get("name"))) def remove(self, container): node = nuke.toNode(container['objectName']) diff --git a/openpype/hosts/nuke/plugins/publish/collect_reads.py b/openpype/hosts/nuke/plugins/publish/collect_reads.py index 4d6944f523..b79d9646d5 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_reads.py +++ b/openpype/hosts/nuke/plugins/publish/collect_reads.py @@ -3,6 +3,7 @@ import re import nuke import pyblish.api +from openpype.client import get_asset_by_name from openpype.pipeline import legacy_io @@ -16,12 +17,11 @@ class CollectNukeReads(pyblish.api.InstancePlugin): families = ["source"] def process(self, instance): - asset_data = legacy_io.find_one({ - "type": "asset", - "name": legacy_io.Session["AVALON_ASSET"] - }) + project_name = legacy_io.active_project() + asset_name = legacy_io.Session["AVALON_ASSET"] + asset_doc = get_asset_by_name(project_name, asset_name) - self.log.debug("asset_data: {}".format(asset_data["data"])) + self.log.debug("asset_doc: {}".format(asset_doc["data"])) self.log.debug("checking instance: {}".format(instance)) @@ -127,7 +127,7 @@ class CollectNukeReads(pyblish.api.InstancePlugin): "frameStart": first_frame, "frameEnd": last_frame, "colorspace": colorspace, - "handles": int(asset_data["data"].get("handles", 0)), + "handles": int(asset_doc["data"].get("handles", 0)), "step": 1, "fps": int(nuke.root()['fps'].value()) }) diff --git a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py index 4257ed3131..bfe32d8fd1 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py +++ b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py @@ -33,6 +33,7 @@ class CollectSlate(pyblish.api.InstancePlugin): if slate_node: instance.data["slateNode"] = slate_node + instance.data["slate"] = True instance.data["families"].append("slate") instance.data["versionData"]["families"].append("slate") self.log.info( diff --git a/openpype/hosts/nuke/plugins/publish/extract_backdrop.py b/openpype/hosts/nuke/plugins/publish/extract_backdrop.py index 0a2df0898e..d1e5c4cc5a 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_backdrop.py +++ b/openpype/hosts/nuke/plugins/publish/extract_backdrop.py @@ -4,7 +4,7 @@ import nuke import pyblish.api -import openpype +from openpype.pipeline import publish from openpype.hosts.nuke.api.lib import ( maintained_selection, reset_selection, @@ -12,7 +12,7 @@ from openpype.hosts.nuke.api.lib import ( ) -class ExtractBackdropNode(openpype.api.Extractor): +class ExtractBackdropNode(publish.Extractor): """Extracting content of backdrop nodes Will create nuke script only with containing nodes. 
diff --git a/openpype/hosts/nuke/plugins/publish/extract_camera.py b/openpype/hosts/nuke/plugins/publish/extract_camera.py index 54f65a0be3..b751bfab03 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_camera.py +++ b/openpype/hosts/nuke/plugins/publish/extract_camera.py @@ -5,11 +5,12 @@ from pprint import pformat import nuke import pyblish.api -import openpype.api + +from openpype.pipeline import publish from openpype.hosts.nuke.api.lib import maintained_selection -class ExtractCamera(openpype.api.Extractor): +class ExtractCamera(publish.Extractor): """ 3D camera exctractor """ label = 'Exctract Camera' diff --git a/openpype/hosts/nuke/plugins/publish/extract_gizmo.py b/openpype/hosts/nuke/plugins/publish/extract_gizmo.py index 2d5bfdeb5e..3047ad6724 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_gizmo.py +++ b/openpype/hosts/nuke/plugins/publish/extract_gizmo.py @@ -3,7 +3,7 @@ import nuke import pyblish.api -import openpype +from openpype.pipeline import publish from openpype.hosts.nuke.api import utils as pnutils from openpype.hosts.nuke.api.lib import ( maintained_selection, @@ -12,7 +12,7 @@ from openpype.hosts.nuke.api.lib import ( ) -class ExtractGizmo(openpype.api.Extractor): +class ExtractGizmo(publish.Extractor): """Extracting Gizmo (Group) node Will create nuke script only with the Gizmo node. diff --git a/openpype/hosts/nuke/plugins/publish/extract_model.py b/openpype/hosts/nuke/plugins/publish/extract_model.py index 0375263338..d82cb3110b 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_model.py +++ b/openpype/hosts/nuke/plugins/publish/extract_model.py @@ -2,14 +2,15 @@ import os from pprint import pformat import nuke import pyblish.api -import openpype.api + +from openpype.pipeline import publish from openpype.hosts.nuke.api.lib import ( maintained_selection, select_nodes ) -class ExtractModel(openpype.api.Extractor): +class ExtractModel(publish.Extractor): """ 3D model exctractor """ label = 'Exctract Model' diff --git a/openpype/hosts/nuke/plugins/publish/extract_render_local.py b/openpype/hosts/nuke/plugins/publish/extract_render_local.py index 50a5d01483..843d588786 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_render_local.py +++ b/openpype/hosts/nuke/plugins/publish/extract_render_local.py @@ -1,13 +1,15 @@ -import pyblish.api -import nuke import os -import openpype + +import pyblish.api import clique +import nuke + +from openpype.pipeline import publish -class NukeRenderLocal(openpype.api.Extractor): +class NukeRenderLocal(publish.Extractor): # TODO: rewrite docstring to nuke - """Render the current Fusion composition locally. + """Render the current Nuke composition locally. Extract the result of savers by starting a comp render This will run the local render of Fusion. @@ -31,10 +33,6 @@ class NukeRenderLocal(openpype.api.Extractor): first_frame = instance.data.get("frameStartHandle", None) - # exception for slate workflow - if "slate" in families: - first_frame -= 1 - last_frame = instance.data.get("frameEndHandle", None) node_subset_name = instance.data.get("name", None) @@ -42,12 +40,22 @@ class NukeRenderLocal(openpype.api.Extractor): self.log.info("Start frame: {}".format(first_frame)) self.log.info("End frame: {}".format(last_frame)) - # write node url might contain nuke's ctl expressin - # as [python ...]/path... 
- path = node["file"].evaluate() + node_file = node["file"] + # Collecte expected filepaths for each frame + # - for cases that output is still image is first created set of + # paths which is then sorted and converted to list + expected_paths = list(sorted({ + node_file.evaluate(frame) + for frame in range(first_frame, last_frame + 1) + })) + # Extract only filenames for representation + filenames = [ + os.path.basename(filepath) + for filepath in expected_paths + ] # Ensure output directory exists. - out_dir = os.path.dirname(path) + out_dir = os.path.dirname(expected_paths[0]) if not os.path.exists(out_dir): os.makedirs(out_dir) @@ -58,30 +66,28 @@ class NukeRenderLocal(openpype.api.Extractor): int(last_frame) ) - # exception for slate workflow - if "slate" in families: - first_frame += 1 - ext = node["file_type"].value() if "representations" not in instance.data: instance.data["representations"] = [] - collected_frames = os.listdir(out_dir) - if len(collected_frames) == 1: + if len(filenames) == 1: repre = { 'name': ext, 'ext': ext, - 'files': collected_frames.pop(), + 'files': filenames[0], "stagingDir": out_dir } else: repre = { 'name': ext, 'ext': ext, - 'frameStart': "%0{}d".format( - len(str(last_frame))) % first_frame, - 'files': collected_frames, + 'frameStart': ( + "{{:0>{}}}" + .format(len(str(last_frame))) + .format(first_frame) + ), + 'files': filenames, "stagingDir": out_dir } instance.data["representations"].append(repre) @@ -96,16 +102,19 @@ class NukeRenderLocal(openpype.api.Extractor): instance.data['family'] = 'render' families.remove('render.local') families.insert(0, "render2d") + instance.data["anatomyData"]["family"] = "render" elif "prerender.local" in families: instance.data['family'] = 'prerender' families.remove('prerender.local') families.insert(0, "prerender") + instance.data["anatomyData"]["family"] = "prerender" elif "still.local" in families: instance.data['family'] = 'image' families.remove('still.local') + instance.data["anatomyData"]["family"] = "image" instance.data["families"] = families - collections, remainder = clique.assemble(collected_frames) + collections, remainder = clique.assemble(filenames) self.log.info('collections: {}'.format(str(collections))) if collections: @@ -114,4 +123,4 @@ class NukeRenderLocal(openpype.api.Extractor): self.log.info('Finished render') - self.log.debug("instance extracted: {}".format(instance.data)) + self.log.debug("_ instance.data: {}".format(instance.data)) diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data.py b/openpype/hosts/nuke/plugins/publish/extract_review_data.py index 38a8140cff..3c85b21b08 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data.py @@ -1,10 +1,11 @@ import os -import pyblish.api -import openpype from pprint import pformat +import pyblish.api + +from openpype.pipeline import publish -class ExtractReviewData(openpype.api.Extractor): +class ExtractReviewData(publish.Extractor): """Extracts review tag into available representation """ diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py index 4cf2fd7d9f..67779e9599 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py @@ -1,11 +1,12 @@ import os import pyblish.api -import openpype + +from openpype.pipeline import publish from openpype.hosts.nuke.api import plugin from 
openpype.hosts.nuke.api.lib import maintained_selection -class ExtractReviewDataLut(openpype.api.Extractor): +class ExtractReviewDataLut(publish.Extractor): """Extracts movie and thumbnail with baked in luts must be run after extract_render_local.py diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py index 2a79d600ba..3fcfc2a4b5 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py @@ -1,12 +1,14 @@ import os import re +from pprint import pformat import pyblish.api -import openpype + +from openpype.pipeline import publish from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api.lib import maintained_selection -class ExtractReviewDataMov(openpype.api.Extractor): +class ExtractReviewDataMov(publish.Extractor): """Extracts movie and thumbnail with baked in luts must be run after extract_render_local.py @@ -50,6 +52,8 @@ class ExtractReviewDataMov(openpype.api.Extractor): with maintained_selection(): generated_repres = [] for o_name, o_data in self.outputs.items(): + self.log.debug( + "o_name: {}, o_data: {}".format(o_name, pformat(o_data))) f_families = o_data["filter"]["families"] f_task_types = o_data["filter"]["task_types"] f_subsets = o_data["filter"]["subsets"] @@ -88,14 +92,23 @@ class ExtractReviewDataMov(openpype.api.Extractor): # check if settings have more then one preset # so we dont need to add outputName to representation # in case there is only one preset - multiple_presets = bool(len(self.outputs.keys()) > 1) + multiple_presets = len(self.outputs.keys()) > 1 + + # adding bake presets to instance data for other plugins + if not instance.data.get("bakePresets"): + instance.data["bakePresets"] = {} + # add preset to bakePresets + instance.data["bakePresets"][o_name] = o_data # create exporter instance exporter = plugin.ExporterReviewMov( self, instance, o_name, o_data["extension"], multiple_presets) - if "render.farm" in families: + if ( + "render.farm" in families or + "prerender.farm" in families + ): if "review" in instance.data["families"]: instance.data["families"].remove("review") diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py index fb52fc18b4..e7197b4fa8 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py +++ b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py @@ -1,28 +1,33 @@ import os +from pprint import pformat import nuke import copy import pyblish.api +import six -import openpype -from openpype.hosts.nuke.api.lib import maintained_selection +from openpype.pipeline import publish +from openpype.hosts.nuke.api import ( + maintained_selection, + duplicate_node, + get_view_process_node +) -class ExtractSlateFrame(openpype.api.Extractor): +class ExtractSlateFrame(publish.Extractor): """Extracts movie and thumbnail with baked in luts must be run after extract_render_local.py """ - order = pyblish.api.ExtractorOrder - 0.001 + order = pyblish.api.ExtractorOrder + 0.011 label = "Extract Slate Frame" families = ["slate"] hosts = ["nuke"] # Settings values - # - can be extended by other attributes from node in the future key_value_mapping = { "f_submission_note": [True, "{comment}"], "f_submitting_for": [True, "{intent[value]}"], @@ -30,44 +35,107 @@ class ExtractSlateFrame(openpype.api.Extractor): } def process(self, instance): - if hasattr(self, "viewer_lut_raw"): - 
self.viewer_lut_raw = self.viewer_lut_raw - else: - self.viewer_lut_raw = False + + if "representations" not in instance.data: + instance.data["representations"] = [] + + self._create_staging_dir(instance) with maintained_selection(): self.log.debug("instance: {}".format(instance)) self.log.debug("instance.data[families]: {}".format( instance.data["families"])) - self.render_slate(instance) + if instance.data.get("bakePresets"): + for o_name, o_data in instance.data["bakePresets"].items(): + self.log.info("_ o_name: {}, o_data: {}".format( + o_name, pformat(o_data))) + self.render_slate( + instance, + o_name, + o_data["bake_viewer_process"], + o_data["bake_viewer_input_process"] + ) + else: + # backward compatibility + self.render_slate(instance) + + # also render image to sequence + self._render_slate_to_sequence(instance) + + def _create_staging_dir(self, instance): - def render_slate(self, instance): - node_subset_name = instance.data.get("name", None) - node = instance[0] # group node self.log.info("Creating staging dir...") - if "representations" not in instance.data: - instance.data["representations"] = list() - staging_dir = os.path.normpath( - os.path.dirname(instance.data['path'])) + os.path.dirname(instance.data["path"])) instance.data["stagingDir"] = staging_dir self.log.info( "StagingDir `{0}`...".format(instance.data["stagingDir"])) - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - handle_start = instance.data["handleStart"] - handle_end = instance.data["handleEnd"] + def _check_frames_exists(self, instance): + # rendering path from group write node + fpath = instance.data["path"] - frame_length = int( - (frame_end - frame_start + 1) + (handle_start + handle_end) - ) + # instance frame range with handles + first = instance.data["frameStartHandle"] + last = instance.data["frameEndHandle"] + + padding = fpath.count('#') + + test_path_template = fpath + if padding: + repl_string = "#" * padding + test_path_template = fpath.replace( + repl_string, "%0{}d".format(padding)) + + for frame in range(first, last + 1): + test_file = test_path_template % frame + if not os.path.exists(test_file): + self.log.debug("__ test_file: `{}`".format(test_file)) + return None + + return True + + def render_slate( + self, + instance, + output_name=None, + bake_viewer_process=True, + bake_viewer_input_process=True + ): + """Slate frame renderer + + Args: + instance (PyblishInstance): Pyblish instance with subset data + output_name (str, optional): + Slate variation name. Defaults to None. + bake_viewer_process (bool, optional): + Switch for viewer profile baking. Defaults to True. + bake_viewer_input_process (bool, optional): + Switch for input process node baking. Defaults to True. 
+ """ + slate_node = instance.data["slateNode"] + + # rendering path from group write node + fpath = instance.data["path"] + + # instance frame range with handles + first_frame = instance.data["frameStartHandle"] + last_frame = instance.data["frameEndHandle"] + + # fill slate node with comments + self.add_comment_slate_node(instance, slate_node) + + # solve output name if any is set + _output_name = output_name or "" + if _output_name: + _output_name = "_" + _output_name + + slate_first_frame = first_frame - 1 - temporary_nodes = [] collection = instance.data.get("collection", None) if collection: @@ -75,100 +143,161 @@ class ExtractSlateFrame(openpype.api.Extractor): fname = os.path.basename(collection.format( "{head}{padding}{tail}")) fhead = collection.format("{head}") - - collected_frames_len = int(len(collection.indexes)) - - # get first and last frame - first_frame = min(collection.indexes) - 1 - self.log.info('frame_length: {}'.format(frame_length)) - self.log.info( - 'len(collection.indexes): {}'.format(collected_frames_len) - ) - if ("slate" in instance.data["families"]) \ - and (frame_length != collected_frames_len): - first_frame += 1 - - last_frame = first_frame else: - fname = os.path.basename(instance.data.get("path", None)) + fname = os.path.basename(fpath) fhead = os.path.splitext(fname)[0] + "." - first_frame = instance.data.get("frameStartHandle", None) - 1 - last_frame = first_frame if "#" in fhead: fhead = fhead.replace("#", "")[:-1] - previous_node = node + self.log.debug("__ first_frame: {}".format(first_frame)) + self.log.debug("__ slate_first_frame: {}".format(slate_first_frame)) - # get input process and connect it to baking - ipn = self.get_view_process_node() - if ipn is not None: - ipn.setInput(0, previous_node) - previous_node = ipn - temporary_nodes.append(ipn) + above_slate_node = slate_node.dependencies().pop() + # fallback if files does not exists + if self._check_frames_exists(instance): + # Read node + r_node = nuke.createNode("Read") + r_node["file"].setValue(fpath) + r_node["first"].setValue(first_frame) + r_node["origfirst"].setValue(first_frame) + r_node["last"].setValue(last_frame) + r_node["origlast"].setValue(last_frame) + r_node["colorspace"].setValue(instance.data["colorspace"]) + previous_node = r_node + temporary_nodes = [previous_node] - if not self.viewer_lut_raw: + # adding copy metadata node for correct frame metadata + cm_node = nuke.createNode("CopyMetaData") + cm_node.setInput(0, previous_node) + cm_node.setInput(1, above_slate_node) + previous_node = cm_node + temporary_nodes.append(cm_node) + + else: + previous_node = above_slate_node + temporary_nodes = [] + + # only create colorspace baking if toggled on + if bake_viewer_process: + if bake_viewer_input_process: + # get input process and connect it to baking + ipn = get_view_process_node() + if ipn is not None: + ipn.setInput(0, previous_node) + previous_node = ipn + temporary_nodes.append(ipn) + + # add duplicate slate node and connect to previous + duply_slate_node = duplicate_node(slate_node) + duply_slate_node.setInput(0, previous_node) + previous_node = duply_slate_node + temporary_nodes.append(duply_slate_node) + + # add viewer display transformation node dag_node = nuke.createNode("OCIODisplay") dag_node.setInput(0, previous_node) previous_node = dag_node temporary_nodes.append(dag_node) + else: + # add duplicate slate node and connect to previous + duply_slate_node = duplicate_node(slate_node) + duply_slate_node.setInput(0, previous_node) + previous_node = duply_slate_node + 
temporary_nodes.append(duply_slate_node) + # create write node write_node = nuke.createNode("Write") - file = fhead + "slate.png" - path = os.path.join(staging_dir, file).replace("\\", "/") - instance.data["slateFrame"] = path + file = fhead[:-1] + _output_name + "_slate.png" + path = os.path.join( + instance.data["stagingDir"], file).replace("\\", "/") + + # add slate path to `slateFrames` instance data attr + if not instance.data.get("slateFrames"): + instance.data["slateFrames"] = {} + + instance.data["slateFrames"][output_name or "*"] = path + + # create write node write_node["file"].setValue(path) write_node["file_type"].setValue("png") write_node["raw"].setValue(1) write_node.setInput(0, previous_node) temporary_nodes.append(write_node) - # fill slate node with comments - self.add_comment_slate_node(instance) - # Render frames - nuke.execute(write_node.name(), int(first_frame), int(last_frame)) - # also render slate as sequence frame - nuke.execute(node_subset_name, int(first_frame), int(last_frame)) - - self.log.debug( - "slate frame path: {}".format(instance.data["slateFrame"])) + nuke.execute( + write_node.name(), int(slate_first_frame), int(slate_first_frame)) # Clean up for node in temporary_nodes: nuke.delete(node) - def get_view_process_node(self): - # Select only the target node - if nuke.selectedNodes(): - [n.setSelected(False) for n in nuke.selectedNodes()] + def _render_slate_to_sequence(self, instance): + # set slate frame + first_frame = instance.data["frameStartHandle"] + last_frame = instance.data["frameEndHandle"] + slate_first_frame = first_frame - 1 - ipn_orig = None - for v in [n for n in nuke.allNodes() - if "Viewer" in n.Class()]: - ip = v['input_process'].getValue() - ipn = v['input_process_node'].getValue() - if "VIEWER_INPUT" not in ipn and ip: - ipn_orig = nuke.toNode(ipn) - ipn_orig.setSelected(True) + # render slate as sequence frame + nuke.execute( + instance.data["name"], + int(slate_first_frame), + int(slate_first_frame) + ) - if ipn_orig: - nuke.nodeCopy('%clipboard%') + # Add file to representation files + # - get write node + write_node = instance.data["writeNode"] + # - evaluate filepaths for first frame and slate frame + first_filename = os.path.basename( + write_node["file"].evaluate(first_frame)) + slate_filename = os.path.basename( + write_node["file"].evaluate(slate_first_frame)) - [n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all + # Find matching representation based on first filename + matching_repre = None + is_sequence = None + for repre in instance.data["representations"]: + files = repre["files"] + if ( + not isinstance(files, six.string_types) + and first_filename in files + ): + matching_repre = repre + is_sequence = True + break - nuke.nodePaste('%clipboard%') + elif files == first_filename: + matching_repre = repre + is_sequence = False + break - ipn = nuke.selectedNode() - - return ipn - - def add_comment_slate_node(self, instance): - node = instance.data.get("slateNode") - if not node: + if not matching_repre: + self.log.info(( + "Matching reresentaion was not found." + " Representation files were not filled with slate." 
+ )) return + # Add frame to matching representation files + if not is_sequence: + matching_repre["files"] = [first_filename, slate_filename] + elif slate_filename not in matching_repre["files"]: + matching_repre["files"].insert(0, slate_filename) + matching_repre["frameStart"] = ( + "{{:0>{}}}" + .format(len(str(last_frame))) + .format(slate_first_frame) + ) + self.log.debug( + "__ matching_repre: {}".format(pformat(matching_repre))) + + self.log.warning("Added slate frame to representation files") + + def add_comment_slate_node(self, instance, node): + comment = instance.context.data.get("comment") intent = instance.context.data.get("intent") if not isinstance(intent, dict): @@ -186,8 +315,8 @@ class ExtractSlateFrame(openpype.api.Extractor): "intent": intent }) - for key, value in self.key_value_mapping.items(): - enabled, template = value + for key, _values in self.key_value_mapping.items(): + enabled, template = _values if not enabled: self.log.debug("Key \"{}\" is disabled".format(key)) continue @@ -221,5 +350,5 @@ class ExtractSlateFrame(openpype.api.Extractor): )) except NameError: self.log.warning(( - "Failed to set value \"{}\" on node attribute \"{}\"" + "Failed to set value \"{0}\" on node attribute \"{0}\"" ).format(value)) diff --git a/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py b/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py index ef6d486ca2..19eae9638b 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py @@ -2,30 +2,38 @@ import sys import os import nuke import pyblish.api -import openpype -from openpype.hosts.nuke.api.lib import maintained_selection + +from openpype.pipeline import publish +from openpype.hosts.nuke.api import ( + maintained_selection, + get_view_process_node +) if sys.version_info[0] >= 3: unicode = str -class ExtractThumbnail(openpype.api.Extractor): +class ExtractThumbnail(publish.Extractor): """Extracts movie and thumbnail with baked in luts must be run after extract_render_local.py """ - order = pyblish.api.ExtractorOrder + 0.01 + order = pyblish.api.ExtractorOrder + 0.011 label = "Extract Thumbnail" families = ["review"] hosts = ["nuke"] - # presets + # settings + use_rendered = False + bake_viewer_process = True + bake_viewer_input_process = True nodes = {} + def process(self, instance): if "render.farm" in instance.data["families"]: return @@ -35,14 +43,37 @@ class ExtractThumbnail(openpype.api.Extractor): self.log.debug("instance.data[families]: {}".format( instance.data["families"])) - self.render_thumbnail(instance) + if instance.data.get("bakePresets"): + for o_name, o_data in instance.data["bakePresets"].items(): + self.render_thumbnail(instance, o_name, **o_data) + else: + viewer_process_swithes = { + "bake_viewer_process": True, + "bake_viewer_input_process": True + } + self.render_thumbnail(instance, None, **viewer_process_swithes) + + def render_thumbnail(self, instance, output_name=None, **kwargs): + first_frame = instance.data["frameStartHandle"] + last_frame = instance.data["frameEndHandle"] + + # find frame range and define middle thumb frame + mid_frame = int((last_frame - first_frame) / 2) + + # solve output name if any is set + output_name = output_name or "" + if output_name: + output_name = "_" + output_name + + bake_viewer_process = kwargs["bake_viewer_process"] + bake_viewer_input_process_node = kwargs[ + "bake_viewer_input_process"] - def render_thumbnail(self, instance): node = instance[0] # group node self.log.info("Creating 
staging dir...") if "representations" not in instance.data: - instance.data["representations"] = list() + instance.data["representations"] = [] staging_dir = os.path.normpath( os.path.dirname(instance.data['path'])) @@ -53,7 +84,11 @@ class ExtractThumbnail(openpype.api.Extractor): "StagingDir `{0}`...".format(instance.data["stagingDir"])) temporary_nodes = [] + + # try to connect already rendered images + previous_node = node collection = instance.data.get("collection", None) + self.log.debug("__ collection: `{}`".format(collection)) if collection: # get path @@ -61,43 +96,38 @@ class ExtractThumbnail(openpype.api.Extractor): "{head}{padding}{tail}")) fhead = collection.format("{head}") - # get first and last frame - first_frame = min(collection.indexes) - last_frame = max(collection.indexes) + thumb_fname = list(collection)[mid_frame] else: - fname = os.path.basename(instance.data.get("path", None)) + fname = thumb_fname = os.path.basename( + instance.data.get("path", None)) fhead = os.path.splitext(fname)[0] + "." - first_frame = instance.data.get("frameStart", None) - last_frame = instance.data.get("frameEnd", None) + + self.log.debug("__ fhead: `{}`".format(fhead)) if "#" in fhead: fhead = fhead.replace("#", "")[:-1] - path_render = os.path.join(staging_dir, fname).replace("\\", "/") - # check if file exist otherwise connect to write node - if os.path.isfile(path_render): + path_render = os.path.join( + staging_dir, thumb_fname).replace("\\", "/") + self.log.debug("__ path_render: `{}`".format(path_render)) + + if self.use_rendered and os.path.isfile(path_render): + # check if file exist otherwise connect to write node rnode = nuke.createNode("Read") rnode["file"].setValue(path_render) - rnode["first"].setValue(first_frame) - rnode["origfirst"].setValue(first_frame) - rnode["last"].setValue(last_frame) - rnode["origlast"].setValue(last_frame) + # turn it raw if none of baking is ON + if all([ + not self.bake_viewer_input_process, + not self.bake_viewer_process + ]): + rnode["raw"].setValue(True) + temporary_nodes.append(rnode) previous_node = rnode - else: - previous_node = node - - # get input process and connect it to baking - ipn = self.get_view_process_node() - if ipn is not None: - ipn.setInput(0, previous_node) - previous_node = ipn - temporary_nodes.append(ipn) reformat_node = nuke.createNode("Reformat") - ref_node = self.nodes.get("Reformat", None) if ref_node: for k, v in ref_node: @@ -110,14 +140,24 @@ class ExtractThumbnail(openpype.api.Extractor): previous_node = reformat_node temporary_nodes.append(reformat_node) - dag_node = nuke.createNode("OCIODisplay") - dag_node.setInput(0, previous_node) - previous_node = dag_node - temporary_nodes.append(dag_node) + # only create colorspace baking if toggled on + if bake_viewer_process: + if bake_viewer_input_process_node: + # get input process and connect it to baking + ipn = get_view_process_node() + if ipn is not None: + ipn.setInput(0, previous_node) + previous_node = ipn + temporary_nodes.append(ipn) + + dag_node = nuke.createNode("OCIODisplay") + dag_node.setInput(0, previous_node) + previous_node = dag_node + temporary_nodes.append(dag_node) # create write node write_node = nuke.createNode("Write") - file = fhead + "jpg" + file = fhead[:-1] + output_name + ".jpg" name = "thumbnail" path = os.path.join(staging_dir, file).replace("\\", "/") instance.data["thumbnail"] = path @@ -128,26 +168,18 @@ class ExtractThumbnail(openpype.api.Extractor): temporary_nodes.append(write_node) tags = ["thumbnail", "publish_on_farm"] - # retime 
for
-        mid_frame = int((int(last_frame) - int(first_frame)) / 2) \
-            + int(first_frame)
-        first_frame = int(last_frame) / 2
-        last_frame = int(last_frame) / 2
-
         repre = {
             'name': name,
             'ext': "jpg",
             "outputName": "thumb",
             'files': file,
             "stagingDir": staging_dir,
-            "frameStart": first_frame,
-            "frameEnd": last_frame,
             "tags": tags
         }
         instance.data["representations"].append(repre)

         # Render frames
-        nuke.execute(write_node.name(), int(mid_frame), int(mid_frame))
+        nuke.execute(write_node.name(), mid_frame, mid_frame)

         self.log.debug(
             "representations: {}".format(instance.data["representations"]))
@@ -155,30 +187,3 @@ class ExtractThumbnail(openpype.api.Extractor):

         # Clean up
         for node in temporary_nodes:
             nuke.delete(node)
-
-    def get_view_process_node(self):
-
-        # Select only the target node
-        if nuke.selectedNodes():
-            [n.setSelected(False) for n in nuke.selectedNodes()]
-
-        ipn_orig = None
-        for v in [n for n in nuke.allNodes()
-                  if "Viewer" == n.Class()]:
-            ip = v['input_process'].getValue()
-            ipn = v['input_process_node'].getValue()
-            if "VIEWER_INPUT" not in ipn and ip:
-                ipn_orig = nuke.toNode(ipn)
-                ipn_orig.setSelected(True)
-
-        if ipn_orig:
-            nuke.nodeCopy('%clipboard%')
-
-        # Deselect all
-        [n.setSelected(False) for n in nuke.selectedNodes()]
-
-        nuke.nodePaste('%clipboard%')
-
-        ipn = nuke.selectedNode()
-
-        return ipn
diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_asset_name.xml b/openpype/hosts/nuke/plugins/publish/help/validate_asset_name.xml
new file mode 100644
index 0000000000..1097909a5f
--- /dev/null
+++ b/openpype/hosts/nuke/plugins/publish/help/validate_asset_name.xml
@@ -0,0 +1,18 @@
+
+
+
+Shot/Asset name
+
+## Invalid Shot/Asset name in subset
+
+Node `{node_name}` is in the context of `{correct_name}`,
+but its _asset_ knob is set to `{wrong_name}`.
+
+### How to repair?
+
+1. Use either the Repair or the Select button.
+2. If you chose Select, rename the asset knob to the correct name.
+3. Hit Reload button on the publisher.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_backdrop.xml b/openpype/hosts/nuke/plugins/publish/help/validate_backdrop.xml
new file mode 100644
index 0000000000..ab1b650773
--- /dev/null
+++ b/openpype/hosts/nuke/plugins/publish/help/validate_backdrop.xml
@@ -0,0 +1,36 @@
+
+
+
+Found multiple outputs
+
+## Invalid number of outputs
+
+The backdrop has more than one outgoing connection.
+
+### How to repair?
+
+1. Use button `Center node in node graph` and navigate to the backdrop.
+2. Reorganize the nodes so that only one outgoing connection is present.
+3. Hit reload button on the publisher.
+
+
+### How could this happen?
+
+More than one of the nodes above the backdrop is linked downstream, or a single node above it has multiple output connections linked downstream.
+
+
+
+Empty backdrop
+
+## Invalid empty backdrop
+
+The backdrop is empty and no nodes were found above it.
+
+### How to repair?
+
+1. Use button `Center node in node graph` and navigate to the backdrop.
+2. Add a node above the backdrop, or delete the backdrop.
+3. Hit reload button on the publisher.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_gizmo.xml b/openpype/hosts/nuke/plugins/publish/help/validate_gizmo.xml
new file mode 100644
index 0000000000..f39a41a4f9
--- /dev/null
+++ b/openpype/hosts/nuke/plugins/publish/help/validate_gizmo.xml
@@ -0,0 +1,36 @@
+
+
+
+Found multiple outputs
+
+## Invalid number of Output nodes
+
+Group node `{node_name}` has more than one Output node.
+
+### How to repair?
+
+1. Use button `Open Group`.
+2. Remove the redundant Output node.
+3. Hit reload button on the publisher.
+
+
+### How could this happen?
+
+Perhaps you accidentally created more than one Output node.
+
+
+
+Missing Input nodes
+
+## Missing Input nodes
+
+Make sure there is at least one connected Input node inside the group node named `{node_name}`.
+
+### How to repair?
+
+1. Use button `Open Group`.
+2. Add at least one Input node and connect it to the other nodes.
+3. Hit reload button on the publisher.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_knobs.xml b/openpype/hosts/nuke/plugins/publish/help/validate_knobs.xml
new file mode 100644
index 0000000000..76c184f653
--- /dev/null
+++ b/openpype/hosts/nuke/plugins/publish/help/validate_knobs.xml
@@ -0,0 +1,18 @@
+
+
+
+Knob values
+
+## Invalid node knob values
+
+The following node knobs need to be repaired:
+
+{invalid_items}
+
+### How to repair?
+
+1. Use Repair button.
+2. Hit Reload button on the publisher.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_output_resolution.xml b/openpype/hosts/nuke/plugins/publish/help/validate_output_resolution.xml
new file mode 100644
index 0000000000..08a88a993e
--- /dev/null
+++ b/openpype/hosts/nuke/plugins/publish/help/validate_output_resolution.xml
@@ -0,0 +1,16 @@
+
+
+
+Output format
+
+## Invalid format setting
+
+Either the Reformat node inside the render group is missing, or its output format knob is not set to `root.format`.
+
+### How to repair?
+
+1. Use Repair button.
+2. Hit Reload button on the publisher.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_proxy_mode.xml b/openpype/hosts/nuke/plugins/publish/help/validate_proxy_mode.xml
new file mode 100644
index 0000000000..6fe5d5d43e
--- /dev/null
+++ b/openpype/hosts/nuke/plugins/publish/help/validate_proxy_mode.xml
@@ -0,0 +1,16 @@
+
+
+
+Proxy mode
+
+## Invalid proxy mode value
+
+Nuke is set to use Proxy mode. This is not supported by the publisher.
+
+### How to repair?
+
+1. Use Repair button.
+2. Hit Reload button on the publisher.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_rendered_frames.xml b/openpype/hosts/nuke/plugins/publish/help/validate_rendered_frames.xml
new file mode 100644
index 0000000000..434081c269
--- /dev/null
+++ b/openpype/hosts/nuke/plugins/publish/help/validate_rendered_frames.xml
@@ -0,0 +1,17 @@
+
+
+
+Rendered Frames
+
+## Missing Rendered Frames
+
+Render node "{node_name}" is set to "Use existing frames", but frames are missing.
+
+### How to repair?
+
+1. Use Repair button.
+2. Set a different target.
+3. Hit Reload button on the publisher.
+ + + \ No newline at end of file diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_script_attributes.xml b/openpype/hosts/nuke/plugins/publish/help/validate_script_attributes.xml new file mode 100644 index 0000000000..871fc629ce --- /dev/null +++ b/openpype/hosts/nuke/plugins/publish/help/validate_script_attributes.xml @@ -0,0 +1,18 @@ + + + + Script attributes + +## Invalid Script attributes + +Following script root attributes need to be fixed: + +{failed_attributes} + +### How to repair? + +1. Use Repair. +2. Hit Reload button on the publisher. + + + \ No newline at end of file diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_write_nodes.xml b/openpype/hosts/nuke/plugins/publish/help/validate_write_nodes.xml new file mode 100644 index 0000000000..cdf85102bc --- /dev/null +++ b/openpype/hosts/nuke/plugins/publish/help/validate_write_nodes.xml @@ -0,0 +1,18 @@ + + + + Knobs values + +## Invalid node's knobs values + +Following write node knobs needs to be repaired: + +{xml_msg} + +### How to repair? + +1. Use Repair button. +2. Hit Reload button on the publisher. + + + \ No newline at end of file diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py index 1a8fa3e6ad..b396056eb9 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py @@ -1,7 +1,6 @@ import nuke import pyblish.api -from openpype.pipeline import legacy_io from openpype.hosts.nuke.api.lib import ( add_publish_knob, get_avalon_knob_data @@ -20,12 +19,6 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): sync_workfile_version_on_families = [] def process(self, context): - asset_data = legacy_io.find_one({ - "type": "asset", - "name": legacy_io.Session["AVALON_ASSET"] - }) - - self.log.debug("asset_data: {}".format(asset_data["data"])) instances = [] root = nuke.root() @@ -57,7 +50,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): # establish families family = avalon_knob_data["family"] families_ak = avalon_knob_data.get("families", []) - families = list() + families = [] # except disabled nodes but exclude backdrops in test if ("nukenodes" not in family) and (node["disable"].value()): @@ -101,6 +94,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): # Farm rendering self.log.info("flagged for farm render") instance.data["transfer"] = False + instance.data["farm"] = True families.append("{}.farm".format(family)) family = families_ak.lower() @@ -117,10 +111,10 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("__ families: `{}`".format(families)) # Get format - format = root['format'].value() - resolution_width = format.width() - resolution_height = format.height() - pixel_aspect = format.pixelAspect() + format_ = root['format'].value() + resolution_width = format_.width() + resolution_height = format_.height() + pixel_aspect = format_.pixelAspect() # get publish knob value if "publish" not in node.knobs(): @@ -131,8 +125,11 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("__ _families_test: `{}`".format(_families_test)) for family_test in _families_test: if family_test in self.sync_workfile_version_on_families: - self.log.debug("Syncing version with workfile for '{}'" - .format(family_test)) + self.log.debug( + "Syncing version with workfile for '{}'".format( + family_test + ) + ) # get version to instance for integration instance.data['version'] = 
instance.context.data['version'] @@ -151,15 +148,11 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): "resolutionWidth": resolution_width, "resolutionHeight": resolution_height, "pixelAspect": pixel_aspect, - "review": review + "review": review, + "representations": [] }) self.log.info("collected instance: {}".format(instance.data)) instances.append(instance) - # create instances in context data if not are created yet - if not context.data.get("instances"): - context.data["instances"] = list() - - context.data["instances"].extend(instances) self.log.debug("context: {}".format(context)) diff --git a/openpype/hosts/nuke/plugins/publish/precollect_workfile.py b/openpype/hosts/nuke/plugins/publish/precollect_workfile.py index a2d1c80628..316c651b66 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_workfile.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_workfile.py @@ -3,11 +3,13 @@ import os import nuke import pyblish.api -import openpype.api as pype + +from openpype.lib import get_version_from_path from openpype.hosts.nuke.api.lib import ( add_publish_knob, get_avalon_knob_data ) +from openpype.pipeline import KnownPublishError class CollectWorkfile(pyblish.api.ContextPlugin): @@ -17,11 +19,17 @@ class CollectWorkfile(pyblish.api.ContextPlugin): label = "Pre-collect Workfile" hosts = ['nuke'] - def process(self, context): + def process(self, context): # sourcery skip: avoid-builtin-shadow root = nuke.root() current_file = os.path.normpath(nuke.root().name()) + if current_file.lower() == "root": + raise KnownPublishError( + "Workfile is not correct file name. \n" + "Use workfile tool to manage the name correctly." + ) + knob_data = get_avalon_knob_data(root) add_publish_knob(root) @@ -67,27 +75,13 @@ class CollectWorkfile(pyblish.api.ContextPlugin): "fps": root['fps'].value(), "currentFile": current_file, - "version": int(pype.get_version_from_path(current_file)), + "version": int(get_version_from_path(current_file)), "host": pyblish.api.current_host(), "hostVersion": nuke.NUKE_VERSION_STRING } context.data.update(script_data) - # creating instance data - instance.data.update({ - "subset": subset, - "label": base_name, - "name": base_name, - "publish": root.knob('publish').value(), - "family": family, - "families": [family], - "representations": list() - }) - - # adding basic script data - instance.data.update(script_data) - # creating representation representation = { 'name': 'nk', @@ -96,12 +90,18 @@ class CollectWorkfile(pyblish.api.ContextPlugin): "stagingDir": staging_dir, } - instance.data["representations"].append(representation) + # creating instance data + instance.data.update({ + "subset": subset, + "label": base_name, + "name": base_name, + "publish": root.knob('publish').value(), + "family": family, + "families": [family], + "representations": [representation] + }) + + # adding basic script data + instance.data.update(script_data) self.log.info('Publishing script version') - - # create instances in context data if not are created yet - if not context.data.get("instances"): - context.data["instances"] = list() - - context.data["instances"].append(instance) diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py index 8669f4f485..17c4bc30cf 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py @@ -4,7 +4,10 @@ from pprint import pformat import nuke import pyblish.api -import openpype.api as pype +from 
openpype.client import ( + get_last_version_by_subset_name, + get_representations, +) from openpype.pipeline import ( legacy_io, get_representation_path, @@ -32,6 +35,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): if node is None: return + instance.data["writeNode"] = node self.log.debug("checking instance: {}".format(instance)) # Determine defined file type @@ -53,9 +57,21 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): first_frame = int(node["first"].getValue()) last_frame = int(node["last"].getValue()) - # get path + # Prepare expected output paths by evaluating each frame of write node + # - paths are first collected to set to avoid duplicated paths, then + # sorted and converted to list + node_file = node["file"] + expected_paths = list(sorted({ + node_file.evaluate(frame) + for frame in range(first_frame, last_frame + 1) + })) + expected_filenames = [ + os.path.basename(filepath) + for filepath in expected_paths + ] path = nuke.filename(node) output_dir = os.path.dirname(path) + self.log.debug('output dir: {}'.format(output_dir)) # create label @@ -72,16 +88,19 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): if "representations" not in instance.data: instance.data["representations"] = list() - representation = { - 'name': ext, - 'ext': ext, - "stagingDir": output_dir, - "tags": list() - } + representation = { + 'name': ext, + 'ext': ext, + "stagingDir": output_dir, + "tags": list() + } try: - collected_frames = [f for f in os.listdir(output_dir) - if ext in f] + collected_frames = [ + filename + for filename in os.listdir(output_dir) + if filename in expected_filenames + ] if collected_frames: collected_frames_len = len(collected_frames) frame_start_str = "%0{}d".format( @@ -125,8 +144,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): self.log.debug("colorspace: `{}`".format(colorspace)) version_data = { - "families": [f.replace(".local", "").replace(".farm", "") - for f in _families_test if "write" not in f], + "families": [ + _f.replace(".local", "").replace(".farm", "") + for _f in _families_test if "write" != _f + ], "colorspace": colorspace } @@ -175,24 +196,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "frameEndHandle": last_frame, }) - # * Add audio to instance if exists. 
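The expected-path collection added above reduces to a small helper: evaluate the Write node's `file` knob for every frame in the render range, deduplicate, and keep only basenames so the on-disk listing can be filtered against it. A minimal sketch, assuming `node` is a Nuke Write node whose `first`/`last` knobs are already set (the helper name is illustrative, not part of the patch):

import os
import nuke

def expected_render_basenames(node):
    """Basenames the Write node should produce over its frame range."""
    first_frame = int(node["first"].getValue())
    last_frame = int(node["last"].getValue())
    node_file = node["file"]
    # evaluate() resolves padding tokens (%04d / ####) for a specific frame;
    # the set drops duplicates, e.g. when the path has no frame token at all
    expected_paths = sorted({
        node_file.evaluate(frame)
        for frame in range(first_frame, last_frame + 1)
    })
    return [os.path.basename(path) for path in expected_paths]

# collected frames are then just the directory entries that are expected:
# [f for f in os.listdir(output_dir) if f in expected_render_basenames(node)]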
- # Find latest versions document - version_doc = pype.get_latest_version( - instance.data["asset"], "audioMain" - ) - repre_doc = None - if version_doc: - # Try to find it's representation (Expected there is only one) - repre_doc = legacy_io.find_one( - {"type": "representation", "parent": version_doc["_id"]} - ) - - # Add audio to instance if representation was found - if repre_doc: - instance.data["audio"] = [{ - "offset": 0, - "filename": get_representation_path(repre_doc) - }] + # make sure rendered sequence on farm will + # be used for exctract review + if not instance.data["review"]: + instance.data["useSequenceForReview"] = False self.log.debug("instance.data: {}".format(pformat(instance.data))) diff --git a/openpype/hosts/nuke/plugins/publish/validate_instance_in_context.py b/openpype/hosts/nuke/plugins/publish/validate_asset_name.py similarity index 72% rename from openpype/hosts/nuke/plugins/publish/validate_instance_in_context.py rename to openpype/hosts/nuke/plugins/publish/validate_asset_name.py index 842f74b6f6..52731140ff 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_instance_in_context.py +++ b/openpype/hosts/nuke/plugins/publish/validate_asset_name.py @@ -3,20 +3,20 @@ from __future__ import absolute_import import nuke - import pyblish.api -import openpype.api -from openpype.hosts.nuke.api.lib import ( - recreate_instance, - reset_selection, - select_nodes + +import openpype.hosts.nuke.api.lib as nlib +import openpype.hosts.nuke.api as nuke_api +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, ) class SelectInvalidInstances(pyblish.api.Action): """Select invalid instances in Outliner.""" - label = "Select Instances" + label = "Select" icon = "briefcase" on = "failed" @@ -39,6 +39,7 @@ class SelectInvalidInstances(pyblish.api.Action): instances = pyblish.api.instances_by_plugin(failed, plugin) if instances: + self.deselect() self.log.info( "Selecting invalid nodes: %s" % ", ".join( [str(x) for x in instances] @@ -50,12 +51,12 @@ class SelectInvalidInstances(pyblish.api.Action): self.deselect() def select(self, instances): - select_nodes( + nlib.select_nodes( [nuke.toNode(str(x)) for x in instances] ) def deselect(self): - reset_selection() + nlib.reset_selection() class RepairSelectInvalidInstances(pyblish.api.Action): @@ -85,12 +86,12 @@ class RepairSelectInvalidInstances(pyblish.api.Action): context_asset = context.data["assetEntity"]["name"] for instance in instances: origin_node = instance[0] - recreate_instance( + nuke_api.lib.recreate_instance( origin_node, avalon_data={"asset": context_asset} ) -class ValidateInstanceInContext(pyblish.api.InstancePlugin): +class ValidateCorrectAssetName(pyblish.api.InstancePlugin): """Validator to check if instance asset match context asset. When working in per-shot style you always publish data in context of @@ -99,15 +100,31 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin): Action on this validator will select invalid instances in Outliner. 
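The `SelectInvalidInstances` action above uses the usual pyblish recipe for finding what failed: walk `context.data["results"]`, keep errored instances, then narrow them to the plug-in the action belongs to. A minimal sketch of that recipe, with the node selection replaced by a log call (class name illustrative only):

import pyblish.api

class SelectInvalid(pyblish.api.Action):
    label = "Select"
    icon = "briefcase"
    on = "failed"

    def process(self, context, plugin):
        # instances whose validation raised an error
        failed = []
        for result in context.data["results"]:
            instance = result["instance"]
            if (result["error"] is not None
                    and instance is not None
                    and instance not in failed):
                failed.append(instance)

        # keep only instances produced by this action's plug-in
        for instance in pyblish.api.instances_by_plugin(failed, plugin):
            self.log.info("Invalid instance: %s", instance)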
""" - - order = openpype.api.ValidateContentsOrder - label = "Instance in same Context" + order = ValidateContentsOrder + label = "Validate correct asset name" hosts = ["nuke"] - actions = [SelectInvalidInstances, RepairSelectInvalidInstances] + actions = [ + SelectInvalidInstances, + RepairSelectInvalidInstances + ] optional = True def process(self, instance): asset = instance.data.get("asset") context_asset = instance.context.data["assetEntity"]["name"] - msg = "{} has asset {}".format(instance.name, asset) - assert asset == context_asset, msg + + msg = ( + "Instance `{}` has wrong shot/asset name:\n" + "Correct: `{}` | Wrong: `{}`").format( + instance.name, asset, context_asset) + + self.log.debug(msg) + + if asset != context_asset: + raise PublishXmlValidationError( + self, msg, formatting_data={ + "node_name": instance[0]["name"].value(), + "wrong_name": asset, + "correct_name": context_asset + } + ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_backdrop.py b/openpype/hosts/nuke/plugins/publish/validate_backdrop.py index e2843d146e..17dc79dc56 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_backdrop.py +++ b/openpype/hosts/nuke/plugins/publish/validate_backdrop.py @@ -1,6 +1,7 @@ import nuke import pyblish from openpype.hosts.nuke.api.lib import maintained_selection +from openpype.pipeline import PublishXmlValidationError class SelectCenterInNodeGraph(pyblish.api.Action): @@ -47,8 +48,9 @@ class SelectCenterInNodeGraph(pyblish.api.Action): @pyblish.api.log class ValidateBackdrop(pyblish.api.InstancePlugin): - """Validate amount of nodes on backdrop node in case user - forgotten to add nodes above the publishing backdrop node""" + """ Validate amount of nodes on backdrop node in case user + forgoten to add nodes above the publishing backdrop node. 
+ """ order = pyblish.api.ValidatorOrder optional = True @@ -63,8 +65,25 @@ class ValidateBackdrop(pyblish.api.InstancePlugin): msg_multiple_outputs = ( "Only one outcoming connection from " "\"{}\" is allowed").format(instance.data["name"]) - assert len(connections_out.keys()) <= 1, msg_multiple_outputs - msg_no_content = "No content on backdrop node: \"{}\"".format( + if len(connections_out.keys()) > 1: + raise PublishXmlValidationError( + self, + msg_multiple_outputs, + "multiple_outputs" + ) + + msg_no_nodes = "No content on backdrop node: \"{}\"".format( instance.data["name"]) - assert len(instance) > 1, msg_no_content + + self.log.debug( + "Amount of nodes on instance: {}".format( + len(instance)) + ) + + if len(instance) == 1: + raise PublishXmlValidationError( + self, + msg_no_nodes, + "no_nodes" + ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_gizmo.py b/openpype/hosts/nuke/plugins/publish/validate_gizmo.py index d0d930f50c..2321bd1fd4 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_gizmo.py +++ b/openpype/hosts/nuke/plugins/publish/validate_gizmo.py @@ -1,6 +1,7 @@ -import nuke import pyblish -from openpype.hosts.nuke.api.lib import maintained_selection +from openpype.pipeline import PublishXmlValidationError +from openpype.hosts.nuke.api import maintained_selection +import nuke class OpenFailedGroupNode(pyblish.api.Action): @@ -8,7 +9,7 @@ class OpenFailedGroupNode(pyblish.api.Action): Centering failed instance node in node grap """ - label = "Open Gizmo in Node Graph" + label = "Open Group" icon = "wrench" on = "failed" @@ -48,11 +49,23 @@ class ValidateGizmo(pyblish.api.InstancePlugin): with grpn: connections_out = nuke.allNodes('Output') - msg_multiple_outputs = "Only one outcoming connection from " - "\"{}\" is allowed".format(instance.data["name"]) - assert len(connections_out) <= 1, msg_multiple_outputs + msg_multiple_outputs = ( + "Only one outcoming connection from " + "\"{}\" is allowed").format(instance.data["name"]) + + if len(connections_out) > 1: + raise PublishXmlValidationError( + self, msg_multiple_outputs, "multiple_outputs", + {"node_name": grpn["name"].value()} + ) connections_in = nuke.allNodes('Input') - msg_missing_inputs = "At least one Input node has to be used in: " - "\"{}\"".format(instance.data["name"]) - assert len(connections_in) >= 1, msg_missing_inputs + msg_missing_inputs = ( + "At least one Input node has to be inside Group: " + "\"{}\"").format(instance.data["name"]) + + if len(connections_in) == 0: + raise PublishXmlValidationError( + self, msg_missing_inputs, "no_inputs", + {"node_name": grpn["name"].value()} + ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_knobs.py b/openpype/hosts/nuke/plugins/publish/validate_knobs.py index d290ff4541..d44f27791a 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_knobs.py +++ b/openpype/hosts/nuke/plugins/publish/validate_knobs.py @@ -1,7 +1,11 @@ import nuke - +import six import pyblish.api -import openpype.api + +from openpype.pipeline.publish import ( + RepairContextAction, + PublishXmlValidationError, +) class ValidateKnobs(pyblish.api.ContextPlugin): @@ -23,15 +27,25 @@ class ValidateKnobs(pyblish.api.ContextPlugin): order = pyblish.api.ValidatorOrder label = "Validate Knobs" hosts = ["nuke"] - actions = [openpype.api.RepairContextAction] + actions = [RepairContextAction] optional = True def process(self, context): - invalid = self.get_invalid(context, compute=True) if invalid: - raise RuntimeError( - "Found knobs with invalid values:\n{}".format(invalid) + 
invalid_items = [ + ( + "Node __{node_name}__ with knob _{label}_ " + "expecting _{expected}_, " + "but is set to _{current}_" + ).format(**i) + for i in invalid + ] + raise PublishXmlValidationError( + self, + "Found knobs with invalid values:\n{}".format(invalid), + formatting_data={ + "invalid_items": "\n".join(invalid_items)} ) @classmethod @@ -54,15 +68,24 @@ class ValidateKnobs(pyblish.api.ContextPlugin): # Filter families. families = [instance.data["family"]] families += instance.data.get("families", []) - families = list(set(families) & set(cls.knobs.keys())) + if not families: continue # Get all knobs to validate. knobs = {} for family in families: + # check if dot in family + if "." in family: + family = family.split(".")[0] + + # avoid families not in settings + if family not in cls.knobs: + continue + + # get presets of knobs for preset in cls.knobs[family]: - knobs.update({preset: cls.knobs[family][preset]}) + knobs[preset] = cls.knobs[family][preset] # Get invalid knobs. nodes = [] @@ -71,8 +94,7 @@ class ValidateKnobs(pyblish.api.ContextPlugin): nodes.append(node) if node.Class() == "Group": node.begin() - for i in nuke.allNodes(): - nodes.append(i) + nodes.extend(iter(nuke.allNodes())) node.end() for node in nodes: @@ -84,6 +106,7 @@ class ValidateKnobs(pyblish.api.ContextPlugin): if node[knob].value() != expected: invalid_knobs.append( { + "node_name": node.name(), "knob": node[knob], "name": node[knob].name(), "label": node[knob].label(), @@ -99,7 +122,9 @@ class ValidateKnobs(pyblish.api.ContextPlugin): def repair(cls, instance): invalid = cls.get_invalid(instance) for data in invalid: - if isinstance(data["expected"], unicode): + # TODO: will need to improve type definitions + # with the new settings for knob types + if isinstance(data["expected"], six.text_type): data["knob"].setValue(str(data["expected"])) continue diff --git a/openpype/hosts/nuke/plugins/publish/validate_output_resolution.py b/openpype/hosts/nuke/plugins/publish/validate_output_resolution.py index 27094b8d74..1e59880f90 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_output_resolution.py +++ b/openpype/hosts/nuke/plugins/publish/validate_output_resolution.py @@ -1,43 +1,9 @@ -import nuke - import pyblish.api - -class RepairWriteResolutionDifference(pyblish.api.Action): - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - - # Get the errored instances - failed = [] - for result in context.data["results"]: - if (result["error"] is not None and result["instance"] is not None - and result["instance"] not in failed): - failed.append(result["instance"]) - - # Apply pyblish.logic to get the instances for the plug-in - instances = pyblish.api.instances_by_plugin(failed, plugin) - - for instance in instances: - reformat = instance[0].dependencies()[0] - if reformat.Class() != "Reformat": - reformat = nuke.nodes.Reformat(inputs=[instance[0].input(0)]) - - xpos = instance[0].xpos() - ypos = instance[0].ypos() - 26 - - dependent_ypos = instance[0].dependencies()[0].ypos() - if (instance[0].ypos() - dependent_ypos) <= 51: - xpos += 110 - - reformat.setXYpos(xpos, ypos) - - instance[0].setInput(0, reformat) - - reformat["resize"].setValue("none") +from openpype.hosts.nuke.api import maintained_selection +from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import RepairAction +import nuke class ValidateOutputResolution(pyblish.api.InstancePlugin): @@ -52,27 +18,75 @@ class ValidateOutputResolution(pyblish.api.InstancePlugin): 
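The `help/*.xml` files added earlier in this patch and the `PublishXmlValidationError` raises shown above are two halves of one mechanism: the keys passed in `formatting_data` fill the matching `{placeholders}` in the plugin's XML template, so the artist gets a readable report while the log keeps the plain message. A minimal sketch of the raising side, with the plugin, message and placeholder names purely illustrative:

import pyblish.api
from openpype.pipeline import PublishXmlValidationError

class ValidateExample(pyblish.api.InstancePlugin):
    """Illustrative validator; the real plugins in this patch follow the same shape."""
    order = pyblish.api.ValidatorOrder
    label = "Validate Example"
    hosts = ["nuke"]

    def process(self, instance):
        invalid = ["fps", "frameStart"]  # whatever the actual check collected
        if invalid:
            raise PublishXmlValidationError(
                self,
                "Found invalid values: {}".format(", ".join(invalid)),
                # these keys must match the {placeholders} in the plugin's help XML
                formatting_data={"failed_attributes": ", ".join(invalid)}
            )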
families = ["render", "render.local", "render.farm"] label = "Write Resolution" hosts = ["nuke"] - actions = [RepairWriteResolutionDifference] + actions = [RepairAction] + + missing_msg = "Missing Reformat node in render group node" + resolution_msg = "Reformat is set to wrong format" def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise PublishXmlValidationError(self, invalid) - # Skip bounding box check if a reformat node exists. - if instance[0].dependencies()[0].Class() == "Reformat": - return + @classmethod + def get_reformat(cls, instance): + reformat = None + for inode in instance: + if inode.Class() != "Reformat": + continue + reformat = inode - msg = "Bounding box is outside the format." - assert self.check_resolution(instance), msg + return reformat - def check_resolution(self, instance): - node = instance[0] + @classmethod + def get_invalid(cls, instance): + def _check_resolution(instance, reformat): + root_width = instance.data["resolutionWidth"] + root_height = instance.data["resolutionHeight"] - root_width = instance.data["resolutionWidth"] - root_height = instance.data["resolutionHeight"] + write_width = reformat.format().width() + write_height = reformat.format().height() - write_width = node.format().width() - write_height = node.format().height() + if (root_width != write_width) or (root_height != write_height): + return None + else: + return True - if (root_width != write_width) or (root_height != write_height): - return None - else: - return True + # check if reformat is in render node + reformat = cls.get_reformat(instance) + if not reformat: + return cls.missing_msg + + # check if reformat is set to correct root format + correct_format = _check_resolution(instance, reformat) + if not correct_format: + return cls.resolution_msg + + @classmethod + def repair(cls, instance): + invalid = cls.get_invalid(instance) + grp_node = instance[0] + + if cls.missing_msg == invalid: + # make sure we are inside of the group node + with grp_node: + # find input node and select it + _input = None + for inode in instance: + if inode.Class() != "Input": + continue + _input = inode + + # add reformat node under it + with maintained_selection(): + _input['selected'].setValue(True) + _rfn = nuke.createNode("Reformat", "name Reformat01") + _rfn["resize"].setValue(0) + _rfn["black_outside"].setValue(1) + + cls.log.info("I am adding reformat node") + + if cls.resolution_msg == invalid: + reformat = cls.get_reformat(instance) + reformat["format"].setValue(nuke.root()["format"].value()) + cls.log.info("I am fixing reformat to root.format") diff --git a/openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py b/openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py index 9c6ca03ffd..dac240ad19 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py +++ b/openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py @@ -1,5 +1,6 @@ import pyblish import nuke +from openpype.pipeline import PublishXmlValidationError class FixProxyMode(pyblish.api.Action): @@ -7,7 +8,7 @@ class FixProxyMode(pyblish.api.Action): Togger off proxy switch OFF """ - label = "Proxy toggle to OFF" + label = "Repair" icon = "wrench" on = "failed" @@ -30,4 +31,7 @@ class ValidateProxyMode(pyblish.api.ContextPlugin): rootNode = nuke.root() isProxy = rootNode["proxy"].value() - assert not isProxy, "Proxy mode should be toggled OFF" + if isProxy: + raise PublishXmlValidationError( + self, "Proxy mode should be toggled OFF" + ) diff --git 
a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py b/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py index af5e8e9d27..237ff423e5 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py +++ b/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py @@ -1,7 +1,7 @@ import os import pyblish.api -from openpype.api import ValidationException import clique +from openpype.pipeline import PublishXmlValidationError @pyblish.api.log @@ -36,7 +36,7 @@ class RepairActionBase(pyblish.api.Action): class RepairCollectionActionToLocal(RepairActionBase): - label = "Repair > rerender with `Local` machine" + label = "Repair - rerender with \"Local\"" def process(self, context, plugin): instances = self.get_instance(context, plugin) @@ -44,7 +44,7 @@ class RepairCollectionActionToLocal(RepairActionBase): class RepairCollectionActionToFarm(RepairActionBase): - label = "Repair > rerender `On farm` with remote machines" + label = "Repair - rerender with \"On farm\"" def process(self, context, plugin): instances = self.get_instance(context, plugin) @@ -63,6 +63,10 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): def process(self, instance): + f_data = { + "node_name": instance[0]["name"].value() + } + for repre in instance.data["representations"]: if not repre.get("files"): @@ -71,7 +75,8 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): "Check properties of write node (group) and" "select 'Local' option in 'Publish' dropdown.") self.log.error(msg) - raise ValidationException(msg) + raise PublishXmlValidationError( + self, msg, formatting_data=f_data) if isinstance(repre["files"], str): return @@ -82,30 +87,33 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): collection = collections[0] - fstartH = instance.data["frameStartHandle"] - fendH = instance.data["frameEndHandle"] + f_start_h = instance.data["frameStartHandle"] + f_end_h = instance.data["frameEndHandle"] - frame_length = int(fendH - fstartH + 1) + frame_length = int(f_end_h - f_start_h + 1) if frame_length != 1: if len(collections) != 1: msg = "There are multiple collections in the folder" self.log.error(msg) - raise ValidationException(msg) + raise PublishXmlValidationError( + self, msg, formatting_data=f_data) if not collection.is_contiguous(): msg = "Some frames appear to be missing" self.log.error(msg) - raise ValidationException(msg) + raise PublishXmlValidationError( + self, msg, formatting_data=f_data) - collected_frames_len = int(len(collection.indexes)) + collected_frames_len = len(collection.indexes) coll_start = min(collection.indexes) coll_end = max(collection.indexes) self.log.info("frame_length: {}".format(frame_length)) self.log.info("collected_frames_len: {}".format( collected_frames_len)) - self.log.info("fstartH-fendH: {}-{}".format(fstartH, fendH)) + self.log.info("f_start_h-f_end_h: {}-{}".format( + f_start_h, f_end_h)) self.log.info( "coll_start-coll_end: {}-{}".format(coll_start, coll_end)) @@ -116,13 +124,19 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): if ("slate" in instance.data["families"]) \ and (frame_length != collected_frames_len): collected_frames_len -= 1 - fstartH += 1 + f_start_h += 1 - assert ((collected_frames_len >= frame_length) - and (coll_start <= fstartH) - and (coll_end >= fendH)), ( - "{} missing frames. 
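The frame-completeness checks in this validator lean on the `clique` library: the representation's file list is assembled into a collection and its index set is compared against the expected handle-extended range. A minimal sketch of that kind of check, with the file list and frame range hard-coded for illustration (not the exact condition used by the plugin):

import clique

# pretend these came from the representation files and the instance data
files = ["render.1001.exr", "render.1002.exr", "render.1004.exr"]
frame_start_handle = 1001
frame_end_handle = 1004

collections, remainder = clique.assemble(files)
collection = collections[0]

frame_length = frame_end_handle - frame_start_handle + 1
missing = (
    not collection.is_contiguous()
    or len(collection.indexes) < frame_length
    or min(collection.indexes) > frame_start_handle
    or max(collection.indexes) < frame_end_handle
)
print("missing frames" if missing else "all frames rendered")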
Use repair to render all frames" - ).format(__name__) + if ( + collected_frames_len != frame_length + and coll_start <= f_start_h + and coll_end >= f_end_h + ): + raise PublishXmlValidationError( + self, ( + "{} missing frames. Use repair to " + "render all frames" + ).format(__name__), formatting_data=f_data + ) instance.data["collection"] = collection diff --git a/openpype/hosts/nuke/plugins/publish/validate_script.py b/openpype/hosts/nuke/plugins/publish/validate_script.py deleted file mode 100644 index 10c9e93f8b..0000000000 --- a/openpype/hosts/nuke/plugins/publish/validate_script.py +++ /dev/null @@ -1,124 +0,0 @@ -import pyblish.api - -from openpype import lib -from openpype.pipeline import legacy_io - - -@pyblish.api.log -class ValidateScript(pyblish.api.InstancePlugin): - """ Validates file output. """ - - order = pyblish.api.ValidatorOrder + 0.1 - families = ["workfile"] - label = "Check script settings" - hosts = ["nuke"] - optional = True - - def process(self, instance): - ctx_data = instance.context.data - asset_name = ctx_data["asset"] - asset = lib.get_asset(asset_name) - asset_data = asset["data"] - - # These attributes will be checked - attributes = [ - "fps", - "frameStart", - "frameEnd", - "resolutionWidth", - "resolutionHeight", - "handleStart", - "handleEnd" - ] - - # Value of these attributes can be found on parents - hierarchical_attributes = [ - "fps", - "resolutionWidth", - "resolutionHeight", - "pixelAspect", - "handleStart", - "handleEnd" - ] - - missing_attributes = [] - asset_attributes = {} - for attr in attributes: - if attr in asset_data: - asset_attributes[attr] = asset_data[attr] - - elif attr in hierarchical_attributes: - # Try to find fps on parent - parent = asset['parent'] - if asset_data['visualParent'] is not None: - parent = asset_data['visualParent'] - - value = self.check_parent_hierarchical(parent, attr) - if value is None: - missing_attributes.append(attr) - else: - asset_attributes[attr] = value - else: - missing_attributes.append(attr) - - # Raise error if attributes weren't found on asset in database - if len(missing_attributes) > 0: - atr = ", ".join(missing_attributes) - msg = 'Missing attributes "{}" in asset "{}"' - message = msg.format(atr, asset_name) - raise ValueError(message) - - # Get handles from database, Default is 0 (if not found) - handle_start = 0 - handle_end = 0 - if "handleStart" in asset_attributes: - handle_start = asset_attributes["handleStart"] - if "handleEnd" in asset_attributes: - handle_end = asset_attributes["handleEnd"] - - asset_attributes["fps"] = float("{0:.4f}".format( - asset_attributes["fps"])) - - # Get values from nukescript - script_attributes = { - "handleStart": ctx_data["handleStart"], - "handleEnd": ctx_data["handleEnd"], - "fps": float("{0:.4f}".format(ctx_data["fps"])), - "frameStart": ctx_data["frameStart"], - "frameEnd": ctx_data["frameEnd"], - "resolutionWidth": ctx_data["resolutionWidth"], - "resolutionHeight": ctx_data["resolutionHeight"], - "pixelAspect": ctx_data["pixelAspect"] - } - - # Compare asset's values Nukescript X Database - not_matching = [] - for attr in attributes: - self.log.debug("asset vs script attribute \"{}\": {}, {}".format( - attr, asset_attributes[attr], script_attributes[attr]) - ) - if asset_attributes[attr] != script_attributes[attr]: - not_matching.append(attr) - - # Raise error if not matching - if len(not_matching) > 0: - msg = "Attributes '{}' are not set correctly" - # Alert user that handles are set if Frame start/end not match - if ( - (("frameStart" in 
not_matching) or ("frameEnd" in not_matching)) and - ((handle_start > 0) or (handle_end > 0)) - ): - msg += " (`handle_start` are set to {})".format(handle_start) - msg += " (`handle_end` are set to {})".format(handle_end) - message = msg.format(", ".join(not_matching)) - raise ValueError(message) - - def check_parent_hierarchical(self, entityId, attr): - if entityId is None: - return None - entity = legacy_io.find_one({"_id": entityId}) - if attr in entity['data']: - self.log.info(attr) - return entity['data'][attr] - else: - return self.check_parent_hierarchical(entity['parent'], attr) diff --git a/openpype/hosts/nuke/plugins/publish/validate_script_attributes.py b/openpype/hosts/nuke/plugins/publish/validate_script_attributes.py new file mode 100644 index 0000000000..f0632f8080 --- /dev/null +++ b/openpype/hosts/nuke/plugins/publish/validate_script_attributes.py @@ -0,0 +1,127 @@ +from pprint import pformat +import pyblish.api + +from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import RepairAction +from openpype.hosts.nuke.api.lib import ( + get_avalon_knob_data, + WorkfileSettings +) +import nuke + + +@pyblish.api.log +class ValidateScriptAttributes(pyblish.api.InstancePlugin): + """ Validates file output. """ + + order = pyblish.api.ValidatorOrder + 0.1 + families = ["workfile"] + label = "Validatte script attributes" + hosts = ["nuke"] + optional = True + actions = [RepairAction] + + def process(self, instance): + root = nuke.root() + knob_data = get_avalon_knob_data(root) + asset = instance.data["assetEntity"] + # get asset data frame values + frame_start = asset["data"]["frameStart"] + frame_end = asset["data"]["frameEnd"] + handle_start = asset["data"]["handleStart"] + handle_end = asset["data"]["handleEnd"] + + # These attributes will be checked + attributes = [ + "fps", + "frameStart", + "frameEnd", + "resolutionWidth", + "resolutionHeight", + "handleStart", + "handleEnd" + ] + + # get only defined attributes from asset data + asset_attributes = { + attr: asset["data"][attr] + for attr in attributes + if attr in asset["data"] + } + # fix float to max 4 digints (only for evaluating) + fps_data = float("{0:.4f}".format( + asset_attributes["fps"])) + # fix frame values to include handles + asset_attributes.update({ + "frameStart": frame_start - handle_start, + "frameEnd": frame_end + handle_end, + "fps": fps_data + }) + + self.log.debug(pformat( + asset_attributes + )) + + # Get format + _format = root["format"].value() + + # Get values from nukescript + script_attributes = { + "handleStart": int(knob_data["handleStart"]), + "handleEnd": int(knob_data["handleEnd"]), + "fps": float("{0:.4f}".format(root['fps'].value())), + "frameStart": int(root["first_frame"].getValue()), + "frameEnd": int(root["last_frame"].getValue()), + "resolutionWidth": _format.width(), + "resolutionHeight": _format.height(), + "pixelAspect": _format.pixelAspect() + } + self.log.debug(pformat( + script_attributes + )) + + # Compare asset's values Nukescript X Database + not_matching = [] + for attr in attributes: + self.log.debug( + "Asset vs Script attribute \"{}\": {}, {}".format( + attr, + asset_attributes[attr], + script_attributes[attr] + ) + ) + if asset_attributes[attr] != script_attributes[attr]: + not_matching.append({ + "name": attr, + "expected": asset_attributes[attr], + "actual": script_attributes[attr] + }) + + # Raise error if not matching + if not_matching: + msg = "Following attributes are not set correctly: \n{}" + attrs_wrong_str = "\n".join([ + ( + 
"`{0}` is set to `{1}`, " + "but should be set to `{2}`" + ).format(at["name"], at["actual"], at["expected"]) + for at in not_matching + ]) + attrs_wrong_html = "
".join([ + ( + "-- __{0}__ is set to __{1}__, " + "but should be set to __{2}__" + ).format(at["name"], at["actual"], at["expected"]) + for at in not_matching + ]) + raise PublishXmlValidationError( + self, msg.format(attrs_wrong_str), + formatting_data={ + "failed_attributes": attrs_wrong_html + } + ) + + @classmethod + def repair(cls, instance): + cls.log.debug("__ repairing instance: {}".format(instance)) + WorkfileSettings().set_context_settings() diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py index 9fb57c1698..699526ef57 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py @@ -3,8 +3,9 @@ import toml import nuke import pyblish.api -import openpype.api + from openpype.pipeline import discover_creator_plugins +from openpype.pipeline.publish import RepairAction from openpype.hosts.nuke.api.lib import get_avalon_knob_data @@ -16,7 +17,7 @@ class ValidateWriteLegacy(pyblish.api.InstancePlugin): families = ["write"] label = "Validate Write Legacy" hosts = ["nuke"] - actions = [openpype.api.RepairAction] + actions = [RepairAction] def process(self, instance): node = instance[0] diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py b/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py index c0d5c8f402..3e2881f298 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py @@ -1,10 +1,11 @@ -import os import pyblish.api -import openpype.utils +from openpype.pipeline.publish import get_errored_instances_from_context from openpype.hosts.nuke.api.lib import ( get_write_node_template_attr, - get_node_path + set_node_knobs_from_settings, + color_gui_to_int ) +from openpype.pipeline import PublishXmlValidationError @pyblish.api.log @@ -14,18 +15,29 @@ class RepairNukeWriteNodeAction(pyblish.api.Action): icon = "wrench" def process(self, context, plugin): - instances = openpype.utils.filter_instances(context, plugin) + instances = get_errored_instances_from_context(context) for instance in instances: - node = instance[1] - correct_data = get_write_node_template_attr(node) - for k, v in correct_data.items(): - node[k].setValue(v) + write_group_node = instance[0] + # get write node from inside of group + write_node = None + for x in instance: + if x.Class() == "Write": + write_node = x + + correct_data = get_write_node_template_attr(write_group_node) + + set_node_knobs_from_settings(write_node, correct_data["knobs"]) + self.log.info("Node attributes were fixed") class ValidateNukeWriteNode(pyblish.api.InstancePlugin): - """ Validates file output. """ + """ Validate Write node's knobs. + + Compare knobs on write node inside the render group + with settings. At the moment supporting only `file` knob. 
+ """ order = pyblish.api.ValidatorOrder optional = True @@ -35,38 +47,75 @@ class ValidateNukeWriteNode(pyblish.api.InstancePlugin): hosts = ["nuke"] def process(self, instance): + write_group_node = instance[0] - node = instance[1] - correct_data = get_write_node_template_attr(node) + # get write node from inside of group + write_node = None + for x in instance: + if x.Class() == "Write": + write_node = x + + if write_node is None: + return + + correct_data = get_write_node_template_attr(write_group_node) + + if correct_data: + check_knobs = correct_data["knobs"] + else: + return check = [] - for k, v in correct_data.items(): - if k is 'file': - padding = len(v.split('#')) - ref_path = get_node_path(v, padding) - n_path = get_node_path(node[k].value(), padding) - isnt = False - for i, p in enumerate(ref_path): - if str(n_path[i]) not in str(p): - if not isnt: - isnt = True - else: - continue - if isnt: - check.append([k, v, node[k].value()]) + self.log.debug("__ write_node: {}".format( + write_node + )) + + for knob_data in check_knobs: + key = knob_data["name"] + value = knob_data["value"] + node_value = write_node[key].value() + + # fix type differences + if type(node_value) in (int, float): + try: + if isinstance(value, list): + value = color_gui_to_int(value) + else: + value = float(value) + node_value = float(node_value) + except ValueError: + value = str(value) else: - if str(node[k].value()) not in str(v): - check.append([k, v, node[k].value()]) + value = str(value) + node_value = str(node_value) + + self.log.debug("__ key: {} | value: {}".format( + key, value + )) + if ( + node_value != value + and key != "file" + and key != "tile_color" + ): + check.append([key, value, write_node[key].value()]) self.log.info(check) - msg = "Node's attribute `{0}` is not correct!\n" \ - "\nCorrect: `{1}` \n\nWrong: `{2}` \n\n" - if check: - print_msg = "" - for item in check: - print_msg += msg.format(item[0], item[1], item[2]) - print_msg += "`RMB` click to the validator and `A` to fix!" + self._make_error(check) - assert not check, print_msg + def _make_error(self, check): + # sourcery skip: merge-assign-and-aug-assign, move-assign-in-block + dbg_msg = "Write node's knobs values are not correct!\n" + msg_add = "Knob '{0}' > Correct: `{1}` > Wrong: `{2}`" + + details = [ + msg_add.format(item[0], item[1], item[2]) + for item in check + ] + xml_msg = "
".join(details) + dbg_msg += "\n\t".join(details) + + raise PublishXmlValidationError( + self, dbg_msg, formatting_data={"xml_msg": xml_msg} + ) diff --git a/openpype/hosts/nuke/startup/KnobScripter/__init__.py b/openpype/hosts/nuke/startup/KnobScripter/__init__.py deleted file mode 100644 index 8fe91d63f5..0000000000 --- a/openpype/hosts/nuke/startup/KnobScripter/__init__.py +++ /dev/null @@ -1 +0,0 @@ -import knob_scripter \ No newline at end of file diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_clearConsole.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_clearConsole.png deleted file mode 100644 index 75ac04ef84..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_clearConsole.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_download.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_download.png deleted file mode 100644 index 1e3e9b7631..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_download.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_exitnode.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_exitnode.png deleted file mode 100644 index 7714cd2b92..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_exitnode.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_pick.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_pick.png deleted file mode 100644 index 2395537550..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_pick.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_prefs.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_prefs.png deleted file mode 100644 index efef5ffc92..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_prefs.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_prefs2.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_prefs2.png deleted file mode 100644 index 5c3c941d59..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_prefs2.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_refresh.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_refresh.png deleted file mode 100644 index 559bfd74ab..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_refresh.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_run.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_run.png deleted file mode 100644 index 6b2e4ddc23..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_run.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_save.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_save.png deleted file mode 100644 index e29c667f34..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_save.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_search.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_search.png deleted file mode 100644 index d4ed2e1a2b..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_search.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_snippets.png b/openpype/hosts/nuke/startup/KnobScripter/icons/icon_snippets.png 
deleted file mode 100644 index 479c44f19e..0000000000 Binary files a/openpype/hosts/nuke/startup/KnobScripter/icons/icon_snippets.png and /dev/null differ diff --git a/openpype/hosts/nuke/startup/KnobScripter/knob_scripter.py b/openpype/hosts/nuke/startup/KnobScripter/knob_scripter.py deleted file mode 100644 index 368ee64e32..0000000000 --- a/openpype/hosts/nuke/startup/KnobScripter/knob_scripter.py +++ /dev/null @@ -1,4196 +0,0 @@ -# ------------------------------------------------- -# KnobScripter by Adrian Pueyo -# Complete python script editor for Nuke -# adrianpueyo.com, 2016-2019 -import string -import traceback -from webbrowser import open as openUrl -from threading import Event, Thread -import platform -import subprocess -from functools import partial -import re -import sys -from nukescripts import panels -import json -import os -import nuke -version = "2.3 wip" -date = "Aug 12 2019" -# ------------------------------------------------- - - -# Symlinks on windows... -if os.name == "nt": - def symlink_ms(source, link_name): - import ctypes - csl = ctypes.windll.kernel32.CreateSymbolicLinkW - csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32) - csl.restype = ctypes.c_ubyte - flags = 1 if os.path.isdir(source) else 0 - try: - if csl(link_name, source.replace('/', '\\'), flags) == 0: - raise ctypes.WinError() - except: - pass - os.symlink = symlink_ms - -try: - if nuke.NUKE_VERSION_MAJOR < 11: - from PySide import QtCore, QtGui, QtGui as QtWidgets - from PySide.QtCore import Qt - else: - from PySide2 import QtWidgets, QtGui, QtCore - from PySide2.QtCore import Qt -except ImportError: - from Qt import QtCore, QtGui, QtWidgets - -KS_DIR = os.path.dirname(__file__) -icons_path = KS_DIR + "/icons/" -DebugMode = False -AllKnobScripters = [] # All open instances at a given time - -PrefsPanel = "" -SnippetEditPanel = "" - -nuke.tprint('KnobScripter v{}, built {}.\nCopyright (c) 2016-2019 Adrian Pueyo. 
All Rights Reserved.'.format(version, date)) - - -class KnobScripter(QtWidgets.QWidget): - - def __init__(self, node="", knob="knobChanged"): - super(KnobScripter, self).__init__() - - # Autosave the other knobscripters and add this one - for ks in AllKnobScripters: - try: - ks.autosave() - except: - pass - if self not in AllKnobScripters: - AllKnobScripters.append(self) - - self.nodeMode = (node != "") - if node == "": - self.node = nuke.toNode("root") - else: - self.node = node - - self.isPane = False - self.knob = knob - # For the option to also display the knob labels on the knob dropdown - self.show_labels = False - self.unsavedKnobs = {} - self.modifiedKnobs = set() - self.scrollPos = {} - self.cursorPos = {} - self.fontSize = 10 - self.font = "Monospace" - self.tabSpaces = 4 - self.windowDefaultSize = [500, 300] - self.color_scheme = "sublime" # Can be nuke or sublime - self.pinned = 1 - self.toLoadKnob = True - self.frw_open = False # Find replace widget closed by default - self.icon_size = 17 - self.btn_size = 24 - self.qt_icon_size = QtCore.QSize(self.icon_size, self.icon_size) - self.qt_btn_size = QtCore.QSize(self.btn_size, self.btn_size) - self.origConsoleText = "" - self.nukeSE = self.findSE() - self.nukeSEOutput = self.findSEOutput(self.nukeSE) - self.nukeSEInput = self.findSEInput(self.nukeSE) - self.nukeSERunBtn = self.findSERunBtn(self.nukeSE) - - self.scripts_dir = os.path.expandvars( - os.path.expanduser("~/.nuke/KnobScripter_Scripts")) - self.current_folder = "scripts" - self.folder_index = 0 - self.current_script = "Untitled.py" - self.current_script_modified = False - self.script_index = 0 - self.toAutosave = False - - # Load prefs - self.prefs_txt = os.path.expandvars( - os.path.expanduser("~/.nuke/KnobScripter_Prefs.txt")) - self.loadedPrefs = self.loadPrefs() - if self.loadedPrefs != []: - try: - if "font_size" in self.loadedPrefs: - self.fontSize = self.loadedPrefs['font_size'] - self.windowDefaultSize = [ - self.loadedPrefs['window_default_w'], self.loadedPrefs['window_default_h']] - self.tabSpaces = self.loadedPrefs['tab_spaces'] - self.pinned = self.loadedPrefs['pin_default'] - if "font" in self.loadedPrefs: - self.font = self.loadedPrefs['font'] - if "color_scheme" in self.loadedPrefs: - self.color_scheme = self.loadedPrefs['color_scheme'] - if "show_labels" in self.loadedPrefs: - self.show_labels = self.loadedPrefs['show_labels'] - except TypeError: - log("KnobScripter: Failed to load preferences.") - - # Load snippets - self.snippets_txt_path = os.path.expandvars( - os.path.expanduser("~/.nuke/KnobScripter_Snippets.txt")) - self.snippets = self.loadSnippets(maxDepth=5) - - # Current state of script (loaded when exiting node mode) - self.state_txt_path = os.path.expandvars( - os.path.expanduser("~/.nuke/KnobScripter_State.txt")) - - # Init UI - self.initUI() - - # Talk to Nuke's Script Editor - self.setSEOutputEvent() # Make the output windowS listen! - self.clearConsole() - - def initUI(self): - ''' Initializes the tool UI''' - # ------------------- - # 1. MAIN WINDOW - # ------------------- - self.resize(self.windowDefaultSize[0], self.windowDefaultSize[1]) - self.setWindowTitle("KnobScripter - %s %s" % - (self.node.fullName(), self.knob)) - self.setObjectName("com.adrianpueyo.knobscripter") - self.move(QtGui.QCursor().pos() - QtCore.QPoint(32, 74)) - - # --------------------- - # 2. TOP BAR - # --------------------- - # --- - # 2.1. 
Left buttons - self.change_btn = QtWidgets.QToolButton() - # self.exit_node_btn.setIcon(QtGui.QIcon(KS_DIR+"/KnobScripter/icons/icons8-delete-26.png")) - self.change_btn.setIcon(QtGui.QIcon(icons_path + "icon_pick.png")) - self.change_btn.setIconSize(self.qt_icon_size) - self.change_btn.setFixedSize(self.qt_btn_size) - self.change_btn.setToolTip( - "Change to node if selected. Otherwise, change to Script Mode.") - self.change_btn.clicked.connect(self.changeClicked) - - # --- - # 2.2.A. Node mode UI - self.exit_node_btn = QtWidgets.QToolButton() - self.exit_node_btn.setIcon(QtGui.QIcon( - icons_path + "icon_exitnode.png")) - self.exit_node_btn.setIconSize(self.qt_icon_size) - self.exit_node_btn.setFixedSize(self.qt_btn_size) - self.exit_node_btn.setToolTip( - "Exit the node, and change to Script Mode.") - self.exit_node_btn.clicked.connect(self.exitNodeMode) - self.current_node_label_node = QtWidgets.QLabel(" Node:") - self.current_node_label_name = QtWidgets.QLabel(self.node.fullName()) - self.current_node_label_name.setStyleSheet("font-weight:bold;") - self.current_knob_label = QtWidgets.QLabel("Knob: ") - self.current_knob_dropdown = QtWidgets.QComboBox() - self.current_knob_dropdown.setSizeAdjustPolicy( - QtWidgets.QComboBox.AdjustToContents) - self.updateKnobDropdown() - self.current_knob_dropdown.currentIndexChanged.connect( - lambda: self.loadKnobValue(False, updateDict=True)) - - # Layout - self.node_mode_bar_layout = QtWidgets.QHBoxLayout() - self.node_mode_bar_layout.addWidget(self.exit_node_btn) - self.node_mode_bar_layout.addSpacing(2) - self.node_mode_bar_layout.addWidget(self.current_node_label_node) - self.node_mode_bar_layout.addWidget(self.current_node_label_name) - self.node_mode_bar_layout.addSpacing(2) - self.node_mode_bar_layout.addWidget(self.current_knob_dropdown) - self.node_mode_bar = QtWidgets.QWidget() - self.node_mode_bar.setLayout(self.node_mode_bar_layout) - - self.node_mode_bar_layout.setContentsMargins(0, 0, 0, 0) - - # --- - # 2.2.B. Script mode UI - self.script_label = QtWidgets.QLabel("Script: ") - - self.current_folder_dropdown = QtWidgets.QComboBox() - self.current_folder_dropdown.setSizeAdjustPolicy( - QtWidgets.QComboBox.AdjustToContents) - self.current_folder_dropdown.currentIndexChanged.connect( - self.folderDropdownChanged) - # self.current_folder_dropdown.setEditable(True) - # self.current_folder_dropdown.lineEdit().setReadOnly(True) - # self.current_folder_dropdown.lineEdit().setAlignment(Qt.AlignRight) - - self.current_script_dropdown = QtWidgets.QComboBox() - self.current_script_dropdown.setSizeAdjustPolicy( - QtWidgets.QComboBox.AdjustToContents) - self.updateFoldersDropdown() - self.updateScriptsDropdown() - self.current_script_dropdown.currentIndexChanged.connect( - self.scriptDropdownChanged) - - # Layout - self.script_mode_bar_layout = QtWidgets.QHBoxLayout() - self.script_mode_bar_layout.addWidget(self.script_label) - self.script_mode_bar_layout.addSpacing(2) - self.script_mode_bar_layout.addWidget(self.current_folder_dropdown) - self.script_mode_bar_layout.addWidget(self.current_script_dropdown) - self.script_mode_bar = QtWidgets.QWidget() - self.script_mode_bar.setLayout(self.script_mode_bar_layout) - - self.script_mode_bar_layout.setContentsMargins(0, 0, 0, 0) - - # --- - # 2.3. 
File-system buttons - # Refresh dropdowns - self.refresh_btn = QtWidgets.QToolButton() - self.refresh_btn.setIcon(QtGui.QIcon(icons_path + "icon_refresh.png")) - self.refresh_btn.setIconSize(QtCore.QSize(50, 50)) - self.refresh_btn.setIconSize(self.qt_icon_size) - self.refresh_btn.setFixedSize(self.qt_btn_size) - self.refresh_btn.setToolTip("Refresh the dropdowns.\nShortcut: F5") - self.refresh_btn.setShortcut('F5') - self.refresh_btn.clicked.connect(self.refreshClicked) - - # Reload script - self.reload_btn = QtWidgets.QToolButton() - self.reload_btn.setIcon(QtGui.QIcon(icons_path + "icon_download.png")) - self.reload_btn.setIconSize(QtCore.QSize(50, 50)) - self.reload_btn.setIconSize(self.qt_icon_size) - self.reload_btn.setFixedSize(self.qt_btn_size) - self.reload_btn.setToolTip( - "Reload the current script. Will overwrite any changes made to it.\nShortcut: Ctrl+R") - self.reload_btn.setShortcut('Ctrl+R') - self.reload_btn.clicked.connect(self.reloadClicked) - - # Save script - self.save_btn = QtWidgets.QToolButton() - self.save_btn.setIcon(QtGui.QIcon(icons_path + "icon_save.png")) - self.save_btn.setIconSize(QtCore.QSize(50, 50)) - self.save_btn.setIconSize(self.qt_icon_size) - self.save_btn.setFixedSize(self.qt_btn_size) - self.save_btn.setToolTip( - "Save the script into the selected knob or python file.\nShortcut: Ctrl+S") - self.save_btn.setShortcut('Ctrl+S') - self.save_btn.clicked.connect(self.saveClicked) - - # Layout - self.top_file_bar_layout = QtWidgets.QHBoxLayout() - self.top_file_bar_layout.addWidget(self.refresh_btn) - self.top_file_bar_layout.addWidget(self.reload_btn) - self.top_file_bar_layout.addWidget(self.save_btn) - - # --- - # 2.4. Right Side buttons - - # Run script - self.run_script_button = QtWidgets.QToolButton() - self.run_script_button.setIcon( - QtGui.QIcon(icons_path + "icon_run.png")) - self.run_script_button.setIconSize(self.qt_icon_size) - # self.run_script_button.setIconSize(self.qt_icon_size) - self.run_script_button.setFixedSize(self.qt_btn_size) - self.run_script_button.setToolTip( - "Execute the current selection on the KnobScripter, or the whole script if no selection.\nShortcut: Ctrl+Enter") - self.run_script_button.clicked.connect(self.runScript) - - # Clear console - self.clear_console_button = QtWidgets.QToolButton() - self.clear_console_button.setIcon( - QtGui.QIcon(icons_path + "icon_clearConsole.png")) - self.clear_console_button.setIconSize(QtCore.QSize(50, 50)) - self.clear_console_button.setIconSize(self.qt_icon_size) - self.clear_console_button.setFixedSize(self.qt_btn_size) - self.clear_console_button.setToolTip( - "Clear the text in the console window.\nShortcut: Click Backspace on the console.") - self.clear_console_button.clicked.connect(self.clearConsole) - - # FindReplace button - self.find_button = QtWidgets.QToolButton() - self.find_button.setIcon(QtGui.QIcon(icons_path + "icon_search.png")) - self.find_button.setIconSize(self.qt_icon_size) - self.find_button.setFixedSize(self.qt_btn_size) - self.find_button.setToolTip( - "Call the snippets by writing the shortcut and pressing Tab.\nShortcut: Ctrl+F") - self.find_button.setShortcut('Ctrl+F') - #self.find_button.setMaximumWidth(self.find_button.fontMetrics().boundingRect("Find").width() + 20) - self.find_button.setCheckable(True) - self.find_button.setFocusPolicy(QtCore.Qt.NoFocus) - self.find_button.clicked[bool].connect(self.toggleFRW) - if self.frw_open: - self.find_button.toggle() - - # Snippets - self.snippets_button = QtWidgets.QToolButton() - self.snippets_button.setIcon( 
- QtGui.QIcon(icons_path + "icon_snippets.png")) - self.snippets_button.setIconSize(QtCore.QSize(50, 50)) - self.snippets_button.setIconSize(self.qt_icon_size) - self.snippets_button.setFixedSize(self.qt_btn_size) - self.snippets_button.setToolTip( - "Call the snippets by writing the shortcut and pressing Tab.") - self.snippets_button.clicked.connect(self.openSnippets) - - # PIN - ''' - self.pin_button = QtWidgets.QPushButton("P") - self.pin_button.setCheckable(True) - if self.pinned: - self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint) - self.pin_button.toggle() - self.pin_button.setToolTip("Toggle 'Always On Top'. Keeps the KnobScripter on top of all other windows.") - self.pin_button.setFocusPolicy(QtCore.Qt.NoFocus) - self.pin_button.setFixedSize(self.qt_btn_size) - self.pin_button.clicked[bool].connect(self.pin) - ''' - - # Prefs - self.createPrefsMenu() - self.prefs_button = QtWidgets.QPushButton() - self.prefs_button.setIcon(QtGui.QIcon(icons_path + "icon_prefs.png")) - self.prefs_button.setIconSize(self.qt_icon_size) - self.prefs_button.setFixedSize( - QtCore.QSize(self.btn_size + 10, self.btn_size)) - # self.prefs_button.clicked.connect(self.openPrefs) - self.prefs_button.setMenu(self.prefsMenu) - self.prefs_button.setStyleSheet("text-align:left;padding-left:2px;") - #self.prefs_button.setMaximumWidth(self.prefs_button.fontMetrics().boundingRect("Prefs").width() + 12) - - # Layout - self.top_right_bar_layout = QtWidgets.QHBoxLayout() - self.top_right_bar_layout.addWidget(self.run_script_button) - self.top_right_bar_layout.addWidget(self.clear_console_button) - self.top_right_bar_layout.addWidget(self.find_button) - # self.top_right_bar_layout.addWidget(self.snippets_button) - # self.top_right_bar_layout.addWidget(self.pin_button) - # self.top_right_bar_layout.addSpacing(10) - self.top_right_bar_layout.addWidget(self.prefs_button) - - # --- - # Layout - self.top_layout = QtWidgets.QHBoxLayout() - self.top_layout.setContentsMargins(0, 0, 0, 0) - # self.top_layout.setSpacing(10) - self.top_layout.addWidget(self.change_btn) - self.top_layout.addWidget(self.node_mode_bar) - self.top_layout.addWidget(self.script_mode_bar) - self.node_mode_bar.setVisible(False) - # self.top_layout.addSpacing(10) - self.top_layout.addLayout(self.top_file_bar_layout) - self.top_layout.addStretch() - self.top_layout.addLayout(self.top_right_bar_layout) - - # ---------------------- - # 3. 
SCRIPTING SECTION - # ---------------------- - # Splitter - self.splitter = QtWidgets.QSplitter(Qt.Vertical) - - # Output widget - self.script_output = ScriptOutputWidget(parent=self) - self.script_output.setReadOnly(1) - self.script_output.setAcceptRichText(0) - self.script_output.setTabStopWidth( - self.script_output.tabStopWidth() / 4) - self.script_output.setFocusPolicy(Qt.ClickFocus) - self.script_output.setAutoFillBackground(0) - self.script_output.installEventFilter(self) - - # Script Editor - self.script_editor = KnobScripterTextEditMain(self, self.script_output) - self.script_editor.setMinimumHeight(30) - self.script_editor.setStyleSheet( - 'background:#282828;color:#EEE;') # Main Colors - self.script_editor.textChanged.connect(self.setModified) - self.highlighter = KSScriptEditorHighlighter( - self.script_editor.document(), self) - self.script_editor.cursorPositionChanged.connect(self.setTextSelection) - self.script_editor_font = QtGui.QFont() - self.script_editor_font.setFamily(self.font) - self.script_editor_font.setStyleHint(QtGui.QFont.Monospace) - self.script_editor_font.setFixedPitch(True) - self.script_editor_font.setPointSize(self.fontSize) - self.script_editor.setFont(self.script_editor_font) - self.script_editor.setTabStopWidth( - self.tabSpaces * QtGui.QFontMetrics(self.script_editor_font).width(' ')) - - # Add input and output to splitter - self.splitter.addWidget(self.script_output) - self.splitter.addWidget(self.script_editor) - self.splitter.setStretchFactor(0, 0) - - # FindReplace widget - self.frw = FindReplaceWidget(self) - self.frw.setVisible(self.frw_open) - - # --- - # Layout - self.scripting_layout = QtWidgets.QVBoxLayout() - self.scripting_layout.setContentsMargins(0, 0, 0, 0) - self.scripting_layout.setSpacing(0) - self.scripting_layout.addWidget(self.splitter) - self.scripting_layout.addWidget(self.frw) - - # --------------- - # MASTER LAYOUT - # --------------- - self.master_layout = QtWidgets.QVBoxLayout() - self.master_layout.setSpacing(5) - self.master_layout.setContentsMargins(8, 8, 8, 8) - self.master_layout.addLayout(self.top_layout) - self.master_layout.addLayout(self.scripting_layout) - # self.master_layout.addLayout(self.bottom_layout) - self.setLayout(self.master_layout) - - # ---------------- - # MAIN WINDOW UI - # ---------------- - size_policy = QtWidgets.QSizePolicy( - QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) - self.setSizePolicy(size_policy) - self.setMinimumWidth(160) - - if self.pinned: - self.setWindowFlags(self.windowFlags() | - QtCore.Qt.WindowStaysOnTopHint) - - # Set default values based on mode - if self.nodeMode: - self.current_knob_dropdown.blockSignals(True) - self.node_mode_bar.setVisible(True) - self.script_mode_bar.setVisible(False) - self.setCurrentKnob(self.knob) - self.loadKnobValue(check=False) - self.setKnobModified(False) - self.current_knob_dropdown.blockSignals(False) - self.splitter.setSizes([0, 1]) - else: - self.exitNodeMode() - self.script_editor.setFocus() - - # Preferences submenus - def createPrefsMenu(self): - - # Actions - self.echoAct = QtWidgets.QAction("Echo python commands", self, checkable=True, - statusTip="Toggle nuke's 'Echo all python commands to ScriptEditor'", triggered=self.toggleEcho) - if nuke.toNode("preferences").knob("echoAllCommands").value(): - self.echoAct.toggle() - self.pinAct = QtWidgets.QAction("Always on top", self, checkable=True, - statusTip="Keeps the KnobScripter window always on top or not.", triggered=self.togglePin) - if self.pinned: - 
self.setWindowFlags(self.windowFlags() | - QtCore.Qt.WindowStaysOnTopHint) - self.pinAct.toggle() - self.helpAct = QtWidgets.QAction( - "&Help", self, statusTip="Open the KnobScripter help in your browser.", shortcut="F1", triggered=self.showHelp) - self.nukepediaAct = QtWidgets.QAction( - "Show in Nukepedia", self, statusTip="Open the KnobScripter download page on Nukepedia.", triggered=self.showInNukepedia) - self.githubAct = QtWidgets.QAction( - "Show in GitHub", self, statusTip="Open the KnobScripter repo on GitHub.", triggered=self.showInGithub) - self.snippetsAct = QtWidgets.QAction( - "Snippets", self, statusTip="Open the Snippets editor.", triggered=self.openSnippets) - self.snippetsAct.setIcon(QtGui.QIcon(icons_path + "icon_snippets.png")) - # self.snippetsAct = QtWidgets.QAction("Keywords", self, statusTip="Add custom keywords.", triggered=self.openSnippets) #TODO THIS - self.prefsAct = QtWidgets.QAction( - "Preferences", self, statusTip="Open the Preferences panel.", triggered=self.openPrefs) - self.prefsAct.setIcon(QtGui.QIcon(icons_path + "icon_prefs.png")) - - # Menus - self.prefsMenu = QtWidgets.QMenu("Preferences") - self.prefsMenu.addAction(self.echoAct) - self.prefsMenu.addAction(self.pinAct) - self.prefsMenu.addSeparator() - self.prefsMenu.addAction(self.nukepediaAct) - self.prefsMenu.addAction(self.githubAct) - self.prefsMenu.addSeparator() - self.prefsMenu.addAction(self.helpAct) - self.prefsMenu.addSeparator() - self.prefsMenu.addAction(self.snippetsAct) - self.prefsMenu.addAction(self.prefsAct) - - def initEcho(self): - ''' Initializes the echo chechable QAction based on nuke's state ''' - echo_knob = nuke.toNode("preferences").knob("echoAllCommands") - self.echoAct.setChecked(echo_knob.value()) - - def toggleEcho(self): - ''' Toggle the "Echo python commands" from Nuke ''' - echo_knob = nuke.toNode("preferences").knob("echoAllCommands") - echo_knob.setValue(self.echoAct.isChecked()) - - def togglePin(self): - ''' Toggle "always on top" based on the submenu button ''' - self.pin(self.pinAct.isChecked()) - - def showInNukepedia(self): - openUrl("http://www.nukepedia.com/python/ui/knobscripter") - - def showInGithub(self): - openUrl("https://github.com/adrianpueyo/KnobScripter") - - def showHelp(self): - openUrl("https://vimeo.com/adrianpueyo/knobscripter2") - - # Node Mode - - def updateKnobDropdown(self): - ''' Populate knob dropdown list ''' - self.current_knob_dropdown.clear() # First remove all items - defaultKnobs = ["knobChanged", "onCreate", "onScriptLoad", "onScriptSave", "onScriptClose", "onDestroy", - "updateUI", "autolabel", "beforeRender", "beforeFrameRender", "afterFrameRender", "afterRender"] - permittedKnobClasses = ["PyScript_Knob", "PythonCustomKnob"] - counter = 0 - for i in self.node.knobs(): - if i not in defaultKnobs and self.node.knob(i).Class() in permittedKnobClasses: - if self.show_labels: - i_full = "{} ({})".format(self.node.knob(i).label(), i) - else: - i_full = i - - if i in self.unsavedKnobs.keys(): - self.current_knob_dropdown.addItem(i_full + "(*)", i) - else: - self.current_knob_dropdown.addItem(i_full, i) - - counter += 1 - if counter > 0: - self.current_knob_dropdown.insertSeparator(counter) - counter += 1 - self.current_knob_dropdown.insertSeparator(counter) - counter += 1 - for i in self.node.knobs(): - if i in defaultKnobs: - if i in self.unsavedKnobs.keys(): - self.current_knob_dropdown.addItem(i + "(*)", i) - else: - self.current_knob_dropdown.addItem(i, i) - counter += 1 - return - - def loadKnobValue(self, check=True, 
updateDict=False): - ''' Get the content of the knob value and populate the editor ''' - if self.toLoadKnob == False: - return - dropdown_value = self.current_knob_dropdown.itemData( - self.current_knob_dropdown.currentIndex()) # knobChanged... - try: - obtained_knobValue = str(self.node[dropdown_value].value()) - obtained_scrollValue = 0 - edited_knobValue = self.script_editor.toPlainText() - except: - error_message = QtWidgets.QMessageBox.information( - None, "", "Unable to find %s.%s" % (self.node.name(), dropdown_value)) - error_message.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - error_message.exec_() - return - # If there were changes to the previous knob, update the dictionary - if updateDict == True: - self.unsavedKnobs[self.knob] = edited_knobValue - self.scrollPos[self.knob] = self.script_editor.verticalScrollBar( - ).value() - prev_knob = self.knob # knobChanged... - - self.knob = self.current_knob_dropdown.itemData( - self.current_knob_dropdown.currentIndex()) # knobChanged... - - if check and obtained_knobValue != edited_knobValue: - msgBox = QtWidgets.QMessageBox() - msgBox.setText("The Script Editor has been modified.") - msgBox.setInformativeText( - "Do you want to overwrite the current code on this editor?") - msgBox.setStandardButtons( - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) - msgBox.setIcon(QtWidgets.QMessageBox.Question) - msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) - reply = msgBox.exec_() - if reply == QtWidgets.QMessageBox.No: - self.setCurrentKnob(prev_knob) - return - # If order comes from a dropdown update, update value from dictionary if possible, otherwise update normally - self.setWindowTitle("KnobScripter - %s %s" % - (self.node.name(), self.knob)) - if updateDict: - if self.knob in self.unsavedKnobs: - if self.unsavedKnobs[self.knob] == obtained_knobValue: - self.script_editor.setPlainText(obtained_knobValue) - self.setKnobModified(False) - else: - obtained_knobValue = self.unsavedKnobs[self.knob] - self.script_editor.setPlainText(obtained_knobValue) - self.setKnobModified(True) - else: - self.script_editor.setPlainText(obtained_knobValue) - self.setKnobModified(False) - - if self.knob in self.scrollPos: - obtained_scrollValue = self.scrollPos[self.knob] - else: - self.script_editor.setPlainText(obtained_knobValue) - - cursor = self.script_editor.textCursor() - self.script_editor.setTextCursor(cursor) - self.script_editor.verticalScrollBar().setValue(obtained_scrollValue) - return - - def loadAllKnobValues(self): - ''' Load all knobs button's function ''' - if len(self.unsavedKnobs) >= 1: - msgBox = QtWidgets.QMessageBox() - msgBox.setText( - "Do you want to reload all python and callback knobs?") - msgBox.setInformativeText( - "Unsaved changes on this editor will be lost.") - msgBox.setStandardButtons( - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) - msgBox.setIcon(QtWidgets.QMessageBox.Question) - msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) - reply = msgBox.exec_() - if reply == QtWidgets.QMessageBox.No: - return - self.unsavedKnobs = {} - return - - def saveKnobValue(self, check=True): - ''' Save the text from the editor to the node's knobChanged knob ''' - dropdown_value = self.current_knob_dropdown.itemData( - self.current_knob_dropdown.currentIndex()) - try: - obtained_knobValue = str(self.node[dropdown_value].value()) - self.knob = dropdown_value - except: - error_message = 
QtWidgets.QMessageBox.information( - None, "", "Unable to find %s.%s" % (self.node.name(), dropdown_value)) - error_message.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - error_message.exec_() - return - edited_knobValue = self.script_editor.toPlainText() - if check and obtained_knobValue != edited_knobValue: - msgBox = QtWidgets.QMessageBox() - msgBox.setText("Do you want to overwrite %s.%s?" % - (self.node.name(), dropdown_value)) - msgBox.setStandardButtons( - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) - msgBox.setIcon(QtWidgets.QMessageBox.Question) - msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) - reply = msgBox.exec_() - if reply == QtWidgets.QMessageBox.No: - return - self.node[dropdown_value].setValue(edited_knobValue) - self.setKnobModified( - modified=False, knob=dropdown_value, changeTitle=True) - nuke.tcl("modified 1") - if self.knob in self.unsavedKnobs: - del self.unsavedKnobs[self.knob] - return - - def saveAllKnobValues(self, check=True): - ''' Save all knobs button's function ''' - if self.updateUnsavedKnobs() > 0 and check: - msgBox = QtWidgets.QMessageBox() - msgBox.setText( - "Do you want to save all modified python and callback knobs?") - msgBox.setStandardButtons( - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) - msgBox.setIcon(QtWidgets.QMessageBox.Question) - msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) - reply = msgBox.exec_() - if reply == QtWidgets.QMessageBox.No: - return - saveErrors = 0 - savedCount = 0 - for k in self.unsavedKnobs.copy(): - try: - self.node.knob(k).setValue(self.unsavedKnobs[k]) - del self.unsavedKnobs[k] - savedCount += 1 - nuke.tcl("modified 1") - except: - saveErrors += 1 - if saveErrors > 0: - errorBox = QtWidgets.QMessageBox() - errorBox.setText("Error saving %s knob%s." % - (str(saveErrors), int(saveErrors > 1) * "s")) - errorBox.setIcon(QtWidgets.QMessageBox.Warning) - errorBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - errorBox.setDefaultButton(QtWidgets.QMessageBox.Yes) - reply = errorBox.exec_() - else: - log("KnobScripter: %s knobs saved" % str(savedCount)) - return - - def setCurrentKnob(self, knobToSet): - ''' Set current knob ''' - KnobDropdownItems = [] - for i in range(self.current_knob_dropdown.count()): - if self.current_knob_dropdown.itemData(i) is not None: - KnobDropdownItems.append( - self.current_knob_dropdown.itemData(i)) - else: - KnobDropdownItems.append("---") - if knobToSet in KnobDropdownItems: - index = KnobDropdownItems.index(knobToSet) - self.current_knob_dropdown.setCurrentIndex(index) - return - - def updateUnsavedKnobs(self, first_time=False): - ''' Clear unchanged knobs from the dict and return the number of unsaved knobs ''' - if not self.node: - # Node has been deleted, so simply return 0. Who cares. - return 0 - edited_knobValue = self.script_editor.toPlainText() - self.unsavedKnobs[self.knob] = edited_knobValue - if len(self.unsavedKnobs) > 0: - for k in self.unsavedKnobs.copy(): - if self.node.knob(k): - if str(self.node.knob(k).value()) == str(self.unsavedKnobs[k]): - del self.unsavedKnobs[k] - else: - del self.unsavedKnobs[k] - # Set appropriate knobs modified... 
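# A minimal illustrative sketch of the core of saveKnobValue() above (separate
# from the removed code): write the editor text into a node's Python/callback
# knob and flag the Nuke script as modified. "Blur1" and the knob text below
# are placeholders for illustration only.
import nuke

node = nuke.toNode("Blur1")
if node is not None and "knobChanged" in node.knobs():
    node["knobChanged"].setValue("print('knob changed')")
    nuke.tcl("modified 1")  # same call the tool uses to mark unsaved changes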
- knobs_dropdown = self.current_knob_dropdown - all_knobs = [knobs_dropdown.itemData(i) - for i in range(knobs_dropdown.count())] - for key in all_knobs: - if key in self.unsavedKnobs.keys(): - self.setKnobModified( - modified=True, knob=key, changeTitle=False) - else: - self.setKnobModified( - modified=False, knob=key, changeTitle=False) - - return len(self.unsavedKnobs) - - def setKnobModified(self, modified=True, knob="", changeTitle=True): - ''' Sets the current knob modified, title and whatever else we need ''' - if knob == "": - knob = self.knob - if modified: - self.modifiedKnobs.add(knob) - else: - self.modifiedKnobs.discard(knob) - - if changeTitle: - title_modified_string = " [modified]" - windowTitle = self.windowTitle().split(title_modified_string)[0] - if modified == True: - windowTitle += title_modified_string - self.setWindowTitle(windowTitle) - - try: - knobs_dropdown = self.current_knob_dropdown - kd_index = knobs_dropdown.currentIndex() - kd_data = knobs_dropdown.itemData(kd_index) - if self.show_labels and i not in defaultKnobs: - kd_data = "{} ({})".format( - self.node.knob(kd_data).label(), kd_data) - if modified == False: - knobs_dropdown.setItemText(kd_index, kd_data) - else: - knobs_dropdown.setItemText(kd_index, kd_data + "(*)") - except: - pass - - # Script Mode - def updateFoldersDropdown(self): - ''' Populate folders dropdown list ''' - self.current_folder_dropdown.blockSignals(True) - self.current_folder_dropdown.clear() # First remove all items - defaultFolders = ["scripts"] - scriptFolders = [] - counter = 0 - for f in defaultFolders: - self.makeScriptFolder(f) - self.current_folder_dropdown.addItem(f + "/", f) - counter += 1 - - try: - scriptFolders = sorted([f for f in os.listdir(self.scripts_dir) if os.path.isdir( - os.path.join(self.scripts_dir, f))]) # Accepts symlinks!!! 
- except: - log("Couldn't read any script folders.") - - for f in scriptFolders: - fname = f.split("/")[-1] - if fname in defaultFolders: - continue - self.current_folder_dropdown.addItem(fname + "/", fname) - counter += 1 - - # print scriptFolders - if counter > 0: - self.current_folder_dropdown.insertSeparator(counter) - counter += 1 - # self.current_folder_dropdown.insertSeparator(counter) - #counter += 1 - self.current_folder_dropdown.addItem("New", "create new") - self.current_folder_dropdown.addItem("Open...", "open in browser") - self.current_folder_dropdown.addItem("Add custom", "add custom path") - self.folder_index = self.current_folder_dropdown.currentIndex() - self.current_folder = self.current_folder_dropdown.itemData( - self.folder_index) - self.current_folder_dropdown.blockSignals(False) - return - - def updateScriptsDropdown(self): - ''' Populate py scripts dropdown list ''' - self.current_script_dropdown.blockSignals(True) - self.current_script_dropdown.clear() # First remove all items - QtWidgets.QApplication.processEvents() - log("# Updating scripts dropdown...") - log("scripts dir:" + self.scripts_dir) - log("current folder:" + self.current_folder) - log("previous current script:" + self.current_script) - #current_folder = self.current_folder_dropdown.itemData(self.current_folder_dropdown.currentIndex()) - current_folder_path = os.path.join( - self.scripts_dir, self.current_folder) - defaultScripts = ["Untitled.py"] - found_scripts = [] - counter = 0 - # All files and folders inside of the folder - dir_list = os.listdir(current_folder_path) - try: - found_scripts = sorted([f for f in dir_list if f.endswith(".py")]) - found_temp_scripts = [ - f for f in dir_list if f.endswith(".py.autosave")] - except: - log("Couldn't find any scripts in the selected folder.") - if not len(found_scripts): - for s in defaultScripts: - if s + ".autosave" in found_temp_scripts: - self.current_script_dropdown.addItem(s + "(*)", s) - else: - self.current_script_dropdown.addItem(s, s) - counter += 1 - else: - for s in defaultScripts: - if s + ".autosave" in found_temp_scripts: - self.current_script_dropdown.addItem(s + "(*)", s) - elif s in found_scripts: - self.current_script_dropdown.addItem(s, s) - for s in found_scripts: - if s in defaultScripts: - continue - sname = s.split("/")[-1] - if s + ".autosave" in found_temp_scripts: - self.current_script_dropdown.addItem(sname + "(*)", sname) - else: - self.current_script_dropdown.addItem(sname, sname) - counter += 1 - # else: #Add the found scripts to the dropdown - if counter > 0: - counter += 1 - self.current_script_dropdown.insertSeparator(counter) - counter += 1 - self.current_script_dropdown.insertSeparator(counter) - self.current_script_dropdown.addItem("New", "create new") - self.current_script_dropdown.addItem("Duplicate", "create duplicate") - self.current_script_dropdown.addItem("Delete", "delete script") - self.current_script_dropdown.addItem("Open", "open in browser") - #self.script_index = self.current_script_dropdown.currentIndex() - self.script_index = 0 - self.current_script = self.current_script_dropdown.itemData( - self.script_index) - log("Finished updating scripts dropdown.") - log("current_script:" + self.current_script) - self.current_script_dropdown.blockSignals(False) - return - - def makeScriptFolder(self, name="scripts"): - folder_path = os.path.join(self.scripts_dir, name) - if not os.path.exists(folder_path): - try: - os.makedirs(folder_path) - return True - except: - print "Couldn't create the scripting 
folders.\nPlease check your OS write permissions." - return False - - def makeScriptFile(self, name="Untitled.py", folder="scripts", empty=True): - script_path = os.path.join(self.scripts_dir, self.current_folder, name) - if not os.path.isfile(script_path): - try: - self.current_script_file = open(script_path, 'w') - return True - except: - print "Couldn't create the scripting folders.\nPlease check your OS write permissions." - return False - - def setCurrentFolder(self, folderName): - ''' Set current folder ON THE DROPDOWN ONLY''' - folderList = [self.current_folder_dropdown.itemData( - i) for i in range(self.current_folder_dropdown.count())] - if folderName in folderList: - index = folderList.index(folderName) - self.current_folder_dropdown.setCurrentIndex(index) - self.current_folder = folderName - self.folder_index = self.current_folder_dropdown.currentIndex() - self.current_folder = self.current_folder_dropdown.itemData( - self.folder_index) - return - - def setCurrentScript(self, scriptName): - ''' Set current script ON THE DROPDOWN ONLY ''' - scriptList = [self.current_script_dropdown.itemData( - i) for i in range(self.current_script_dropdown.count())] - if scriptName in scriptList: - index = scriptList.index(scriptName) - self.current_script_dropdown.setCurrentIndex(index) - self.current_script = scriptName - self.script_index = self.current_script_dropdown.currentIndex() - self.current_script = self.current_script_dropdown.itemData( - self.script_index) - return - - def loadScriptContents(self, check=False, pyOnly=False, folder=""): - ''' Get the contents of the selected script and populate the editor ''' - log("# About to load script contents now.") - obtained_scrollValue = 0 - obtained_cursorPosValue = [0, 0] # Position, anchor - if folder == "": - folder = self.current_folder - script_path = os.path.join( - self.scripts_dir, folder, self.current_script) - script_path_temp = script_path + ".autosave" - if (self.current_folder + "/" + self.current_script) in self.scrollPos: - obtained_scrollValue = self.scrollPos[self.current_folder + - "/" + self.current_script] - if (self.current_folder + "/" + self.current_script) in self.cursorPos: - obtained_cursorPosValue = self.cursorPos[self.current_folder + - "/" + self.current_script] - - # 1: If autosave exists and pyOnly is false, load it - if os.path.isfile(script_path_temp) and not pyOnly: - log("Loading .py.autosave file\n---") - with open(script_path_temp, 'r') as script: - content = script.read() - self.script_editor.setPlainText(content) - self.setScriptModified(True) - self.script_editor.verticalScrollBar().setValue(obtained_scrollValue) - - # 2: Try to load the .py as first priority, if it exists - elif os.path.isfile(script_path): - log("Loading .py file\n---") - with open(script_path, 'r') as script: - content = script.read() - current_text = self.script_editor.toPlainText().encode("utf8") - if check and current_text != content and current_text.strip() != "": - msgBox = QtWidgets.QMessageBox() - msgBox.setText("The script has been modified.") - msgBox.setInformativeText( - "Do you want to overwrite the current code on this editor?") - msgBox.setStandardButtons( - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) - msgBox.setIcon(QtWidgets.QMessageBox.Question) - msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) - reply = msgBox.exec_() - if reply == QtWidgets.QMessageBox.No: - return - # Clear trash - if os.path.isfile(script_path_temp): - 
os.remove(script_path_temp) - log("Removed " + script_path_temp) - self.setScriptModified(False) - self.script_editor.setPlainText(content) - self.script_editor.verticalScrollBar().setValue(obtained_scrollValue) - self.setScriptModified(False) - self.loadScriptState() - self.setScriptState() - - # 3: If .py doesn't exist... only then stick to the autosave - elif os.path.isfile(script_path_temp): - with open(script_path_temp, 'r') as script: - content = script.read() - - msgBox = QtWidgets.QMessageBox() - msgBox.setText("The .py file hasn't been found.") - msgBox.setInformativeText( - "Do you want to clear the current code on this editor?") - msgBox.setStandardButtons( - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) - msgBox.setIcon(QtWidgets.QMessageBox.Question) - msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) - reply = msgBox.exec_() - if reply == QtWidgets.QMessageBox.No: - return - - # Clear trash - os.remove(script_path_temp) - log("Removed " + script_path_temp) - self.script_editor.setPlainText("") - self.updateScriptsDropdown() - self.loadScriptContents(check=False) - self.loadScriptState() - self.setScriptState() - - else: - content = "" - self.script_editor.setPlainText(content) - self.setScriptModified(False) - if self.current_folder + "/" + self.current_script in self.scrollPos: - del self.scrollPos[self.current_folder + - "/" + self.current_script] - if self.current_folder + "/" + self.current_script in self.cursorPos: - del self.cursorPos[self.current_folder + - "/" + self.current_script] - - self.setWindowTitle("KnobScripter - %s/%s" % - (self.current_folder, self.current_script)) - return - - def saveScriptContents(self, temp=True): - ''' Save the current contents of the editor into the python file. If temp == True, saves a .py.autosave file ''' - log("\n# About to save script contents now.") - log("Temp mode is: " + str(temp)) - log("self.current_folder: " + self.current_folder) - log("self.current_script: " + self.current_script) - script_path = os.path.join( - self.scripts_dir, self.current_folder, self.current_script) - script_path_temp = script_path + ".autosave" - orig_content = "" - content = self.script_editor.toPlainText().encode('utf8') - - if temp == True: - if os.path.isfile(script_path): - with open(script_path, 'r') as script: - orig_content = script.read() - # If script path doesn't exist and autosave does but the script is empty... 
- elif content == "" and os.path.isfile(script_path_temp): - os.remove(script_path_temp) - return - if content != orig_content: - with open(script_path_temp, 'w') as script: - script.write(content) - else: - if os.path.isfile(script_path_temp): - os.remove(script_path_temp) - log("Nothing to save") - return - else: - with open(script_path, 'w') as script: - script.write(self.script_editor.toPlainText().encode('utf8')) - # Clear trash - if os.path.isfile(script_path_temp): - os.remove(script_path_temp) - log("Removed " + script_path_temp) - self.setScriptModified(False) - self.saveScrollValue() - self.saveCursorPosValue() - log("Saved " + script_path + "\n---") - return - - def deleteScript(self, check=True, folder=""): - ''' Get the contents of the selected script and populate the editor ''' - log("# About to delete the .py and/or autosave script now.") - if folder == "": - folder = self.current_folder - script_path = os.path.join( - self.scripts_dir, folder, self.current_script) - script_path_temp = script_path + ".autosave" - if check: - msgBox = QtWidgets.QMessageBox() - msgBox.setText("You're about to delete this script.") - msgBox.setInformativeText( - "Are you sure you want to delete {}?".format(self.current_script)) - msgBox.setStandardButtons( - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) - msgBox.setIcon(QtWidgets.QMessageBox.Question) - msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - msgBox.setDefaultButton(QtWidgets.QMessageBox.No) - reply = msgBox.exec_() - if reply == QtWidgets.QMessageBox.No: - return False - - if os.path.isfile(script_path_temp): - os.remove(script_path_temp) - log("Removed " + script_path_temp) - - if os.path.isfile(script_path): - os.remove(script_path) - log("Removed " + script_path) - - return True - - def folderDropdownChanged(self): - '''Executed when the current folder dropdown is changed''' - self.saveScriptState() - log("# folder dropdown changed") - folders_dropdown = self.current_folder_dropdown - fd_value = folders_dropdown.currentText() - fd_index = folders_dropdown.currentIndex() - fd_data = folders_dropdown.itemData(fd_index) - if fd_data == "create new": - panel = FileNameDialog(self, mode="folder") - # panel.setWidth(260) - # panel.addSingleLineInput("Name:","") - if panel.exec_(): - # Accepted - folder_name = panel.text - if os.path.isdir(os.path.join(self.scripts_dir, folder_name)): - self.messageBox("Folder already exists.") - self.setCurrentFolder(self.current_folder) - if self.makeScriptFolder(name=folder_name): - self.saveScriptContents(temp=True) - # Success creating the folder - self.current_folder = folder_name - self.updateFoldersDropdown() - self.setCurrentFolder(folder_name) - self.updateScriptsDropdown() - self.loadScriptContents(check=False) - else: - self.messageBox("There was a problem creating the folder.") - self.current_folder_dropdown.blockSignals(True) - self.current_folder_dropdown.setCurrentIndex( - self.folder_index) - self.current_folder_dropdown.blockSignals(False) - else: - # Canceled/rejected - self.current_folder_dropdown.blockSignals(True) - self.current_folder_dropdown.setCurrentIndex(self.folder_index) - self.current_folder_dropdown.blockSignals(False) - return - - elif fd_data == "open in browser": - current_folder_path = os.path.join( - self.scripts_dir, self.current_folder) - self.openInFileBrowser(current_folder_path) - self.current_folder_dropdown.blockSignals(True) - self.current_folder_dropdown.setCurrentIndex(self.folder_index) - self.current_folder_dropdown.blockSignals(False) - 
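# A minimal illustrative sketch of the folder-creation path behind the "New"
# entry handled above (makeScriptFolder() earlier in this file); the directory
# names below are placeholders for self.scripts_dir and the user's input.
import os

scripts_dir = "/path/to/scripts"  # placeholder for self.scripts_dir
folder_path = os.path.join(scripts_dir, "my_folder")
if not os.path.isdir(folder_path):
    os.makedirs(folder_path)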
return - - elif fd_data == "add custom path": - folder_path = nuke.getFilename('Select custom folder.') - if folder_path is not None: - if folder_path.endswith("/"): - aliasName = folder_path.split("/")[-2] - else: - aliasName = folder_path.split("/")[-1] - if not os.path.isdir(folder_path): - self.messageBox( - "Folder not found. Please try again with the full path to a folder.") - elif not len(aliasName): - self.messageBox( - "Folder with the same name already exists. Please delete or rename it first.") - else: - # All good - os.symlink(folder_path, os.path.join( - self.scripts_dir, aliasName)) - self.saveScriptContents(temp=True) - self.current_folder = aliasName - self.updateFoldersDropdown() - self.setCurrentFolder(aliasName) - self.updateScriptsDropdown() - self.loadScriptContents(check=False) - self.script_editor.setFocus() - return - self.current_folder_dropdown.blockSignals(True) - self.current_folder_dropdown.setCurrentIndex(self.folder_index) - self.current_folder_dropdown.blockSignals(False) - else: - # 1: Save current script as temp if needed - self.saveScriptContents(temp=True) - # 2: Set the new folder in the variables - self.current_folder = fd_data - self.folder_index = fd_index - # 3: Update the scripts dropdown - self.updateScriptsDropdown() - # 4: Load the current script! - self.loadScriptContents() - self.script_editor.setFocus() - - self.loadScriptState() - self.setScriptState() - - return - - def scriptDropdownChanged(self): - '''Executed when the current script dropdown is changed. Should only be called by the manual dropdown change. Not by other functions.''' - self.saveScriptState() - scripts_dropdown = self.current_script_dropdown - sd_value = scripts_dropdown.currentText() - sd_index = scripts_dropdown.currentIndex() - sd_data = scripts_dropdown.itemData(sd_index) - if sd_data == "create new": - self.current_script_dropdown.blockSignals(True) - panel = FileNameDialog(self, mode="script") - if panel.exec_(): - # Accepted - script_name = panel.text + ".py" - script_path = os.path.join( - self.scripts_dir, self.current_folder, script_name) - log(script_name) - log(script_path) - if os.path.isfile(script_path): - self.messageBox("Script already exists.") - self.current_script_dropdown.setCurrentIndex( - self.script_index) - if self.makeScriptFile(name=script_name, folder=self.current_folder): - # Success creating the folder - self.saveScriptContents(temp=True) - self.updateScriptsDropdown() - if self.current_script != "Untitled.py": - self.script_editor.setPlainText("") - self.current_script = script_name - self.setCurrentScript(script_name) - self.saveScriptContents(temp=False) - # self.loadScriptContents() - else: - self.messageBox("There was a problem creating the script.") - self.current_script_dropdown.setCurrentIndex( - self.script_index) - else: - # Canceled/rejected - self.current_script_dropdown.setCurrentIndex(self.script_index) - return - self.current_script_dropdown.blockSignals(False) - - elif sd_data == "create duplicate": - self.current_script_dropdown.blockSignals(True) - current_folder_path = os.path.join( - self.scripts_dir, self.current_folder, self.current_script) - current_script_path = os.path.join( - self.scripts_dir, self.current_folder, self.current_script) - - current_name = self.current_script - if self.current_script.endswith(".py"): - current_name = current_name[:-3] - - test_name = current_name - while True: - test_name += "_copy" - new_script_path = os.path.join( - self.scripts_dir, self.current_folder, test_name + ".py") - if not 
os.path.isfile(new_script_path): - break - - script_name = test_name + ".py" - - if self.makeScriptFile(name=script_name, folder=self.current_folder): - # Success creating the folder - self.saveScriptContents(temp=True) - self.updateScriptsDropdown() - # self.script_editor.setPlainText("") - self.current_script = script_name - self.setCurrentScript(script_name) - self.script_editor.setFocus() - else: - self.messageBox("There was a problem duplicating the script.") - self.current_script_dropdown.setCurrentIndex(self.script_index) - - self.current_script_dropdown.blockSignals(False) - - elif sd_data == "open in browser": - current_script_path = os.path.join( - self.scripts_dir, self.current_folder, self.current_script) - self.openInFileBrowser(current_script_path) - self.current_script_dropdown.blockSignals(True) - self.current_script_dropdown.setCurrentIndex(self.script_index) - self.current_script_dropdown.blockSignals(False) - return - - elif sd_data == "delete script": - if self.deleteScript(): - self.updateScriptsDropdown() - self.loadScriptContents() - else: - self.current_script_dropdown.blockSignals(True) - self.current_script_dropdown.setCurrentIndex(self.script_index) - self.current_script_dropdown.blockSignals(False) - - else: - self.saveScriptContents() - self.current_script = sd_data - self.script_index = sd_index - self.setCurrentScript(self.current_script) - self.loadScriptContents() - self.script_editor.setFocus() - self.loadScriptState() - self.setScriptState() - return - - def setScriptModified(self, modified=True): - ''' Sets self.current_script_modified, title and whatever else we need ''' - self.current_script_modified = modified - title_modified_string = " [modified]" - windowTitle = self.windowTitle().split(title_modified_string)[0] - if modified == True: - windowTitle += title_modified_string - self.setWindowTitle(windowTitle) - try: - scripts_dropdown = self.current_script_dropdown - sd_index = scripts_dropdown.currentIndex() - sd_data = scripts_dropdown.itemData(sd_index) - if modified == False: - scripts_dropdown.setItemText(sd_index, sd_data) - else: - scripts_dropdown.setItemText(sd_index, sd_data + "(*)") - except: - pass - - def openInFileBrowser(self, path=""): - OS = platform.system() - if not os.path.exists(path): - path = KS_DIR - if OS == "Windows": - os.startfile(path) - elif OS == "Darwin": - subprocess.Popen(["open", path]) - else: - subprocess.Popen(["xdg-open", path]) - - def loadScriptState(self): - ''' - Loads the last state of the script from a file inside the SE directory's root. 
- SAVES self.scroll_pos, self.cursor_pos, self.last_open_script - ''' - self.state_dict = {} - if not os.path.isfile(self.state_txt_path): - return False - else: - with open(self.state_txt_path, "r") as f: - self.state_dict = json.load(f) - - log("Loading script state into self.state_dict, self.scrollPos, self.cursorPos") - log(self.state_dict) - - if "scroll_pos" in self.state_dict: - self.scrollPos = self.state_dict["scroll_pos"] - if "cursor_pos" in self.state_dict: - self.cursorPos = self.state_dict["cursor_pos"] - - def setScriptState(self): - ''' - Sets the already script state from self.state_dict into the current script if applicable - ''' - script_fullname = self.current_folder + "/" + self.current_script - - if "scroll_pos" in self.state_dict: - if script_fullname in self.state_dict["scroll_pos"]: - self.script_editor.verticalScrollBar().setValue( - int(self.state_dict["scroll_pos"][script_fullname])) - - if "cursor_pos" in self.state_dict: - if script_fullname in self.state_dict["cursor_pos"]: - cursor = self.script_editor.textCursor() - cursor.setPosition(int( - self.state_dict["cursor_pos"][script_fullname][1]), QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(int( - self.state_dict["cursor_pos"][script_fullname][0]), QtGui.QTextCursor.KeepAnchor) - self.script_editor.setTextCursor(cursor) - - if 'splitter_sizes' in self.state_dict: - self.splitter.setSizes(self.state_dict['splitter_sizes']) - - def setLastScript(self): - if 'last_folder' in self.state_dict and 'last_script' in self.state_dict: - self.updateFoldersDropdown() - self.setCurrentFolder(self.state_dict['last_folder']) - self.updateScriptsDropdown() - self.setCurrentScript(self.state_dict['last_script']) - self.loadScriptContents() - self.script_editor.setFocus() - - def saveScriptState(self): - ''' Stores the current state of the script into a file inside the SE directory's root ''' - log("About to save script state...") - ''' - # self.state_dict = {} - if os.path.isfile(self.state_txt_path): - with open(self.state_txt_path, "r") as f: - self.state_dict = json.load(f) - - if "scroll_pos" in self.state_dict: - self.scrollPos = self.state_dict["scroll_pos"] - if "cursor_pos" in self.state_dict: - self.cursorPos = self.state_dict["cursor_pos"] - - ''' - self.loadScriptState() - - # Overwrite current values into the scriptState - self.saveScrollValue() - self.saveCursorPosValue() - - self.state_dict['scroll_pos'] = self.scrollPos - self.state_dict['cursor_pos'] = self.cursorPos - self.state_dict['last_folder'] = self.current_folder - self.state_dict['last_script'] = self.current_script - self.state_dict['splitter_sizes'] = self.splitter.sizes() - - with open(self.state_txt_path, "w") as f: - state = json.dump(self.state_dict, f, sort_keys=True, indent=4) - return state - - # Autosave background loop - def autosave(self): - if self.toAutosave: - # Save the script... 
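# A minimal illustrative sketch of what saveScriptState() above persists: scroll
# and cursor positions keyed by "folder/script", plus the last open script and
# splitter sizes, dumped as JSON. The file path below is a placeholder for
# self.state_txt_path.
import json

state = {
    "last_folder": "scripts",
    "last_script": "Untitled.py",
    "scroll_pos": {"scripts/Untitled.py": 0},
    "cursor_pos": {"scripts/Untitled.py": [0, 0]},  # [position, anchor]
    "splitter_sizes": [0, 1],
}
with open("/path/to/ks_state.json", "w") as f:  # placeholder path
    json.dump(state, f, sort_keys=True, indent=4)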
- self.saveScriptContents() - self.toAutosave = False - self.saveScriptState() - log("autosaving...") - return - - # Global stuff - def setTextSelection(self): - self.highlighter.selected_text = self.script_editor.textCursor().selection().toPlainText() - return - - def eventFilter(self, object, event): - if event.type() == QtCore.QEvent.KeyPress: - return QtWidgets.QWidget.eventFilter(self, object, event) - else: - return QtWidgets.QWidget.eventFilter(self, object, event) - - def resizeEvent(self, res_event): - w = self.frameGeometry().width() - self.current_node_label_node.setVisible(w > 460) - self.script_label.setVisible(w > 460) - return super(KnobScripter, self).resizeEvent(res_event) - - def changeClicked(self, newNode=""): - ''' Change node ''' - try: - print "Changing from " + self.node.name() - except: - self.node = None - if not len(nuke.selectedNodes()): - self.exitNodeMode() - return - nuke.menu("Nuke").findItem( - "Edit/Node/Update KnobScripter Context").invoke() - selection = knobScripterSelectedNodes - if self.nodeMode: # Only update the number of unsaved knobs if we were already in node mode - if self.node is not None: - updatedCount = self.updateUnsavedKnobs() - else: - updatedCount = 0 - else: - updatedCount = 0 - self.autosave() - if newNode != "" and nuke.exists(newNode): - selection = [newNode] - elif not len(selection): - node_dialog = ChooseNodeDialog(self) - if node_dialog.exec_(): - # Accepted - selection = [nuke.toNode(node_dialog.name)] - else: - return - - # Change to node mode... - self.node_mode_bar.setVisible(True) - self.script_mode_bar.setVisible(False) - if not self.nodeMode: - self.saveScriptContents() - self.toAutosave = False - self.saveScriptState() - self.splitter.setSizes([0, 1]) - self.nodeMode = True - - # If already selected, pass - if self.node is not None and selection[0].fullName() == self.node.fullName(): - self.messageBox("Please select a different node first!") - return - elif updatedCount > 0: - msgBox = QtWidgets.QMessageBox() - msgBox.setText( - "Save changes to %s knob%s before changing the node?" % (str(updatedCount), int(updatedCount > 1) * "s")) - msgBox.setStandardButtons( - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel) - msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) - reply = msgBox.exec_() - if reply == QtWidgets.QMessageBox.Yes: - self.saveAllKnobValues(check=False) - elif reply == QtWidgets.QMessageBox.Cancel: - return - if len(selection) > 1: - self.messageBox( - "More than one node selected.\nChanging knobChanged editor to %s" % selection[0].fullName()) - # Reinitialise everything, wooo! 
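# A minimal illustrative sketch of the autosave flow above: edits set a flag and
# autosave() writes a ".py.autosave" next to the script only when the editor
# text differs from what is on disk. The paths and the trigger for autosave()
# are placeholders here; the wiring is outside this excerpt.
import os

def autosave(script_path, edited_text):
    autosave_path = script_path + ".autosave"
    saved_text = ""
    if os.path.isfile(script_path):
        with open(script_path, "r") as f:
            saved_text = f.read()
    if edited_text != saved_text:
        with open(autosave_path, "w") as f:
            f.write(edited_text)
    elif os.path.isfile(autosave_path):
        os.remove(autosave_path)  # nothing to keep: drop the stale autosave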
- self.current_knob_dropdown.blockSignals(True) - self.node = selection[0] - - self.script_editor.setPlainText("") - self.unsavedKnobs = {} - self.scrollPos = {} - self.setWindowTitle("KnobScripter - %s %s" % - (self.node.fullName(), self.knob)) - self.current_node_label_name.setText(self.node.fullName()) - - self.toLoadKnob = False - self.updateKnobDropdown() # onee - # self.current_knob_dropdown.repaint() - # self.current_knob_dropdown.setMinimumWidth(self.current_knob_dropdown.minimumSizeHint().width()) - self.toLoadKnob = True - self.setCurrentKnob(self.knob) - self.loadKnobValue(False) - self.script_editor.setFocus() - self.setKnobModified(False) - self.current_knob_dropdown.blockSignals(False) - # self.current_knob_dropdown.setMinimumContentsLength(80) - return - - def exitNodeMode(self): - self.nodeMode = False - self.setWindowTitle("KnobScripter - Script Mode") - self.node_mode_bar.setVisible(False) - self.script_mode_bar.setVisible(True) - self.node = nuke.toNode("root") - # self.updateFoldersDropdown() - # self.updateScriptsDropdown() - self.splitter.setSizes([1, 1]) - self.loadScriptState() - self.setLastScript() - - self.loadScriptContents(check=False) - self.setScriptState() - - def clearConsole(self): - self.origConsoleText = self.nukeSEOutput.document().toPlainText().encode("utf8") - self.script_output.setPlainText("") - - def toggleFRW(self, frw_pressed): - self.frw_open = frw_pressed - self.frw.setVisible(self.frw_open) - if self.frw_open: - self.frw.find_lineEdit.setFocus() - self.frw.find_lineEdit.selectAll() - else: - self.script_editor.setFocus() - return - - def openSnippets(self): - ''' Whenever the 'snippets' button is pressed... open the panel ''' - global SnippetEditPanel - if SnippetEditPanel == "": - SnippetEditPanel = SnippetsPanel(self) - - if not SnippetEditPanel.isVisible(): - SnippetEditPanel.reload() - - if SnippetEditPanel.show(): - self.snippets = self.loadSnippets(maxDepth=5) - SnippetEditPanel = "" - - def loadSnippets(self, path="", maxDepth=5, depth=0): - ''' - Load prefs recursive. When maximum recursion depth, ignores paths. - ''' - max_depth = maxDepth - cur_depth = depth - if path == "": - path = self.snippets_txt_path - if not os.path.isfile(path): - return {} - else: - loaded_snippets = {} - with open(path, "r") as f: - file = json.load(f) - for i, (key, val) in enumerate(file.items()): - if re.match(r"\[custom-path-[0-9]+\]$", key): - if cur_depth < max_depth: - new_dict = self.loadSnippets( - path=val, maxDepth=max_depth, depth=cur_depth + 1) - loaded_snippets.update(new_dict) - else: - loaded_snippets[key] = val - return loaded_snippets - - def messageBox(self, the_text=""): - ''' Just a simple message box ''' - if self.isPane: - msgBox = QtWidgets.QMessageBox() - else: - msgBox = QtWidgets.QMessageBox(self) - msgBox.setText(the_text) - msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - msgBox.exec_() - - def openPrefs(self): - ''' Open the preferences panel ''' - global PrefsPanel - if PrefsPanel == "": - PrefsPanel = KnobScripterPrefs(self) - - if PrefsPanel.show(): - PrefsPanel = "" - - def loadPrefs(self): - ''' Load prefs ''' - if not os.path.isfile(self.prefs_txt): - return [] - else: - with open(self.prefs_txt, "r") as f: - prefs = json.load(f) - return prefs - - def runScript(self): - ''' Run the current script... 
''' - self.script_editor.runScript() - - def saveScrollValue(self): - ''' Save scroll values ''' - if self.nodeMode: - self.scrollPos[self.knob] = self.script_editor.verticalScrollBar( - ).value() - else: - self.scrollPos[self.current_folder + "/" + - self.current_script] = self.script_editor.verticalScrollBar().value() - - def saveCursorPosValue(self): - ''' Save cursor pos and anchor values ''' - self.cursorPos[self.current_folder + "/" + self.current_script] = [ - self.script_editor.textCursor().position(), self.script_editor.textCursor().anchor()] - - def closeEvent(self, close_event): - if self.nodeMode: - updatedCount = self.updateUnsavedKnobs() - if updatedCount > 0: - msgBox = QtWidgets.QMessageBox() - msgBox.setText("Save changes to %s knob%s before closing?" % ( - str(updatedCount), int(updatedCount > 1) * "s")) - msgBox.setStandardButtons( - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel) - msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) - reply = msgBox.exec_() - if reply == QtWidgets.QMessageBox.Yes: - self.saveAllKnobValues(check=False) - close_event.accept() - return - elif reply == QtWidgets.QMessageBox.Cancel: - close_event.ignore() - return - else: - close_event.accept() - else: - self.autosave() - if self in AllKnobScripters: - AllKnobScripters.remove(self) - close_event.accept() - - # Landing functions - - def refreshClicked(self): - ''' Function to refresh the dropdowns ''' - if self.nodeMode: - knob = self.current_knob_dropdown.itemData( - self.current_knob_dropdown.currentIndex()).encode('UTF8') - self.current_knob_dropdown.blockSignals(True) - self.current_knob_dropdown.clear() # First remove all items - self.updateKnobDropdown() - availableKnobs = [] - for i in range(self.current_knob_dropdown.count()): - if self.current_knob_dropdown.itemData(i) is not None: - availableKnobs.append( - self.current_knob_dropdown.itemData(i).encode('UTF8')) - if knob in availableKnobs: - self.setCurrentKnob(knob) - self.current_knob_dropdown.blockSignals(False) - else: - folder = self.current_folder - script = self.current_script - self.autosave() - self.updateFoldersDropdown() - self.setCurrentFolder(folder) - self.updateScriptsDropdown() - self.setCurrentScript(script) - self.script_editor.setFocus() - - def reloadClicked(self): - if self.nodeMode: - self.loadKnobValue() - else: - log("Node mode is off") - self.loadScriptContents(check=True, pyOnly=True) - - def saveClicked(self): - if self.nodeMode: - self.saveKnobValue(False) - else: - self.saveScriptContents(temp=False) - - def setModified(self): - if self.nodeMode: - self.setKnobModified(True) - elif not self.current_script_modified: - self.setScriptModified(True) - if not self.nodeMode: - self.toAutosave = True - - def pin(self, pressed): - if pressed: - self.setWindowFlags(self.windowFlags() | - QtCore.Qt.WindowStaysOnTopHint) - self.pinned = True - self.show() - else: - self.setWindowFlags(self.windowFlags() & ~ - QtCore.Qt.WindowStaysOnTopHint) - self.pinned = False - self.show() - - def findSE(self): - for widget in QtWidgets.QApplication.allWidgets(): - if "Script Editor" in widget.windowTitle(): - return widget - - # FunctiosaveScrollValuens for Nuke's Script Editor - def findScriptEditors(self): - script_editors = [] - for widget in QtWidgets.QApplication.allWidgets(): - if "Script Editor" in widget.windowTitle() and len(widget.children()) > 5: - script_editors.append(widget) - return script_editors - - def findSEInput(self, 
se): - return se.children()[-1].children()[0] - - def findSEOutput(self, se): - return se.children()[-1].children()[1] - - def findSERunBtn(self, se): - for btn in se.children(): - try: - if "Run the current script" in btn.toolTip(): - return btn - except: - pass - return False - - def setSEOutputEvent(self): - nukeScriptEditors = self.findScriptEditors() - # Take the console from the first script editor found... - self.origConsoleText = self.nukeSEOutput.document().toPlainText().encode("utf8") - for se in nukeScriptEditors: - se_output = self.findSEOutput(se) - se_output.textChanged.connect( - partial(consoleChanged, se_output, self)) - consoleChanged(se_output, self) # Initialise. - - -class KnobScripterPane(KnobScripter): - def __init__(self, node="", knob="knobChanged"): - super(KnobScripterPane, self).__init__() - self.isPane = True - - def showEvent(self, the_event): - try: - killPaneMargins(self) - except: - pass - return KnobScripter.showEvent(self, the_event) - - def hideEvent(self, the_event): - self.autosave() - return KnobScripter.hideEvent(self, the_event) - - -def consoleChanged(self, ks): - ''' This will be called every time the ScriptEditor Output text is changed ''' - try: - if ks: # KS exists - ksOutput = ks.script_output # The console TextEdit widget - ksText = self.document().toPlainText().encode("utf8") - # The text from the console that will be omitted - origConsoleText = ks.origConsoleText - if ksText.startswith(origConsoleText): - ksText = ksText[len(origConsoleText):] - else: - ks.origConsoleText = "" - ksOutput.setPlainText(ksText) - ksOutput.verticalScrollBar().setValue(ksOutput.verticalScrollBar().maximum()) - except: - pass - - -def killPaneMargins(widget_object): - if widget_object: - target_widgets = set() - target_widgets.add(widget_object.parentWidget().parentWidget()) - target_widgets.add(widget_object.parentWidget( - ).parentWidget().parentWidget().parentWidget()) - - for widget_layout in target_widgets: - try: - widget_layout.layout().setContentsMargins(0, 0, 0, 0) - except: - pass - - -def debug(lev=0): - ''' Convenience function to set the KnobScripter on debug mode''' - # levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL] - # for handler in logging.root.handlers[:]: - # logging.root.removeHandler(handler) - # logging.basicConfig(level=levels[lev]) - # Changed to a shitty way for now - global DebugMode - DebugMode = True - - -def log(text): - ''' Display a debug info message. Yes, in a stupid way. I know.''' - global DebugMode - if DebugMode: - print(text) - - -# --------------------------------------------------------------------- -# Dialogs -# --------------------------------------------------------------------- -class FileNameDialog(QtWidgets.QDialog): - ''' - Dialog for creating new... (mode = "folder", "script" or "knob"). 
- ''' - - def __init__(self, parent=None, mode="folder", text=""): - if parent.isPane: - super(FileNameDialog, self).__init__() - else: - super(FileNameDialog, self).__init__(parent) - #self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint) - self.mode = mode - self.text = text - - title = "Create new {}.".format(self.mode) - self.setWindowTitle(title) - - self.initUI() - - def initUI(self): - # Widgets - self.name_label = QtWidgets.QLabel("Name: ") - self.name_label.setAlignment( - QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) - self.name_lineEdit = QtWidgets.QLineEdit() - self.name_lineEdit.setText(self.text) - self.name_lineEdit.textChanged.connect(self.nameChanged) - - # Buttons - self.button_box = QtWidgets.QDialogButtonBox( - QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel) - self.button_box.button( - QtWidgets.QDialogButtonBox.Ok).setEnabled(self.text != "") - self.button_box.accepted.connect(self.clickedOk) - self.button_box.rejected.connect(self.clickedCancel) - - # Layout - self.master_layout = QtWidgets.QVBoxLayout() - self.name_layout = QtWidgets.QHBoxLayout() - self.name_layout.addWidget(self.name_label) - self.name_layout.addWidget(self.name_lineEdit) - self.master_layout.addLayout(self.name_layout) - self.master_layout.addWidget(self.button_box) - self.setLayout(self.master_layout) - - self.name_lineEdit.setFocus() - self.setMinimumWidth(250) - - def nameChanged(self): - txt = self.name_lineEdit.text() - m = r"[\w]*$" - if self.mode == "knob": # Knobs can't start with a number... - m = r"[a-zA-Z_]+" + m - - if re.match(m, txt) or txt == "": - self.text = txt - else: - self.name_lineEdit.setText(self.text) - - self.button_box.button( - QtWidgets.QDialogButtonBox.Ok).setEnabled(self.text != "") - return - - def clickedOk(self): - self.accept() - return - - def clickedCancel(self): - self.reject() - return - - -class TextInputDialog(QtWidgets.QDialog): - ''' - Simple dialog for a text input. 
- ''' - - def __init__(self, parent=None, name="", text="", title=""): - if parent.isPane: - super(TextInputDialog, self).__init__() - else: - super(TextInputDialog, self).__init__(parent) - #self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint) - - self.name = name # title of textinput - self.text = text # default content of textinput - - self.setWindowTitle(title) - - self.initUI() - - def initUI(self): - # Widgets - self.name_label = QtWidgets.QLabel(self.name + ": ") - self.name_label.setAlignment( - QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) - self.name_lineEdit = QtWidgets.QLineEdit() - self.name_lineEdit.setText(self.text) - self.name_lineEdit.textChanged.connect(self.nameChanged) - - # Buttons - self.button_box = QtWidgets.QDialogButtonBox( - QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel) - #self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(self.text != "") - self.button_box.accepted.connect(self.clickedOk) - self.button_box.rejected.connect(self.clickedCancel) - - # Layout - self.master_layout = QtWidgets.QVBoxLayout() - self.name_layout = QtWidgets.QHBoxLayout() - self.name_layout.addWidget(self.name_label) - self.name_layout.addWidget(self.name_lineEdit) - self.master_layout.addLayout(self.name_layout) - self.master_layout.addWidget(self.button_box) - self.setLayout(self.master_layout) - - self.name_lineEdit.setFocus() - self.setMinimumWidth(250) - - def nameChanged(self): - self.text = self.name_lineEdit.text() - - def clickedOk(self): - self.accept() - return - - def clickedCancel(self): - self.reject() - return - - -class ChooseNodeDialog(QtWidgets.QDialog): - ''' - Dialog for selecting a node by its name. Only admits nodes that exist (including root, preferences...) - ''' - - def __init__(self, parent=None, name=""): - if parent.isPane: - super(ChooseNodeDialog, self).__init__() - else: - super(ChooseNodeDialog, self).__init__(parent) - - self.name = name # Name of node (will be "" by default) - self.allNodes = [] - - self.setWindowTitle("Enter the node's name...") - - self.initUI() - - def initUI(self): - # Widgets - self.name_label = QtWidgets.QLabel("Name: ") - self.name_label.setAlignment( - QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) - self.name_lineEdit = QtWidgets.QLineEdit() - self.name_lineEdit.setText(self.name) - self.name_lineEdit.textChanged.connect(self.nameChanged) - - self.allNodes = self.getAllNodes() - completer = QtWidgets.QCompleter(self.allNodes, self) - completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive) - self.name_lineEdit.setCompleter(completer) - - # Buttons - self.button_box = QtWidgets.QDialogButtonBox( - QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel) - self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setEnabled( - nuke.exists(self.name)) - self.button_box.accepted.connect(self.clickedOk) - self.button_box.rejected.connect(self.clickedCancel) - - # Layout - self.master_layout = QtWidgets.QVBoxLayout() - self.name_layout = QtWidgets.QHBoxLayout() - self.name_layout.addWidget(self.name_label) - self.name_layout.addWidget(self.name_lineEdit) - self.master_layout.addLayout(self.name_layout) - self.master_layout.addWidget(self.button_box) - self.setLayout(self.master_layout) - - self.name_lineEdit.setFocus() - self.setMinimumWidth(250) - - def getAllNodes(self): - self.allNodes = [n.fullName() for n in nuke.allNodes( - recurseGroups=True)] # if parent is in current context?? 
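# A minimal illustrative sketch of the completion list ChooseNodeDialog builds
# above: every node's full name (recursing into groups) plus "root" and
# "preferences", so the OK button is only enabled for names that exist.
import nuke

names = [n.fullName() for n in nuke.allNodes(recurseGroups=True)]
names += ["root", "preferences"]
typed = "root"  # placeholder for the dialog's line edit text
ok_enabled = typed in names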
- self.allNodes.extend(["root", "preferences"]) - return self.allNodes - - def nameChanged(self): - self.name = self.name_lineEdit.text() - self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setEnabled( - self.name in self.allNodes) - - def clickedOk(self): - self.accept() - return - - def clickedCancel(self): - self.reject() - return - - -# ------------------------------------------------------------------------------------------------------ -# Script Editor Widget -# Wouter Gilsing built an incredibly useful python script editor for his Hotbox Manager, so I had it -# really easy for this part! -# Starting from his script editor, I changed the style and added the sublime-like functionality. -# I think this bit of code has the potential to get used in many nuke tools. -# Credit to him: http://www.woutergilsing.com/ -# Originally used on W_Hotbox v1.5: http://www.nukepedia.com/python/ui/w_hotbox -# ------------------------------------------------------------------------------------------------------ -class KnobScripterTextEdit(QtWidgets.QPlainTextEdit): - # Signal that will be emitted when the user has changed the text - userChangedEvent = QtCore.Signal() - - def __init__(self, knobScripter=""): - super(KnobScripterTextEdit, self).__init__() - - self.knobScripter = knobScripter - self.selected_text = "" - - # Setup line numbers - if self.knobScripter != "": - self.tabSpaces = self.knobScripter.tabSpaces - else: - self.tabSpaces = 4 - self.lineNumberArea = KSLineNumberArea(self) - self.blockCountChanged.connect(self.updateLineNumberAreaWidth) - self.updateRequest.connect(self.updateLineNumberArea) - self.updateLineNumberAreaWidth() - - # Highlight line - self.cursorPositionChanged.connect(self.highlightCurrentLine) - - # -------------------------------------------------------------------------------------------------- - # This is adapted from an original version by Wouter Gilsing. - # Extract from his original comments: - # While researching the implementation of line number, I had a look at Nuke's Blinkscript node. [..] - # thefoundry.co.uk/products/nuke/developers/100/pythonreference/nukescripts.blinkscripteditor-pysrc.html - # I stripped and modified the useful bits of the line number related parts of the code [..] - # Credits to theFoundry for writing the blinkscripteditor, best example code I could wish for. 
- # -------------------------------------------------------------------------------------------------- - - def lineNumberAreaWidth(self): - digits = 1 - maxNum = max(1, self.blockCount()) - while (maxNum >= 10): - maxNum /= 10 - digits += 1 - - space = 7 + self.fontMetrics().width('9') * digits - return space - - def updateLineNumberAreaWidth(self): - self.setViewportMargins(self.lineNumberAreaWidth(), 0, 0, 0) - - def updateLineNumberArea(self, rect, dy): - - if (dy): - self.lineNumberArea.scroll(0, dy) - else: - self.lineNumberArea.update( - 0, rect.y(), self.lineNumberArea.width(), rect.height()) - - if (rect.contains(self.viewport().rect())): - self.updateLineNumberAreaWidth() - - def resizeEvent(self, event): - QtWidgets.QPlainTextEdit.resizeEvent(self, event) - - cr = self.contentsRect() - self.lineNumberArea.setGeometry(QtCore.QRect( - cr.left(), cr.top(), self.lineNumberAreaWidth(), cr.height())) - - def lineNumberAreaPaintEvent(self, event): - - if self.isReadOnly(): - return - - painter = QtGui.QPainter(self.lineNumberArea) - painter.fillRect(event.rect(), QtGui.QColor(36, 36, 36)) # Number bg - - block = self.firstVisibleBlock() - blockNumber = block.blockNumber() - top = int(self.blockBoundingGeometry( - block).translated(self.contentOffset()).top()) - bottom = top + int(self.blockBoundingRect(block).height()) - currentLine = self.document().findBlock( - self.textCursor().position()).blockNumber() - - painter.setPen(self.palette().color(QtGui.QPalette.Text)) - - painterFont = QtGui.QFont() - painterFont.setFamily("Courier") - painterFont.setStyleHint(QtGui.QFont.Monospace) - painterFont.setFixedPitch(True) - if self.knobScripter != "": - painterFont.setPointSize(self.knobScripter.fontSize) - painter.setFont(self.knobScripter.script_editor_font) - - while (block.isValid() and top <= event.rect().bottom()): - - textColor = QtGui.QColor(110, 110, 110) # Numbers - - if blockNumber == currentLine and self.hasFocus(): - textColor = QtGui.QColor(255, 170, 0) # Number highlighted - - painter.setPen(textColor) - - number = "%s" % str(blockNumber + 1) - painter.drawText(-3, top, self.lineNumberArea.width(), - self.fontMetrics().height(), QtCore.Qt.AlignRight, number) - - # Move to the next block - block = block.next() - top = bottom - bottom = top + int(self.blockBoundingRect(block).height()) - blockNumber += 1 - - def keyPressEvent(self, event): - ''' - Custom actions for specific keystrokes - ''' - key = event.key() - ctrl = bool(event.modifiers() & Qt.ControlModifier) - alt = bool(event.modifiers() & Qt.AltModifier) - shift = bool(event.modifiers() & Qt.ShiftModifier) - pre_scroll = self.verticalScrollBar().value() - #modifiers = QtWidgets.QApplication.keyboardModifiers() - #ctrl = (modifiers == Qt.ControlModifier) - #shift = (modifiers == Qt.ShiftModifier) - - up_arrow = 16777235 - down_arrow = 16777237 - - # if Tab convert to Space - if key == 16777217: - self.indentation('indent') - - # if Shift+Tab remove indent - elif key == 16777218: - self.indentation('unindent') - - # if BackSpace try to snap to previous indent level - elif key == 16777219: - if not self.unindentBackspace(): - QtWidgets.QPlainTextEdit.keyPressEvent(self, event) - else: - # COOL BEHAVIORS SIMILAR TO SUBLIME GO NEXT! 
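# A minimal illustrative sketch of lineNumberAreaWidth() above: the gutter grows
# with the number of digits in the last line number. The 8px digit width is a
# placeholder for what QFontMetrics reports in the actual widget.
def line_number_area_width(block_count, digit_px=8, padding=7):
    digits = 1
    max_num = max(1, block_count)
    while max_num >= 10:
        max_num //= 10
        digits += 1
    return padding + digit_px * digits

print(line_number_area_width(120))  # 3 digits -> 7 + 8 * 3 = 31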
- cursor = self.textCursor() - cpos = cursor.position() - apos = cursor.anchor() - text_before_cursor = self.toPlainText()[:min(cpos, apos)] - text_after_cursor = self.toPlainText()[max(cpos, apos):] - text_all = self.toPlainText() - to_line_start = text_before_cursor[::-1].find("\n") - if to_line_start == -1: - # Position of the start of the line that includes the cursor selection start - linestart_pos = 0 - else: - linestart_pos = len(text_before_cursor) - to_line_start - - to_line_end = text_after_cursor.find("\n") - if to_line_end == -1: - # Position of the end of the line that includes the cursor selection end - lineend_pos = len(text_all) - else: - lineend_pos = max(cpos, apos) + to_line_end - - text_before_lines = text_all[:linestart_pos] - text_after_lines = text_all[lineend_pos:] - if len(text_after_lines) and text_after_lines.startswith("\n"): - text_after_lines = text_after_lines[1:] - text_lines = text_all[linestart_pos:lineend_pos] - - if cursor.hasSelection(): - selection = cursor.selection().toPlainText() - else: - selection = "" - if key == Qt.Key_ParenLeft and (len(selection) > 0 or re.match(r"[\s)}\];]+", text_after_cursor) or not len(text_after_cursor)): # ( - cursor.insertText("(" + selection + ")") - cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - # ) - elif key == Qt.Key_ParenRight and text_after_cursor.startswith(")"): - cursor.movePosition(QtGui.QTextCursor.NextCharacter) - self.setTextCursor(cursor) - elif key == Qt.Key_BracketLeft and (len(selection) > 0 or re.match(r"[\s)}\];]+", text_after_cursor) or not len(text_after_cursor)): # [ - cursor.insertText("[" + selection + "]") - cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - # ] - elif key in [Qt.Key_BracketRight, 43] and text_after_cursor.startswith("]"): - cursor.movePosition(QtGui.QTextCursor.NextCharacter) - self.setTextCursor(cursor) - elif key == Qt.Key_BraceLeft and (len(selection) > 0 or re.match(r"[\s)}\];]+", text_after_cursor) or not len(text_after_cursor)): # { - cursor.insertText("{" + selection + "}") - cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - # } - elif key in [199, Qt.Key_BraceRight] and text_after_cursor.startswith("}"): - cursor.movePosition(QtGui.QTextCursor.NextCharacter) - self.setTextCursor(cursor) - elif key == 34: # " - if len(selection) > 0: - cursor.insertText('"' + selection + '"') - cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) - # and not re.search(r"(?:[\s)\]]+|$)",text_before_cursor): - elif text_after_cursor.startswith('"') and '"' in text_before_cursor.split("\n")[-1]: - cursor.movePosition(QtGui.QTextCursor.NextCharacter) - # If chars after cursor, act normal - elif not re.match(r"(?:[\s)\]]+|$)", text_after_cursor): - QtWidgets.QPlainTextEdit.keyPressEvent(self, event) - # If chars before cursor, act normal - elif not re.search(r"[\s.({\[,]$", text_before_cursor) and text_before_cursor != "": - QtWidgets.QPlainTextEdit.keyPressEvent(self, event) - else: - cursor.insertText('"' + selection + '"') - cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - elif key == 39: # ' - if len(selection) > 0: - 
cursor.insertText("'" + selection + "'") - cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) - # and not re.search(r"(?:[\s)\]]+|$)",text_before_cursor): - elif text_after_cursor.startswith("'") and "'" in text_before_cursor.split("\n")[-1]: - cursor.movePosition(QtGui.QTextCursor.NextCharacter) - # If chars after cursor, act normal - elif not re.match(r"(?:[\s)\]]+|$)", text_after_cursor): - QtWidgets.QPlainTextEdit.keyPressEvent(self, event) - # If chars before cursor, act normal - elif not re.search(r"[\s.({\[,]$", text_before_cursor) and text_before_cursor != "": - QtWidgets.QPlainTextEdit.keyPressEvent(self, event) - else: - cursor.insertText("'" + selection + "'") - cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - elif key == 35 and len(selection): # (yes, a hash) - # If there's a selection, insert a hash at the start of each line.. how the fuck? - if selection != "": - selection_split = selection.split("\n") - if all(i.startswith("#") for i in selection_split): - selection_commented = "\n".join( - [s[1:] for s in selection_split]) # Uncommented - else: - selection_commented = "#" + "\n#".join(selection_split) - cursor.insertText(selection_commented) - if apos > cpos: - cursor.setPosition( - apos + len(selection_commented) - len(selection), QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(cpos, QtGui.QTextCursor.KeepAnchor) - else: - cursor.setPosition(apos, QtGui.QTextCursor.MoveAnchor) - cursor.setPosition( - cpos + len(selection_commented) - len(selection), QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - - elif key == 68 and ctrl and shift: # Ctrl+Shift+D, to duplicate text or line/s - - if not len(selection): - self.setPlainText( - text_before_lines + text_lines + "\n" + text_lines + "\n" + text_after_lines) - cursor.setPosition( - apos + len(text_lines) + 1, QtGui.QTextCursor.MoveAnchor) - cursor.setPosition( - cpos + len(text_lines) + 1, QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - self.verticalScrollBar().setValue(pre_scroll) - self.scrollToCursor() - else: - if text_before_cursor.endswith("\n") and not selection.startswith("\n"): - cursor.insertText(selection + "\n" + selection) - cursor.setPosition( - apos + len(selection) + 1, QtGui.QTextCursor.MoveAnchor) - cursor.setPosition( - cpos + len(selection) + 1, QtGui.QTextCursor.KeepAnchor) - else: - cursor.insertText(selection + selection) - cursor.setPosition( - apos + len(selection), QtGui.QTextCursor.MoveAnchor) - cursor.setPosition( - cpos + len(selection), QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - - # Ctrl+Shift+Up, to move the selected line/s up - elif key == up_arrow and ctrl and shift and len(text_before_lines): - prev_line_start_distance = text_before_lines[:-1][::-1].find( - "\n") - if prev_line_start_distance == -1: - prev_line_start_pos = 0 # Position of the start of the previous line - else: - prev_line_start_pos = len( - text_before_lines) - 1 - prev_line_start_distance - prev_line = text_before_lines[prev_line_start_pos:] - - text_before_prev_line = text_before_lines[:prev_line_start_pos] - - if prev_line.endswith("\n"): - prev_line = prev_line[:-1] - - if len(text_after_lines): - text_after_lines = "\n" + text_after_lines - - self.setPlainText( - text_before_prev_line + text_lines + "\n" + prev_line + text_after_lines) - cursor.setPosition(apos - len(prev_line) - 1, - 
QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(cpos - len(prev_line) - 1, - QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - self.verticalScrollBar().setValue(pre_scroll) - self.scrollToCursor() - return - - elif key == down_arrow and ctrl and shift: # Ctrl+Shift+Up, to move the selected line/s up - if not len(text_after_lines): - text_after_lines = "" - next_line_end_distance = text_after_lines.find("\n") - if next_line_end_distance == -1: - next_line_end_pos = len(text_all) - else: - next_line_end_pos = next_line_end_distance - next_line = text_after_lines[:next_line_end_pos] - text_after_next_line = text_after_lines[next_line_end_pos:] - - self.setPlainText(text_before_lines + next_line + - "\n" + text_lines + text_after_next_line) - cursor.setPosition(apos + len(next_line) + 1, - QtGui.QTextCursor.MoveAnchor) - cursor.setPosition(cpos + len(next_line) + 1, - QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - self.verticalScrollBar().setValue(pre_scroll) - self.scrollToCursor() - return - - # If up key and nothing happens, go to start - elif key == up_arrow and not len(text_before_lines): - if not shift: - cursor.setPosition(0, QtGui.QTextCursor.MoveAnchor) - self.setTextCursor(cursor) - else: - cursor.setPosition(0, QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - - # If up key and nothing happens, go to start - elif key == down_arrow and not len(text_after_lines): - if not shift: - cursor.setPosition( - len(text_all), QtGui.QTextCursor.MoveAnchor) - self.setTextCursor(cursor) - else: - cursor.setPosition( - len(text_all), QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(cursor) - - # if enter or return, match indent level - elif key in [16777220, 16777221]: - self.indentNewLine() - else: - QtWidgets.QPlainTextEdit.keyPressEvent(self, event) - - self.scrollToCursor() - - def scrollToCursor(self): - self.cursor = self.textCursor() - # Does nothing, but makes the scroll go to the right place... 
- self.cursor.movePosition(QtGui.QTextCursor.NoMove) - self.setTextCursor(self.cursor) - - def getCursorInfo(self): - - self.cursor = self.textCursor() - - self.firstChar = self.cursor.selectionStart() - self.lastChar = self.cursor.selectionEnd() - - self.noSelection = False - if self.firstChar == self.lastChar: - self.noSelection = True - - self.originalPosition = self.cursor.position() - self.cursorBlockPos = self.cursor.positionInBlock() - - def unindentBackspace(self): - ''' - #snap to previous indent level - ''' - self.getCursorInfo() - - if not self.noSelection or self.cursorBlockPos == 0: - return False - - # check text in front of cursor - textInFront = self.document().findBlock( - self.firstChar).text()[:self.cursorBlockPos] - - # check whether solely spaces - if textInFront != ' ' * self.cursorBlockPos: - return False - - # snap to previous indent level - spaces = len(textInFront) - for space in range(spaces - ((spaces - 1) / self.tabSpaces) * self.tabSpaces - 1): - self.cursor.deletePreviousChar() - - def indentNewLine(self): - - # in case selection covers multiple line, make it one line first - self.insertPlainText('') - - self.getCursorInfo() - - # check how many spaces after cursor - text = self.document().findBlock(self.firstChar).text() - - textInFront = text[:self.cursorBlockPos] - - if len(textInFront) == 0: - self.insertPlainText('\n') - return - - indentLevel = 0 - for i in textInFront: - if i == ' ': - indentLevel += 1 - else: - break - - indentLevel /= self.tabSpaces - - # find out whether textInFront's last character was a ':' - # if that's the case add another indent. - # ignore any spaces at the end, however also - # make sure textInFront is not just an indent - if textInFront.count(' ') != len(textInFront): - while textInFront[-1] == ' ': - textInFront = textInFront[:-1] - - if textInFront[-1] == ':': - indentLevel += 1 - - # new line - self.insertPlainText('\n') - # match indent - self.insertPlainText(' ' * (self.tabSpaces * indentLevel)) - - def indentation(self, mode): - - pre_scroll = self.verticalScrollBar().value() - self.getCursorInfo() - - # if nothing is selected and mode is set to indent, simply insert as many - # space as needed to reach the next indentation level. 
- if self.noSelection and mode == 'indent': - - remainingSpaces = self.tabSpaces - \ - (self.cursorBlockPos % self.tabSpaces) - self.insertPlainText(' ' * remainingSpaces) - return - - selectedBlocks = self.findBlocks(self.firstChar, self.lastChar) - beforeBlocks = self.findBlocks( - last=self.firstChar - 1, exclude=selectedBlocks) - afterBlocks = self.findBlocks( - first=self.lastChar + 1, exclude=selectedBlocks) - - beforeBlocksText = self.blocks2list(beforeBlocks) - selectedBlocksText = self.blocks2list(selectedBlocks, mode) - afterBlocksText = self.blocks2list(afterBlocks) - - combinedText = '\n'.join( - beforeBlocksText + selectedBlocksText + afterBlocksText) - - # make sure the line count stays the same - originalBlockCount = len(self.toPlainText().split('\n')) - combinedText = '\n'.join(combinedText.split('\n')[:originalBlockCount]) - - self.clear() - self.setPlainText(combinedText) - - if self.noSelection: - self.cursor.setPosition(self.lastChar) - - # check whether the the original selection was from top to bottom or vice versa - else: - if self.originalPosition == self.firstChar: - first = self.lastChar - last = self.firstChar - firstBlockSnap = QtGui.QTextCursor.EndOfBlock - lastBlockSnap = QtGui.QTextCursor.StartOfBlock - else: - first = self.firstChar - last = self.lastChar - firstBlockSnap = QtGui.QTextCursor.StartOfBlock - lastBlockSnap = QtGui.QTextCursor.EndOfBlock - - self.cursor.setPosition(first) - self.cursor.movePosition( - firstBlockSnap, QtGui.QTextCursor.MoveAnchor) - self.cursor.setPosition(last, QtGui.QTextCursor.KeepAnchor) - self.cursor.movePosition( - lastBlockSnap, QtGui.QTextCursor.KeepAnchor) - - self.setTextCursor(self.cursor) - self.verticalScrollBar().setValue(pre_scroll) - - def findBlocks(self, first=0, last=None, exclude=[]): - blocks = [] - if last == None: - last = self.document().characterCount() - for pos in range(first, last + 1): - block = self.document().findBlock(pos) - if block not in blocks and block not in exclude: - blocks.append(block) - return blocks - - def blocks2list(self, blocks, mode=None): - text = [] - for block in blocks: - blockText = block.text() - if mode == 'unindent': - if blockText.startswith(' ' * self.tabSpaces): - blockText = blockText[self.tabSpaces:] - self.lastChar -= self.tabSpaces - elif blockText.startswith('\t'): - blockText = blockText[1:] - self.lastChar -= 1 - - elif mode == 'indent': - blockText = ' ' * self.tabSpaces + blockText - self.lastChar += self.tabSpaces - - text.append(blockText) - - return text - - def highlightCurrentLine(self): - ''' - Highlight currently selected line - ''' - extraSelections = [] - - selection = QtWidgets.QTextEdit.ExtraSelection() - - lineColor = QtGui.QColor(62, 62, 62, 255) - - selection.format.setBackground(lineColor) - selection.format.setProperty( - QtGui.QTextFormat.FullWidthSelection, True) - selection.cursor = self.textCursor() - selection.cursor.clearSelection() - - extraSelections.append(selection) - - self.setExtraSelections(extraSelections) - self.scrollToCursor() - - def format(self, rgb, style=''): - ''' - Return a QtWidgets.QTextCharFormat with the given attributes. 
- ''' - color = QtGui.QColor(*rgb) - textFormat = QtGui.QTextCharFormat() - textFormat.setForeground(color) - - if 'bold' in style: - textFormat.setFontWeight(QtGui.QFont.Bold) - if 'italic' in style: - textFormat.setFontItalic(True) - if 'underline' in style: - textFormat.setUnderlineStyle(QtGui.QTextCharFormat.SingleUnderline) - - return textFormat - - -class KSLineNumberArea(QtWidgets.QWidget): - def __init__(self, scriptEditor): - super(KSLineNumberArea, self).__init__(scriptEditor) - - self.scriptEditor = scriptEditor - self.setStyleSheet("text-align: center;") - - def paintEvent(self, event): - self.scriptEditor.lineNumberAreaPaintEvent(event) - return - - -class KSScriptEditorHighlighter(QtGui.QSyntaxHighlighter): - ''' - This is also adapted from an original version by Wouter Gilsing. His comments: - - Modified, simplified version of some code found I found when researching: - wiki.python.org/moin/PyQt/Python%20syntax%20highlighting - They did an awesome job, so credits to them. I only needed to make some - modifications to make it fit my needs. - ''' - - def __init__(self, document, parent=None): - - super(KSScriptEditorHighlighter, self).__init__(document) - self.knobScripter = parent - self.script_editor = self.knobScripter.script_editor - self.selected_text = "" - self.selected_text_prev = "" - self.rules_sublime = "" - - self.styles = { - 'keyword': self.format([238, 117, 181], 'bold'), - 'string': self.format([242, 136, 135]), - 'comment': self.format([143, 221, 144]), - 'numbers': self.format([174, 129, 255]), - 'custom': self.format([255, 170, 0], 'italic'), - 'selected': self.format([255, 255, 255], 'bold underline'), - 'underline': self.format([240, 240, 240], 'underline'), - } - - self.keywords = [ - 'and', 'assert', 'break', 'class', 'continue', 'def', - 'del', 'elif', 'else', 'except', 'exec', 'finally', - 'for', 'from', 'global', 'if', 'import', 'in', - 'is', 'lambda', 'not', 'or', 'pass', 'print', - 'raise', 'return', 'try', 'while', 'yield', 'with', 'as' - ] - - self.operatorKeywords = [ - '=', '==', '!=', '<', '<=', '>', '>=', - '\+', '-', '\*', '/', '//', '\%', '\*\*', - '\+=', '-=', '\*=', '/=', '\%=', - '\^', '\|', '\&', '\~', '>>', '<<' - ] - - self.variableKeywords = ['int', 'str', - 'float', 'bool', 'list', 'dict', 'set'] - - self.numbers = ['True', 'False', 'None'] - self.loadAltStyles() - - self.tri_single = (QtCore.QRegExp("'''"), 1, self.styles['comment']) - self.tri_double = (QtCore.QRegExp('"""'), 2, self.styles['comment']) - - # rules - rules = [] - - rules += [(r'\b%s\b' % i, 0, self.styles['keyword']) - for i in self.keywords] - rules += [(i, 0, self.styles['keyword']) - for i in self.operatorKeywords] - rules += [(r'\b%s\b' % i, 0, self.styles['numbers']) - for i in self.numbers] - - rules += [ - - # integers - (r'\b[0-9]+\b', 0, self.styles['numbers']), - # Double-quoted string, possibly containing escape sequences - (r'"[^"\\]*(\\.[^"\\]*)*"', 0, self.styles['string']), - # Single-quoted string, possibly containing escape sequences - (r"'[^'\\]*(\\.[^'\\]*)*'", 0, self.styles['string']), - # From '#' until a newline - (r'#[^\n]*', 0, self.styles['comment']), - ] - - # Build a QRegExp for each pattern - self.rules_nuke = [(QtCore.QRegExp(pat), index, fmt) - for (pat, index, fmt) in rules] - self.rules = self.rules_nuke - - def loadAltStyles(self): - ''' Loads other color styles apart from Nuke's default. 
''' - self.styles_sublime = { - 'base': self.format([255, 255, 255]), - 'keyword': self.format([237, 36, 110]), - 'string': self.format([237, 229, 122]), - 'comment': self.format([125, 125, 125]), - 'numbers': self.format([165, 120, 255]), - 'functions': self.format([184, 237, 54]), - 'blue': self.format([130, 226, 255], 'italic'), - 'arguments': self.format([255, 170, 10], 'italic'), - 'custom': self.format([200, 200, 200], 'italic'), - 'underline': self.format([240, 240, 240], 'underline'), - 'selected': self.format([255, 255, 255], 'bold underline'), - } - - self.keywords_sublime = [ - 'and', 'assert', 'break', 'continue', - 'del', 'elif', 'else', 'except', 'exec', 'finally', - 'for', 'from', 'global', 'if', 'import', 'in', - 'is', 'lambda', 'not', 'or', 'pass', 'print', - 'raise', 'return', 'try', 'while', 'yield', 'with', 'as' - ] - self.operatorKeywords_sublime = [ - '=', '==', '!=', '<', '<=', '>', '>=', - '\+', '-', '\*', '/', '//', '\%', '\*\*', - '\+=', '-=', '\*=', '/=', '\%=', - '\^', '\|', '\&', '\~', '>>', '<<' - ] - - self.baseKeywords_sublime = [ - ',', - ] - - self.customKeywords_sublime = [ - 'nuke', - ] - - self.blueKeywords_sublime = [ - 'def', 'class', 'int', 'str', 'float', 'bool', 'list', 'dict', 'set' - ] - - self.argKeywords_sublime = [ - 'self', - ] - - self.tri_single_sublime = (QtCore.QRegExp( - "'''"), 1, self.styles_sublime['comment']) - self.tri_double_sublime = (QtCore.QRegExp( - '"""'), 2, self.styles_sublime['comment']) - self.numbers_sublime = ['True', 'False', 'None'] - - # rules - - rules = [] - # First turn everything inside parentheses orange - rules += [(r"def [\w]+[\s]*\((.*)\)", 1, - self.styles_sublime['arguments'])] - # Now restore unwanted stuff... - rules += [(i, 0, self.styles_sublime['base']) - for i in self.baseKeywords_sublime] - rules += [(r"[^\(\w),.][\s]*[\w]+", 0, self.styles_sublime['base'])] - - # Everything else - rules += [(r'\b%s\b' % i, 0, self.styles_sublime['keyword']) - for i in self.keywords_sublime] - rules += [(i, 0, self.styles_sublime['keyword']) - for i in self.operatorKeywords_sublime] - rules += [(i, 0, self.styles_sublime['custom']) - for i in self.customKeywords_sublime] - rules += [(r'\b%s\b' % i, 0, self.styles_sublime['blue']) - for i in self.blueKeywords_sublime] - rules += [(i, 0, self.styles_sublime['arguments']) - for i in self.argKeywords_sublime] - rules += [(r'\b%s\b' % i, 0, self.styles_sublime['numbers']) - for i in self.numbers_sublime] - - rules += [ - - # integers - (r'\b[0-9]+\b', 0, self.styles_sublime['numbers']), - # Double-quoted string, possibly containing escape sequences - (r'"[^"\\]*(\\.[^"\\]*)*"', 0, self.styles_sublime['string']), - # Single-quoted string, possibly containing escape sequences - (r"'[^'\\]*(\\.[^'\\]*)*'", 0, self.styles_sublime['string']), - # From '#' until a newline - (r'#[^\n]*', 0, self.styles_sublime['comment']), - # Function definitions - (r"def[\s]+([\w\.]+)", 1, self.styles_sublime['functions']), - # Class definitions - (r"class[\s]+([\w\.]+)", 1, self.styles_sublime['functions']), - # Class argument (which is also a class so must be green) - (r"class[\s]+[\w\.]+[\s]*\((.*)\)", - 1, self.styles_sublime['functions']), - # Function arguments also pick their style... 
- (r"def[\s]+[\w]+[\s]*\(([\w]+)", 1, - self.styles_sublime['arguments']), - ] - - # Build a QRegExp for each pattern - self.rules_sublime = [(QtCore.QRegExp(pat), index, fmt) - for (pat, index, fmt) in rules] - - def format(self, rgb, style=''): - ''' - Return a QtWidgets.QTextCharFormat with the given attributes. - ''' - - color = QtGui.QColor(*rgb) - textFormat = QtGui.QTextCharFormat() - textFormat.setForeground(color) - - if 'bold' in style: - textFormat.setFontWeight(QtGui.QFont.Bold) - if 'italic' in style: - textFormat.setFontItalic(True) - if 'underline' in style: - textFormat.setUnderlineStyle(QtGui.QTextCharFormat.SingleUnderline) - - return textFormat - - def highlightBlock(self, text): - ''' - Apply syntax highlighting to the given block of text. - ''' - # Do other syntax formatting - - if self.knobScripter.color_scheme: - self.color_scheme = self.knobScripter.color_scheme - else: - self.color_scheme = "nuke" - - if self.color_scheme == "nuke": - self.rules = self.rules_nuke - elif self.color_scheme == "sublime": - self.rules = self.rules_sublime - - for expression, nth, format in self.rules: - index = expression.indexIn(text, 0) - - while index >= 0: - # We actually want the index of the nth match - index = expression.pos(nth) - length = len(expression.cap(nth)) - self.setFormat(index, length, format) - index = expression.indexIn(text, index + length) - - self.setCurrentBlockState(0) - - # Multi-line strings etc. based on selected scheme - if self.color_scheme == "nuke": - in_multiline = self.match_multiline(text, *self.tri_single) - if not in_multiline: - in_multiline = self.match_multiline(text, *self.tri_double) - elif self.color_scheme == "sublime": - in_multiline = self.match_multiline(text, *self.tri_single_sublime) - if not in_multiline: - in_multiline = self.match_multiline( - text, *self.tri_double_sublime) - - # TODO if there's a selection, highlight same occurrences in the full document. If no selection but something highlighted, unhighlight full document. (do it thru regex or sth) - - def match_multiline(self, text, delimiter, in_state, style): - ''' - Check whether highlighting requires multiple lines. - ''' - # If inside triple-single quotes, start at 0 - if self.previousBlockState() == in_state: - start = 0 - add = 0 - # Otherwise, look for the delimiter on this line - else: - start = delimiter.indexIn(text) - # Move past this match - add = delimiter.matchedLength() - - # As long as there's a delimiter match on this line... - while start >= 0: - # Look for the ending delimiter - end = delimiter.indexIn(text, start + add) - # Ending delimiter on this line? 
- if end >= add: - length = end - start + add + delimiter.matchedLength() - self.setCurrentBlockState(0) - # No; multi-line string - else: - self.setCurrentBlockState(in_state) - length = len(text) - start + add - # Apply formatting - self.setFormat(start, length, style) - # Look for the next match - start = delimiter.indexIn(text, start + length) - - # Return True if still inside a multi-line string, False otherwise - if self.currentBlockState() == in_state: - return True - else: - return False - -# -------------------------------------------------------------------------------------- -# Script Output Widget -# The output logger works the same way as Nuke's python script editor output window -# -------------------------------------------------------------------------------------- - - -class ScriptOutputWidget(QtWidgets.QTextEdit): - def __init__(self, parent=None): - super(ScriptOutputWidget, self).__init__(parent) - self.knobScripter = parent - self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, - QtWidgets.QSizePolicy.Expanding) - self.setMinimumHeight(20) - - def keyPressEvent(self, event): - ctrl = ((event.modifiers() and (Qt.ControlModifier)) != 0) - alt = ((event.modifiers() and (Qt.AltModifier)) != 0) - shift = ((event.modifiers() and (Qt.ShiftModifier)) != 0) - key = event.key() - if type(event) == QtGui.QKeyEvent: - # print event.key() - if key in [32]: # Space - return KnobScripter.keyPressEvent(self.knobScripter, event) - elif key in [Qt.Key_Backspace, Qt.Key_Delete]: - self.knobScripter.clearConsole() - return QtWidgets.QTextEdit.keyPressEvent(self, event) - - # def mousePressEvent(self, QMouseEvent): - # if QMouseEvent.button() == Qt.RightButton: - # self.knobScripter.clearConsole() - # QtWidgets.QTextEdit.mousePressEvent(self, QMouseEvent) - -# --------------------------------------------------------------------- -# Modified KnobScripterTextEdit to include snippets etc. -# --------------------------------------------------------------------- - - -class KnobScripterTextEditMain(KnobScripterTextEdit): - def __init__(self, knobScripter, output=None, parent=None): - super(KnobScripterTextEditMain, self).__init__(knobScripter) - self.knobScripter = knobScripter - self.script_output = output - self.nukeCompleter = None - self.currentNukeCompletion = None - - ######## - # FROM NUKE's SCRIPT EDITOR START - ######## - self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, - QtWidgets.QSizePolicy.Expanding) - - # Setup completer - self.nukeCompleter = QtWidgets.QCompleter(self) - self.nukeCompleter.setWidget(self) - self.nukeCompleter.setCompletionMode( - QtWidgets.QCompleter.UnfilteredPopupCompletion) - self.nukeCompleter.setCaseSensitivity(Qt.CaseSensitive) - try: - self.nukeCompleter.setModel(QtGui.QStringListModel()) - except: - self.nukeCompleter.setModel(QtCore.QStringListModel()) - - self.nukeCompleter.activated.connect(self.insertNukeCompletion) - self.nukeCompleter.highlighted.connect(self.completerHighlightChanged) - ######## - # FROM NUKE's SCRIPT EDITOR END - ######## - - def findLongestEndingMatch(self, text, dic): - ''' - If the text ends with a key in the dictionary, it returns the key and value. - If there are several matches, returns the longest one. - False if no matches. 
- ''' - longest = 0 # len of longest match - match_key = None - match_snippet = "" - for key, val in dic.items(): - #match = re.search(r"[\s\.({\[,;=+-]"+key+r"(?:[\s)\]\"]+|$)",text) - match = re.search(r"[\s\.({\[,;=+-]" + key + r"$", text) - if match or text == key: - if len(key) > longest: - longest = len(key) - match_key = key - match_snippet = val - if match_key is None: - return False - return match_key, match_snippet - - def placeholderToEnd(self, text, placeholder): - '''Returns distance (int) from the first occurrence of the placeholder, to the end of the string with placeholders removed''' - search = re.search(placeholder, text) - if not search: - return -1 - from_start = search.start() - total = len(re.sub(placeholder, "", text)) - to_end = total - from_start - return to_end - - def addSnippetText(self, snippet_text): - ''' Adds the selected text as a snippet (taking care of $$, $name$ etc) to the script editor ''' - cursor_placeholder_find = r"(? 1: - cursor_len = positions[1] - positions[0] - 2 - - text = re.sub(cursor_placeholder_find, "", text) - self.cursor.insertText(text) - if placeholder_to_end >= 0: - for i in range(placeholder_to_end): - self.cursor.movePosition(QtGui.QTextCursor.PreviousCharacter) - for i in range(cursor_len): - self.cursor.movePosition( - QtGui.QTextCursor.NextCharacter, QtGui.QTextCursor.KeepAnchor) - self.setTextCursor(self.cursor) - - def keyPressEvent(self, event): - - ctrl = bool(event.modifiers() & Qt.ControlModifier) - alt = bool(event.modifiers() & Qt.AltModifier) - shift = bool(event.modifiers() & Qt.ShiftModifier) - key = event.key() - - # ADAPTED FROM NUKE's SCRIPT EDITOR: - # Get completer state - self.nukeCompleterShowing = self.nukeCompleter.popup().isVisible() - - # BEFORE ANYTHING ELSE, IF SPECIAL MODIFIERS SIMPLY IGNORE THE REST - if not self.nukeCompleterShowing and (ctrl or shift or alt): - # Bypassed! 
- if key not in [Qt.Key_Return, Qt.Key_Enter, Qt.Key_Tab]: - KnobScripterTextEdit.keyPressEvent(self, event) - return - - # If the completer is showing - if self.nukeCompleterShowing: - tc = self.textCursor() - # If we're hitting enter, do completion - if key in [Qt.Key_Return, Qt.Key_Enter, Qt.Key_Tab]: - if not self.currentNukeCompletion: - self.nukeCompleter.setCurrentRow(0) - self.currentNukeCompletion = self.nukeCompleter.currentCompletion() - # print str(self.nukeCompleter.completionModel[0]) - self.insertNukeCompletion(self.currentNukeCompletion) - self.nukeCompleter.popup().hide() - self.nukeCompleterShowing = False - # If you're hitting right or escape, hide the popup - elif key == Qt.Key_Right or key == Qt.Key_Escape: - self.nukeCompleter.popup().hide() - self.nukeCompleterShowing = False - # If you hit tab, escape or ctrl-space, hide the completer - elif key == Qt.Key_Tab or key == Qt.Key_Escape or (ctrl and key == Qt.Key_Space): - self.currentNukeCompletion = "" - self.nukeCompleter.popup().hide() - self.nukeCompleterShowing = False - # If none of the above, update the completion model - else: - QtWidgets.QPlainTextEdit.keyPressEvent(self, event) - # Edit completion model - colNum = tc.columnNumber() - posNum = tc.position() - inputText = self.toPlainText() - inputTextSplit = inputText.splitlines() - runningLength = 0 - currentLine = None - for line in inputTextSplit: - length = len(line) - runningLength += length - if runningLength >= posNum: - currentLine = line - break - runningLength += 1 - if currentLine: - completionPart = currentLine.split(" ")[-1] - if "(" in completionPart: - completionPart = completionPart.split("(")[-1] - self.completeNukePartUnderCursor(completionPart) - return - - if type(event) == QtGui.QKeyEvent: - if key == Qt.Key_Escape: # Close the knobscripter... - self.knobScripter.close() - elif not ctrl and not alt and not shift and event.key() == Qt.Key_Tab: - self.placeholder = "$$" - # 1. Set the cursor - self.cursor = self.textCursor() - - # 2. Save text before and after - cpos = self.cursor.position() - text_before_cursor = self.toPlainText()[:cpos] - line_before_cursor = text_before_cursor.split('\n')[-1] - text_after_cursor = self.toPlainText()[cpos:] - - # 3. Check coincidences in snippets dicts - try: # Meaning snippet found - match_key, match_snippet = self.findLongestEndingMatch( - line_before_cursor, self.knobScripter.snippets) - for i in range(len(match_key)): - self.cursor.deletePreviousChar() - # This function takes care of adding the appropriate snippet and moving the cursor... - self.addSnippetText(match_snippet) - except: # Meaning snippet not found... 
- # ADAPTED FROM NUKE's SCRIPT EDITOR: - tc = self.textCursor() - allCode = self.toPlainText() - colNum = tc.columnNumber() - posNum = tc.position() - - # ...and if there's text in the editor - if len(allCode.split()) > 0: - # There is text in the editor - currentLine = tc.block().text() - - # If you're not at the end of the line just add a tab - if colNum < len(currentLine): - # If there isn't a ')' directly to the right of the cursor add a tab - if currentLine[colNum:colNum + 1] != ')': - KnobScripterTextEdit.keyPressEvent(self, event) - return - # Else show the completer - else: - completionPart = currentLine[:colNum].split( - " ")[-1] - if "(" in completionPart: - completionPart = completionPart.split( - "(")[-1] - - self.completeNukePartUnderCursor( - completionPart) - - return - - # If you are at the end of the line, - else: - # If there's nothing to the right of you add a tab - if currentLine[colNum - 1:] == "" or currentLine.endswith(" "): - KnobScripterTextEdit.keyPressEvent(self, event) - return - # Else update completionPart and show the completer - completionPart = currentLine.split(" ")[-1] - if "(" in completionPart: - completionPart = completionPart.split("(")[-1] - - self.completeNukePartUnderCursor(completionPart) - return - - KnobScripterTextEdit.keyPressEvent(self, event) - elif event.key() in [Qt.Key_Enter, Qt.Key_Return]: - modifiers = QtWidgets.QApplication.keyboardModifiers() - if modifiers == QtCore.Qt.ControlModifier: - self.runScript() - else: - KnobScripterTextEdit.keyPressEvent(self, event) - else: - KnobScripterTextEdit.keyPressEvent(self, event) - - def getPyObjects(self, text): - ''' Returns a list containing all the functions, classes and variables found within the selected python text (code) ''' - matches = [] - # 1: Remove text inside triple quotes (leaving the quotes) - text_clean = '""'.join(text.split('"""')[::2]) - text_clean = '""'.join(text_clean.split("'''")[::2]) - - # 2: Remove text inside of quotes (leaving the quotes) except if \" - lines = text_clean.split("\n") - text_clean = "" - for line in lines: - line_clean = '""'.join(line.split('"')[::2]) - line_clean = '""'.join(line_clean.split("'")[::2]) - line_clean = line_clean.split("#")[0] - text_clean += line_clean + "\n" - - # 3. Split into segments (lines plus ";") - segments = re.findall(r"[^\n;]+", text_clean) - - # 4. Go case by case. 
- for s in segments: - # Declared vars - matches += re.findall(r"([\w\.]+)(?=[,\s\w]*=[^=]+$)", s) - # Def functions and arguments - function = re.findall(r"[\s]*def[\s]+([\w\.]+)[\s]*\([\s]*", s) - if len(function): - matches += function - args = re.split(r"[\s]*def[\s]+([\w\.]+)[\s]*\([\s]*", s) - if len(args) > 1: - args = args[-1] - matches += re.findall( - r"(?adrianpueyo.com, 2016-2019') - kspSignature.setOpenExternalLinks(True) - kspSignature.setStyleSheet('''color:#555;font-size:9px;''') - kspSignature.setAlignment(QtCore.Qt.AlignRight) - - fontLabel = QtWidgets.QLabel("Font:") - self.fontBox = QtWidgets.QFontComboBox() - self.fontBox.setCurrentFont(QtGui.QFont(self.font)) - self.fontBox.currentFontChanged.connect(self.fontChanged) - - fontSizeLabel = QtWidgets.QLabel("Font size:") - self.fontSizeBox = QtWidgets.QSpinBox() - self.fontSizeBox.setValue(self.oldFontSize) - self.fontSizeBox.setMinimum(6) - self.fontSizeBox.setMaximum(100) - self.fontSizeBox.valueChanged.connect(self.fontSizeChanged) - - windowWLabel = QtWidgets.QLabel("Width (px):") - windowWLabel.setToolTip("Default window width in pixels") - self.windowWBox = QtWidgets.QSpinBox() - self.windowWBox.setValue(self.knobScripter.windowDefaultSize[0]) - self.windowWBox.setMinimum(200) - self.windowWBox.setMaximum(4000) - self.windowWBox.setToolTip("Default window width in pixels") - - windowHLabel = QtWidgets.QLabel("Height (px):") - windowHLabel.setToolTip("Default window height in pixels") - self.windowHBox = QtWidgets.QSpinBox() - self.windowHBox.setValue(self.knobScripter.windowDefaultSize[1]) - self.windowHBox.setMinimum(100) - self.windowHBox.setMaximum(2000) - self.windowHBox.setToolTip("Default window height in pixels") - - # TODO: "Grab current dimensions" button - - tabSpaceLabel = QtWidgets.QLabel("Tab spaces:") - tabSpaceLabel.setToolTip("Number of spaces to add with the tab key.") - self.tabSpace2 = QtWidgets.QRadioButton("2") - self.tabSpace4 = QtWidgets.QRadioButton("4") - tabSpaceButtonGroup = QtWidgets.QButtonGroup(self) - tabSpaceButtonGroup.addButton(self.tabSpace2) - tabSpaceButtonGroup.addButton(self.tabSpace4) - self.tabSpace2.setChecked(self.knobScripter.tabSpaces == 2) - self.tabSpace4.setChecked(self.knobScripter.tabSpaces == 4) - - pinDefaultLabel = QtWidgets.QLabel("Always on top:") - pinDefaultLabel.setToolTip("Default mode of the PIN toggle.") - self.pinDefaultOn = QtWidgets.QRadioButton("On") - self.pinDefaultOff = QtWidgets.QRadioButton("Off") - pinDefaultButtonGroup = QtWidgets.QButtonGroup(self) - pinDefaultButtonGroup.addButton(self.pinDefaultOn) - pinDefaultButtonGroup.addButton(self.pinDefaultOff) - self.pinDefaultOn.setChecked(self.knobScripter.pinned == True) - self.pinDefaultOff.setChecked(self.knobScripter.pinned == False) - self.pinDefaultOn.clicked.connect(lambda: self.knobScripter.pin(True)) - self.pinDefaultOff.clicked.connect( - lambda: self.knobScripter.pin(False)) - - colorSchemeLabel = QtWidgets.QLabel("Color scheme:") - colorSchemeLabel.setToolTip("Syntax highlighting text style.") - self.colorSchemeSublime = QtWidgets.QRadioButton("subl") - self.colorSchemeNuke = QtWidgets.QRadioButton("nuke") - colorSchemeButtonGroup = QtWidgets.QButtonGroup(self) - colorSchemeButtonGroup.addButton(self.colorSchemeSublime) - colorSchemeButtonGroup.addButton(self.colorSchemeNuke) - colorSchemeButtonGroup.buttonClicked.connect(self.colorSchemeChanged) - self.colorSchemeSublime.setChecked( - self.knobScripter.color_scheme == "sublime") - self.colorSchemeNuke.setChecked( - 
self.knobScripter.color_scheme == "nuke") - - showLabelsLabel = QtWidgets.QLabel("Show labels:") - showLabelsLabel.setToolTip( - "Display knob labels on the knob dropdown\nOtherwise, shows the internal name only.") - self.showLabelsOn = QtWidgets.QRadioButton("On") - self.showLabelsOff = QtWidgets.QRadioButton("Off") - showLabelsButtonGroup = QtWidgets.QButtonGroup(self) - showLabelsButtonGroup.addButton(self.showLabelsOn) - showLabelsButtonGroup.addButton(self.showLabelsOff) - self.showLabelsOn.setChecked(self.knobScripter.pinned == True) - self.showLabelsOff.setChecked(self.knobScripter.pinned == False) - self.showLabelsOn.clicked.connect(lambda: self.knobScripter.pin(True)) - self.showLabelsOff.clicked.connect( - lambda: self.knobScripter.pin(False)) - - self.buttonBox = QtWidgets.QDialogButtonBox( - QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel) - self.buttonBox.accepted.connect(self.savePrefs) - self.buttonBox.rejected.connect(self.cancelPrefs) - - # Loaded custom values - self.ksPrefs = self.knobScripter.loadPrefs() - if self.ksPrefs != []: - try: - self.fontSizeBox.setValue(self.ksPrefs['font_size']) - self.windowWBox.setValue(self.ksPrefs['window_default_w']) - self.windowHBox.setValue(self.ksPrefs['window_default_h']) - self.tabSpace2.setChecked(self.ksPrefs['tab_spaces'] == 2) - self.tabSpace4.setChecked(self.ksPrefs['tab_spaces'] == 4) - self.pinDefaultOn.setChecked(self.ksPrefs['pin_default'] == 1) - self.pinDefaultOff.setChecked(self.ksPrefs['pin_default'] == 0) - self.showLabelsOn.setChecked(self.ksPrefs['show_labels'] == 1) - self.showLabelsOff.setChecked(self.ksPrefs['show_labels'] == 0) - self.colorSchemeSublime.setChecked( - self.ksPrefs['color_scheme'] == "sublime") - self.colorSchemeNuke.setChecked( - self.ksPrefs['color_scheme'] == "nuke") - except: - pass - - # Layouts - font_layout = QtWidgets.QHBoxLayout() - font_layout.addWidget(fontLabel) - font_layout.addWidget(self.fontBox) - - fontSize_layout = QtWidgets.QHBoxLayout() - fontSize_layout.addWidget(fontSizeLabel) - fontSize_layout.addWidget(self.fontSizeBox) - - windowW_layout = QtWidgets.QHBoxLayout() - windowW_layout.addWidget(windowWLabel) - windowW_layout.addWidget(self.windowWBox) - - windowH_layout = QtWidgets.QHBoxLayout() - windowH_layout.addWidget(windowHLabel) - windowH_layout.addWidget(self.windowHBox) - - tabSpacesButtons_layout = QtWidgets.QHBoxLayout() - tabSpacesButtons_layout.addWidget(self.tabSpace2) - tabSpacesButtons_layout.addWidget(self.tabSpace4) - tabSpaces_layout = QtWidgets.QHBoxLayout() - tabSpaces_layout.addWidget(tabSpaceLabel) - tabSpaces_layout.addLayout(tabSpacesButtons_layout) - - pinDefaultButtons_layout = QtWidgets.QHBoxLayout() - pinDefaultButtons_layout.addWidget(self.pinDefaultOn) - pinDefaultButtons_layout.addWidget(self.pinDefaultOff) - pinDefault_layout = QtWidgets.QHBoxLayout() - pinDefault_layout.addWidget(pinDefaultLabel) - pinDefault_layout.addLayout(pinDefaultButtons_layout) - - showLabelsButtons_layout = QtWidgets.QHBoxLayout() - showLabelsButtons_layout.addWidget(self.showLabelsOn) - showLabelsButtons_layout.addWidget(self.showLabelsOff) - showLabels_layout = QtWidgets.QHBoxLayout() - showLabels_layout.addWidget(showLabelsLabel) - showLabels_layout.addLayout(showLabelsButtons_layout) - - colorSchemeButtons_layout = QtWidgets.QHBoxLayout() - colorSchemeButtons_layout.addWidget(self.colorSchemeSublime) - colorSchemeButtons_layout.addWidget(self.colorSchemeNuke) - colorScheme_layout = QtWidgets.QHBoxLayout() - 
colorScheme_layout.addWidget(colorSchemeLabel) - colorScheme_layout.addLayout(colorSchemeButtons_layout) - - self.master_layout = QtWidgets.QVBoxLayout() - self.master_layout.addWidget(kspTitle) - self.master_layout.addWidget(kspSignature) - self.master_layout.addWidget(kspLine) - self.master_layout.addLayout(font_layout) - self.master_layout.addLayout(fontSize_layout) - self.master_layout.addLayout(windowW_layout) - self.master_layout.addLayout(windowH_layout) - self.master_layout.addLayout(tabSpaces_layout) - self.master_layout.addLayout(pinDefault_layout) - self.master_layout.addLayout(showLabels_layout) - self.master_layout.addLayout(colorScheme_layout) - self.master_layout.addWidget(self.buttonBox) - self.setLayout(self.master_layout) - self.setFixedSize(self.minimumSize()) - - def savePrefs(self): - self.font = self.fontBox.currentFont().family() - ks_prefs = { - 'font_size': self.fontSizeBox.value(), - 'window_default_w': self.windowWBox.value(), - 'window_default_h': self.windowHBox.value(), - 'tab_spaces': self.tabSpaceValue(), - 'pin_default': self.pinDefaultValue(), - 'show_labels': self.showLabelsValue(), - 'font': self.font, - 'color_scheme': self.colorSchemeValue(), - } - self.knobScripter.script_editor_font.setFamily(self.font) - self.knobScripter.script_editor.setFont( - self.knobScripter.script_editor_font) - self.knobScripter.font = self.font - self.knobScripter.color_scheme = self.colorSchemeValue() - self.knobScripter.tabSpaces = self.tabSpaceValue() - self.knobScripter.script_editor.tabSpaces = self.tabSpaceValue() - with open(self.prefs_txt, "w") as f: - prefs = json.dump(ks_prefs, f, sort_keys=True, indent=4) - self.accept() - self.knobScripter.highlighter.rehighlight() - self.knobScripter.show_labels = self.showLabelsValue() - if self.knobScripter.nodeMode: - self.knobScripter.refreshClicked() - return prefs - - def cancelPrefs(self): - self.knobScripter.script_editor_font.setPointSize(self.oldFontSize) - self.knobScripter.script_editor.setFont( - self.knobScripter.script_editor_font) - self.knobScripter.color_scheme = self.oldScheme - self.knobScripter.highlighter.rehighlight() - self.reject() - - def fontSizeChanged(self): - self.knobScripter.script_editor_font.setPointSize( - self.fontSizeBox.value()) - self.knobScripter.script_editor.setFont( - self.knobScripter.script_editor_font) - return - - def fontChanged(self): - self.font = self.fontBox.currentFont().family() - self.knobScripter.script_editor_font.setFamily(self.font) - self.knobScripter.script_editor.setFont( - self.knobScripter.script_editor_font) - return - - def colorSchemeChanged(self): - self.knobScripter.color_scheme = self.colorSchemeValue() - self.knobScripter.highlighter.rehighlight() - return - - def tabSpaceValue(self): - return 2 if self.tabSpace2.isChecked() else 4 - - def pinDefaultValue(self): - return 1 if self.pinDefaultOn.isChecked() else 0 - - def showLabelsValue(self): - return 1 if self.showLabelsOn.isChecked() else 0 - - def colorSchemeValue(self): - return "nuke" if self.colorSchemeNuke.isChecked() else "sublime" - - def closeEvent(self, event): - self.cancelPrefs() - self.close() - - -def updateContext(): - ''' - Get the current selection of nodes with their appropriate context - Doing this outside the KnobScripter -> forces context update inside groups when needed - ''' - global knobScripterSelectedNodes - knobScripterSelectedNodes = nuke.selectedNodes() - return - -# -------------------------------- -# FindReplace -# -------------------------------- - - -class 
FindReplaceWidget(QtWidgets.QWidget): - ''' SearchReplace Widget for the knobscripter. FindReplaceWidget(editor = QPlainTextEdit) ''' - - def __init__(self, parent): - super(FindReplaceWidget, self).__init__(parent) - - self.editor = parent.script_editor - - self.initUI() - - def initUI(self): - - # -------------- - # Find Row - # -------------- - - # Widgets - self.find_label = QtWidgets.QLabel("Find:") - # self.find_label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed) - self.find_label.setFixedWidth(50) - self.find_label.setAlignment( - QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) - self.find_lineEdit = QtWidgets.QLineEdit() - self.find_next_button = QtWidgets.QPushButton("Next") - self.find_next_button.clicked.connect(self.find) - self.find_prev_button = QtWidgets.QPushButton("Previous") - self.find_prev_button.clicked.connect(self.findBack) - self.find_lineEdit.returnPressed.connect(self.find_next_button.click) - - # Layout - self.find_layout = QtWidgets.QHBoxLayout() - self.find_layout.addWidget(self.find_label) - self.find_layout.addWidget(self.find_lineEdit, stretch=1) - self.find_layout.addWidget(self.find_next_button) - self.find_layout.addWidget(self.find_prev_button) - - # -------------- - # Replace Row - # -------------- - - # Widgets - self.replace_label = QtWidgets.QLabel("Replace:") - # self.replace_label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed) - self.replace_label.setFixedWidth(50) - self.replace_label.setAlignment( - QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) - self.replace_lineEdit = QtWidgets.QLineEdit() - self.replace_button = QtWidgets.QPushButton("Replace") - self.replace_button.clicked.connect(self.replace) - self.replace_all_button = QtWidgets.QPushButton("Replace All") - self.replace_all_button.clicked.connect( - lambda: self.replace(rep_all=True)) - self.replace_lineEdit.returnPressed.connect(self.replace_button.click) - - # Layout - self.replace_layout = QtWidgets.QHBoxLayout() - self.replace_layout.addWidget(self.replace_label) - self.replace_layout.addWidget(self.replace_lineEdit, stretch=1) - self.replace_layout.addWidget(self.replace_button) - self.replace_layout.addWidget(self.replace_all_button) - - # Info text - self.info_text = QtWidgets.QLabel("") - self.info_text.setVisible(False) - self.info_text.mousePressEvent = lambda x: self.info_text.setVisible( - False) - #f = self.info_text.font() - # f.setItalic(True) - # self.info_text.setFont(f) - # self.info_text.clicked.connect(lambda:self.info_text.setVisible(False)) - - # Divider line - line = QtWidgets.QFrame() - line.setFrameShape(QtWidgets.QFrame.HLine) - line.setFrameShadow(QtWidgets.QFrame.Sunken) - line.setLineWidth(0) - line.setMidLineWidth(1) - line.setFrameShadow(QtWidgets.QFrame.Sunken) - - # -------------- - # Main Layout - # -------------- - - self.layout = QtWidgets.QVBoxLayout() - self.layout.addSpacing(4) - self.layout.addWidget(self.info_text) - self.layout.addLayout(self.find_layout) - self.layout.addLayout(self.replace_layout) - self.layout.setSpacing(4) - try: # >n11 - self.layout.setMargin(2) - except: # 0: # If not found but there are matches, start over - cursor.movePosition(QtGui.QTextCursor.Start) - self.editor.setTextCursor(cursor) - self.editor.find(find_str, flags) - else: - cursor.insertText(rep_str) - self.editor.find( - rep_str, flags | QtGui.QTextDocument.FindBackward) - - cursor.endEditBlock() - self.replace_lineEdit.setFocus() - return - - -# -------------------------------- -# Snippets -# 
-------------------------------- -class SnippetsPanel(QtWidgets.QDialog): - def __init__(self, parent): - super(SnippetsPanel, self).__init__(parent) - - self.knobScripter = parent - - self.setWindowFlags(self.windowFlags() | - QtCore.Qt.WindowStaysOnTopHint) - self.setWindowTitle("Snippet editor") - - self.snippets_txt_path = self.knobScripter.snippets_txt_path - self.snippets_dict = self.loadSnippetsDict(path=self.snippets_txt_path) - #self.snippets_dict = snippets_dic - - # self.saveSnippets(snippets_dic) - - self.initUI() - self.resize(500, 300) - - def initUI(self): - self.layout = QtWidgets.QVBoxLayout() - - # First Area (Titles) - title_layout = QtWidgets.QHBoxLayout() - shortcuts_label = QtWidgets.QLabel("Shortcut") - code_label = QtWidgets.QLabel("Code snippet") - title_layout.addWidget(shortcuts_label, stretch=1) - title_layout.addWidget(code_label, stretch=2) - self.layout.addLayout(title_layout) - - # Main Scroll area - self.scroll_content = QtWidgets.QWidget() - self.scroll_layout = QtWidgets.QVBoxLayout() - - self.buildSnippetWidgets() - - self.scroll_content.setLayout(self.scroll_layout) - - # Scroll Area Properties - self.scroll = QtWidgets.QScrollArea() - self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn) - self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) - self.scroll.setWidgetResizable(True) - self.scroll.setWidget(self.scroll_content) - - self.layout.addWidget(self.scroll) - - # File knob test - #self.filePath_lineEdit = SnippetFilePath(self) - # self.filePath_lineEdit - # self.layout.addWidget(self.filePath_lineEdit) - - # Lower buttons - self.bottom_layout = QtWidgets.QHBoxLayout() - - self.add_btn = QtWidgets.QPushButton("Add snippet") - self.add_btn.setToolTip("Create empty fields for an extra snippet.") - self.add_btn.clicked.connect(self.addSnippet) - self.bottom_layout.addWidget(self.add_btn) - - self.addPath_btn = QtWidgets.QPushButton("Add custom path") - self.addPath_btn.setToolTip( - "Add a custom path to an external snippets .txt file.") - self.addPath_btn.clicked.connect(self.addCustomPath) - self.bottom_layout.addWidget(self.addPath_btn) - - self.bottom_layout.addStretch() - - self.save_btn = QtWidgets.QPushButton('OK') - self.save_btn.setToolTip( - "Save the snippets into a json file and close the panel.") - self.save_btn.clicked.connect(self.okPressed) - self.bottom_layout.addWidget(self.save_btn) - - self.cancel_btn = QtWidgets.QPushButton("Cancel") - self.cancel_btn.setToolTip("Cancel any new snippets or modifications.") - self.cancel_btn.clicked.connect(self.close) - self.bottom_layout.addWidget(self.cancel_btn) - - self.apply_btn = QtWidgets.QPushButton('Apply') - self.apply_btn.setToolTip("Save the snippets into a json file.") - self.apply_btn.setShortcut('Ctrl+S') - self.apply_btn.clicked.connect(self.applySnippets) - self.bottom_layout.addWidget(self.apply_btn) - - self.help_btn = QtWidgets.QPushButton('Help') - self.help_btn.setShortcut('F1') - self.help_btn.clicked.connect(self.showHelp) - self.bottom_layout.addWidget(self.help_btn) - - self.layout.addLayout(self.bottom_layout) - - self.setLayout(self.layout) - - def reload(self): - ''' - Clears everything without saving and redoes the widgets etc. - Only to be called if the panel isn't shown meaning it's closed. 
- ''' - for i in reversed(range(self.scroll_layout.count())): - self.scroll_layout.itemAt(i).widget().deleteLater() - - self.snippets_dict = self.loadSnippetsDict(path=self.snippets_txt_path) - - self.buildSnippetWidgets() - - def buildSnippetWidgets(self): - for i, (key, val) in enumerate(self.snippets_dict.items()): - if re.match(r"\[custom-path-[0-9]+\]$", key): - file_edit = SnippetFilePath(val) - self.scroll_layout.insertWidget(-1, file_edit) - else: - snippet_edit = SnippetEdit(key, val, parent=self) - self.scroll_layout.insertWidget(-1, snippet_edit) - - def loadSnippetsDict(self, path=""): - ''' Load prefs. TO REMOVE ''' - if path == "": - path = self.knobScripter.snippets_txt_path - if not os.path.isfile(self.snippets_txt_path): - return {} - else: - with open(self.snippets_txt_path, "r") as f: - self.snippets = json.load(f) - return self.snippets - - def getSnippetsAsDict(self): - dic = {} - num_snippets = self.scroll_layout.count() - path_i = 1 - for s in range(num_snippets): - se = self.scroll_layout.itemAt(s).widget() - if se.__class__.__name__ == "SnippetEdit": - key = se.shortcut_editor.text() - val = se.script_editor.toPlainText() - if key != "": - dic[key] = val - else: - path = se.filepath_lineEdit.text() - if path != "": - dic["[custom-path-{}]".format(str(path_i))] = path - path_i += 1 - return dic - - def saveSnippets(self, snippets=""): - if snippets == "": - snippets = self.getSnippetsAsDict() - with open(self.snippets_txt_path, "w") as f: - prefs = json.dump(snippets, f, sort_keys=True, indent=4) - return prefs - - def applySnippets(self): - self.saveSnippets() - self.knobScripter.snippets = self.knobScripter.loadSnippets(maxDepth=5) - self.knobScripter.loadSnippets() - - def okPressed(self): - self.applySnippets() - self.accept() - - def addSnippet(self, key="", val=""): - se = SnippetEdit(key, val, parent=self) - self.scroll_layout.insertWidget(0, se) - self.show() - return se - - def addCustomPath(self, path=""): - cpe = SnippetFilePath(path) - self.scroll_layout.insertWidget(0, cpe) - self.show() - cpe.browseSnippets() - return cpe - - def showHelp(self): - ''' Create a new snippet, auto-completed with the help ''' - help_key = "help" - help_val = """Snippets are a convenient way to have code blocks that you can call through a shortcut.\n\n1. Simply write a shortcut on the text input field on the left. You can see this one is set to "test".\n\n2. Then, write a code or whatever in this script editor. You can include $$ as the placeholder for where you'll want the mouse cursor to appear.\n\n3. Finally, click OK or Apply to save the snippets. 
On the main script editor, you'll be able to call any snippet by writing the shortcut (in this example: help) and pressing the Tab key.\n\nIn order to remove a snippet, simply leave the shortcut and contents blank, and save the snippets.""" - help_se = self.addSnippet(help_key, help_val) - help_se.script_editor.resize(160, 160) - - -class SnippetEdit(QtWidgets.QWidget): - ''' Simple widget containing two fields, for the snippet shortcut and content ''' - - def __init__(self, key="", val="", parent=None): - super(SnippetEdit, self).__init__(parent) - - self.knobScripter = parent.knobScripter - self.color_scheme = self.knobScripter.color_scheme - self.layout = QtWidgets.QHBoxLayout() - - self.shortcut_editor = QtWidgets.QLineEdit(self) - f = self.shortcut_editor.font() - f.setWeight(QtGui.QFont.Bold) - self.shortcut_editor.setFont(f) - self.shortcut_editor.setText(str(key)) - #self.script_editor = QtWidgets.QTextEdit(self) - self.script_editor = KnobScripterTextEdit() - self.script_editor.setMinimumHeight(100) - self.script_editor.setStyleSheet( - 'background:#282828;color:#EEE;') # Main Colors - self.highlighter = KSScriptEditorHighlighter( - self.script_editor.document(), self) - self.script_editor_font = self.knobScripter.script_editor_font - self.script_editor.setFont(self.script_editor_font) - self.script_editor.resize(90, 90) - self.script_editor.setPlainText(str(val)) - self.layout.addWidget(self.shortcut_editor, - stretch=1, alignment=Qt.AlignTop) - self.layout.addWidget(self.script_editor, stretch=2) - self.layout.setContentsMargins(0, 0, 0, 0) - - self.setLayout(self.layout) - - -class SnippetFilePath(QtWidgets.QWidget): - ''' Simple widget containing a filepath lineEdit and a button to open the file browser ''' - - def __init__(self, path="", parent=None): - super(SnippetFilePath, self).__init__(parent) - - self.layout = QtWidgets.QHBoxLayout() - - self.custompath_label = QtWidgets.QLabel(self) - self.custompath_label.setText("Custom path: ") - - self.filepath_lineEdit = QtWidgets.QLineEdit(self) - self.filepath_lineEdit.setText(str(path)) - #self.script_editor = QtWidgets.QTextEdit(self) - self.filepath_lineEdit.setStyleSheet( - 'background:#282828;color:#EEE;') # Main Colors - self.script_editor_font = QtGui.QFont() - self.script_editor_font.setFamily("Courier") - self.script_editor_font.setStyleHint(QtGui.QFont.Monospace) - self.script_editor_font.setFixedPitch(True) - self.script_editor_font.setPointSize(11) - self.filepath_lineEdit.setFont(self.script_editor_font) - - self.file_button = QtWidgets.QPushButton(self) - self.file_button.setText("Browse...") - self.file_button.clicked.connect(self.browseSnippets) - - self.layout.addWidget(self.custompath_label) - self.layout.addWidget(self.filepath_lineEdit) - self.layout.addWidget(self.file_button) - self.layout.setContentsMargins(0, 10, 0, 10) - - self.setLayout(self.layout) - - def browseSnippets(self): - ''' Opens file panel for ...snippets.txt ''' - browseLocation = nuke.getFilename('Select snippets file', '*.txt') - - if not browseLocation: - return - - self.filepath_lineEdit.setText(browseLocation) - return - - -# -------------------------------- -# Implementation -# -------------------------------- - -def showKnobScripter(knob="knobChanged"): - selection = nuke.selectedNodes() - if not len(selection): - pan = KnobScripter() - else: - pan = KnobScripter(selection[0], knob) - pan.show() - - -def addKnobScripterPanel(): - global knobScripterPanel - try: - knobScripterPanel = 
panels.registerWidgetAsPanel('nuke.KnobScripterPane', 'Knob Scripter',
-                                                         'com.adrianpueyo.KnobScripterPane')
-        knobScripterPanel.addToPane(nuke.getPaneFor('Properties.1'))
-
-    except:
-        knobScripterPanel = panels.registerWidgetAsPanel(
-            'nuke.KnobScripterPane', 'Knob Scripter', 'com.adrianpueyo.KnobScripterPane')
-
-
-nuke.KnobScripterPane = KnobScripterPane
-log("KS LOADED")
-ksShortcut = "alt+z"
-addKnobScripterPanel()
-nuke.menu('Nuke').addCommand(
-    'Edit/Node/Open Floating Knob Scripter', showKnobScripter, ksShortcut)
-nuke.menu('Nuke').addCommand('Edit/Node/Update KnobScripter Context',
-                             updateContext).setVisible(False)
diff --git a/openpype/hosts/nuke/startup/clear_rendered.py b/openpype/hosts/nuke/startup/clear_rendered.py
index cf1d8ce170..744af71034 100644
--- a/openpype/hosts/nuke/startup/clear_rendered.py
+++ b/openpype/hosts/nuke/startup/clear_rendered.py
@@ -1,10 +1,11 @@
 import os
 
-from openpype.api import Logger
-log = Logger().get_logger(__name__)
+from openpype.lib import Logger
 
 
 def clear_rendered(dir_path):
+    log = Logger.get_logger(__name__)
+
     for _f in os.listdir(dir_path):
         _f_path = os.path.join(dir_path, _f)
         log.info("Removing: `{}`".format(_f_path))
diff --git a/openpype/hosts/nuke/startup/init.py b/openpype/hosts/nuke/startup/init.py
deleted file mode 100644
index d7560814bf..0000000000
--- a/openpype/hosts/nuke/startup/init.py
+++ /dev/null
@@ -1,4 +0,0 @@
-import nuke
-
-# default write mov
-nuke.knobDefault('Write.mov.colorspace', 'sRGB')
diff --git a/openpype/hosts/nuke/startup/menu.py b/openpype/hosts/nuke/startup/menu.py
index 9ed43b2110..5e29121e9b 100644
--- a/openpype/hosts/nuke/startup/menu.py
+++ b/openpype/hosts/nuke/startup/menu.py
@@ -1,14 +1,17 @@
 import nuke
+import os
 
-from openpype.api import Logger
+from openpype.lib import Logger
 from openpype.pipeline import install_host
 from openpype.hosts.nuke import api
 from openpype.hosts.nuke.api.lib import (
     on_script_load,
     check_inventory_versions,
     WorkfileSettings,
-    dirmap_file_name_filter
+    dirmap_file_name_filter,
+    add_scripts_gizmo
 )
+from openpype.settings import get_project_settings
 
 log = Logger.get_logger(__name__)
 
@@ -28,3 +31,34 @@ nuke.addOnScriptLoad(WorkfileSettings().set_context_settings)
 nuke.addFilenameFilter(dirmap_file_name_filter)
 
 log.info('Automatic syncing of write file knob to script version')
+
+
+def add_scripts_menu():
+    try:
+        from scriptsmenu import launchfornuke
+    except ImportError:
+        log.warning(
+            "Skipping studio.menu install, because "
+            "'scriptsmenu' module seems unavailable."
+        )
+        return
+
+    # load configuration of custom menu
+    project_settings = get_project_settings(os.getenv("AVALON_PROJECT"))
+    config = project_settings["nuke"]["scriptsmenu"]["definition"]
+    _menu = project_settings["nuke"]["scriptsmenu"]["name"]
+
+    if not config:
+        log.warning("Skipping studio menu, no definition found.")
+        return
+
+    # run the launcher for the Nuke menu
+    studio_menu = launchfornuke.main(title=_menu.title())
+
+    # apply configuration
+    studio_menu.build_from_configuration(studio_menu, config)
+
+
+add_scripts_menu()
+
+add_scripts_gizmo()
diff --git a/openpype/hosts/nuke/startup/write_to_read.py b/openpype/hosts/nuke/startup/write_to_read.py
index f5cf66b357..b7add40f47 100644
--- a/openpype/hosts/nuke/startup/write_to_read.py
+++ b/openpype/hosts/nuke/startup/write_to_read.py
@@ -2,8 +2,8 @@ import re
 import os
 import glob
 import nuke
-from openpype.api import Logger
-log = Logger().get_logger(__name__)
+from openpype.lib import Logger
+log = Logger.get_logger(__name__)
 
 SINGLE_FILE_FORMATS = ['avi', 'mp4', 'mxf', 'mov', 'mpg', 'mpeg', 'wmv', 'm4v',
                        'm2v']
diff --git a/openpype/hosts/nuke/vendor/google/protobuf/__init__.py b/openpype/hosts/nuke/vendor/google/protobuf/__init__.py
new file mode 100644
index 0000000000..03f3b29ee7
--- /dev/null
+++ b/openpype/hosts/nuke/vendor/google/protobuf/__init__.py
@@ -0,0 +1,33 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# https://developers.google.com/protocol-buffers/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Copyright 2007 Google Inc. All Rights Reserved.
+
+__version__ = '3.20.1'
diff --git a/openpype/hosts/nuke/vendor/google/protobuf/any_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/any_pb2.py
new file mode 100644
index 0000000000..9121193d11
--- /dev/null
+++ b/openpype/hosts/nuke/vendor/google/protobuf/any_pb2.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/protobuf/any.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"&\n\x03\x41ny\x12\x10\n\x08type_url\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x42v\n\x13\x63om.google.protobufB\x08\x41nyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.any_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\010AnyProtoP\001Z,google.golang.org/protobuf/types/known/anypb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _ANY._serialized_start=46 + _ANY._serialized_end=84 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/api_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/api_pb2.py new file mode 100644 index 0000000000..1721b10a75 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/api_pb2.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/api.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2 +from google.protobuf import type_pb2 as google_dot_protobuf_dot_type__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/api.proto\x12\x0fgoogle.protobuf\x1a$google/protobuf/source_context.proto\x1a\x1agoogle/protobuf/type.proto\"\x81\x02\n\x03\x41pi\x12\x0c\n\x04name\x18\x01 \x01(\t\x12(\n\x07methods\x18\x02 \x03(\x0b\x32\x17.google.protobuf.Method\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12&\n\x06mixins\x18\x06 \x03(\x0b\x32\x16.google.protobuf.Mixin\x12\'\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x01\n\x06Method\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10request_type_url\x18\x02 \x01(\t\x12\x19\n\x11request_streaming\x18\x03 \x01(\x08\x12\x19\n\x11response_type_url\x18\x04 \x01(\t\x12\x1a\n\x12response_streaming\x18\x05 \x01(\x08\x12(\n\x07options\x18\x06 \x03(\x0b\x32\x17.google.protobuf.Option\x12\'\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.Syntax\"#\n\x05Mixin\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04root\x18\x02 \x01(\tBv\n\x13\x63om.google.protobufB\x08\x41piProtoP\x01Z,google.golang.org/protobuf/types/known/apipb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) 
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.api_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\010ApiProtoP\001Z,google.golang.org/protobuf/types/known/apipb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _API._serialized_start=113 + _API._serialized_end=370 + _METHOD._serialized_start=373 + _METHOD._serialized_end=586 + _MIXIN._serialized_start=588 + _MIXIN._serialized_end=623 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/compiler/__init__.py b/openpype/hosts/nuke/vendor/google/protobuf/compiler/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/vendor/google/protobuf/compiler/plugin_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/compiler/plugin_pb2.py new file mode 100644 index 0000000000..715a891370 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/compiler/plugin_pb2.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/compiler/plugin.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%google/protobuf/compiler/plugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"F\n\x07Version\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\x0e\n\x06suffix\x18\x04 \x01(\t\"\xba\x01\n\x14\x43odeGeneratorRequest\x12\x18\n\x10\x66ile_to_generate\x18\x01 \x03(\t\x12\x11\n\tparameter\x18\x02 \x01(\t\x12\x38\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\x12;\n\x10\x63ompiler_version\x18\x03 \x01(\x0b\x32!.google.protobuf.compiler.Version\"\xc1\x02\n\x15\x43odeGeneratorResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x1a\n\x12supported_features\x18\x02 \x01(\x04\x12\x42\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.File\x1a\x7f\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0finsertion_point\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x0f \x01(\t\x12?\n\x13generated_code_info\x18\x10 \x01(\x0b\x32\".google.protobuf.GeneratedCodeInfo\"8\n\x07\x46\x65\x61ture\x12\x10\n\x0c\x46\x45\x41TURE_NONE\x10\x00\x12\x1b\n\x17\x46\x45\x41TURE_PROTO3_OPTIONAL\x10\x01\x42W\n\x1c\x63om.google.protobuf.compilerB\x0cPluginProtosZ)google.golang.org/protobuf/types/pluginpb') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.compiler.plugin_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\034com.google.protobuf.compilerB\014PluginProtosZ)google.golang.org/protobuf/types/pluginpb' + _VERSION._serialized_start=101 + _VERSION._serialized_end=171 + _CODEGENERATORREQUEST._serialized_start=174 + _CODEGENERATORREQUEST._serialized_end=360 + _CODEGENERATORRESPONSE._serialized_start=363 + 
_CODEGENERATORRESPONSE._serialized_end=684 + _CODEGENERATORRESPONSE_FILE._serialized_start=499 + _CODEGENERATORRESPONSE_FILE._serialized_end=626 + _CODEGENERATORRESPONSE_FEATURE._serialized_start=628 + _CODEGENERATORRESPONSE_FEATURE._serialized_end=684 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/descriptor.py b/openpype/hosts/nuke/vendor/google/protobuf/descriptor.py new file mode 100644 index 0000000000..ad70be9a11 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/descriptor.py @@ -0,0 +1,1224 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Descriptors essentially contain exactly the information found in a .proto +file, in types that make this information accessible in Python. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import threading +import warnings + +from google.protobuf.internal import api_implementation + +_USE_C_DESCRIPTORS = False +if api_implementation.Type() == 'cpp': + # Used by MakeDescriptor in cpp mode + import binascii + import os + from google.protobuf.pyext import _message + _USE_C_DESCRIPTORS = True + + +class Error(Exception): + """Base error for this module.""" + + +class TypeTransformationError(Error): + """Error transforming between python proto type and corresponding C++ type.""" + + +if _USE_C_DESCRIPTORS: + # This metaclass allows to override the behavior of code like + # isinstance(my_descriptor, FieldDescriptor) + # and make it return True when the descriptor is an instance of the extension + # type written in C++. + class DescriptorMetaclass(type): + def __instancecheck__(cls, obj): + if super(DescriptorMetaclass, cls).__instancecheck__(obj): + return True + if isinstance(obj, cls._C_DESCRIPTOR_CLASS): + return True + return False +else: + # The standard metaclass; nothing changes. 
+ DescriptorMetaclass = type + + +class _Lock(object): + """Wrapper class of threading.Lock(), which is allowed by 'with'.""" + + def __new__(cls): + self = object.__new__(cls) + self._lock = threading.Lock() # pylint: disable=protected-access + return self + + def __enter__(self): + self._lock.acquire() + + def __exit__(self, exc_type, exc_value, exc_tb): + self._lock.release() + + +_lock = threading.Lock() + + +def _Deprecated(name): + if _Deprecated.count > 0: + _Deprecated.count -= 1 + warnings.warn( + 'Call to deprecated create function %s(). Note: Create unlinked ' + 'descriptors is going to go away. Please use get/find descriptors from ' + 'generated code or query the descriptor_pool.' + % name, + category=DeprecationWarning, stacklevel=3) + + +# Deprecated warnings will print 100 times at most which should be enough for +# users to notice and do not cause timeout. +_Deprecated.count = 100 + + +_internal_create_key = object() + + +class DescriptorBase(metaclass=DescriptorMetaclass): + + """Descriptors base class. + + This class is the base of all descriptor classes. It provides common options + related functionality. + + Attributes: + has_options: True if the descriptor has non-default options. Usually it + is not necessary to read this -- just call GetOptions() which will + happily return the default instance. However, it's sometimes useful + for efficiency, and also useful inside the protobuf implementation to + avoid some bootstrapping issues. + """ + + if _USE_C_DESCRIPTORS: + # The class, or tuple of classes, that are considered as "virtual + # subclasses" of this descriptor class. + _C_DESCRIPTOR_CLASS = () + + def __init__(self, options, serialized_options, options_class_name): + """Initialize the descriptor given its options message and the name of the + class of the options message. The name of the class is required in case + the options message is None and has to be created. + """ + self._options = options + self._options_class_name = options_class_name + self._serialized_options = serialized_options + + # Does this descriptor have non-default options? + self.has_options = (options is not None) or (serialized_options is not None) + + def _SetOptions(self, options, options_class_name): + """Sets the descriptor's options + + This function is used in generated proto2 files to update descriptor + options. It must not be used outside proto2. + """ + self._options = options + self._options_class_name = options_class_name + + # Does this descriptor have non-default options? + self.has_options = options is not None + + def GetOptions(self): + """Retrieves descriptor options. + + This method returns the options set or creates the default options for the + descriptor. + """ + if self._options: + return self._options + + from google.protobuf import descriptor_pb2 + try: + options_class = getattr(descriptor_pb2, + self._options_class_name) + except AttributeError: + raise RuntimeError('Unknown options class name %s!' % + (self._options_class_name)) + + with _lock: + if self._serialized_options is None: + self._options = options_class() + else: + self._options = _ParseOptions(options_class(), + self._serialized_options) + + return self._options + + +class _NestedDescriptorBase(DescriptorBase): + """Common class for descriptors that can be nested.""" + + def __init__(self, options, options_class_name, name, full_name, + file, containing_type, serialized_start=None, + serialized_end=None, serialized_options=None): + """Constructor. 
+ + Args: + options: Protocol message options or None + to use default message options. + options_class_name (str): The class name of the above options. + name (str): Name of this protocol message type. + full_name (str): Fully-qualified name of this protocol message type, + which will include protocol "package" name and the name of any + enclosing types. + file (FileDescriptor): Reference to file info. + containing_type: if provided, this is a nested descriptor, with this + descriptor as parent, otherwise None. + serialized_start: The start index (inclusive) in block in the + file.serialized_pb that describes this descriptor. + serialized_end: The end index (exclusive) in block in the + file.serialized_pb that describes this descriptor. + serialized_options: Protocol message serialized options or None. + """ + super(_NestedDescriptorBase, self).__init__( + options, serialized_options, options_class_name) + + self.name = name + # TODO(falk): Add function to calculate full_name instead of having it in + # memory? + self.full_name = full_name + self.file = file + self.containing_type = containing_type + + self._serialized_start = serialized_start + self._serialized_end = serialized_end + + def CopyToProto(self, proto): + """Copies this to the matching proto in descriptor_pb2. + + Args: + proto: An empty proto instance from descriptor_pb2. + + Raises: + Error: If self couldn't be serialized, due to to few constructor + arguments. + """ + if (self.file is not None and + self._serialized_start is not None and + self._serialized_end is not None): + proto.ParseFromString(self.file.serialized_pb[ + self._serialized_start:self._serialized_end]) + else: + raise Error('Descriptor does not contain serialization.') + + +class Descriptor(_NestedDescriptorBase): + + """Descriptor for a protocol message type. + + Attributes: + name (str): Name of this protocol message type. + full_name (str): Fully-qualified name of this protocol message type, + which will include protocol "package" name and the name of any + enclosing types. + containing_type (Descriptor): Reference to the descriptor of the type + containing us, or None if this is top-level. + fields (list[FieldDescriptor]): Field descriptors for all fields in + this type. + fields_by_number (dict(int, FieldDescriptor)): Same + :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed + by "number" attribute in each FieldDescriptor. + fields_by_name (dict(str, FieldDescriptor)): Same + :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed by + "name" attribute in each :class:`FieldDescriptor`. + nested_types (list[Descriptor]): Descriptor references + for all protocol message types nested within this one. + nested_types_by_name (dict(str, Descriptor)): Same Descriptor + objects as in :attr:`nested_types`, but indexed by "name" attribute + in each Descriptor. + enum_types (list[EnumDescriptor]): :class:`EnumDescriptor` references + for all enums contained within this type. + enum_types_by_name (dict(str, EnumDescriptor)): Same + :class:`EnumDescriptor` objects as in :attr:`enum_types`, but + indexed by "name" attribute in each EnumDescriptor. + enum_values_by_name (dict(str, EnumValueDescriptor)): Dict mapping + from enum value name to :class:`EnumValueDescriptor` for that value. + extensions (list[FieldDescriptor]): All extensions defined directly + within this message type (NOT within a nested type). 
+ extensions_by_name (dict(str, FieldDescriptor)): Same FieldDescriptor + objects as :attr:`extensions`, but indexed by "name" attribute of each + FieldDescriptor. + is_extendable (bool): Does this type define any extension ranges? + oneofs (list[OneofDescriptor]): The list of descriptors for oneof fields + in this message. + oneofs_by_name (dict(str, OneofDescriptor)): Same objects as in + :attr:`oneofs`, but indexed by "name" attribute. + file (FileDescriptor): Reference to file descriptor. + + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.Descriptor + + def __new__( + cls, + name=None, + full_name=None, + filename=None, + containing_type=None, + fields=None, + nested_types=None, + enum_types=None, + extensions=None, + options=None, + serialized_options=None, + is_extendable=True, + extension_ranges=None, + oneofs=None, + file=None, # pylint: disable=redefined-builtin + serialized_start=None, + serialized_end=None, + syntax=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindMessageTypeByName(full_name) + + # NOTE(tmarek): The file argument redefining a builtin is nothing we can + # fix right now since we don't know how many clients already rely on the + # name of the argument. + def __init__(self, name, full_name, filename, containing_type, fields, + nested_types, enum_types, extensions, options=None, + serialized_options=None, + is_extendable=True, extension_ranges=None, oneofs=None, + file=None, serialized_start=None, serialized_end=None, # pylint: disable=redefined-builtin + syntax=None, create_key=None): + """Arguments to __init__() are as described in the description + of Descriptor fields above. + + Note that filename is an obsolete argument, that is not used anymore. + Please use file.name to access this as an attribute. + """ + if create_key is not _internal_create_key: + _Deprecated('Descriptor') + + super(Descriptor, self).__init__( + options, 'MessageOptions', name, full_name, file, + containing_type, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + + # We have fields in addition to fields_by_name and fields_by_number, + # so that: + # 1. Clients can index fields by "order in which they're listed." + # 2. Clients can easily iterate over all fields with the terse + # syntax: for f in descriptor.fields: ... 
+ self.fields = fields + for field in self.fields: + field.containing_type = self + self.fields_by_number = dict((f.number, f) for f in fields) + self.fields_by_name = dict((f.name, f) for f in fields) + self._fields_by_camelcase_name = None + + self.nested_types = nested_types + for nested_type in nested_types: + nested_type.containing_type = self + self.nested_types_by_name = dict((t.name, t) for t in nested_types) + + self.enum_types = enum_types + for enum_type in self.enum_types: + enum_type.containing_type = self + self.enum_types_by_name = dict((t.name, t) for t in enum_types) + self.enum_values_by_name = dict( + (v.name, v) for t in enum_types for v in t.values) + + self.extensions = extensions + for extension in self.extensions: + extension.extension_scope = self + self.extensions_by_name = dict((f.name, f) for f in extensions) + self.is_extendable = is_extendable + self.extension_ranges = extension_ranges + self.oneofs = oneofs if oneofs is not None else [] + self.oneofs_by_name = dict((o.name, o) for o in self.oneofs) + for oneof in self.oneofs: + oneof.containing_type = self + self.syntax = syntax or "proto2" + + @property + def fields_by_camelcase_name(self): + """Same FieldDescriptor objects as in :attr:`fields`, but indexed by + :attr:`FieldDescriptor.camelcase_name`. + """ + if self._fields_by_camelcase_name is None: + self._fields_by_camelcase_name = dict( + (f.camelcase_name, f) for f in self.fields) + return self._fields_by_camelcase_name + + def EnumValueName(self, enum, value): + """Returns the string name of an enum value. + + This is just a small helper method to simplify a common operation. + + Args: + enum: string name of the Enum. + value: int, value of the enum. + + Returns: + string name of the enum value. + + Raises: + KeyError if either the Enum doesn't exist or the value is not a valid + value for the enum. + """ + return self.enum_types_by_name[enum].values_by_number[value].name + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.DescriptorProto. + + Args: + proto: An empty descriptor_pb2.DescriptorProto. + """ + # This function is overridden to give a better doc comment. + super(Descriptor, self).CopyToProto(proto) + + +# TODO(robinson): We should have aggressive checking here, +# for example: +# * If you specify a repeated field, you should not be allowed +# to specify a default value. +# * [Other examples here as needed]. +# +# TODO(robinson): for this and other *Descriptor classes, we +# might also want to lock things down aggressively (e.g., +# prevent clients from setting the attributes). Having +# stronger invariants here in general will reduce the number +# of runtime checks we must do in reflection.py... +class FieldDescriptor(DescriptorBase): + + """Descriptor for a single field in a .proto file. + + Attributes: + name (str): Name of this field, exactly as it appears in .proto. + full_name (str): Name of this field, including containing scope. This is + particularly relevant for extensions. + index (int): Dense, 0-indexed index giving the order that this + field textually appears within its message in the .proto file. + number (int): Tag number declared for this field in the .proto file. + + type (int): (One of the TYPE_* constants below) Declared type. + cpp_type (int): (One of the CPPTYPE_* constants below) C++ type used to + represent this field. + + label (int): (One of the LABEL_* constants below) Tells whether this + field is optional, required, or repeated. 
+ has_default_value (bool): True if this field has a default value defined, + otherwise false. + default_value (Varies): Default value of this field. Only + meaningful for non-repeated scalar fields. Repeated fields + should always set this to [], and non-repeated composite + fields should always set this to None. + + containing_type (Descriptor): Descriptor of the protocol message + type that contains this field. Set by the Descriptor constructor + if we're passed into one. + Somewhat confusingly, for extension fields, this is the + descriptor of the EXTENDED message, not the descriptor + of the message containing this field. (See is_extension and + extension_scope below). + message_type (Descriptor): If a composite field, a descriptor + of the message type contained in this field. Otherwise, this is None. + enum_type (EnumDescriptor): If this field contains an enum, a + descriptor of that enum. Otherwise, this is None. + + is_extension: True iff this describes an extension field. + extension_scope (Descriptor): Only meaningful if is_extension is True. + Gives the message that immediately contains this extension field. + Will be None iff we're a top-level (file-level) extension field. + + options (descriptor_pb2.FieldOptions): Protocol message field options or + None to use default field options. + + containing_oneof (OneofDescriptor): If the field is a member of a oneof + union, contains its descriptor. Otherwise, None. + + file (FileDescriptor): Reference to file descriptor. + """ + + # Must be consistent with C++ FieldDescriptor::Type enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. + TYPE_DOUBLE = 1 + TYPE_FLOAT = 2 + TYPE_INT64 = 3 + TYPE_UINT64 = 4 + TYPE_INT32 = 5 + TYPE_FIXED64 = 6 + TYPE_FIXED32 = 7 + TYPE_BOOL = 8 + TYPE_STRING = 9 + TYPE_GROUP = 10 + TYPE_MESSAGE = 11 + TYPE_BYTES = 12 + TYPE_UINT32 = 13 + TYPE_ENUM = 14 + TYPE_SFIXED32 = 15 + TYPE_SFIXED64 = 16 + TYPE_SINT32 = 17 + TYPE_SINT64 = 18 + MAX_TYPE = 18 + + # Must be consistent with C++ FieldDescriptor::CppType enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. + CPPTYPE_INT32 = 1 + CPPTYPE_INT64 = 2 + CPPTYPE_UINT32 = 3 + CPPTYPE_UINT64 = 4 + CPPTYPE_DOUBLE = 5 + CPPTYPE_FLOAT = 6 + CPPTYPE_BOOL = 7 + CPPTYPE_ENUM = 8 + CPPTYPE_STRING = 9 + CPPTYPE_MESSAGE = 10 + MAX_CPPTYPE = 10 + + _PYTHON_TO_CPP_PROTO_TYPE_MAP = { + TYPE_DOUBLE: CPPTYPE_DOUBLE, + TYPE_FLOAT: CPPTYPE_FLOAT, + TYPE_ENUM: CPPTYPE_ENUM, + TYPE_INT64: CPPTYPE_INT64, + TYPE_SINT64: CPPTYPE_INT64, + TYPE_SFIXED64: CPPTYPE_INT64, + TYPE_UINT64: CPPTYPE_UINT64, + TYPE_FIXED64: CPPTYPE_UINT64, + TYPE_INT32: CPPTYPE_INT32, + TYPE_SFIXED32: CPPTYPE_INT32, + TYPE_SINT32: CPPTYPE_INT32, + TYPE_UINT32: CPPTYPE_UINT32, + TYPE_FIXED32: CPPTYPE_UINT32, + TYPE_BYTES: CPPTYPE_STRING, + TYPE_STRING: CPPTYPE_STRING, + TYPE_BOOL: CPPTYPE_BOOL, + TYPE_MESSAGE: CPPTYPE_MESSAGE, + TYPE_GROUP: CPPTYPE_MESSAGE + } + + # Must be consistent with C++ FieldDescriptor::Label enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. 
+ LABEL_OPTIONAL = 1 + LABEL_REQUIRED = 2 + LABEL_REPEATED = 3 + MAX_LABEL = 3 + + # Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber, + # and kLastReservedNumber in descriptor.h + MAX_FIELD_NUMBER = (1 << 29) - 1 + FIRST_RESERVED_FIELD_NUMBER = 19000 + LAST_RESERVED_FIELD_NUMBER = 19999 + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.FieldDescriptor + + def __new__(cls, name, full_name, index, number, type, cpp_type, label, + default_value, message_type, enum_type, containing_type, + is_extension, extension_scope, options=None, + serialized_options=None, + has_default_value=True, containing_oneof=None, json_name=None, + file=None, create_key=None): # pylint: disable=redefined-builtin + _message.Message._CheckCalledFromGeneratedFile() + if is_extension: + return _message.default_pool.FindExtensionByName(full_name) + else: + return _message.default_pool.FindFieldByName(full_name) + + def __init__(self, name, full_name, index, number, type, cpp_type, label, + default_value, message_type, enum_type, containing_type, + is_extension, extension_scope, options=None, + serialized_options=None, + has_default_value=True, containing_oneof=None, json_name=None, + file=None, create_key=None): # pylint: disable=redefined-builtin + """The arguments are as described in the description of FieldDescriptor + attributes above. + + Note that containing_type may be None, and may be set later if necessary + (to deal with circular references between message types, for example). + Likewise for extension_scope. + """ + if create_key is not _internal_create_key: + _Deprecated('FieldDescriptor') + + super(FieldDescriptor, self).__init__( + options, serialized_options, 'FieldOptions') + self.name = name + self.full_name = full_name + self.file = file + self._camelcase_name = None + if json_name is None: + self.json_name = _ToJsonName(name) + else: + self.json_name = json_name + self.index = index + self.number = number + self.type = type + self.cpp_type = cpp_type + self.label = label + self.has_default_value = has_default_value + self.default_value = default_value + self.containing_type = containing_type + self.message_type = message_type + self.enum_type = enum_type + self.is_extension = is_extension + self.extension_scope = extension_scope + self.containing_oneof = containing_oneof + if api_implementation.Type() == 'cpp': + if is_extension: + self._cdescriptor = _message.default_pool.FindExtensionByName(full_name) + else: + self._cdescriptor = _message.default_pool.FindFieldByName(full_name) + else: + self._cdescriptor = None + + @property + def camelcase_name(self): + """Camelcase name of this field. + + Returns: + str: the name in CamelCase. + """ + if self._camelcase_name is None: + self._camelcase_name = _ToCamelCase(self.name) + return self._camelcase_name + + @property + def has_presence(self): + """Whether the field distinguishes between unpopulated and default values. + + Raises: + RuntimeError: singular field that is not linked with message nor file. 
+ """ + if self.label == FieldDescriptor.LABEL_REPEATED: + return False + if (self.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE or + self.containing_oneof): + return True + if hasattr(self.file, 'syntax'): + return self.file.syntax == 'proto2' + if hasattr(self.message_type, 'syntax'): + return self.message_type.syntax == 'proto2' + raise RuntimeError( + 'has_presence is not ready to use because field %s is not' + ' linked with message type nor file' % self.full_name) + + @staticmethod + def ProtoTypeToCppProtoType(proto_type): + """Converts from a Python proto type to a C++ Proto Type. + + The Python ProtocolBuffer classes specify both the 'Python' datatype and the + 'C++' datatype - and they're not the same. This helper method should + translate from one to another. + + Args: + proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*) + Returns: + int: descriptor.FieldDescriptor.CPPTYPE_*, the C++ type. + Raises: + TypeTransformationError: when the Python proto type isn't known. + """ + try: + return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type] + except KeyError: + raise TypeTransformationError('Unknown proto_type: %s' % proto_type) + + +class EnumDescriptor(_NestedDescriptorBase): + + """Descriptor for an enum defined in a .proto file. + + Attributes: + name (str): Name of the enum type. + full_name (str): Full name of the type, including package name + and any enclosing type(s). + + values (list[EnumValueDescriptor]): List of the values + in this enum. + values_by_name (dict(str, EnumValueDescriptor)): Same as :attr:`values`, + but indexed by the "name" field of each EnumValueDescriptor. + values_by_number (dict(int, EnumValueDescriptor)): Same as :attr:`values`, + but indexed by the "number" field of each EnumValueDescriptor. + containing_type (Descriptor): Descriptor of the immediate containing + type of this enum, or None if this is an enum defined at the + top level in a .proto file. Set by Descriptor's constructor + if we're passed into one. + file (FileDescriptor): Reference to file descriptor. + options (descriptor_pb2.EnumOptions): Enum options message or + None to use default enum options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.EnumDescriptor + + def __new__(cls, name, full_name, filename, values, + containing_type=None, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindEnumTypeByName(full_name) + + def __init__(self, name, full_name, filename, values, + containing_type=None, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + """Arguments are as described in the attribute description above. + + Note that filename is an obsolete argument, that is not used anymore. + Please use file.name to access this as an attribute. + """ + if create_key is not _internal_create_key: + _Deprecated('EnumDescriptor') + + super(EnumDescriptor, self).__init__( + options, 'EnumOptions', name, full_name, file, + containing_type, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + + self.values = values + for value in self.values: + value.type = self + self.values_by_name = dict((v.name, v) for v in values) + # Values are reversed to ensure that the first alias is retained. 
+ self.values_by_number = dict((v.number, v) for v in reversed(values)) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.EnumDescriptorProto. + + Args: + proto (descriptor_pb2.EnumDescriptorProto): An empty descriptor proto. + """ + # This function is overridden to give a better doc comment. + super(EnumDescriptor, self).CopyToProto(proto) + + +class EnumValueDescriptor(DescriptorBase): + + """Descriptor for a single value within an enum. + + Attributes: + name (str): Name of this value. + index (int): Dense, 0-indexed index giving the order that this + value appears textually within its enum in the .proto file. + number (int): Actual number assigned to this enum value. + type (EnumDescriptor): :class:`EnumDescriptor` to which this value + belongs. Set by :class:`EnumDescriptor`'s constructor if we're + passed into one. + options (descriptor_pb2.EnumValueOptions): Enum value options message or + None to use default enum value options options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.EnumValueDescriptor + + def __new__(cls, name, index, number, + type=None, # pylint: disable=redefined-builtin + options=None, serialized_options=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + # There is no way we can build a complete EnumValueDescriptor with the + # given parameters (the name of the Enum is not known, for example). + # Fortunately generated files just pass it to the EnumDescriptor() + # constructor, which will ignore it, so returning None is good enough. + return None + + def __init__(self, name, index, number, + type=None, # pylint: disable=redefined-builtin + options=None, serialized_options=None, create_key=None): + """Arguments are as described in the attribute description above.""" + if create_key is not _internal_create_key: + _Deprecated('EnumValueDescriptor') + + super(EnumValueDescriptor, self).__init__( + options, serialized_options, 'EnumValueOptions') + self.name = name + self.index = index + self.number = number + self.type = type + + +class OneofDescriptor(DescriptorBase): + """Descriptor for a oneof field. + + Attributes: + name (str): Name of the oneof field. + full_name (str): Full name of the oneof field, including package name. + index (int): 0-based index giving the order of the oneof field inside + its containing type. + containing_type (Descriptor): :class:`Descriptor` of the protocol message + type that contains this field. Set by the :class:`Descriptor` constructor + if we're passed into one. + fields (list[FieldDescriptor]): The list of field descriptors this + oneof can contain. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.OneofDescriptor + + def __new__( + cls, name, full_name, index, containing_type, fields, options=None, + serialized_options=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindOneofByName(full_name) + + def __init__( + self, name, full_name, index, containing_type, fields, options=None, + serialized_options=None, create_key=None): + """Arguments are as described in the attribute description above.""" + if create_key is not _internal_create_key: + _Deprecated('OneofDescriptor') + + super(OneofDescriptor, self).__init__( + options, serialized_options, 'OneofOptions') + self.name = name + self.full_name = full_name + self.index = index + self.containing_type = containing_type + self.fields = fields + + +class ServiceDescriptor(_NestedDescriptorBase): + + """Descriptor for a service. 
+ + Attributes: + name (str): Name of the service. + full_name (str): Full name of the service, including package name. + index (int): 0-indexed index giving the order that this services + definition appears within the .proto file. + methods (list[MethodDescriptor]): List of methods provided by this + service. + methods_by_name (dict(str, MethodDescriptor)): Same + :class:`MethodDescriptor` objects as in :attr:`methods_by_name`, but + indexed by "name" attribute in each :class:`MethodDescriptor`. + options (descriptor_pb2.ServiceOptions): Service options message or + None to use default service options. + file (FileDescriptor): Reference to file info. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.ServiceDescriptor + + def __new__( + cls, + name=None, + full_name=None, + index=None, + methods=None, + options=None, + serialized_options=None, + file=None, # pylint: disable=redefined-builtin + serialized_start=None, + serialized_end=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access + return _message.default_pool.FindServiceByName(full_name) + + def __init__(self, name, full_name, index, methods, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + if create_key is not _internal_create_key: + _Deprecated('ServiceDescriptor') + + super(ServiceDescriptor, self).__init__( + options, 'ServiceOptions', name, full_name, file, + None, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + self.index = index + self.methods = methods + self.methods_by_name = dict((m.name, m) for m in methods) + # Set the containing service for each method in this service. + for method in self.methods: + method.containing_service = self + + def FindMethodByName(self, name): + """Searches for the specified method, and returns its descriptor. + + Args: + name (str): Name of the method. + Returns: + MethodDescriptor or None: the descriptor for the requested method, if + found. + """ + return self.methods_by_name.get(name, None) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.ServiceDescriptorProto. + + Args: + proto (descriptor_pb2.ServiceDescriptorProto): An empty descriptor proto. + """ + # This function is overridden to give a better doc comment. + super(ServiceDescriptor, self).CopyToProto(proto) + + +class MethodDescriptor(DescriptorBase): + + """Descriptor for a method in a service. + + Attributes: + name (str): Name of the method within the service. + full_name (str): Full name of method. + index (int): 0-indexed index of the method inside the service. + containing_service (ServiceDescriptor): The service that contains this + method. + input_type (Descriptor): The descriptor of the message that this method + accepts. + output_type (Descriptor): The descriptor of the message that this method + returns. + client_streaming (bool): Whether this method uses client streaming. + server_streaming (bool): Whether this method uses server streaming. + options (descriptor_pb2.MethodOptions or None): Method options message, or + None to use default method options. 
+ """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.MethodDescriptor + + def __new__(cls, + name, + full_name, + index, + containing_service, + input_type, + output_type, + client_streaming=False, + server_streaming=False, + options=None, + serialized_options=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access + return _message.default_pool.FindMethodByName(full_name) + + def __init__(self, + name, + full_name, + index, + containing_service, + input_type, + output_type, + client_streaming=False, + server_streaming=False, + options=None, + serialized_options=None, + create_key=None): + """The arguments are as described in the description of MethodDescriptor + attributes above. + + Note that containing_service may be None, and may be set later if necessary. + """ + if create_key is not _internal_create_key: + _Deprecated('MethodDescriptor') + + super(MethodDescriptor, self).__init__( + options, serialized_options, 'MethodOptions') + self.name = name + self.full_name = full_name + self.index = index + self.containing_service = containing_service + self.input_type = input_type + self.output_type = output_type + self.client_streaming = client_streaming + self.server_streaming = server_streaming + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.MethodDescriptorProto. + + Args: + proto (descriptor_pb2.MethodDescriptorProto): An empty descriptor proto. + + Raises: + Error: If self couldn't be serialized, due to too few constructor + arguments. + """ + if self.containing_service is not None: + from google.protobuf import descriptor_pb2 + service_proto = descriptor_pb2.ServiceDescriptorProto() + self.containing_service.CopyToProto(service_proto) + proto.CopyFrom(service_proto.method[self.index]) + else: + raise Error('Descriptor does not contain a service.') + + +class FileDescriptor(DescriptorBase): + """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto. + + Note that :attr:`enum_types_by_name`, :attr:`extensions_by_name`, and + :attr:`dependencies` fields are only set by the + :py:mod:`google.protobuf.message_factory` module, and not by the generated + proto code. + + Attributes: + name (str): Name of file, relative to root of source tree. + package (str): Name of the package + syntax (str): string indicating syntax of the file (can be "proto2" or + "proto3") + serialized_pb (bytes): Byte string of serialized + :class:`descriptor_pb2.FileDescriptorProto`. + dependencies (list[FileDescriptor]): List of other :class:`FileDescriptor` + objects this :class:`FileDescriptor` depends on. + public_dependencies (list[FileDescriptor]): A subset of + :attr:`dependencies`, which were declared as "public". + message_types_by_name (dict(str, Descriptor)): Mapping from message names + to their :class:`Descriptor`. + enum_types_by_name (dict(str, EnumDescriptor)): Mapping from enum names to + their :class:`EnumDescriptor`. + extensions_by_name (dict(str, FieldDescriptor)): Mapping from extension + names declared at file scope to their :class:`FieldDescriptor`. + services_by_name (dict(str, ServiceDescriptor)): Mapping from services' + names to their :class:`ServiceDescriptor`. + pool (DescriptorPool): The pool this descriptor belongs to. When not + passed to the constructor, the global default pool is used. 
+ """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.FileDescriptor + + def __new__(cls, name, package, options=None, + serialized_options=None, serialized_pb=None, + dependencies=None, public_dependencies=None, + syntax=None, pool=None, create_key=None): + # FileDescriptor() is called from various places, not only from generated + # files, to register dynamic proto files and messages. + # pylint: disable=g-explicit-bool-comparison + if serialized_pb == b'': + # Cpp generated code must be linked in if serialized_pb is '' + try: + return _message.default_pool.FindFileByName(name) + except KeyError: + raise RuntimeError('Please link in cpp generated lib for %s' % (name)) + elif serialized_pb: + return _message.default_pool.AddSerializedFile(serialized_pb) + else: + return super(FileDescriptor, cls).__new__(cls) + + def __init__(self, name, package, options=None, + serialized_options=None, serialized_pb=None, + dependencies=None, public_dependencies=None, + syntax=None, pool=None, create_key=None): + """Constructor.""" + if create_key is not _internal_create_key: + _Deprecated('FileDescriptor') + + super(FileDescriptor, self).__init__( + options, serialized_options, 'FileOptions') + + if pool is None: + from google.protobuf import descriptor_pool + pool = descriptor_pool.Default() + self.pool = pool + self.message_types_by_name = {} + self.name = name + self.package = package + self.syntax = syntax or "proto2" + self.serialized_pb = serialized_pb + + self.enum_types_by_name = {} + self.extensions_by_name = {} + self.services_by_name = {} + self.dependencies = (dependencies or []) + self.public_dependencies = (public_dependencies or []) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.FileDescriptorProto. + + Args: + proto: An empty descriptor_pb2.FileDescriptorProto. + """ + proto.ParseFromString(self.serialized_pb) + + +def _ParseOptions(message, string): + """Parses serialized options. + + This helper function is used to parse serialized options in generated + proto2 files. It must not be used outside proto2. + """ + message.ParseFromString(string) + return message + + +def _ToCamelCase(name): + """Converts name to camel-case and returns it.""" + capitalize_next = False + result = [] + + for c in name: + if c == '_': + if result: + capitalize_next = True + elif capitalize_next: + result.append(c.upper()) + capitalize_next = False + else: + result += c + + # Lower-case the first letter. + if result and result[0].isupper(): + result[0] = result[0].lower() + return ''.join(result) + + +def _OptionsOrNone(descriptor_proto): + """Returns the value of the field `options`, or None if it is not set.""" + if descriptor_proto.HasField('options'): + return descriptor_proto.options + else: + return None + + +def _ToJsonName(name): + """Converts name to Json name and returns it.""" + capitalize_next = False + result = [] + + for c in name: + if c == '_': + capitalize_next = True + elif capitalize_next: + result.append(c.upper()) + capitalize_next = False + else: + result += c + + return ''.join(result) + + +def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True, + syntax=None): + """Make a protobuf Descriptor given a DescriptorProto protobuf. + + Handles nested descriptors. Note that this is limited to the scope of defining + a message inside of another message. Composite fields can currently only be + resolved if the message is defined in the same scope as the field. + + Args: + desc_proto: The descriptor_pb2.DescriptorProto protobuf message. 
+ package: Optional package name for the new message Descriptor (string). + build_file_if_cpp: Update the C++ descriptor pool if api matches. + Set to False on recursion, so no duplicates are created. + syntax: The syntax/semantics that should be used. Set to "proto3" to get + proto3 field presence semantics. + Returns: + A Descriptor for protobuf messages. + """ + if api_implementation.Type() == 'cpp' and build_file_if_cpp: + # The C++ implementation requires all descriptors to be backed by the same + # definition in the C++ descriptor pool. To do this, we build a + # FileDescriptorProto with the same definition as this descriptor and build + # it into the pool. + from google.protobuf import descriptor_pb2 + file_descriptor_proto = descriptor_pb2.FileDescriptorProto() + file_descriptor_proto.message_type.add().MergeFrom(desc_proto) + + # Generate a random name for this proto file to prevent conflicts with any + # imported ones. We need to specify a file name so the descriptor pool + # accepts our FileDescriptorProto, but it is not important what that file + # name is actually set to. + proto_name = binascii.hexlify(os.urandom(16)).decode('ascii') + + if package: + file_descriptor_proto.name = os.path.join(package.replace('.', '/'), + proto_name + '.proto') + file_descriptor_proto.package = package + else: + file_descriptor_proto.name = proto_name + '.proto' + + _message.default_pool.Add(file_descriptor_proto) + result = _message.default_pool.FindFileByName(file_descriptor_proto.name) + + if _USE_C_DESCRIPTORS: + return result.message_types_by_name[desc_proto.name] + + full_message_name = [desc_proto.name] + if package: full_message_name.insert(0, package) + + # Create Descriptors for enum types + enum_types = {} + for enum_proto in desc_proto.enum_type: + full_name = '.'.join(full_message_name + [enum_proto.name]) + enum_desc = EnumDescriptor( + enum_proto.name, full_name, None, [ + EnumValueDescriptor(enum_val.name, ii, enum_val.number, + create_key=_internal_create_key) + for ii, enum_val in enumerate(enum_proto.value)], + create_key=_internal_create_key) + enum_types[full_name] = enum_desc + + # Create Descriptors for nested types + nested_types = {} + for nested_proto in desc_proto.nested_type: + full_name = '.'.join(full_message_name + [nested_proto.name]) + # Nested types are just those defined inside of the message, not all types + # used by fields in the message, so no loops are possible here. 
+ nested_desc = MakeDescriptor(nested_proto, + package='.'.join(full_message_name), + build_file_if_cpp=False, + syntax=syntax) + nested_types[full_name] = nested_desc + + fields = [] + for field_proto in desc_proto.field: + full_name = '.'.join(full_message_name + [field_proto.name]) + enum_desc = None + nested_desc = None + if field_proto.json_name: + json_name = field_proto.json_name + else: + json_name = None + if field_proto.HasField('type_name'): + type_name = field_proto.type_name + full_type_name = '.'.join(full_message_name + + [type_name[type_name.rfind('.')+1:]]) + if full_type_name in nested_types: + nested_desc = nested_types[full_type_name] + elif full_type_name in enum_types: + enum_desc = enum_types[full_type_name] + # Else type_name references a non-local type, which isn't implemented + field = FieldDescriptor( + field_proto.name, full_name, field_proto.number - 1, + field_proto.number, field_proto.type, + FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type), + field_proto.label, None, nested_desc, enum_desc, None, False, None, + options=_OptionsOrNone(field_proto), has_default_value=False, + json_name=json_name, create_key=_internal_create_key) + fields.append(field) + + desc_name = '.'.join(full_message_name) + return Descriptor(desc_proto.name, desc_name, None, None, fields, + list(nested_types.values()), list(enum_types.values()), [], + options=_OptionsOrNone(desc_proto), + create_key=_internal_create_key) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/descriptor_database.py b/openpype/hosts/nuke/vendor/google/protobuf/descriptor_database.py new file mode 100644 index 0000000000..073eddc711 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/descriptor_database.py @@ -0,0 +1,177 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Provides a container for DescriptorProtos.""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +import warnings + + +class Error(Exception): + pass + + +class DescriptorDatabaseConflictingDefinitionError(Error): + """Raised when a proto is added with the same name & different descriptor.""" + + +class DescriptorDatabase(object): + """A container accepting FileDescriptorProtos and maps DescriptorProtos.""" + + def __init__(self): + self._file_desc_protos_by_file = {} + self._file_desc_protos_by_symbol = {} + + def Add(self, file_desc_proto): + """Adds the FileDescriptorProto and its types to this database. + + Args: + file_desc_proto: The FileDescriptorProto to add. + Raises: + DescriptorDatabaseConflictingDefinitionError: if an attempt is made to + add a proto with the same name but different definition than an + existing proto in the database. + """ + proto_name = file_desc_proto.name + if proto_name not in self._file_desc_protos_by_file: + self._file_desc_protos_by_file[proto_name] = file_desc_proto + elif self._file_desc_protos_by_file[proto_name] != file_desc_proto: + raise DescriptorDatabaseConflictingDefinitionError( + '%s already added, but with different descriptor.' % proto_name) + else: + return + + # Add all the top-level descriptors to the index. + package = file_desc_proto.package + for message in file_desc_proto.message_type: + for name in _ExtractSymbols(message, package): + self._AddSymbol(name, file_desc_proto) + for enum in file_desc_proto.enum_type: + self._AddSymbol(('.'.join((package, enum.name))), file_desc_proto) + for enum_value in enum.value: + self._file_desc_protos_by_symbol[ + '.'.join((package, enum_value.name))] = file_desc_proto + for extension in file_desc_proto.extension: + self._AddSymbol(('.'.join((package, extension.name))), file_desc_proto) + for service in file_desc_proto.service: + self._AddSymbol(('.'.join((package, service.name))), file_desc_proto) + + def FindFileByName(self, name): + """Finds the file descriptor proto by file name. + + Typically the file name is a relative path ending to a .proto file. The + proto with the given name will have to have been added to this database + using the Add method or else an error will be raised. + + Args: + name: The file name to find. + + Returns: + The file descriptor proto matching the name. + + Raises: + KeyError if no file by the given name was added. + """ + + return self._file_desc_protos_by_file[name] + + def FindFileContainingSymbol(self, symbol): + """Finds the file descriptor proto containing the specified symbol. + + The symbol should be a fully qualified name including the file descriptor's + package and any containing messages. Some examples: + + 'some.package.name.Message' + 'some.package.name.Message.NestedEnum' + 'some.package.name.Message.some_field' + + The file descriptor proto containing the specified symbol must be added to + this database using the Add method or else an error will be raised. + + Args: + symbol: The fully qualified symbol name. + + Returns: + The file descriptor proto containing the symbol. + + Raises: + KeyError if no file contains the specified symbol. + """ + try: + return self._file_desc_protos_by_symbol[symbol] + except KeyError: + # Fields, enum values, and nested extensions are not in + # _file_desc_protos_by_symbol. Try to find the top level + # descriptor. Non-existent nested symbol under a valid top level + # descriptor can also be found. The behavior is the same with + # protobuf C++. 
+ top_level, _, _ = symbol.rpartition('.') + try: + return self._file_desc_protos_by_symbol[top_level] + except KeyError: + # Raise the original symbol as a KeyError for better diagnostics. + raise KeyError(symbol) + + def FindFileContainingExtension(self, extendee_name, extension_number): + # TODO(jieluo): implement this API. + return None + + def FindAllExtensionNumbers(self, extendee_name): + # TODO(jieluo): implement this API. + return [] + + def _AddSymbol(self, name, file_desc_proto): + if name in self._file_desc_protos_by_symbol: + warn_msg = ('Conflict register for file "' + file_desc_proto.name + + '": ' + name + + ' is already defined in file "' + + self._file_desc_protos_by_symbol[name].name + '"') + warnings.warn(warn_msg, RuntimeWarning) + self._file_desc_protos_by_symbol[name] = file_desc_proto + + +def _ExtractSymbols(desc_proto, package): + """Pulls out all the symbols from a descriptor proto. + + Args: + desc_proto: The proto to extract symbols from. + package: The package containing the descriptor type. + + Yields: + The fully qualified name found in the descriptor. + """ + message_name = package + '.' + desc_proto.name if package else desc_proto.name + yield message_name + for nested_type in desc_proto.nested_type: + for symbol in _ExtractSymbols(nested_type, message_name): + yield symbol + for enum_type in desc_proto.enum_type: + yield '.'.join((message_name, enum_type.name)) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/descriptor_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/descriptor_pb2.py new file mode 100644 index 0000000000..f570386432 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/descriptor_pb2.py @@ -0,0 +1,1925 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/descriptor.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR = _descriptor.FileDescriptor( + name='google/protobuf/descriptor.proto', + package='google.protobuf', + syntax='proto2', + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdb\x03\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x19\n\x11public_dependency\x18\n \x03(\x05\x12\x17\n\x0fweak_dependency\x18\x0b \x03(\x05\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x39\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfo\x12\x0e\n\x06syntax\x18\x0c \x01(\t\"\xa9\x05\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x39\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProto\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x12\x46\n\x0ereserved_range\x18\t \x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRange\x12\x15\n\rreserved_name\x18\n \x03(\t\x1a\x65\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\x12\x37\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptions\x1a+\n\rReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"g\n\x15\x45xtensionRangeOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd5\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12\x13\n\x0boneof_index\x18\t \x01(\x05\x12\x11\n\tjson_name\x18\n \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\x12\x17\n\x0fproto3_optional\x18\x11 
\x01(\x08\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"T\n\x14OneofDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptions\"\xa4\x02\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\x12N\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRange\x12\x15\n\rreserved_name\x18\x05 \x03(\t\x1a/\n\x11\x45numReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\xc1\x01\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\x12\x1f\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lse\"\xa5\x06\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12)\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01\x12%\n\x16java_string_check_utf8\x18\x1b \x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12\x12\n\ngo_package\x18\x0b \x01(\t\x12\"\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lse\x12$\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lse\x12#\n\x14php_generic_services\x18* \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04true\x12\x19\n\x11objc_class_prefix\x18$ \x01(\t\x12\x18\n\x10\x63sharp_namespace\x18% \x01(\t\x12\x14\n\x0cswift_prefix\x18\' \x01(\t\x12\x18\n\x10php_class_prefix\x18( \x01(\t\x12\x15\n\rphp_namespace\x18) \x01(\t\x12\x1e\n\x16php_metadata_namespace\x18, \x01(\t\x12\x14\n\x0cruby_package\x18- \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 
\x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\x84\x02\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x11\n\tmap_entry\x18\x07 \x01(\x08\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\xbe\x03\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12?\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMAL\x12\x13\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05\"^\n\x0cOneofOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x93\x01\n\x0b\x45numOptions\x12\x13\n\x0b\x61llow_alias\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"}\n\x10\x45numValueOptions\x12\x19\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"{\n\x0eServiceOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xad\x02\n\rMethodOptions\x12\x19\n\ndeprecated\x18! 
\x01(\x08:\x05\x66\x61lse\x12_\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWN\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9e\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x12\x17\n\x0f\x61ggregate_value\x18\x08 \x01(\t\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\"\xd5\x01\n\x0eSourceCodeInfo\x12:\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.Location\x1a\x86\x01\n\x08Location\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x10\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01\x12\x18\n\x10leading_comments\x18\x03 \x01(\t\x12\x19\n\x11trailing_comments\x18\x04 \x01(\t\x12!\n\x19leading_detached_comments\x18\x06 \x03(\t\"\xa7\x01\n\x11GeneratedCodeInfo\x12\x41\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.Annotation\x1aO\n\nAnnotation\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x03 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x05\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection' + ) +else: + DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdb\x03\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x19\n\x11public_dependency\x18\n \x03(\x05\x12\x17\n\x0fweak_dependency\x18\x0b \x03(\x05\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x39\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfo\x12\x0e\n\x06syntax\x18\x0c \x01(\t\"\xa9\x05\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x39\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProto\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x12\x46\n\x0ereserved_range\x18\t 
\x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRange\x12\x15\n\rreserved_name\x18\n \x03(\t\x1a\x65\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\x12\x37\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptions\x1a+\n\rReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"g\n\x15\x45xtensionRangeOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd5\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12\x13\n\x0boneof_index\x18\t \x01(\x05\x12\x11\n\tjson_name\x18\n \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\x12\x17\n\x0fproto3_optional\x18\x11 \x01(\x08\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"T\n\x14OneofDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptions\"\xa4\x02\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\x12N\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRange\x12\x15\n\rreserved_name\x18\x05 \x03(\t\x1a/\n\x11\x45numReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\xc1\x01\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\x12\x1f\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lse\"\xa5\x06\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12)\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01\x12%\n\x16java_string_check_utf8\x18\x1b 
\x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12\x12\n\ngo_package\x18\x0b \x01(\t\x12\"\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lse\x12$\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lse\x12#\n\x14php_generic_services\x18* \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04true\x12\x19\n\x11objc_class_prefix\x18$ \x01(\t\x12\x18\n\x10\x63sharp_namespace\x18% \x01(\t\x12\x14\n\x0cswift_prefix\x18\' \x01(\t\x12\x18\n\x10php_class_prefix\x18( \x01(\t\x12\x15\n\rphp_namespace\x18) \x01(\t\x12\x1e\n\x16php_metadata_namespace\x18, \x01(\t\x12\x14\n\x0cruby_package\x18- \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\x84\x02\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x11\n\tmap_entry\x18\x07 \x01(\x08\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\xbe\x03\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12?\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMAL\x12\x13\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05\"^\n\x0cOneofOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x93\x01\n\x0b\x45numOptions\x12\x13\n\x0b\x61llow_alias\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"}\n\x10\x45numValueOptions\x12\x19\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"{\n\x0eServiceOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xad\x02\n\rMethodOptions\x12\x19\n\ndeprecated\x18! 
\x01(\x08:\x05\x66\x61lse\x12_\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWN\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9e\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x12\x17\n\x0f\x61ggregate_value\x18\x08 \x01(\t\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\"\xd5\x01\n\x0eSourceCodeInfo\x12:\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.Location\x1a\x86\x01\n\x08Location\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x10\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01\x12\x18\n\x10leading_comments\x18\x03 \x01(\t\x12\x19\n\x11trailing_comments\x18\x04 \x01(\t\x12!\n\x19leading_detached_comments\x18\x06 \x03(\t\"\xa7\x01\n\x11GeneratedCodeInfo\x12\x41\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.Annotation\x1aO\n\nAnnotation\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x03 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x05\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection') + +if _descriptor._USE_C_DESCRIPTORS == False: + _FIELDDESCRIPTORPROTO_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='google.protobuf.FieldDescriptorProto.Type', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TYPE_DOUBLE', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FLOAT', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_INT64', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_UINT64', index=3, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_INT32', index=4, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FIXED64', index=5, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FIXED32', index=6, number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_BOOL', index=7, number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_STRING', index=8, number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + 
_descriptor.EnumValueDescriptor( + name='TYPE_GROUP', index=9, number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_MESSAGE', index=10, number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_BYTES', index=11, number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_UINT32', index=12, number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_ENUM', index=13, number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SFIXED32', index=14, number=15, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SFIXED64', index=15, number=16, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SINT32', index=16, number=17, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SINT64', index=17, number=18, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_TYPE) + + _FIELDDESCRIPTORPROTO_LABEL = _descriptor.EnumDescriptor( + name='Label', + full_name='google.protobuf.FieldDescriptorProto.Label', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='LABEL_OPTIONAL', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LABEL_REQUIRED', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LABEL_REPEATED', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_LABEL) + + _FILEOPTIONS_OPTIMIZEMODE = _descriptor.EnumDescriptor( + name='OptimizeMode', + full_name='google.protobuf.FileOptions.OptimizeMode', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='SPEED', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CODE_SIZE', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LITE_RUNTIME', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FILEOPTIONS_OPTIMIZEMODE) + + _FIELDOPTIONS_CTYPE = _descriptor.EnumDescriptor( + name='CType', + full_name='google.protobuf.FieldOptions.CType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + 
name='STRING', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CORD', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='STRING_PIECE', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_CTYPE) + + _FIELDOPTIONS_JSTYPE = _descriptor.EnumDescriptor( + name='JSType', + full_name='google.protobuf.FieldOptions.JSType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='JS_NORMAL', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='JS_STRING', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='JS_NUMBER', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_JSTYPE) + + _METHODOPTIONS_IDEMPOTENCYLEVEL = _descriptor.EnumDescriptor( + name='IdempotencyLevel', + full_name='google.protobuf.MethodOptions.IdempotencyLevel', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='IDEMPOTENCY_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='NO_SIDE_EFFECTS', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='IDEMPOTENT', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_METHODOPTIONS_IDEMPOTENCYLEVEL) + + + _FILEDESCRIPTORSET = _descriptor.Descriptor( + name='FileDescriptorSet', + full_name='google.protobuf.FileDescriptorSet', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _FILEDESCRIPTORPROTO = _descriptor.Descriptor( + name='FileDescriptorProto', + full_name='google.protobuf.FileDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='public_dependency', full_name='google.protobuf.FileDescriptorProto.public_dependency', index=3, + number=10, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='weak_dependency', full_name='google.protobuf.FileDescriptorProto.weak_dependency', index=4, + number=11, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=5, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=6, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='service', full_name='google.protobuf.FileDescriptorProto.service', index=7, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=8, + number=7, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.FileDescriptorProto.options', index=9, + number=8, type=11, 
cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='source_code_info', full_name='google.protobuf.FileDescriptorProto.source_code_info', index=10, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='syntax', full_name='google.protobuf.FileDescriptorProto.syntax', index=11, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _DESCRIPTORPROTO_EXTENSIONRANGE = _descriptor.Descriptor( + name='ExtensionRange', + full_name='google.protobuf.DescriptorProto.ExtensionRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.DescriptorProto.ExtensionRange.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _DESCRIPTORPROTO_RESERVEDRANGE = _descriptor.Descriptor( + name='ReservedRange', + full_name='google.protobuf.DescriptorProto.ReservedRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.DescriptorProto.ReservedRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.DescriptorProto.ReservedRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _DESCRIPTORPROTO = _descriptor.Descriptor( + name='DescriptorProto', + full_name='google.protobuf.DescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.DescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='field', full_name='google.protobuf.DescriptorProto.field', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='oneof_decl', full_name='google.protobuf.DescriptorProto.oneof_decl', index=6, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.DescriptorProto.options', index=7, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_range', full_name='google.protobuf.DescriptorProto.reserved_range', index=8, + number=9, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_name', full_name='google.protobuf.DescriptorProto.reserved_name', index=9, + number=10, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, _DESCRIPTORPROTO_RESERVEDRANGE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _EXTENSIONRANGEOPTIONS = _descriptor.Descriptor( + name='ExtensionRangeOptions', + full_name='google.protobuf.ExtensionRangeOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.ExtensionRangeOptions.uninterpreted_option', index=0, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _FIELDDESCRIPTORPROTO = _descriptor.Descriptor( + name='FieldDescriptorProto', + full_name='google.protobuf.FieldDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='label', 
full_name='google.protobuf.FieldDescriptorProto.label', index=2, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='oneof_index', full_name='google.protobuf.FieldDescriptorProto.oneof_index', index=7, + number=9, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='json_name', full_name='google.protobuf.FieldDescriptorProto.json_name', index=8, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=9, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='proto3_optional', full_name='google.protobuf.FieldDescriptorProto.proto3_optional', index=10, + number=17, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FIELDDESCRIPTORPROTO_TYPE, + _FIELDDESCRIPTORPROTO_LABEL, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ONEOFDESCRIPTORPROTO = _descriptor.Descriptor( + name='OneofDescriptorProto', + full_name='google.protobuf.OneofDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.OneofDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.OneofDescriptorProto.options', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE = _descriptor.Descriptor( + name='EnumReservedRange', + full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _ENUMDESCRIPTORPROTO = _descriptor.Descriptor( + name='EnumDescriptorProto', + full_name='google.protobuf.EnumDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', 
full_name='google.protobuf.EnumDescriptorProto.value', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_range', full_name='google.protobuf.EnumDescriptorProto.reserved_range', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_name', full_name='google.protobuf.EnumDescriptorProto.reserved_name', index=4, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ENUMVALUEDESCRIPTORPROTO = _descriptor.Descriptor( + name='EnumValueDescriptorProto', + full_name='google.protobuf.EnumValueDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.EnumValueDescriptorProto.number', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _SERVICEDESCRIPTORPROTO = _descriptor.Descriptor( + name='ServiceDescriptorProto', + 
full_name='google.protobuf.ServiceDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _METHODDESCRIPTORPROTO = _descriptor.Descriptor( + name='MethodDescriptorProto', + full_name='google.protobuf.MethodDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='client_streaming', 
full_name='google.protobuf.MethodDescriptorProto.client_streaming', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='server_streaming', full_name='google.protobuf.MethodDescriptorProto.server_streaming', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _FILEOPTIONS = _descriptor.Descriptor( + name='FileOptions', + full_name='google.protobuf.FileOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='java_package', full_name='google.protobuf.FileOptions.java_package', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_generate_equals_and_hash', full_name='google.protobuf.FileOptions.java_generate_equals_and_hash', index=3, + number=20, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_string_check_utf8', full_name='google.protobuf.FileOptions.java_string_check_utf8', index=4, + number=27, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=5, + number=9, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='go_package', full_name='google.protobuf.FileOptions.go_package', index=6, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=7, + number=16, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=8, + number=17, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='py_generic_services', full_name='google.protobuf.FileOptions.py_generic_services', index=9, + number=18, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_generic_services', full_name='google.protobuf.FileOptions.php_generic_services', index=10, + number=42, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.FileOptions.deprecated', index=11, + number=23, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cc_enable_arenas', full_name='google.protobuf.FileOptions.cc_enable_arenas', index=12, + number=31, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='objc_class_prefix', full_name='google.protobuf.FileOptions.objc_class_prefix', index=13, + number=36, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='csharp_namespace', full_name='google.protobuf.FileOptions.csharp_namespace', index=14, 
+ number=37, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='swift_prefix', full_name='google.protobuf.FileOptions.swift_prefix', index=15, + number=39, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_class_prefix', full_name='google.protobuf.FileOptions.php_class_prefix', index=16, + number=40, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_namespace', full_name='google.protobuf.FileOptions.php_namespace', index=17, + number=41, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_metadata_namespace', full_name='google.protobuf.FileOptions.php_metadata_namespace', index=18, + number=44, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ruby_package', full_name='google.protobuf.FileOptions.ruby_package', index=19, + number=45, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=20, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FILEOPTIONS_OPTIMIZEMODE, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _MESSAGEOPTIONS = _descriptor.Descriptor( + name='MessageOptions', + full_name='google.protobuf.MessageOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.MessageOptions.deprecated', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='map_entry', full_name='google.protobuf.MessageOptions.map_entry', index=3, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=4, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _FIELDOPTIONS = _descriptor.Descriptor( + name='FieldOptions', + full_name='google.protobuf.FieldOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='packed', full_name='google.protobuf.FieldOptions.packed', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='jstype', full_name='google.protobuf.FieldOptions.jstype', index=2, + number=6, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='lazy', full_name='google.protobuf.FieldOptions.lazy', index=3, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, 
default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unverified_lazy', full_name='google.protobuf.FieldOptions.unverified_lazy', index=4, + number=15, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=5, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='weak', full_name='google.protobuf.FieldOptions.weak', index=6, + number=10, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=7, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FIELDOPTIONS_CTYPE, + _FIELDOPTIONS_JSTYPE, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ONEOFOPTIONS = _descriptor.Descriptor( + name='OneofOptions', + full_name='google.protobuf.OneofOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.OneofOptions.uninterpreted_option', index=0, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ENUMOPTIONS = _descriptor.Descriptor( + name='EnumOptions', + full_name='google.protobuf.EnumOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='allow_alias', full_name='google.protobuf.EnumOptions.allow_alias', index=0, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( 
+ name='deprecated', full_name='google.protobuf.EnumOptions.deprecated', index=1, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=2, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ENUMVALUEOPTIONS = _descriptor.Descriptor( + name='EnumValueOptions', + full_name='google.protobuf.EnumValueOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.EnumValueOptions.deprecated', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=1, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _SERVICEOPTIONS = _descriptor.Descriptor( + name='ServiceOptions', + full_name='google.protobuf.ServiceOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.ServiceOptions.deprecated', index=0, + number=33, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=1, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _METHODOPTIONS = _descriptor.Descriptor( + name='MethodOptions', + 
full_name='google.protobuf.MethodOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.MethodOptions.deprecated', index=0, + number=33, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='idempotency_level', full_name='google.protobuf.MethodOptions.idempotency_level', index=1, + number=34, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=2, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _METHODOPTIONS_IDEMPOTENCYLEVEL, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _UNINTERPRETEDOPTION_NAMEPART = _descriptor.Descriptor( + name='NamePart', + full_name='google.protobuf.UninterpretedOption.NamePart', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0, + number=1, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1, + number=2, type=8, cpp_type=7, label=2, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _UNINTERPRETEDOPTION = _descriptor.Descriptor( + name='UninterpretedOption', + full_name='google.protobuf.UninterpretedOption', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.UninterpretedOption.name', index=0, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + 
_descriptor.FieldDescriptor( + name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2, + number=4, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3, + number=5, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4, + number=6, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5, + number=7, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='aggregate_value', full_name='google.protobuf.UninterpretedOption.aggregate_value', index=6, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _SOURCECODEINFO_LOCATION = _descriptor.Descriptor( + name='Location', + full_name='google.protobuf.SourceCodeInfo.Location', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='path', full_name='google.protobuf.SourceCodeInfo.Location.path', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='span', full_name='google.protobuf.SourceCodeInfo.Location.span', index=1, + number=2, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='leading_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_comments', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='trailing_comments', full_name='google.protobuf.SourceCodeInfo.Location.trailing_comments', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='leading_detached_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_detached_comments', index=4, + number=6, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _SOURCECODEINFO = _descriptor.Descriptor( + name='SourceCodeInfo', + full_name='google.protobuf.SourceCodeInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='location', full_name='google.protobuf.SourceCodeInfo.location', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SOURCECODEINFO_LOCATION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _GENERATEDCODEINFO_ANNOTATION = _descriptor.Descriptor( + name='Annotation', + full_name='google.protobuf.GeneratedCodeInfo.Annotation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='path', full_name='google.protobuf.GeneratedCodeInfo.Annotation.path', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='source_file', full_name='google.protobuf.GeneratedCodeInfo.Annotation.source_file', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='begin', full_name='google.protobuf.GeneratedCodeInfo.Annotation.begin', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.GeneratedCodeInfo.Annotation.end', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _GENERATEDCODEINFO = _descriptor.Descriptor( + name='GeneratedCodeInfo', + full_name='google.protobuf.GeneratedCodeInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='annotation', full_name='google.protobuf.GeneratedCodeInfo.annotation', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_GENERATEDCODEINFO_ANNOTATION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS + _FILEDESCRIPTORPROTO.fields_by_name['source_code_info'].message_type = _SOURCECODEINFO + _DESCRIPTORPROTO_EXTENSIONRANGE.fields_by_name['options'].message_type = _EXTENSIONRANGEOPTIONS + _DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO_RESERVEDRANGE.containing_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE + _DESCRIPTORPROTO.fields_by_name['oneof_decl'].message_type = _ONEOFDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS + _DESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _DESCRIPTORPROTO_RESERVEDRANGE + _EXTENSIONRANGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = 
_FIELDDESCRIPTORPROTO_LABEL + _FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE + _FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS + _FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO + _FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO + _ONEOFDESCRIPTORPROTO.fields_by_name['options'].message_type = _ONEOFOPTIONS + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE.containing_type = _ENUMDESCRIPTORPROTO + _ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO + _ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS + _ENUMDESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE + _ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS + _SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO + _SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS + _METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS + _FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE + _FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS + _MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE + _FIELDOPTIONS.fields_by_name['jstype'].enum_type = _FIELDOPTIONS_JSTYPE + _FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS + _FIELDOPTIONS_JSTYPE.containing_type = _FIELDOPTIONS + _ONEOFOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _METHODOPTIONS.fields_by_name['idempotency_level'].enum_type = _METHODOPTIONS_IDEMPOTENCYLEVEL + _METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _METHODOPTIONS_IDEMPOTENCYLEVEL.containing_type = _METHODOPTIONS + _UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION + _UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART + _SOURCECODEINFO_LOCATION.containing_type = _SOURCECODEINFO + _SOURCECODEINFO.fields_by_name['location'].message_type = _SOURCECODEINFO_LOCATION + _GENERATEDCODEINFO_ANNOTATION.containing_type = _GENERATEDCODEINFO + _GENERATEDCODEINFO.fields_by_name['annotation'].message_type = _GENERATEDCODEINFO_ANNOTATION + DESCRIPTOR.message_types_by_name['FileDescriptorSet'] = _FILEDESCRIPTORSET + DESCRIPTOR.message_types_by_name['FileDescriptorProto'] = _FILEDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['DescriptorProto'] = _DESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['ExtensionRangeOptions'] = _EXTENSIONRANGEOPTIONS + DESCRIPTOR.message_types_by_name['FieldDescriptorProto'] = _FIELDDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['OneofDescriptorProto'] = _ONEOFDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['EnumDescriptorProto'] = _ENUMDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['EnumValueDescriptorProto'] = _ENUMVALUEDESCRIPTORPROTO + 
DESCRIPTOR.message_types_by_name['ServiceDescriptorProto'] = _SERVICEDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['MethodDescriptorProto'] = _METHODDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['FileOptions'] = _FILEOPTIONS + DESCRIPTOR.message_types_by_name['MessageOptions'] = _MESSAGEOPTIONS + DESCRIPTOR.message_types_by_name['FieldOptions'] = _FIELDOPTIONS + DESCRIPTOR.message_types_by_name['OneofOptions'] = _ONEOFOPTIONS + DESCRIPTOR.message_types_by_name['EnumOptions'] = _ENUMOPTIONS + DESCRIPTOR.message_types_by_name['EnumValueOptions'] = _ENUMVALUEOPTIONS + DESCRIPTOR.message_types_by_name['ServiceOptions'] = _SERVICEOPTIONS + DESCRIPTOR.message_types_by_name['MethodOptions'] = _METHODOPTIONS + DESCRIPTOR.message_types_by_name['UninterpretedOption'] = _UNINTERPRETEDOPTION + DESCRIPTOR.message_types_by_name['SourceCodeInfo'] = _SOURCECODEINFO + DESCRIPTOR.message_types_by_name['GeneratedCodeInfo'] = _GENERATEDCODEINFO + _sym_db.RegisterFileDescriptor(DESCRIPTOR) + +else: + _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.descriptor_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _FILEDESCRIPTORSET._serialized_start=53 + _FILEDESCRIPTORSET._serialized_end=124 + _FILEDESCRIPTORPROTO._serialized_start=127 + _FILEDESCRIPTORPROTO._serialized_end=602 + _DESCRIPTORPROTO._serialized_start=605 + _DESCRIPTORPROTO._serialized_end=1286 + _DESCRIPTORPROTO_EXTENSIONRANGE._serialized_start=1140 + _DESCRIPTORPROTO_EXTENSIONRANGE._serialized_end=1241 + _DESCRIPTORPROTO_RESERVEDRANGE._serialized_start=1243 + _DESCRIPTORPROTO_RESERVEDRANGE._serialized_end=1286 + _EXTENSIONRANGEOPTIONS._serialized_start=1288 + _EXTENSIONRANGEOPTIONS._serialized_end=1391 + _FIELDDESCRIPTORPROTO._serialized_start=1394 + _FIELDDESCRIPTORPROTO._serialized_end=2119 + _FIELDDESCRIPTORPROTO_TYPE._serialized_start=1740 + _FIELDDESCRIPTORPROTO_TYPE._serialized_end=2050 + _FIELDDESCRIPTORPROTO_LABEL._serialized_start=2052 + _FIELDDESCRIPTORPROTO_LABEL._serialized_end=2119 + _ONEOFDESCRIPTORPROTO._serialized_start=2121 + _ONEOFDESCRIPTORPROTO._serialized_end=2205 + _ENUMDESCRIPTORPROTO._serialized_start=2208 + _ENUMDESCRIPTORPROTO._serialized_end=2500 + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE._serialized_start=2453 + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE._serialized_end=2500 + _ENUMVALUEDESCRIPTORPROTO._serialized_start=2502 + _ENUMVALUEDESCRIPTORPROTO._serialized_end=2610 + _SERVICEDESCRIPTORPROTO._serialized_start=2613 + _SERVICEDESCRIPTORPROTO._serialized_end=2757 + _METHODDESCRIPTORPROTO._serialized_start=2760 + _METHODDESCRIPTORPROTO._serialized_end=2953 + _FILEOPTIONS._serialized_start=2956 + _FILEOPTIONS._serialized_end=3761 + _FILEOPTIONS_OPTIMIZEMODE._serialized_start=3686 + _FILEOPTIONS_OPTIMIZEMODE._serialized_end=3744 + _MESSAGEOPTIONS._serialized_start=3764 + _MESSAGEOPTIONS._serialized_end=4024 + _FIELDOPTIONS._serialized_start=4027 + _FIELDOPTIONS._serialized_end=4473 + _FIELDOPTIONS_CTYPE._serialized_start=4354 + _FIELDOPTIONS_CTYPE._serialized_end=4401 + _FIELDOPTIONS_JSTYPE._serialized_start=4403 + _FIELDOPTIONS_JSTYPE._serialized_end=4456 + _ONEOFOPTIONS._serialized_start=4475 + _ONEOFOPTIONS._serialized_end=4569 + _ENUMOPTIONS._serialized_start=4572 + _ENUMOPTIONS._serialized_end=4719 + _ENUMVALUEOPTIONS._serialized_start=4721 + _ENUMVALUEOPTIONS._serialized_end=4846 + _SERVICEOPTIONS._serialized_start=4848 + _SERVICEOPTIONS._serialized_end=4971 + 
_METHODOPTIONS._serialized_start=4974 + _METHODOPTIONS._serialized_end=5275 + _METHODOPTIONS_IDEMPOTENCYLEVEL._serialized_start=5184 + _METHODOPTIONS_IDEMPOTENCYLEVEL._serialized_end=5264 + _UNINTERPRETEDOPTION._serialized_start=5278 + _UNINTERPRETEDOPTION._serialized_end=5564 + _UNINTERPRETEDOPTION_NAMEPART._serialized_start=5513 + _UNINTERPRETEDOPTION_NAMEPART._serialized_end=5564 + _SOURCECODEINFO._serialized_start=5567 + _SOURCECODEINFO._serialized_end=5780 + _SOURCECODEINFO_LOCATION._serialized_start=5646 + _SOURCECODEINFO_LOCATION._serialized_end=5780 + _GENERATEDCODEINFO._serialized_start=5783 + _GENERATEDCODEINFO._serialized_end=5950 + _GENERATEDCODEINFO_ANNOTATION._serialized_start=5871 + _GENERATEDCODEINFO_ANNOTATION._serialized_end=5950 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/descriptor_pool.py b/openpype/hosts/nuke/vendor/google/protobuf/descriptor_pool.py new file mode 100644 index 0000000000..911372a8b0 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/descriptor_pool.py @@ -0,0 +1,1295 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides DescriptorPool to use as a container for proto2 descriptors. + +The DescriptorPool is used in conjection with a DescriptorDatabase to maintain +a collection of protocol buffer descriptors for use when dynamically creating +message types at runtime. + +For most applications protocol buffers should be used via modules generated by +the protocol buffer compiler tool. This should only be used when the type of +protocol buffers used in an application or library cannot be predetermined. + +Below is a straightforward example on how to use this class:: + + pool = DescriptorPool() + file_descriptor_protos = [ ... 
] + for file_descriptor_proto in file_descriptor_protos: + pool.Add(file_descriptor_proto) + my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType') + +The message descriptor can be used in conjunction with the message_factory +module in order to create a protocol buffer class that can be encoded and +decoded. + +If you want to get a Python class for the specified proto, use the +helper functions inside google.protobuf.message_factory +directly instead of this class. +""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +import collections +import warnings + +from google.protobuf import descriptor +from google.protobuf import descriptor_database +from google.protobuf import text_encoding + + +_USE_C_DESCRIPTORS = descriptor._USE_C_DESCRIPTORS # pylint: disable=protected-access + + +def _Deprecated(func): + """Mark functions as deprecated.""" + + def NewFunc(*args, **kwargs): + warnings.warn( + 'Call to deprecated function %s(). Note: Do add unlinked descriptors ' + 'to descriptor_pool is wrong. Use Add() or AddSerializedFile() ' + 'instead.' % func.__name__, + category=DeprecationWarning) + return func(*args, **kwargs) + NewFunc.__name__ = func.__name__ + NewFunc.__doc__ = func.__doc__ + NewFunc.__dict__.update(func.__dict__) + return NewFunc + + +def _NormalizeFullyQualifiedName(name): + """Remove leading period from fully-qualified type name. + + Due to b/13860351 in descriptor_database.py, types in the root namespace are + generated with a leading period. This function removes that prefix. + + Args: + name (str): The fully-qualified symbol name. + + Returns: + str: The normalized fully-qualified symbol name. + """ + return name.lstrip('.') + + +def _OptionsOrNone(descriptor_proto): + """Returns the value of the field `options`, or None if it is not set.""" + if descriptor_proto.HasField('options'): + return descriptor_proto.options + else: + return None + + +def _IsMessageSetExtension(field): + return (field.is_extension and + field.containing_type.has_options and + field.containing_type.GetOptions().message_set_wire_format and + field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL) + + +class DescriptorPool(object): + """A collection of protobufs dynamically constructed by descriptor protos.""" + + if _USE_C_DESCRIPTORS: + + def __new__(cls, descriptor_db=None): + # pylint: disable=protected-access + return descriptor._message.DescriptorPool(descriptor_db) + + def __init__(self, descriptor_db=None): + """Initializes a Pool of proto buffs. + + The descriptor_db argument to the constructor is provided to allow + specialized file descriptor proto lookup code to be triggered on demand. An + example would be an implementation which will read and compile a file + specified in a call to FindFileByName() and not require the call to Add() + at all. Results from this database will be cached internally here as well. + + Args: + descriptor_db: A secondary source of file descriptors. + """ + + self._internal_db = descriptor_database.DescriptorDatabase() + self._descriptor_db = descriptor_db + self._descriptors = {} + self._enum_descriptors = {} + self._service_descriptors = {} + self._file_descriptors = {} + self._toplevel_extensions = {} + # TODO(jieluo): Remove _file_desc_by_toplevel_extension after + # maybe year 2020 for compatibility issue (with 3.4.1 only). 
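
(Editor's aside, not part of the vendored diff above or below.) The module docstring in this hunk outlines the Add()/FindMessageTypeByName() flow and mentions pairing the result with message_factory; the following is a minimal, self-contained sketch of that flow under stated assumptions: it uses the regular `google.protobuf` import path that this vendored copy mirrors, and the file, package, message and field names ("demo.proto", "demo.Ping", "payload") are invented placeholders.

# Illustrative sketch only; names below are placeholders, not from this repo.
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
from google.protobuf import message_factory

pool = descriptor_pool.DescriptorPool()

# Describe a file with one message at runtime instead of running protoc.
file_proto = descriptor_pb2.FileDescriptorProto()
file_proto.name = 'demo.proto'
file_proto.package = 'demo'
msg_proto = file_proto.message_type.add()
msg_proto.name = 'Ping'
field = msg_proto.field.add()
field.name = 'payload'
field.number = 1
field.type = descriptor_pb2.FieldDescriptorProto.TYPE_STRING
field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL

pool.Add(file_proto)
ping_desc = pool.FindMessageTypeByName('demo.Ping')

# message_factory turns the descriptor into a usable message class.
Ping = message_factory.MessageFactory(pool).GetPrototype(ping_desc)
print(Ping(payload='hello').payload)
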
+ self._file_desc_by_toplevel_extension = {} + self._top_enum_values = {} + # We store extensions in two two-level mappings: The first key is the + # descriptor of the message being extended, the second key is the extension + # full name or its tag number. + self._extensions_by_name = collections.defaultdict(dict) + self._extensions_by_number = collections.defaultdict(dict) + + def _CheckConflictRegister(self, desc, desc_name, file_name): + """Check if the descriptor name conflicts with another of the same name. + + Args: + desc: Descriptor of a message, enum, service, extension or enum value. + desc_name (str): the full name of desc. + file_name (str): The file name of descriptor. + """ + for register, descriptor_type in [ + (self._descriptors, descriptor.Descriptor), + (self._enum_descriptors, descriptor.EnumDescriptor), + (self._service_descriptors, descriptor.ServiceDescriptor), + (self._toplevel_extensions, descriptor.FieldDescriptor), + (self._top_enum_values, descriptor.EnumValueDescriptor)]: + if desc_name in register: + old_desc = register[desc_name] + if isinstance(old_desc, descriptor.EnumValueDescriptor): + old_file = old_desc.type.file.name + else: + old_file = old_desc.file.name + + if not isinstance(desc, descriptor_type) or ( + old_file != file_name): + error_msg = ('Conflict register for file "' + file_name + + '": ' + desc_name + + ' is already defined in file "' + + old_file + '". Please fix the conflict by adding ' + 'package name on the proto file, or use different ' + 'name for the duplication.') + if isinstance(desc, descriptor.EnumValueDescriptor): + error_msg += ('\nNote: enum values appear as ' + 'siblings of the enum type instead of ' + 'children of it.') + + raise TypeError(error_msg) + + return + + def Add(self, file_desc_proto): + """Adds the FileDescriptorProto and its types to this pool. + + Args: + file_desc_proto (FileDescriptorProto): The file descriptor to add. + """ + + self._internal_db.Add(file_desc_proto) + + def AddSerializedFile(self, serialized_file_desc_proto): + """Adds the FileDescriptorProto and its types to this pool. + + Args: + serialized_file_desc_proto (bytes): A bytes string, serialization of the + :class:`FileDescriptorProto` to add. + + Returns: + FileDescriptor: Descriptor for the added file. + """ + + # pylint: disable=g-import-not-at-top + from google.protobuf import descriptor_pb2 + file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString( + serialized_file_desc_proto) + file_desc = self._ConvertFileProtoToFileDescriptor(file_desc_proto) + file_desc.serialized_pb = serialized_file_desc_proto + return file_desc + + # Add Descriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddDescriptor(self, desc): + self._AddDescriptor(desc) + + # Never call this method. It is for internal usage only. + def _AddDescriptor(self, desc): + """Adds a Descriptor to the pool, non-recursively. + + If the Descriptor contains nested messages or enums, the caller must + explicitly register them. This method also registers the FileDescriptor + associated with the message. + + Args: + desc: A Descriptor. + """ + if not isinstance(desc, descriptor.Descriptor): + raise TypeError('Expected instance of descriptor.Descriptor.') + + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + + self._descriptors[desc.full_name] = desc + self._AddFileDescriptor(desc.file) + + # Add EnumDescriptor to descriptor pool is dreprecated. 
Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddEnumDescriptor(self, enum_desc): + self._AddEnumDescriptor(enum_desc) + + # Never call this method. It is for internal usage only. + def _AddEnumDescriptor(self, enum_desc): + """Adds an EnumDescriptor to the pool. + + This method also registers the FileDescriptor associated with the enum. + + Args: + enum_desc: An EnumDescriptor. + """ + + if not isinstance(enum_desc, descriptor.EnumDescriptor): + raise TypeError('Expected instance of descriptor.EnumDescriptor.') + + file_name = enum_desc.file.name + self._CheckConflictRegister(enum_desc, enum_desc.full_name, file_name) + self._enum_descriptors[enum_desc.full_name] = enum_desc + + # Top enum values need to be indexed. + # Count the number of dots to see whether the enum is toplevel or nested + # in a message. We cannot use enum_desc.containing_type at this stage. + if enum_desc.file.package: + top_level = (enum_desc.full_name.count('.') + - enum_desc.file.package.count('.') == 1) + else: + top_level = enum_desc.full_name.count('.') == 0 + if top_level: + file_name = enum_desc.file.name + package = enum_desc.file.package + for enum_value in enum_desc.values: + full_name = _NormalizeFullyQualifiedName( + '.'.join((package, enum_value.name))) + self._CheckConflictRegister(enum_value, full_name, file_name) + self._top_enum_values[full_name] = enum_value + self._AddFileDescriptor(enum_desc.file) + + # Add ServiceDescriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddServiceDescriptor(self, service_desc): + self._AddServiceDescriptor(service_desc) + + # Never call this method. It is for internal usage only. + def _AddServiceDescriptor(self, service_desc): + """Adds a ServiceDescriptor to the pool. + + Args: + service_desc: A ServiceDescriptor. + """ + + if not isinstance(service_desc, descriptor.ServiceDescriptor): + raise TypeError('Expected instance of descriptor.ServiceDescriptor.') + + self._CheckConflictRegister(service_desc, service_desc.full_name, + service_desc.file.name) + self._service_descriptors[service_desc.full_name] = service_desc + + # Add ExtensionDescriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddExtensionDescriptor(self, extension): + self._AddExtensionDescriptor(extension) + + # Never call this method. It is for internal usage only. + def _AddExtensionDescriptor(self, extension): + """Adds a FieldDescriptor describing an extension to the pool. + + Args: + extension: A FieldDescriptor. + + Raises: + AssertionError: when another extension with the same number extends the + same message. + TypeError: when the specified extension is not a + descriptor.FieldDescriptor. + """ + if not (isinstance(extension, descriptor.FieldDescriptor) and + extension.is_extension): + raise TypeError('Expected an extension descriptor.') + + if extension.extension_scope is None: + self._toplevel_extensions[extension.full_name] = extension + + try: + existing_desc = self._extensions_by_number[ + extension.containing_type][extension.number] + except KeyError: + pass + else: + if extension is not existing_desc: + raise AssertionError( + 'Extensions "%s" and "%s" both try to extend message type "%s" ' + 'with field number %d.' 
% + (extension.full_name, existing_desc.full_name, + extension.containing_type.full_name, extension.number)) + + self._extensions_by_number[extension.containing_type][ + extension.number] = extension + self._extensions_by_name[extension.containing_type][ + extension.full_name] = extension + + # Also register MessageSet extensions with the type name. + if _IsMessageSetExtension(extension): + self._extensions_by_name[extension.containing_type][ + extension.message_type.full_name] = extension + + @_Deprecated + def AddFileDescriptor(self, file_desc): + self._InternalAddFileDescriptor(file_desc) + + # Never call this method. It is for internal usage only. + def _InternalAddFileDescriptor(self, file_desc): + """Adds a FileDescriptor to the pool, non-recursively. + + If the FileDescriptor contains messages or enums, the caller must explicitly + register them. + + Args: + file_desc: A FileDescriptor. + """ + + self._AddFileDescriptor(file_desc) + # TODO(jieluo): This is a temporary solution for FieldDescriptor.file. + # FieldDescriptor.file is added in code gen. Remove this solution after + # maybe 2020 for compatibility reason (with 3.4.1 only). + for extension in file_desc.extensions_by_name.values(): + self._file_desc_by_toplevel_extension[ + extension.full_name] = file_desc + + def _AddFileDescriptor(self, file_desc): + """Adds a FileDescriptor to the pool, non-recursively. + + If the FileDescriptor contains messages or enums, the caller must explicitly + register them. + + Args: + file_desc: A FileDescriptor. + """ + + if not isinstance(file_desc, descriptor.FileDescriptor): + raise TypeError('Expected instance of descriptor.FileDescriptor.') + self._file_descriptors[file_desc.name] = file_desc + + def FindFileByName(self, file_name): + """Gets a FileDescriptor by file name. + + Args: + file_name (str): The path to the file to get a descriptor for. + + Returns: + FileDescriptor: The descriptor for the named file. + + Raises: + KeyError: if the file cannot be found in the pool. + """ + + try: + return self._file_descriptors[file_name] + except KeyError: + pass + + try: + file_proto = self._internal_db.FindFileByName(file_name) + except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileByName(file_name) + else: + raise error + if not file_proto: + raise KeyError('Cannot find a file named %s' % file_name) + return self._ConvertFileProtoToFileDescriptor(file_proto) + + def FindFileContainingSymbol(self, symbol): + """Gets the FileDescriptor for the file containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: Descriptor for the file that contains the specified + symbol. + + Raises: + KeyError: if the file cannot be found in the pool. + """ + + symbol = _NormalizeFullyQualifiedName(symbol) + try: + return self._InternalFindFileContainingSymbol(symbol) + except KeyError: + pass + + try: + # Try fallback database. Build and find again if possible. + self._FindFileContainingSymbolInDb(symbol) + return self._InternalFindFileContainingSymbol(symbol) + except KeyError: + raise KeyError('Cannot find a file containing %s' % symbol) + + def _InternalFindFileContainingSymbol(self, symbol): + """Gets the already built FileDescriptor containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: Descriptor for the file that contains the specified + symbol. + + Raises: + KeyError: if the file cannot be found in the pool. 
+ """ + try: + return self._descriptors[symbol].file + except KeyError: + pass + + try: + return self._enum_descriptors[symbol].file + except KeyError: + pass + + try: + return self._service_descriptors[symbol].file + except KeyError: + pass + + try: + return self._top_enum_values[symbol].type.file + except KeyError: + pass + + try: + return self._file_desc_by_toplevel_extension[symbol] + except KeyError: + pass + + # Try fields, enum values and nested extensions inside a message. + top_name, _, sub_name = symbol.rpartition('.') + try: + message = self.FindMessageTypeByName(top_name) + assert (sub_name in message.extensions_by_name or + sub_name in message.fields_by_name or + sub_name in message.enum_values_by_name) + return message.file + except (KeyError, AssertionError): + raise KeyError('Cannot find a file containing %s' % symbol) + + def FindMessageTypeByName(self, full_name): + """Loads the named descriptor from the pool. + + Args: + full_name (str): The full name of the descriptor to load. + + Returns: + Descriptor: The descriptor for the named type. + + Raises: + KeyError: if the message cannot be found in the pool. + """ + + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._descriptors[full_name] + + def FindEnumTypeByName(self, full_name): + """Loads the named enum descriptor from the pool. + + Args: + full_name (str): The full name of the enum descriptor to load. + + Returns: + EnumDescriptor: The enum descriptor for the named type. + + Raises: + KeyError: if the enum cannot be found in the pool. + """ + + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._enum_descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._enum_descriptors[full_name] + + def FindFieldByName(self, full_name): + """Loads the named field descriptor from the pool. + + Args: + full_name (str): The full name of the field descriptor to load. + + Returns: + FieldDescriptor: The field descriptor for the named field. + + Raises: + KeyError: if the field cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + message_name, _, field_name = full_name.rpartition('.') + message_descriptor = self.FindMessageTypeByName(message_name) + return message_descriptor.fields_by_name[field_name] + + def FindOneofByName(self, full_name): + """Loads the named oneof descriptor from the pool. + + Args: + full_name (str): The full name of the oneof descriptor to load. + + Returns: + OneofDescriptor: The oneof descriptor for the named oneof. + + Raises: + KeyError: if the oneof cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + message_name, _, oneof_name = full_name.rpartition('.') + message_descriptor = self.FindMessageTypeByName(message_name) + return message_descriptor.oneofs_by_name[oneof_name] + + def FindExtensionByName(self, full_name): + """Loads the named extension descriptor from the pool. + + Args: + full_name (str): The full name of the extension descriptor to load. + + Returns: + FieldDescriptor: The field descriptor for the named extension. + + Raises: + KeyError: if the extension cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + try: + # The proto compiler does not give any link between the FileDescriptor + # and top-level extensions unless the FileDescriptorProto is added to + # the DescriptorDatabase, but this can impact memory usage. 
+ # So we registered these extensions by name explicitly. + return self._toplevel_extensions[full_name] + except KeyError: + pass + message_name, _, extension_name = full_name.rpartition('.') + try: + # Most extensions are nested inside a message. + scope = self.FindMessageTypeByName(message_name) + except KeyError: + # Some extensions are defined at file scope. + scope = self._FindFileContainingSymbolInDb(full_name) + return scope.extensions_by_name[extension_name] + + def FindExtensionByNumber(self, message_descriptor, number): + """Gets the extension of the specified message with the specified number. + + Extensions have to be registered to this pool by calling :func:`Add` or + :func:`AddExtensionDescriptor`. + + Args: + message_descriptor (Descriptor): descriptor of the extended message. + number (int): Number of the extension field. + + Returns: + FieldDescriptor: The descriptor for the extension. + + Raises: + KeyError: when no extension with the given number is known for the + specified message. + """ + try: + return self._extensions_by_number[message_descriptor][number] + except KeyError: + self._TryLoadExtensionFromDB(message_descriptor, number) + return self._extensions_by_number[message_descriptor][number] + + def FindAllExtensions(self, message_descriptor): + """Gets all the known extensions of a given message. + + Extensions have to be registered to this pool by build related + :func:`Add` or :func:`AddExtensionDescriptor`. + + Args: + message_descriptor (Descriptor): Descriptor of the extended message. + + Returns: + list[FieldDescriptor]: Field descriptors describing the extensions. + """ + # Fallback to descriptor db if FindAllExtensionNumbers is provided. + if self._descriptor_db and hasattr( + self._descriptor_db, 'FindAllExtensionNumbers'): + full_name = message_descriptor.full_name + all_numbers = self._descriptor_db.FindAllExtensionNumbers(full_name) + for number in all_numbers: + if number in self._extensions_by_number[message_descriptor]: + continue + self._TryLoadExtensionFromDB(message_descriptor, number) + + return list(self._extensions_by_number[message_descriptor].values()) + + def _TryLoadExtensionFromDB(self, message_descriptor, number): + """Try to Load extensions from descriptor db. + + Args: + message_descriptor: descriptor of the extended message. + number: the extension number that needs to be loaded. + """ + if not self._descriptor_db: + return + # Only supported when FindFileContainingExtension is provided. + if not hasattr( + self._descriptor_db, 'FindFileContainingExtension'): + return + + full_name = message_descriptor.full_name + file_proto = self._descriptor_db.FindFileContainingExtension( + full_name, number) + + if file_proto is None: + return + + try: + self._ConvertFileProtoToFileDescriptor(file_proto) + except: + warn_msg = ('Unable to load proto file %s for extension number %d.' % + (file_proto.name, number)) + warnings.warn(warn_msg, RuntimeWarning) + + def FindServiceByName(self, full_name): + """Loads the named service descriptor from the pool. + + Args: + full_name (str): The full name of the service descriptor to load. + + Returns: + ServiceDescriptor: The service descriptor for the named service. + + Raises: + KeyError: if the service cannot be found in the pool. 
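+
+    Example (an illustrative sketch; the package, service and method names
+    are hypothetical and assume the matching proto is already in the pool):
+
+      pool = descriptor_pool.Default()
+      service_desc = pool.FindServiceByName('example.Greeter')
+      method_desc = service_desc.methods_by_name['SayHello']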
+ """ + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._service_descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._service_descriptors[full_name] + + def FindMethodByName(self, full_name): + """Loads the named service method descriptor from the pool. + + Args: + full_name (str): The full name of the method descriptor to load. + + Returns: + MethodDescriptor: The method descriptor for the service method. + + Raises: + KeyError: if the method cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + service_name, _, method_name = full_name.rpartition('.') + service_descriptor = self.FindServiceByName(service_name) + return service_descriptor.methods_by_name[method_name] + + def _FindFileContainingSymbolInDb(self, symbol): + """Finds the file in descriptor DB containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: The file that contains the specified symbol. + + Raises: + KeyError: if the file cannot be found in the descriptor database. + """ + try: + file_proto = self._internal_db.FindFileContainingSymbol(symbol) + except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileContainingSymbol(symbol) + else: + raise error + if not file_proto: + raise KeyError('Cannot find a file containing %s' % symbol) + return self._ConvertFileProtoToFileDescriptor(file_proto) + + def _ConvertFileProtoToFileDescriptor(self, file_proto): + """Creates a FileDescriptor from a proto or returns a cached copy. + + This method also has the side effect of loading all the symbols found in + the file into the appropriate dictionaries in the pool. + + Args: + file_proto: The proto to convert. + + Returns: + A FileDescriptor matching the passed in proto. + """ + if file_proto.name not in self._file_descriptors: + built_deps = list(self._GetDeps(file_proto.dependency)) + direct_deps = [self.FindFileByName(n) for n in file_proto.dependency] + public_deps = [direct_deps[i] for i in file_proto.public_dependency] + + file_descriptor = descriptor.FileDescriptor( + pool=self, + name=file_proto.name, + package=file_proto.package, + syntax=file_proto.syntax, + options=_OptionsOrNone(file_proto), + serialized_pb=file_proto.SerializeToString(), + dependencies=direct_deps, + public_dependencies=public_deps, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + scope = {} + + # This loop extracts all the message and enum types from all the + # dependencies of the file_proto. This is necessary to create the + # scope of available message types when defining the passed in + # file proto. 
+ for dependency in built_deps: + scope.update(self._ExtractSymbols( + dependency.message_types_by_name.values())) + scope.update((_PrefixWithDot(enum.full_name), enum) + for enum in dependency.enum_types_by_name.values()) + + for message_type in file_proto.message_type: + message_desc = self._ConvertMessageDescriptor( + message_type, file_proto.package, file_descriptor, scope, + file_proto.syntax) + file_descriptor.message_types_by_name[message_desc.name] = ( + message_desc) + + for enum_type in file_proto.enum_type: + file_descriptor.enum_types_by_name[enum_type.name] = ( + self._ConvertEnumDescriptor(enum_type, file_proto.package, + file_descriptor, None, scope, True)) + + for index, extension_proto in enumerate(file_proto.extension): + extension_desc = self._MakeFieldDescriptor( + extension_proto, file_proto.package, index, file_descriptor, + is_extension=True) + extension_desc.containing_type = self._GetTypeFromScope( + file_descriptor.package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, + file_descriptor.package, scope) + file_descriptor.extensions_by_name[extension_desc.name] = ( + extension_desc) + self._file_desc_by_toplevel_extension[extension_desc.full_name] = ( + file_descriptor) + + for desc_proto in file_proto.message_type: + self._SetAllFieldTypes(file_proto.package, desc_proto, scope) + + if file_proto.package: + desc_proto_prefix = _PrefixWithDot(file_proto.package) + else: + desc_proto_prefix = '' + + for desc_proto in file_proto.message_type: + desc = self._GetTypeFromScope( + desc_proto_prefix, desc_proto.name, scope) + file_descriptor.message_types_by_name[desc_proto.name] = desc + + for index, service_proto in enumerate(file_proto.service): + file_descriptor.services_by_name[service_proto.name] = ( + self._MakeServiceDescriptor(service_proto, index, scope, + file_proto.package, file_descriptor)) + + self._file_descriptors[file_proto.name] = file_descriptor + + # Add extensions to the pool + file_desc = self._file_descriptors[file_proto.name] + for extension in file_desc.extensions_by_name.values(): + self._AddExtensionDescriptor(extension) + for message_type in file_desc.message_types_by_name.values(): + for extension in message_type.extensions: + self._AddExtensionDescriptor(extension) + + return file_desc + + def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, + scope=None, syntax=None): + """Adds the proto to the pool in the specified package. + + Args: + desc_proto: The descriptor_pb2.DescriptorProto protobuf message. + package: The package the proto should be located in. + file_desc: The file containing this message. + scope: Dict mapping short and full symbols to message and enum types. + syntax: string indicating syntax of the file ("proto2" or "proto3") + + Returns: + The added descriptor. 
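+
+    The new descriptor is also recorded in scope under its dot-prefixed
+    full name (for a hypothetical pkg.Msg the key is '.pkg.Msg'), so that
+    later field-type resolution can find it.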
+ """ + + if package: + desc_name = '.'.join((package, desc_proto.name)) + else: + desc_name = desc_proto.name + + if file_desc is None: + file_name = None + else: + file_name = file_desc.name + + if scope is None: + scope = {} + + nested = [ + self._ConvertMessageDescriptor( + nested, desc_name, file_desc, scope, syntax) + for nested in desc_proto.nested_type] + enums = [ + self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, + scope, False) + for enum in desc_proto.enum_type] + fields = [self._MakeFieldDescriptor(field, desc_name, index, file_desc) + for index, field in enumerate(desc_proto.field)] + extensions = [ + self._MakeFieldDescriptor(extension, desc_name, index, file_desc, + is_extension=True) + for index, extension in enumerate(desc_proto.extension)] + oneofs = [ + # pylint: disable=g-complex-comprehension + descriptor.OneofDescriptor( + desc.name, + '.'.join((desc_name, desc.name)), + index, + None, + [], + _OptionsOrNone(desc), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + for index, desc in enumerate(desc_proto.oneof_decl) + ] + extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range] + if extension_ranges: + is_extendable = True + else: + is_extendable = False + desc = descriptor.Descriptor( + name=desc_proto.name, + full_name=desc_name, + filename=file_name, + containing_type=None, + fields=fields, + oneofs=oneofs, + nested_types=nested, + enum_types=enums, + extensions=extensions, + options=_OptionsOrNone(desc_proto), + is_extendable=is_extendable, + extension_ranges=extension_ranges, + file=file_desc, + serialized_start=None, + serialized_end=None, + syntax=syntax, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + for nested in desc.nested_types: + nested.containing_type = desc + for enum in desc.enum_types: + enum.containing_type = desc + for field_index, field_desc in enumerate(desc_proto.field): + if field_desc.HasField('oneof_index'): + oneof_index = field_desc.oneof_index + oneofs[oneof_index].fields.append(fields[field_index]) + fields[field_index].containing_oneof = oneofs[oneof_index] + + scope[_PrefixWithDot(desc_name)] = desc + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._descriptors[desc_name] = desc + return desc + + def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None, + containing_type=None, scope=None, top_level=False): + """Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf. + + Args: + enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message. + package: Optional package name for the new message EnumDescriptor. + file_desc: The file containing the enum descriptor. + containing_type: The type containing this enum. + scope: Scope containing available types. + top_level: If True, the enum is a top level symbol. If False, the enum + is defined inside a message. 
+ + Returns: + The added descriptor + """ + + if package: + enum_name = '.'.join((package, enum_proto.name)) + else: + enum_name = enum_proto.name + + if file_desc is None: + file_name = None + else: + file_name = file_desc.name + + values = [self._MakeEnumValueDescriptor(value, index) + for index, value in enumerate(enum_proto.value)] + desc = descriptor.EnumDescriptor(name=enum_proto.name, + full_name=enum_name, + filename=file_name, + file=file_desc, + values=values, + containing_type=containing_type, + options=_OptionsOrNone(enum_proto), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + scope['.%s' % enum_name] = desc + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._enum_descriptors[enum_name] = desc + + # Add top level enum values. + if top_level: + for value in values: + full_name = _NormalizeFullyQualifiedName( + '.'.join((package, value.name))) + self._CheckConflictRegister(value, full_name, file_name) + self._top_enum_values[full_name] = value + + return desc + + def _MakeFieldDescriptor(self, field_proto, message_name, index, + file_desc, is_extension=False): + """Creates a field descriptor from a FieldDescriptorProto. + + For message and enum type fields, this method will do a look up + in the pool for the appropriate descriptor for that type. If it + is unavailable, it will fall back to the _source function to + create it. If this type is still unavailable, construction will + fail. + + Args: + field_proto: The proto describing the field. + message_name: The name of the containing message. + index: Index of the field + file_desc: The file containing the field descriptor. + is_extension: Indication that this field is for an extension. + + Returns: + An initialized FieldDescriptor object + """ + + if message_name: + full_name = '.'.join((message_name, field_proto.name)) + else: + full_name = field_proto.name + + if field_proto.json_name: + json_name = field_proto.json_name + else: + json_name = None + + return descriptor.FieldDescriptor( + name=field_proto.name, + full_name=full_name, + index=index, + number=field_proto.number, + type=field_proto.type, + cpp_type=None, + message_type=None, + enum_type=None, + containing_type=None, + label=field_proto.label, + has_default_value=False, + default_value=None, + is_extension=is_extension, + extension_scope=None, + options=_OptionsOrNone(field_proto), + json_name=json_name, + file=file_desc, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _SetAllFieldTypes(self, package, desc_proto, scope): + """Sets all the descriptor's fields's types. + + This method also sets the containing types on any extensions. + + Args: + package: The current package of desc_proto. + desc_proto: The message descriptor to update. + scope: Enclosing scope of available types. 
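+
+    Nested message types are processed recursively, so every field and
+    extension in the message tree rooted at desc_proto gets its type
+    resolved.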
+ """ + + package = _PrefixWithDot(package) + + main_desc = self._GetTypeFromScope(package, desc_proto.name, scope) + + if package == '.': + nested_package = _PrefixWithDot(desc_proto.name) + else: + nested_package = '.'.join([package, desc_proto.name]) + + for field_proto, field_desc in zip(desc_proto.field, main_desc.fields): + self._SetFieldType(field_proto, field_desc, nested_package, scope) + + for extension_proto, extension_desc in ( + zip(desc_proto.extension, main_desc.extensions)): + extension_desc.containing_type = self._GetTypeFromScope( + nested_package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, nested_package, scope) + + for nested_type in desc_proto.nested_type: + self._SetAllFieldTypes(nested_package, nested_type, scope) + + def _SetFieldType(self, field_proto, field_desc, package, scope): + """Sets the field's type, cpp_type, message_type and enum_type. + + Args: + field_proto: Data about the field in proto format. + field_desc: The descriptor to modify. + package: The package the field's container is in. + scope: Enclosing scope of available types. + """ + if field_proto.type_name: + desc = self._GetTypeFromScope(package, field_proto.type_name, scope) + else: + desc = None + + if not field_proto.HasField('type'): + if isinstance(desc, descriptor.Descriptor): + field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE + else: + field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM + + field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType( + field_proto.type) + + if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE + or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP): + field_desc.message_type = desc + + if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.enum_type = desc + + if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED: + field_desc.has_default_value = False + field_desc.default_value = [] + elif field_proto.HasField('default_value'): + field_desc.has_default_value = True + if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = float(field_proto.default_value) + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: + field_desc.default_value = field_proto.default_value + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = field_proto.default_value.lower() == 'true' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.default_value = field_desc.enum_type.values_by_name[ + field_proto.default_value].number + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: + field_desc.default_value = text_encoding.CUnescape( + field_proto.default_value) + elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: + field_desc.default_value = None + else: + # All other types are of the "int" type. 
+ field_desc.default_value = int(field_proto.default_value) + else: + field_desc.has_default_value = False + if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = 0.0 + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: + field_desc.default_value = u'' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = False + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.default_value = field_desc.enum_type.values[0].number + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: + field_desc.default_value = b'' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: + field_desc.default_value = None + elif field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP: + field_desc.default_value = None + else: + # All other types are of the "int" type. + field_desc.default_value = 0 + + field_desc.type = field_proto.type + + def _MakeEnumValueDescriptor(self, value_proto, index): + """Creates a enum value descriptor object from a enum value proto. + + Args: + value_proto: The proto describing the enum value. + index: The index of the enum value. + + Returns: + An initialized EnumValueDescriptor object. + """ + + return descriptor.EnumValueDescriptor( + name=value_proto.name, + index=index, + number=value_proto.number, + options=_OptionsOrNone(value_proto), + type=None, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _MakeServiceDescriptor(self, service_proto, service_index, scope, + package, file_desc): + """Make a protobuf ServiceDescriptor given a ServiceDescriptorProto. + + Args: + service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message. + service_index: The index of the service in the File. + scope: Dict mapping short and full symbols to message and enum types. + package: Optional package name for the new message EnumDescriptor. + file_desc: The file containing the service descriptor. + + Returns: + The added descriptor. + """ + + if package: + service_name = '.'.join((package, service_proto.name)) + else: + service_name = service_proto.name + + methods = [self._MakeMethodDescriptor(method_proto, service_name, package, + scope, index) + for index, method_proto in enumerate(service_proto.method)] + desc = descriptor.ServiceDescriptor( + name=service_proto.name, + full_name=service_name, + index=service_index, + methods=methods, + options=_OptionsOrNone(service_proto), + file=file_desc, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._service_descriptors[service_name] = desc + return desc + + def _MakeMethodDescriptor(self, method_proto, service_name, package, scope, + index): + """Creates a method descriptor from a MethodDescriptorProto. + + Args: + method_proto: The proto describing the method. + service_name: The name of the containing service. + package: Optional package name to look up for types. + scope: Scope containing available types. + index: Index of the method in the service. + + Returns: + An initialized MethodDescriptor object. 
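+
+    The input and output message types are resolved against scope, first as
+    written and, if needed, by walking up the enclosing packages (see
+    _GetTypeFromScope).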
+ """ + full_name = '.'.join((service_name, method_proto.name)) + input_type = self._GetTypeFromScope( + package, method_proto.input_type, scope) + output_type = self._GetTypeFromScope( + package, method_proto.output_type, scope) + return descriptor.MethodDescriptor( + name=method_proto.name, + full_name=full_name, + index=index, + containing_service=None, + input_type=input_type, + output_type=output_type, + client_streaming=method_proto.client_streaming, + server_streaming=method_proto.server_streaming, + options=_OptionsOrNone(method_proto), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _ExtractSymbols(self, descriptors): + """Pulls out all the symbols from descriptor protos. + + Args: + descriptors: The messages to extract descriptors from. + Yields: + A two element tuple of the type name and descriptor object. + """ + + for desc in descriptors: + yield (_PrefixWithDot(desc.full_name), desc) + for symbol in self._ExtractSymbols(desc.nested_types): + yield symbol + for enum in desc.enum_types: + yield (_PrefixWithDot(enum.full_name), enum) + + def _GetDeps(self, dependencies, visited=None): + """Recursively finds dependencies for file protos. + + Args: + dependencies: The names of the files being depended on. + visited: The names of files already found. + + Yields: + Each direct and indirect dependency. + """ + + visited = visited or set() + for dependency in dependencies: + if dependency not in visited: + visited.add(dependency) + dep_desc = self.FindFileByName(dependency) + yield dep_desc + public_files = [d.name for d in dep_desc.public_dependencies] + yield from self._GetDeps(public_files, visited) + + def _GetTypeFromScope(self, package, type_name, scope): + """Finds a given type name in the current scope. + + Args: + package: The package the proto should be located in. + type_name: The name of the type to be found in the scope. + scope: Dict mapping short and full symbols to message and enum types. + + Returns: + The descriptor for the requested type. + """ + if type_name not in scope: + components = _PrefixWithDot(package).split('.') + while components: + possible_match = '.'.join(components + [type_name]) + if possible_match in scope: + type_name = possible_match + break + else: + components.pop(-1) + return scope[type_name] + + +def _PrefixWithDot(name): + return name if name.startswith('.') else '.%s' % name + + +if _USE_C_DESCRIPTORS: + # TODO(amauryfa): This pool could be constructed from Python code, when we + # support a flag like 'use_cpp_generated_pool=True'. + # pylint: disable=protected-access + _DEFAULT = descriptor._message.default_pool +else: + _DEFAULT = DescriptorPool() + + +def Default(): + return _DEFAULT diff --git a/openpype/hosts/nuke/vendor/google/protobuf/duration_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/duration_pb2.py new file mode 100644 index 0000000000..a8ecc07bdf --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/duration_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/duration.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x83\x01\n\x13\x63om.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.duration_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\rDurationProtoP\001Z1google.golang.org/protobuf/types/known/durationpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _DURATION._serialized_start=51 + _DURATION._serialized_end=93 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/empty_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/empty_pb2.py new file mode 100644 index 0000000000..0b4d554db3 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/empty_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/empty.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05\x45mptyB}\n\x13\x63om.google.protobufB\nEmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.empty_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\nEmptyProtoP\001Z.google.golang.org/protobuf/types/known/emptypb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _EMPTY._serialized_start=48 + _EMPTY._serialized_end=55 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/field_mask_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/field_mask_pb2.py new file mode 100644 index 0000000000..80a4e96e59 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/field_mask_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/field_mask.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"\x1a\n\tFieldMask\x12\r\n\x05paths\x18\x01 \x03(\tB\x85\x01\n\x13\x63om.google.protobufB\x0e\x46ieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.field_mask_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\016FieldMaskProtoP\001Z2google.golang.org/protobuf/types/known/fieldmaskpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _FIELDMASK._serialized_start=53 + _FIELDMASK._serialized_end=79 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/__init__.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/_parameterized.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/_parameterized.py new file mode 100644 index 0000000000..afdbb78c36 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/_parameterized.py @@ -0,0 +1,443 @@ +#! /usr/bin/env python +# +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Adds support for parameterized tests to Python's unittest TestCase class. + +A parameterized test is a method in a test case that is invoked with different +argument tuples. + +A simple example: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + (1, 2, 3), + (4, 5, 9), + (1, 1, 3)) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + + +Each invocation is a separate test case and properly isolated just +like a normal test method, with its own setUp/tearDown cycle. In the +example above, there are three separate testcases, one of which will +fail due to an assertion error (1 + 1 != 3). + +Parameters for individual test cases can be tuples (with positional parameters) +or dictionaries (with named parameters): + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + {'op1': 1, 'op2': 2, 'result': 3}, + {'op1': 4, 'op2': 5, 'result': 9}, + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + +If a parameterized test fails, the error message will show the +original test name (which is modified internally) and the arguments +for the specific invocation, which are part of the string returned by +the shortDescription() method on test cases. + +The id method of the test, used internally by the unittest framework, +is also modified to show the arguments. To make sure that test names +stay the same across several invocations, object representations like + + >>> class Foo(object): + ... pass + >>> repr(Foo()) + '<__main__.Foo object at 0x23d8610>' + +are turned into '<__main__.Foo>'. For even more descriptive names, +especially in test logs, you can use the named_parameters decorator. In +this case, only tuples are supported, and the first parameters has to +be a string (or an object that returns an apt name when converted via +str()): + + class NamedExample(parameterized.TestCase): + @parameterized.named_parameters( + ('Normal', 'aa', 'aaa', True), + ('EmptyPrefix', '', 'abc', True), + ('BothEmpty', '', '', True)) + def testStartsWith(self, prefix, string, result): + self.assertEqual(result, strings.startswith(prefix)) + +Named tests also have the benefit that they can be run individually +from the command line: + + $ testmodule.py NamedExample.testStartsWithNormal + . + -------------------------------------------------------------------- + Ran 1 test in 0.000s + + OK + +Parameterized Classes +===================== +If invocation arguments are shared across test methods in a single +TestCase class, instead of decorating all test methods +individually, the class itself can be decorated: + + @parameterized.parameters( + (1, 2, 3) + (4, 5, 9)) + class ArithmeticTest(parameterized.TestCase): + def testAdd(self, arg1, arg2, result): + self.assertEqual(arg1 + arg2, result) + + def testSubtract(self, arg2, arg2, result): + self.assertEqual(result - arg1, arg2) + +Inputs from Iterables +===================== +If parameters should be shared across several test cases, or are dynamically +created from other sources, a single non-tuple iterable can be passed into +the decorator. 
This iterable will be used to obtain the test cases: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + c.op1, c.op2, c.result for c in testcases + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + + +Single-Argument Test Methods +============================ +If a test method takes only one argument, the single argument does not need to +be wrapped into a tuple: + + class NegativeNumberExample(parameterized.TestCase): + @parameterized.parameters( + -1, -3, -4, -5 + ) + def testIsNegative(self, arg): + self.assertTrue(IsNegative(arg)) +""" + +__author__ = 'tmarek@google.com (Torsten Marek)' + +import functools +import re +import types +import unittest +import uuid + +try: + # Since python 3 + import collections.abc as collections_abc +except ImportError: + # Won't work after python 3.8 + import collections as collections_abc + +ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>') +_SEPARATOR = uuid.uuid1().hex +_FIRST_ARG = object() +_ARGUMENT_REPR = object() + + +def _CleanRepr(obj): + return ADDR_RE.sub(r'<\1>', repr(obj)) + + +# Helper function formerly from the unittest module, removed from it in +# Python 2.7. +def _StrClass(cls): + return '%s.%s' % (cls.__module__, cls.__name__) + + +def _NonStringIterable(obj): + return (isinstance(obj, collections_abc.Iterable) and + not isinstance(obj, str)) + + +def _FormatParameterList(testcase_params): + if isinstance(testcase_params, collections_abc.Mapping): + return ', '.join('%s=%s' % (argname, _CleanRepr(value)) + for argname, value in testcase_params.items()) + elif _NonStringIterable(testcase_params): + return ', '.join(map(_CleanRepr, testcase_params)) + else: + return _FormatParameterList((testcase_params,)) + + +class _ParameterizedTestIter(object): + """Callable and iterable class for producing new test cases.""" + + def __init__(self, test_method, testcases, naming_type): + """Returns concrete test functions for a test and a list of parameters. + + The naming_type is used to determine the name of the concrete + functions as reported by the unittest framework. If naming_type is + _FIRST_ARG, the testcases must be tuples, and the first element must + have a string representation that is a valid Python identifier. + + Args: + test_method: The decorated test method. + testcases: (list of tuple/dict) A list of parameter + tuples/dicts for individual test invocations. + naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR. + """ + self._test_method = test_method + self.testcases = testcases + self._naming_type = naming_type + + def __call__(self, *args, **kwargs): + raise RuntimeError('You appear to be running a parameterized test case ' + 'without having inherited from parameterized.' + 'TestCase. This is bad because none of ' + 'your test cases are actually being run.') + + def __iter__(self): + test_method = self._test_method + naming_type = self._naming_type + + def MakeBoundParamTest(testcase_params): + @functools.wraps(test_method) + def BoundParamTest(self): + if isinstance(testcase_params, collections_abc.Mapping): + test_method(self, **testcase_params) + elif _NonStringIterable(testcase_params): + test_method(self, *testcase_params) + else: + test_method(self, testcase_params) + + if naming_type is _FIRST_ARG: + # Signal the metaclass that the name of the test function is unique + # and descriptive. 
+ BoundParamTest.__x_use_name__ = True + BoundParamTest.__name__ += str(testcase_params[0]) + testcase_params = testcase_params[1:] + elif naming_type is _ARGUMENT_REPR: + # __x_extra_id__ is used to pass naming information to the __new__ + # method of TestGeneratorMetaclass. + # The metaclass will make sure to create a unique, but nondescriptive + # name for this test. + BoundParamTest.__x_extra_id__ = '(%s)' % ( + _FormatParameterList(testcase_params),) + else: + raise RuntimeError('%s is not a valid naming type.' % (naming_type,)) + + BoundParamTest.__doc__ = '%s(%s)' % ( + BoundParamTest.__name__, _FormatParameterList(testcase_params)) + if test_method.__doc__: + BoundParamTest.__doc__ += '\n%s' % (test_method.__doc__,) + return BoundParamTest + return (MakeBoundParamTest(c) for c in self.testcases) + + +def _IsSingletonList(testcases): + """True iff testcases contains only a single non-tuple element.""" + return len(testcases) == 1 and not isinstance(testcases[0], tuple) + + +def _ModifyClass(class_object, testcases, naming_type): + assert not getattr(class_object, '_id_suffix', None), ( + 'Cannot add parameters to %s,' + ' which already has parameterized methods.' % (class_object,)) + class_object._id_suffix = id_suffix = {} + # We change the size of __dict__ while we iterate over it, + # which Python 3.x will complain about, so use copy(). + for name, obj in class_object.__dict__.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) + and isinstance(obj, types.FunctionType)): + delattr(class_object, name) + methods = {} + _UpdateClassDictForParamTestCase( + methods, id_suffix, name, + _ParameterizedTestIter(obj, testcases, naming_type)) + for name, meth in methods.items(): + setattr(class_object, name, meth) + + +def _ParameterDecorator(naming_type, testcases): + """Implementation of the parameterization decorators. + + Args: + naming_type: The naming type. + testcases: Testcase parameters. + + Returns: + A function for modifying the decorated object. + """ + def _Apply(obj): + if isinstance(obj, type): + _ModifyClass( + obj, + list(testcases) if not isinstance(testcases, collections_abc.Sequence) + else testcases, + naming_type) + return obj + else: + return _ParameterizedTestIter(obj, testcases, naming_type) + + if _IsSingletonList(testcases): + assert _NonStringIterable(testcases[0]), ( + 'Single parameter argument must be a non-string iterable') + testcases = testcases[0] + + return _Apply + + +def parameters(*testcases): # pylint: disable=invalid-name + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. + Args: + *testcases: Parameters for the decorated method, either a single + iterable, or a list of tuples/dicts/objects (for tests + with only one argument). + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _ParameterDecorator(_ARGUMENT_REPR, testcases) + + +def named_parameters(*testcases): # pylint: disable=invalid-name + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. The first element of + each parameter tuple should be a string and will be appended to the + name of the test method. + + Args: + *testcases: Parameters for the decorated method, either a single + iterable, or a list of tuples. + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _ParameterDecorator(_FIRST_ARG, testcases) + + +class TestGeneratorMetaclass(type): + """Metaclass for test cases with test generators. 
+ + A test generator is an iterable in a testcase that produces callables. These + callables must be single-argument methods. These methods are injected into + the class namespace and the original iterable is removed. If the name of the + iterable conforms to the test pattern, the injected methods will be picked + up as tests by the unittest framework. + + In general, it is supposed to be used in conjunction with the + parameters decorator. + """ + + def __new__(mcs, class_name, bases, dct): + dct['_id_suffix'] = id_suffix = {} + for name, obj in dct.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) and + _NonStringIterable(obj)): + iterator = iter(obj) + dct.pop(name) + _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator) + + return type.__new__(mcs, class_name, bases, dct) + + +def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator): + """Adds individual test cases to a dictionary. + + Args: + dct: The target dictionary. + id_suffix: The dictionary for mapping names to test IDs. + name: The original name of the test case. + iterator: The iterator generating the individual test cases. + """ + for idx, func in enumerate(iterator): + assert callable(func), 'Test generators must yield callables, got %r' % ( + func,) + if getattr(func, '__x_use_name__', False): + new_name = func.__name__ + else: + new_name = '%s%s%d' % (name, _SEPARATOR, idx) + assert new_name not in dct, ( + 'Name of parameterized test case "%s" not unique' % (new_name,)) + dct[new_name] = func + id_suffix[new_name] = getattr(func, '__x_extra_id__', '') + + +class TestCase(unittest.TestCase, metaclass=TestGeneratorMetaclass): + """Base class for test cases using the parameters decorator.""" + + def _OriginalName(self): + return self._testMethodName.split(_SEPARATOR)[0] + + def __str__(self): + return '%s (%s)' % (self._OriginalName(), _StrClass(self.__class__)) + + def id(self): # pylint: disable=invalid-name + """Returns the descriptive ID of the test. + + This is used internally by the unittesting framework to get a name + for the test to be used in reports. + + Returns: + The test id. + """ + return '%s.%s%s' % (_StrClass(self.__class__), + self._OriginalName(), + self._id_suffix.get(self._testMethodName, '')) + + +def CoopTestCase(other_base_class): + """Returns a new base class with a cooperative metaclass base. + + This enables the TestCase to be used in combination + with other base classes that have custom metaclasses, such as + mox.MoxTestBase. + + Only works with metaclasses that do not override type.__new__. + + Example: + + import google3 + import mox + + from google3.testing.pybase import parameterized + + class ExampleTest(parameterized.CoopTestCase(mox.MoxTestBase)): + ... + + Args: + other_base_class: (class) A test case base class. + + Returns: + A new class object. + """ + metaclass = type( + 'CoopMetaclass', + (other_base_class.__metaclass__, + TestGeneratorMetaclass), {}) + return metaclass( + 'CoopTestCase', + (other_base_class, TestCase), {}) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/api_implementation.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/api_implementation.py new file mode 100644 index 0000000000..7fef237670 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/api_implementation.py @@ -0,0 +1,112 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Determine which implementation of the protobuf API is used in this process. +""" + +import os +import sys +import warnings + +try: + # pylint: disable=g-import-not-at-top + from google.protobuf.internal import _api_implementation + # The compile-time constants in the _api_implementation module can be used to + # switch to a certain implementation of the Python API at build time. + _api_version = _api_implementation.api_version +except ImportError: + _api_version = -1 # Unspecified by compiler flags. + +if _api_version == 1: + raise ValueError('api_version=1 is no longer supported.') + + +_default_implementation_type = ('cpp' if _api_version > 0 else 'python') + + +# This environment variable can be used to switch to a certain implementation +# of the Python API, overriding the compile-time constants in the +# _api_implementation module. Right now only 'python' and 'cpp' are valid +# values. Any other value will be ignored. +_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', + _default_implementation_type) + +if _implementation_type != 'python': + _implementation_type = 'cpp' + +if 'PyPy' in sys.version and _implementation_type == 'cpp': + warnings.warn('PyPy does not work yet with cpp protocol buffers. ' + 'Falling back to the python implementation.') + _implementation_type = 'python' + + +# Detect if serialization should be deterministic by default +try: + # The presence of this module in a build allows the proto implementation to + # be upgraded merely via build deps. + # + # NOTE: Merely importing this automatically enables deterministic proto + # serialization for C++ code, but we still need to export it as a boolean so + # that we can do the same for `_implementation_type == 'python'`. + # + # NOTE2: It is possible for C++ code to enable deterministic serialization by + # default _without_ affecting Python code, if the C++ implementation is not in + # use by this module. 
That is intended behavior, so we don't actually expose + # this boolean outside of this module. + # + # pylint: disable=g-import-not-at-top,unused-import + from google.protobuf import enable_deterministic_proto_serialization + _python_deterministic_proto_serialization = True +except ImportError: + _python_deterministic_proto_serialization = False + + +# Usage of this function is discouraged. Clients shouldn't care which +# implementation of the API is in use. Note that there is no guarantee +# that differences between APIs will be maintained. +# Please don't use this function if possible. +def Type(): + return _implementation_type + + +def _SetType(implementation_type): + """Never use! Only for protobuf benchmark.""" + global _implementation_type + _implementation_type = implementation_type + + +# See comment on 'Type' above. +def Version(): + return 2 + + +# For internal use only +def IsPythonDefaultSerializationDeterministic(): + return _python_deterministic_proto_serialization diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/builder.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/builder.py new file mode 100644 index 0000000000..64353ee4af --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/builder.py @@ -0,0 +1,130 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Builds descriptors, message classes and services for generated _pb2.py. + +This file is only called in python generated _pb2.py files. It builds +descriptors, message classes and services that users can directly use +in generated code. 
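+
+For example, a generated module such as duration_pb2.py calls into this
+module roughly as follows (the serialized FileDescriptorProto bytes are
+elided here):
+
+  DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(serialized_pb)
+  _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+  _builder.BuildTopDescriptorsAndMessages(
+      DESCRIPTOR, 'google.protobuf.duration_pb2', globals())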
+""" + +__author__ = 'jieluo@google.com (Jie Luo)' + +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +_sym_db = _symbol_database.Default() + + +def BuildMessageAndEnumDescriptors(file_des, module): + """Builds message and enum descriptors. + + Args: + file_des: FileDescriptor of the .proto file + module: Generated _pb2 module + """ + + def BuildNestedDescriptors(msg_des, prefix): + for (name, nested_msg) in msg_des.nested_types_by_name.items(): + module_name = prefix + name.upper() + module[module_name] = nested_msg + BuildNestedDescriptors(nested_msg, module_name + '_') + for enum_des in msg_des.enum_types: + module[prefix + enum_des.name.upper()] = enum_des + + for (name, msg_des) in file_des.message_types_by_name.items(): + module_name = '_' + name.upper() + module[module_name] = msg_des + BuildNestedDescriptors(msg_des, module_name + '_') + + +def BuildTopDescriptorsAndMessages(file_des, module_name, module): + """Builds top level descriptors and message classes. + + Args: + file_des: FileDescriptor of the .proto file + module_name: str, the name of generated _pb2 module + module: Generated _pb2 module + """ + + def BuildMessage(msg_des): + create_dict = {} + for (name, nested_msg) in msg_des.nested_types_by_name.items(): + create_dict[name] = BuildMessage(nested_msg) + create_dict['DESCRIPTOR'] = msg_des + create_dict['__module__'] = module_name + message_class = _reflection.GeneratedProtocolMessageType( + msg_des.name, (_message.Message,), create_dict) + _sym_db.RegisterMessage(message_class) + return message_class + + # top level enums + for (name, enum_des) in file_des.enum_types_by_name.items(): + module['_' + name.upper()] = enum_des + module[name] = enum_type_wrapper.EnumTypeWrapper(enum_des) + for enum_value in enum_des.values: + module[enum_value.name] = enum_value.number + + # top level extensions + for (name, extension_des) in file_des.extensions_by_name.items(): + module[name.upper() + '_FIELD_NUMBER'] = extension_des.number + module[name] = extension_des + + # services + for (name, service) in file_des.services_by_name.items(): + module['_' + name.upper()] = service + + # Build messages. + for (name, msg_des) in file_des.message_types_by_name.items(): + module[name] = BuildMessage(msg_des) + + +def BuildServices(file_des, module_name, module): + """Builds services classes and services stub class. 
+ + Args: + file_des: FileDescriptor of the .proto file + module_name: str, the name of generated _pb2 module + module: Generated _pb2 module + """ + # pylint: disable=g-import-not-at-top + from google.protobuf import service as _service + from google.protobuf import service_reflection + # pylint: enable=g-import-not-at-top + for (name, service) in file_des.services_by_name.items(): + module[name] = service_reflection.GeneratedServiceType( + name, (_service.Service,), + dict(DESCRIPTOR=service, __module__=module_name)) + stub_name = name + '_Stub' + module[stub_name] = service_reflection.GeneratedServiceStubType( + stub_name, (module[name],), + dict(DESCRIPTOR=service, __module__=module_name)) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/containers.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/containers.py new file mode 100644 index 0000000000..29fbb53d2f --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/containers.py @@ -0,0 +1,710 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains container classes to represent different protocol buffer types. + +This file defines container classes which represent categories of protocol +buffer field types which need extra maintenance. Currently these categories +are: + +- Repeated scalar fields - These are all repeated fields which aren't + composite (e.g. they are of simple types like int32, string, etc). +- Repeated composite fields - Repeated fields which are composite. This + includes groups and nested messages. 
+""" + +import collections.abc +import copy +import pickle +from typing import ( + Any, + Iterable, + Iterator, + List, + MutableMapping, + MutableSequence, + NoReturn, + Optional, + Sequence, + TypeVar, + Union, + overload, +) + + +_T = TypeVar('_T') +_K = TypeVar('_K') +_V = TypeVar('_V') + + +class BaseContainer(Sequence[_T]): + """Base container class.""" + + # Minimizes memory usage and disallows assignment to other attributes. + __slots__ = ['_message_listener', '_values'] + + def __init__(self, message_listener: Any) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The RepeatedScalarFieldContainer will call this object's + Modified() method when it is modified. + """ + self._message_listener = message_listener + self._values = [] + + @overload + def __getitem__(self, key: int) -> _T: + ... + + @overload + def __getitem__(self, key: slice) -> List[_T]: + ... + + def __getitem__(self, key): + """Retrieves item by the specified key.""" + return self._values[key] + + def __len__(self) -> int: + """Returns the number of elements in the container.""" + return len(self._values) + + def __ne__(self, other: Any) -> bool: + """Checks if another instance isn't equal to this one.""" + # The concrete classes should define __eq__. + return not self == other + + __hash__ = None + + def __repr__(self) -> str: + return repr(self._values) + + def sort(self, *args, **kwargs) -> None: + # Continue to support the old sort_function keyword argument. + # This is expected to be a rare occurrence, so use LBYL to avoid + # the overhead of actually catching KeyError. + if 'sort_function' in kwargs: + kwargs['cmp'] = kwargs.pop('sort_function') + self._values.sort(*args, **kwargs) + + def reverse(self) -> None: + self._values.reverse() + + +# TODO(slebedev): Remove this. BaseContainer does *not* conform to +# MutableSequence, only its subclasses do. +collections.abc.MutableSequence.register(BaseContainer) + + +class RepeatedScalarFieldContainer(BaseContainer[_T], MutableSequence[_T]): + """Simple, type-checked, list-like container for holding repeated scalars.""" + + # Disallows assignment to other attributes. + __slots__ = ['_type_checker'] + + def __init__( + self, + message_listener: Any, + type_checker: Any, + ) -> None: + """Args: + + message_listener: A MessageListener implementation. The + RepeatedScalarFieldContainer will call this object's Modified() method + when it is modified. + type_checker: A type_checkers.ValueChecker instance to run on elements + inserted into this container. + """ + super().__init__(message_listener) + self._type_checker = type_checker + + def append(self, value: _T) -> None: + """Appends an item to the list. Similar to list.append().""" + self._values.append(self._type_checker.CheckValue(value)) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def insert(self, key: int, value: _T) -> None: + """Inserts the item at the specified position. Similar to list.insert().""" + self._values.insert(key, self._type_checker.CheckValue(value)) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def extend(self, elem_seq: Iterable[_T]) -> None: + """Extends by appending the given iterable. Similar to list.extend().""" + if elem_seq is None: + return + try: + elem_seq_iter = iter(elem_seq) + except TypeError: + if not elem_seq: + # silently ignore falsy inputs :-/. + # TODO(ptucker): Deprecate this behavior. 
b/18413862 + return + raise + + new_values = [self._type_checker.CheckValue(elem) for elem in elem_seq_iter] + if new_values: + self._values.extend(new_values) + self._message_listener.Modified() + + def MergeFrom( + self, + other: Union['RepeatedScalarFieldContainer[_T]', Iterable[_T]], + ) -> None: + """Appends the contents of another repeated field of the same type to this + one. We do not check the types of the individual fields. + """ + self._values.extend(other) + self._message_listener.Modified() + + def remove(self, elem: _T): + """Removes an item from the list. Similar to list.remove().""" + self._values.remove(elem) + self._message_listener.Modified() + + def pop(self, key: Optional[int] = -1) -> _T: + """Removes and returns an item at a given index. Similar to list.pop().""" + value = self._values[key] + self.__delitem__(key) + return value + + @overload + def __setitem__(self, key: int, value: _T) -> None: + ... + + @overload + def __setitem__(self, key: slice, value: Iterable[_T]) -> None: + ... + + def __setitem__(self, key, value) -> None: + """Sets the item on the specified position.""" + if isinstance(key, slice): + if key.step is not None: + raise ValueError('Extended slices not supported') + self._values[key] = map(self._type_checker.CheckValue, value) + self._message_listener.Modified() + else: + self._values[key] = self._type_checker.CheckValue(value) + self._message_listener.Modified() + + def __delitem__(self, key: Union[int, slice]) -> None: + """Deletes the item at the specified position.""" + del self._values[key] + self._message_listener.Modified() + + def __eq__(self, other: Any) -> bool: + """Compares the current instance with another one.""" + if self is other: + return True + # Special case for the same type which should be common and fast. + if isinstance(other, self.__class__): + return other._values == self._values + # We are presumably comparing against some other sequence type. + return other == self._values + + def __deepcopy__( + self, + unused_memo: Any = None, + ) -> 'RepeatedScalarFieldContainer[_T]': + clone = RepeatedScalarFieldContainer( + copy.deepcopy(self._message_listener), self._type_checker) + clone.MergeFrom(self) + return clone + + def __reduce__(self, **kwargs) -> NoReturn: + raise pickle.PickleError( + "Can't pickle repeated scalar fields, convert to list first") + + +# TODO(slebedev): Constrain T to be a subtype of Message. +class RepeatedCompositeFieldContainer(BaseContainer[_T], MutableSequence[_T]): + """Simple, list-like container for holding repeated composite fields.""" + + # Disallows assignment to other attributes. + __slots__ = ['_message_descriptor'] + + def __init__(self, message_listener: Any, message_descriptor: Any) -> None: + """ + Note that we pass in a descriptor instead of the generated directly, + since at the time we construct a _RepeatedCompositeFieldContainer we + haven't yet necessarily initialized the type that will be contained in the + container. + + Args: + message_listener: A MessageListener implementation. + The RepeatedCompositeFieldContainer will call this object's + Modified() method when it is modified. + message_descriptor: A Descriptor instance describing the protocol type + that should be present in this container. We'll use the + _concrete_class field of this descriptor when the client calls add(). + """ + super().__init__(message_listener) + self._message_descriptor = message_descriptor + + def add(self, **kwargs: Any) -> _T: + """Adds a new element at the end of the list and returns it. 
Keyword + arguments may be used to initialize the element. + """ + new_element = self._message_descriptor._concrete_class(**kwargs) + new_element._SetListener(self._message_listener) + self._values.append(new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + return new_element + + def append(self, value: _T) -> None: + """Appends one element by copying the message.""" + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + new_element.CopyFrom(value) + self._values.append(new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def insert(self, key: int, value: _T) -> None: + """Inserts the item at the specified position by copying.""" + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + new_element.CopyFrom(value) + self._values.insert(key, new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def extend(self, elem_seq: Iterable[_T]) -> None: + """Extends by appending the given sequence of elements of the same type + + as this one, copying each individual message. + """ + message_class = self._message_descriptor._concrete_class + listener = self._message_listener + values = self._values + for message in elem_seq: + new_element = message_class() + new_element._SetListener(listener) + new_element.MergeFrom(message) + values.append(new_element) + listener.Modified() + + def MergeFrom( + self, + other: Union['RepeatedCompositeFieldContainer[_T]', Iterable[_T]], + ) -> None: + """Appends the contents of another repeated field of the same type to this + one, copying each individual message. + """ + self.extend(other) + + def remove(self, elem: _T) -> None: + """Removes an item from the list. Similar to list.remove().""" + self._values.remove(elem) + self._message_listener.Modified() + + def pop(self, key: Optional[int] = -1) -> _T: + """Removes and returns an item at a given index. Similar to list.pop().""" + value = self._values[key] + self.__delitem__(key) + return value + + @overload + def __setitem__(self, key: int, value: _T) -> None: + ... + + @overload + def __setitem__(self, key: slice, value: Iterable[_T]) -> None: + ... + + def __setitem__(self, key, value): + # This method is implemented to make RepeatedCompositeFieldContainer + # structurally compatible with typing.MutableSequence. It is + # otherwise unsupported and will always raise an error. + raise TypeError( + f'{self.__class__.__name__} object does not support item assignment') + + def __delitem__(self, key: Union[int, slice]) -> None: + """Deletes the item at the specified position.""" + del self._values[key] + self._message_listener.Modified() + + def __eq__(self, other: Any) -> bool: + """Compares the current instance with another one.""" + if self is other: + return True + if not isinstance(other, self.__class__): + raise TypeError('Can only compare repeated composite fields against ' + 'other repeated composite fields.') + return self._values == other._values + + +class ScalarMap(MutableMapping[_K, _V]): + """Simple, type-checked, dict-like container for holding repeated scalars.""" + + # Disallows assignment to other attributes. 
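To ground the two repeated-field containers above, here is a short usage sketch against messages bundled with protobuf itself (`FileDescriptorProto.dependency` is `repeated string`, `FileDescriptorProto.message_type` is a repeated submessage). The copy-on-`append` behavior follows from the `CopyFrom` calls above.

```python
from google.protobuf import descriptor_pb2

fdp = descriptor_pb2.FileDescriptorProto()

# RepeatedScalarFieldContainer: type-checked append/extend.
fdp.dependency.append('other.proto')
fdp.dependency.extend(['a.proto'])
try:
    fdp.dependency.append(123)        # CheckValue() rejects non-strings
except TypeError:
    pass
assert list(fdp.dependency) == ['other.proto', 'a.proto']

# RepeatedCompositeFieldContainer: add() returns the new element, while
# append() stores a copy of the message passed in.
added = fdp.message_type.add(name='Example')
loose = descriptor_pb2.DescriptorProto(name='Copied')
fdp.message_type.append(loose)
loose.name = 'MutatedLater'           # does not affect the stored copy
assert added.name == 'Example'
assert [m.name for m in fdp.message_type] == ['Example', 'Copied']
```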
+ __slots__ = ['_key_checker', '_value_checker', '_values', '_message_listener', + '_entry_descriptor'] + + def __init__( + self, + message_listener: Any, + key_checker: Any, + value_checker: Any, + entry_descriptor: Any, + ) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The ScalarMap will call this object's Modified() method when it + is modified. + key_checker: A type_checkers.ValueChecker instance to run on keys + inserted into this container. + value_checker: A type_checkers.ValueChecker instance to run on values + inserted into this container. + entry_descriptor: The MessageDescriptor of a map entry: key and value. + """ + self._message_listener = message_listener + self._key_checker = key_checker + self._value_checker = value_checker + self._entry_descriptor = entry_descriptor + self._values = {} + + def __getitem__(self, key: _K) -> _V: + try: + return self._values[key] + except KeyError: + key = self._key_checker.CheckValue(key) + val = self._value_checker.DefaultValue() + self._values[key] = val + return val + + def __contains__(self, item: _K) -> bool: + # We check the key's type to match the strong-typing flavor of the API. + # Also this makes it easier to match the behavior of the C++ implementation. + self._key_checker.CheckValue(item) + return item in self._values + + @overload + def get(self, key: _K) -> Optional[_V]: + ... + + @overload + def get(self, key: _K, default: _T) -> Union[_V, _T]: + ... + + # We need to override this explicitly, because our defaultdict-like behavior + # will make the default implementation (from our base class) always insert + # the key. + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def __setitem__(self, key: _K, value: _V) -> _T: + checked_key = self._key_checker.CheckValue(key) + checked_value = self._value_checker.CheckValue(value) + self._values[checked_key] = checked_value + self._message_listener.Modified() + + def __delitem__(self, key: _K) -> None: + del self._values[key] + self._message_listener.Modified() + + def __len__(self) -> int: + return len(self._values) + + def __iter__(self) -> Iterator[_K]: + return iter(self._values) + + def __repr__(self) -> str: + return repr(self._values) + + def MergeFrom(self, other: 'ScalarMap[_K, _V]') -> None: + self._values.update(other._values) + self._message_listener.Modified() + + def InvalidateIterators(self) -> None: + # It appears that the only way to reliably invalidate iterators to + # self._values is to ensure that its size changes. + original = self._values + self._values = original.copy() + original[None] = None + + # This is defined in the abstract base, but we can do it much more cheaply. + def clear(self) -> None: + self._values.clear() + self._message_listener.Modified() + + def GetEntryClass(self) -> Any: + return self._entry_descriptor._concrete_class + + +class MessageMap(MutableMapping[_K, _V]): + """Simple, type-checked, dict-like container for with submessage values.""" + + # Disallows assignment to other attributes. + __slots__ = ['_key_checker', '_values', '_message_listener', + '_message_descriptor', '_entry_descriptor'] + + def __init__( + self, + message_listener: Any, + message_descriptor: Any, + key_checker: Any, + entry_descriptor: Any, + ) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The ScalarMap will call this object's Modified() method when it + is modified. 
+ key_checker: A type_checkers.ValueChecker instance to run on keys + inserted into this container. + value_checker: A type_checkers.ValueChecker instance to run on values + inserted into this container. + entry_descriptor: The MessageDescriptor of a map entry: key and value. + """ + self._message_listener = message_listener + self._message_descriptor = message_descriptor + self._key_checker = key_checker + self._entry_descriptor = entry_descriptor + self._values = {} + + def __getitem__(self, key: _K) -> _V: + key = self._key_checker.CheckValue(key) + try: + return self._values[key] + except KeyError: + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + self._values[key] = new_element + self._message_listener.Modified() + return new_element + + def get_or_create(self, key: _K) -> _V: + """get_or_create() is an alias for getitem (ie. map[key]). + + Args: + key: The key to get or create in the map. + + This is useful in cases where you want to be explicit that the call is + mutating the map. This can avoid lint errors for statements like this + that otherwise would appear to be pointless statements: + + msg.my_map[key] + """ + return self[key] + + @overload + def get(self, key: _K) -> Optional[_V]: + ... + + @overload + def get(self, key: _K, default: _T) -> Union[_V, _T]: + ... + + # We need to override this explicitly, because our defaultdict-like behavior + # will make the default implementation (from our base class) always insert + # the key. + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def __contains__(self, item: _K) -> bool: + item = self._key_checker.CheckValue(item) + return item in self._values + + def __setitem__(self, key: _K, value: _V) -> NoReturn: + raise ValueError('May not set values directly, call my_map[key].foo = 5') + + def __delitem__(self, key: _K) -> None: + key = self._key_checker.CheckValue(key) + del self._values[key] + self._message_listener.Modified() + + def __len__(self) -> int: + return len(self._values) + + def __iter__(self) -> Iterator[_K]: + return iter(self._values) + + def __repr__(self) -> str: + return repr(self._values) + + def MergeFrom(self, other: 'MessageMap[_K, _V]') -> None: + # pylint: disable=protected-access + for key in other._values: + # According to documentation: "When parsing from the wire or when merging, + # if there are duplicate map keys the last key seen is used". + if key in self: + del self[key] + self[key].CopyFrom(other[key]) + # self._message_listener.Modified() not required here, because + # mutations to submessages already propagate. + + def InvalidateIterators(self) -> None: + # It appears that the only way to reliably invalidate iterators to + # self._values is to ensure that its size changes. + original = self._values + self._values = original.copy() + original[None] = None + + # This is defined in the abstract base, but we can do it much more cheaply. + def clear(self) -> None: + self._values.clear() + self._message_listener.Modified() + + def GetEntryClass(self) -> Any: + return self._entry_descriptor._concrete_class + + +class _UnknownField: + """A parsed unknown field.""" + + # Disallows assignment to other attributes. 
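The defaultdict-like behavior documented above is easy to see with `struct_pb2.Struct`, whose `fields` member is a `map<string, Value>` and therefore a MessageMap; scalar maps behave the same way except that plain assignment is allowed.

```python
from google.protobuf import struct_pb2

s = struct_pb2.Struct()

# Reading a missing key creates the submessage in place (see __getitem__).
s.fields['answer'].number_value = 42.0
assert 'answer' in s.fields

# Direct assignment of a submessage is rejected (see __setitem__ above);
# mutate the entry returned by the map instead.
try:
    s.fields['answer'] = struct_pb2.Value(number_value=1.0)
except ValueError:
    pass
```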
+ __slots__ = ['_field_number', '_wire_type', '_data'] + + def __init__(self, field_number, wire_type, data): + self._field_number = field_number + self._wire_type = wire_type + self._data = data + return + + def __lt__(self, other): + # pylint: disable=protected-access + return self._field_number < other._field_number + + def __eq__(self, other): + if self is other: + return True + # pylint: disable=protected-access + return (self._field_number == other._field_number and + self._wire_type == other._wire_type and + self._data == other._data) + + +class UnknownFieldRef: # pylint: disable=missing-class-docstring + + def __init__(self, parent, index): + self._parent = parent + self._index = index + + def _check_valid(self): + if not self._parent: + raise ValueError('UnknownField does not exist. ' + 'The parent message might be cleared.') + if self._index >= len(self._parent): + raise ValueError('UnknownField does not exist. ' + 'The parent message might be cleared.') + + @property + def field_number(self): + self._check_valid() + # pylint: disable=protected-access + return self._parent._internal_get(self._index)._field_number + + @property + def wire_type(self): + self._check_valid() + # pylint: disable=protected-access + return self._parent._internal_get(self._index)._wire_type + + @property + def data(self): + self._check_valid() + # pylint: disable=protected-access + return self._parent._internal_get(self._index)._data + + +class UnknownFieldSet: + """UnknownField container""" + + # Disallows assignment to other attributes. + __slots__ = ['_values'] + + def __init__(self): + self._values = [] + + def __getitem__(self, index): + if self._values is None: + raise ValueError('UnknownFields does not exist. ' + 'The parent message might be cleared.') + size = len(self._values) + if index < 0: + index += size + if index < 0 or index >= size: + raise IndexError('index %d out of range'.index) + + return UnknownFieldRef(self, index) + + def _internal_get(self, index): + return self._values[index] + + def __len__(self): + if self._values is None: + raise ValueError('UnknownFields does not exist. ' + 'The parent message might be cleared.') + return len(self._values) + + def _add(self, field_number, wire_type, data): + unknown_field = _UnknownField(field_number, wire_type, data) + self._values.append(unknown_field) + return unknown_field + + def __iter__(self): + for i in range(len(self)): + yield UnknownFieldRef(self, i) + + def _extend(self, other): + if other is None: + return + # pylint: disable=protected-access + self._values.extend(other._values) + + def __eq__(self, other): + if self is other: + return True + # Sort unknown fields because their order shouldn't + # affect equality test. + values = list(self._values) + if other is None: + return not values + values.sort() + # pylint: disable=protected-access + other_values = sorted(other._values) + return values == other_values + + def _clear(self): + for value in self._values: + # pylint: disable=protected-access + if isinstance(value._data, UnknownFieldSet): + value._data._clear() # pylint: disable=protected-access + self._values = None diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/decoder.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/decoder.py new file mode 100644 index 0000000000..bc1b7b785c --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/decoder.py @@ -0,0 +1,1029 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Code for decoding protocol buffer primitives. + +This code is very similar to encoder.py -- read the docs for that module first. + +A "decoder" is a function with the signature: + Decode(buffer, pos, end, message, field_dict) +The arguments are: + buffer: The string containing the encoded message. + pos: The current position in the string. + end: The position in the string where the current message ends. May be + less than len(buffer) if we're reading a sub-message. + message: The message object into which we're parsing. + field_dict: message._fields (avoids a hashtable lookup). +The decoder reads the field and stores it into field_dict, returning the new +buffer position. A decoder for a repeated field may proactively decode all of +the elements of that field, if they appear consecutively. + +Note that decoders may throw any of the following: + IndexError: Indicates a truncated message. + struct.error: Unpacking of a fixed-width field failed. + message.DecodeError: Other errors. + +Decoders are expected to raise an exception if they are called with pos > end. +This allows callers to be lax about bounds checking: it's fineto read past +"end" as long as you are sure that someone else will notice and throw an +exception later on. + +Something up the call stack is expected to catch IndexError and struct.error +and convert them to message.DecodeError. + +Decoders are constructed using decoder constructors with the signature: + MakeDecoder(field_number, is_repeated, is_packed, key, new_default) +The arguments are: + field_number: The field number of the field we want to decode. + is_repeated: Is the field a repeated field? (bool) + is_packed: Is the field a packed field? (bool) + key: The key to use when looking up the field within field_dict. + (This is actually the FieldDescriptor but nothing in this + file should depend on that.) + new_default: A function which takes a message object as a parameter and + returns a new instance of the default value for this field. 
+ (This is called for repeated fields and sub-messages, when an + instance does not already exist.) + +As with encoders, we define a decoder constructor for every type of field. +Then, for every field of every message class we construct an actual decoder. +That decoder goes into a dict indexed by tag, so when we decode a message +we repeatedly read a tag, look up the corresponding decoder, and invoke it. +""" + +__author__ = 'kenton@google.com (Kenton Varda)' + +import math +import struct + +from google.protobuf.internal import containers +from google.protobuf.internal import encoder +from google.protobuf.internal import wire_format +from google.protobuf import message + + +# This is not for optimization, but rather to avoid conflicts with local +# variables named "message". +_DecodeError = message.DecodeError + + +def _VarintDecoder(mask, result_type): + """Return an encoder for a basic varint value (does not include tag). + + Decoded values will be bitwise-anded with the given mask before being + returned, e.g. to limit them to 32 bits. The returned decoder does not + take the usual "end" parameter -- the caller is expected to do bounds checking + after the fact (often the caller can defer such checking until later). The + decoder returns a (value, new_pos) pair. + """ + + def DecodeVarint(buffer, pos): + result = 0 + shift = 0 + while 1: + b = buffer[pos] + result |= ((b & 0x7f) << shift) + pos += 1 + if not (b & 0x80): + result &= mask + result = result_type(result) + return (result, pos) + shift += 7 + if shift >= 64: + raise _DecodeError('Too many bytes when decoding varint.') + return DecodeVarint + + +def _SignedVarintDecoder(bits, result_type): + """Like _VarintDecoder() but decodes signed values.""" + + signbit = 1 << (bits - 1) + mask = (1 << bits) - 1 + + def DecodeVarint(buffer, pos): + result = 0 + shift = 0 + while 1: + b = buffer[pos] + result |= ((b & 0x7f) << shift) + pos += 1 + if not (b & 0x80): + result &= mask + result = (result ^ signbit) - signbit + result = result_type(result) + return (result, pos) + shift += 7 + if shift >= 64: + raise _DecodeError('Too many bytes when decoding varint.') + return DecodeVarint + +# All 32-bit and 64-bit values are represented as int. +_DecodeVarint = _VarintDecoder((1 << 64) - 1, int) +_DecodeSignedVarint = _SignedVarintDecoder(64, int) + +# Use these versions for values which must be limited to 32 bits. +_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int) +_DecodeSignedVarint32 = _SignedVarintDecoder(32, int) + + +def ReadTag(buffer, pos): + """Read a tag from the memoryview, and return a (tag_bytes, new_pos) tuple. + + We return the raw bytes of the tag rather than decoding them. The raw + bytes can then be used to look up the proper decoder. This effectively allows + us to trade some work that would be done in pure-python (decoding a varint) + for work that is done in C (searching for a byte string in a hash table). + In a low-level language it would be much cheaper to decode the varint and + use that, but not in Python. + + Args: + buffer: memoryview object of the encoded bytes + pos: int of the current position to start from + + Returns: + Tuple[bytes, int] of the tag data and new position. + """ + start = pos + while buffer[pos] & 0x80: + pos += 1 + pos += 1 + + tag_bytes = buffer[start:pos].tobytes() + return tag_bytes, pos + + +# -------------------------------------------------------------------- + + +def _SimpleDecoder(wire_type, decode_value): + """Return a constructor for a decoder for fields of a particular type. 
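A standalone check of the varint scheme `_VarintDecoder` implements (7 payload bits per byte, least-significant group first, high bit as continuation flag) and of the tag layout whose raw bytes `ReadTag` returns. The helper below is illustrative only, not part of this module.

```python
def decode_varint(buf, pos):
    """Minimal sketch of the decoding loop above."""
    result, shift = 0, 0
    while True:
        b = buf[pos]
        result |= (b & 0x7F) << shift
        pos += 1
        if not b & 0x80:
            return result, pos
        shift += 7
        if shift >= 64:
            raise ValueError('Too many bytes when decoding varint.')

assert decode_varint(b'\xac\x02', 0) == (300, 2)

# A tag is itself a varint holding (field_number << 3) | wire_type, so field 1
# with the length-delimited wire type (2) is the familiar leading byte 0x0A.
assert (1 << 3) | 2 == 0x0A
```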
+ + Args: + wire_type: The field's wire type. + decode_value: A function which decodes an individual value, e.g. + _DecodeVarint() + """ + + def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + if is_packed: + local_DecodeVarint = _DecodeVarint + def DecodePackedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + (endpoint, pos) = local_DecodeVarint(buffer, pos) + endpoint += pos + if endpoint > end: + raise _DecodeError('Truncated message.') + while pos < endpoint: + (element, pos) = decode_value(buffer, pos) + value.append(element) + if pos > endpoint: + del value[-1] # Discard corrupt value. + raise _DecodeError('Packed element was truncated.') + return pos + return DecodePackedField + elif is_repeated: + tag_bytes = encoder.TagBytes(field_number, wire_type) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (element, new_pos) = decode_value(buffer, pos) + value.append(element) + # Predict that the next tag is another copy of the same repeated + # field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos >= end: + # Prediction failed. Return. + if new_pos > end: + raise _DecodeError('Truncated message.') + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (new_value, pos) = decode_value(buffer, pos) + if pos > end: + raise _DecodeError('Truncated message.') + if clear_if_default and not new_value: + field_dict.pop(key, None) + else: + field_dict[key] = new_value + return pos + return DecodeField + + return SpecificDecoder + + +def _ModifiedDecoder(wire_type, decode_value, modify_value): + """Like SimpleDecoder but additionally invokes modify_value on every value + before storing it. Usually modify_value is ZigZagDecode. + """ + + # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but + # not enough to make a significant difference. + + def InnerDecode(buffer, pos): + (result, new_pos) = decode_value(buffer, pos) + return (modify_value(result), new_pos) + return _SimpleDecoder(wire_type, InnerDecode) + + +def _StructPackDecoder(wire_type, format): + """Return a constructor for a decoder for a fixed-width field. + + Args: + wire_type: The field's wire type. + format: The format string to pass to struct.unpack(). + """ + + value_size = struct.calcsize(format) + local_unpack = struct.unpack + + # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but + # not enough to make a significant difference. + + # Note that we expect someone up-stack to catch struct.error and convert + # it to _DecodeError -- this way we don't have to set up exception- + # handling blocks every time we parse one value. + + def InnerDecode(buffer, pos): + new_pos = pos + value_size + result = local_unpack(format, buffer[pos:new_pos])[0] + return (result, new_pos) + return _SimpleDecoder(wire_type, InnerDecode) + + +def _FloatDecoder(): + """Returns a decoder for a float field. + + This code works around a bug in struct.unpack for non-finite 32-bit + floating-point values. + """ + + local_unpack = struct.unpack + + def InnerDecode(buffer, pos): + """Decode serialized float to a float and new position. 
+ + Args: + buffer: memoryview of the serialized bytes + pos: int, position in the memory view to start at. + + Returns: + Tuple[float, int] of the deserialized float value and new position + in the serialized data. + """ + # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign + # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand. + new_pos = pos + 4 + float_bytes = buffer[pos:new_pos].tobytes() + + # If this value has all its exponent bits set, then it's non-finite. + # In Python 2.4, struct.unpack will convert it to a finite 64-bit value. + # To avoid that, we parse it specially. + if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'): + # If at least one significand bit is set... + if float_bytes[0:3] != b'\x00\x00\x80': + return (math.nan, new_pos) + # If sign bit is set... + if float_bytes[3:4] == b'\xFF': + return (-math.inf, new_pos) + return (math.inf, new_pos) + + # Note that we expect someone up-stack to catch struct.error and convert + # it to _DecodeError -- this way we don't have to set up exception- + # handling blocks every time we parse one value. + result = local_unpack('= b'\xF0') + and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')): + return (math.nan, new_pos) + + # Note that we expect someone up-stack to catch struct.error and convert + # it to _DecodeError -- this way we don't have to set up exception- + # handling blocks every time we parse one value. + result = local_unpack(' end: + raise _DecodeError('Truncated message.') + while pos < endpoint: + value_start_pos = pos + (element, pos) = _DecodeSignedVarint32(buffer, pos) + # pylint: disable=protected-access + if element in enum_type.values_by_number: + value.append(element) + else: + if not message._unknown_fields: + message._unknown_fields = [] + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_VARINT) + + message._unknown_fields.append( + (tag_bytes, buffer[value_start_pos:pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + field_number, wire_format.WIRETYPE_VARINT, element) + # pylint: enable=protected-access + if pos > endpoint: + if element in enum_type.values_by_number: + del value[-1] # Discard corrupt value. + else: + del message._unknown_fields[-1] + # pylint: disable=protected-access + del message._unknown_field_set._values[-1] + # pylint: enable=protected-access + raise _DecodeError('Packed element was truncated.') + return pos + return DecodePackedField + elif is_repeated: + tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + """Decode serialized repeated enum to its value and a new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. 
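The non-finite byte patterns special-cased by the float and double decoders above are the standard IEEE 754 encodings in little-endian order (the same decoders otherwise fall back to `struct.unpack` with little-endian formats such as `'<f'` and `'<d'`). A quick standalone check of the single-precision patterns:

```python
import math
import struct

assert struct.unpack('<f', b'\x00\x00\x80\x7f')[0] == math.inf    # +inf
assert struct.unpack('<f', b'\x00\x00\x80\xff')[0] == -math.inf   # -inf
assert math.isnan(struct.unpack('<f', b'\x00\x00\xc0\x7f')[0])    # quiet NaN
```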
+ """ + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (element, new_pos) = _DecodeSignedVarint32(buffer, pos) + # pylint: disable=protected-access + if element in enum_type.values_by_number: + value.append(element) + else: + if not message._unknown_fields: + message._unknown_fields = [] + message._unknown_fields.append( + (tag_bytes, buffer[pos:new_pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + field_number, wire_format.WIRETYPE_VARINT, element) + # pylint: enable=protected-access + # Predict that the next tag is another copy of the same repeated + # field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos >= end: + # Prediction failed. Return. + if new_pos > end: + raise _DecodeError('Truncated message.') + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + """Decode serialized repeated enum to its value and a new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + value_start_pos = pos + (enum_value, pos) = _DecodeSignedVarint32(buffer, pos) + if pos > end: + raise _DecodeError('Truncated message.') + if clear_if_default and not enum_value: + field_dict.pop(key, None) + return pos + # pylint: disable=protected-access + if enum_value in enum_type.values_by_number: + field_dict[key] = enum_value + else: + if not message._unknown_fields: + message._unknown_fields = [] + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_VARINT) + message._unknown_fields.append( + (tag_bytes, buffer[value_start_pos:pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + field_number, wire_format.WIRETYPE_VARINT, enum_value) + # pylint: enable=protected-access + return pos + return DecodeField + + +# -------------------------------------------------------------------- + + +Int32Decoder = _SimpleDecoder( + wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32) + +Int64Decoder = _SimpleDecoder( + wire_format.WIRETYPE_VARINT, _DecodeSignedVarint) + +UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32) +UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint) + +SInt32Decoder = _ModifiedDecoder( + wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode) +SInt64Decoder = _ModifiedDecoder( + wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode) + +# Note that Python conveniently guarantees that when using the '<' prefix on +# formats, they will also have the same size across all platforms (as opposed +# to without the prefix, where their sizes depend on the C compiler's basic +# type sizes). +Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, ' end: + raise _DecodeError('Truncated string.') + value.append(_ConvertToUnicode(buffer[pos:new_pos])) + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. 
+ return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + if clear_if_default and not size: + field_dict.pop(key, None) + else: + field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos]) + return new_pos + return DecodeField + + +def BytesDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + """Returns a decoder for a bytes field.""" + + local_DecodeVarint = _DecodeVarint + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + value.append(buffer[pos:new_pos].tobytes()) + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + if clear_if_default and not size: + field_dict.pop(key, None) + else: + field_dict[key] = buffer[pos:new_pos].tobytes() + return new_pos + return DecodeField + + +def GroupDecoder(field_number, is_repeated, is_packed, key, new_default): + """Returns a decoder for a group field.""" + + end_tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_END_GROUP) + end_tag_len = len(end_tag_bytes) + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_START_GROUP) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read sub-message. + pos = value.add()._InternalParse(buffer, pos, end) + # Read end tag. + new_pos = pos+end_tag_len + if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: + raise _DecodeError('Missing group end tag.') + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read sub-message. + pos = value._InternalParse(buffer, pos, end) + # Read end tag. 
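The string and bytes decoders above share one framing: a varint size followed by that many raw bytes. Illustrative only, in plain Python rather than this module's API:

```python
payload = b'\x05hello'     # size varint (5) followed by the raw bytes
size = payload[0]          # sizes below 128 fit in a single varint byte
assert payload[1:1 + size].decode('utf-8') == 'hello'
```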
+ new_pos = pos+end_tag_len + if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: + raise _DecodeError('Missing group end tag.') + return new_pos + return DecodeField + + +def MessageDecoder(field_number, is_repeated, is_packed, key, new_default): + """Returns a decoder for a message field.""" + + local_DecodeVarint = _DecodeVarint + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + if value.add()._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + if value._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it encountered + # an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + return new_pos + return DecodeField + + +# -------------------------------------------------------------------- + +MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP) + +def MessageSetItemDecoder(descriptor): + """Returns a decoder for a MessageSet item. + + The parameter is the message Descriptor. + + The message set message looks like this: + message MessageSet { + repeated group Item = 1 { + required int32 type_id = 2; + required string message = 3; + } + } + """ + + type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT) + message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED) + item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP) + + local_ReadTag = ReadTag + local_DecodeVarint = _DecodeVarint + local_SkipField = SkipField + + def DecodeItem(buffer, pos, end, message, field_dict): + """Decode serialized message set to its value and new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + message_set_item_start = pos + type_id = -1 + message_start = -1 + message_end = -1 + + # Technically, type_id and message can appear in any order, so we need + # a little loop here. 
+ while 1: + (tag_bytes, pos) = local_ReadTag(buffer, pos) + if tag_bytes == type_id_tag_bytes: + (type_id, pos) = local_DecodeVarint(buffer, pos) + elif tag_bytes == message_tag_bytes: + (size, message_start) = local_DecodeVarint(buffer, pos) + pos = message_end = message_start + size + elif tag_bytes == item_end_tag_bytes: + break + else: + pos = SkipField(buffer, pos, end, tag_bytes) + if pos == -1: + raise _DecodeError('Missing group end tag.') + + if pos > end: + raise _DecodeError('Truncated message.') + + if type_id == -1: + raise _DecodeError('MessageSet item missing type_id.') + if message_start == -1: + raise _DecodeError('MessageSet item missing message.') + + extension = message.Extensions._FindExtensionByNumber(type_id) + # pylint: disable=protected-access + if extension is not None: + value = field_dict.get(extension) + if value is None: + message_type = extension.message_type + if not hasattr(message_type, '_concrete_class'): + # pylint: disable=protected-access + message._FACTORY.GetPrototype(message_type) + value = field_dict.setdefault( + extension, message_type._concrete_class()) + if value._InternalParse(buffer, message_start,message_end) != message_end: + # The only reason _InternalParse would return early is if it encountered + # an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + else: + if not message._unknown_fields: + message._unknown_fields = [] + message._unknown_fields.append( + (MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + type_id, + wire_format.WIRETYPE_LENGTH_DELIMITED, + buffer[message_start:message_end].tobytes()) + # pylint: enable=protected-access + + return pos + + return DecodeItem + +# -------------------------------------------------------------------- + +def MapDecoder(field_descriptor, new_default, is_message_map): + """Returns a decoder for a map field.""" + + key = field_descriptor + tag_bytes = encoder.TagBytes(field_descriptor.number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + local_DecodeVarint = _DecodeVarint + # Can't read _concrete_class yet; might not be initialized. + message_type = field_descriptor.message_type + + def DecodeMap(buffer, pos, end, message, field_dict): + submsg = message_type._concrete_class() + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + submsg.Clear() + if submsg._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + + if is_message_map: + value[submsg.key].CopyFrom(submsg.value) + else: + value[submsg.key] = submsg.value + + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + + return DecodeMap + +# -------------------------------------------------------------------- +# Optimization is not as heavy here because calls to SkipField() are rare, +# except for handling end-group tags. + +def _SkipVarint(buffer, pos, end): + """Skip a varint value. 
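`MapDecoder` above parses every map item as a miniature entry message (key = field 1, value = field 2) carried in a length-delimited field. That framing is visible when serializing a well-known map field: `struct_pb2.Struct.fields` is declared as field 1, so each entry starts with tag byte 0x0A.

```python
from google.protobuf import struct_pb2

s = struct_pb2.Struct()
s.fields['x'].number_value = 1.0
wire = s.SerializeToString()
# Field 1, wire type 2 (length-delimited): tag byte (1 << 3) | 2 == 0x0A.
assert wire[0] == 0x0A
```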
Returns the new position.""" + # Previously ord(buffer[pos]) raised IndexError when pos is out of range. + # With this code, ord(b'') raises TypeError. Both are handled in + # python_message.py to generate a 'Truncated message' error. + while ord(buffer[pos:pos+1].tobytes()) & 0x80: + pos += 1 + pos += 1 + if pos > end: + raise _DecodeError('Truncated message.') + return pos + +def _SkipFixed64(buffer, pos, end): + """Skip a fixed64 value. Returns the new position.""" + + pos += 8 + if pos > end: + raise _DecodeError('Truncated message.') + return pos + + +def _DecodeFixed64(buffer, pos): + """Decode a fixed64.""" + new_pos = pos + 8 + return (struct.unpack(' end: + raise _DecodeError('Truncated message.') + return pos + + +def _SkipGroup(buffer, pos, end): + """Skip sub-group. Returns the new position.""" + + while 1: + (tag_bytes, pos) = ReadTag(buffer, pos) + new_pos = SkipField(buffer, pos, end, tag_bytes) + if new_pos == -1: + return pos + pos = new_pos + + +def _DecodeUnknownFieldSet(buffer, pos, end_pos=None): + """Decode UnknownFieldSet. Returns the UnknownFieldSet and new position.""" + + unknown_field_set = containers.UnknownFieldSet() + while end_pos is None or pos < end_pos: + (tag_bytes, pos) = ReadTag(buffer, pos) + (tag, _) = _DecodeVarint(tag_bytes, 0) + field_number, wire_type = wire_format.UnpackTag(tag) + if wire_type == wire_format.WIRETYPE_END_GROUP: + break + (data, pos) = _DecodeUnknownField(buffer, pos, wire_type) + # pylint: disable=protected-access + unknown_field_set._add(field_number, wire_type, data) + + return (unknown_field_set, pos) + + +def _DecodeUnknownField(buffer, pos, wire_type): + """Decode a unknown field. Returns the UnknownField and new position.""" + + if wire_type == wire_format.WIRETYPE_VARINT: + (data, pos) = _DecodeVarint(buffer, pos) + elif wire_type == wire_format.WIRETYPE_FIXED64: + (data, pos) = _DecodeFixed64(buffer, pos) + elif wire_type == wire_format.WIRETYPE_FIXED32: + (data, pos) = _DecodeFixed32(buffer, pos) + elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED: + (size, pos) = _DecodeVarint(buffer, pos) + data = buffer[pos:pos+size].tobytes() + pos += size + elif wire_type == wire_format.WIRETYPE_START_GROUP: + (data, pos) = _DecodeUnknownFieldSet(buffer, pos) + elif wire_type == wire_format.WIRETYPE_END_GROUP: + return (0, -1) + else: + raise _DecodeError('Wrong wire type in tag.') + + return (data, pos) + + +def _EndGroup(buffer, pos, end): + """Skipping an END_GROUP tag returns -1 to tell the parent loop to break.""" + + return -1 + + +def _SkipFixed32(buffer, pos, end): + """Skip a fixed32 value. 
Returns the new position.""" + + pos += 4 + if pos > end: + raise _DecodeError('Truncated message.') + return pos + + +def _DecodeFixed32(buffer, pos): + """Decode a fixed32.""" + + new_pos = pos + 4 + return (struct.unpack('B').pack + + def EncodeVarint(write, value, unused_deterministic=None): + bits = value & 0x7f + value >>= 7 + while value: + write(local_int2byte(0x80|bits)) + bits = value & 0x7f + value >>= 7 + return write(local_int2byte(bits)) + + return EncodeVarint + + +def _SignedVarintEncoder(): + """Return an encoder for a basic signed varint value (does not include + tag).""" + + local_int2byte = struct.Struct('>B').pack + + def EncodeSignedVarint(write, value, unused_deterministic=None): + if value < 0: + value += (1 << 64) + bits = value & 0x7f + value >>= 7 + while value: + write(local_int2byte(0x80|bits)) + bits = value & 0x7f + value >>= 7 + return write(local_int2byte(bits)) + + return EncodeSignedVarint + + +_EncodeVarint = _VarintEncoder() +_EncodeSignedVarint = _SignedVarintEncoder() + + +def _VarintBytes(value): + """Encode the given integer as a varint and return the bytes. This is only + called at startup time so it doesn't need to be fast.""" + + pieces = [] + _EncodeVarint(pieces.append, value, True) + return b"".join(pieces) + + +def TagBytes(field_number, wire_type): + """Encode the given tag and return the bytes. Only called at startup.""" + + return bytes(_VarintBytes(wire_format.PackTag(field_number, wire_type))) + +# -------------------------------------------------------------------- +# As with sizers (see above), we have a number of common encoder +# implementations. + + +def _SimpleEncoder(wire_type, encode_value, compute_value_size): + """Return a constructor for an encoder for fields of a particular type. + + Args: + wire_type: The field's wire type, for encoding tags. + encode_value: A function which encodes an individual value, e.g. + _EncodeVarint(). + compute_value_size: A function which computes the size of an individual + value, e.g. _VarintSize(). + """ + + def SpecificEncoder(field_number, is_repeated, is_packed): + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + size = 0 + for element in value: + size += compute_value_size(element) + local_EncodeVarint(write, size, deterministic) + for element in value: + encode_value(write, element, deterministic) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag_bytes) + encode_value(write, element, deterministic) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, deterministic): + write(tag_bytes) + return encode_value(write, value, deterministic) + return EncodeField + + return SpecificEncoder + + +def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value): + """Like SimpleEncoder but additionally invokes modify_value on every value + before passing it to encode_value. 
Usually modify_value is ZigZagEncode.""" + + def SpecificEncoder(field_number, is_repeated, is_packed): + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + size = 0 + for element in value: + size += compute_value_size(modify_value(element)) + local_EncodeVarint(write, size, deterministic) + for element in value: + encode_value(write, modify_value(element), deterministic) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag_bytes) + encode_value(write, modify_value(element), deterministic) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, deterministic): + write(tag_bytes) + return encode_value(write, modify_value(value), deterministic) + return EncodeField + + return SpecificEncoder + + +def _StructPackEncoder(wire_type, format): + """Return a constructor for an encoder for a fixed-width field. + + Args: + wire_type: The field's wire type, for encoding tags. + format: The format string to pass to struct.pack(). + """ + + value_size = struct.calcsize(format) + + def SpecificEncoder(field_number, is_repeated, is_packed): + local_struct_pack = struct.pack + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + local_EncodeVarint(write, len(value) * value_size, deterministic) + for element in value: + write(local_struct_pack(format, element)) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, unused_deterministic=None): + for element in value: + write(tag_bytes) + write(local_struct_pack(format, element)) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, unused_deterministic=None): + write(tag_bytes) + return write(local_struct_pack(format, value)) + return EncodeField + + return SpecificEncoder + + +def _FloatingPointEncoder(wire_type, format): + """Return a constructor for an encoder for float fields. + + This is like StructPackEncoder, but catches errors that may be due to + passing non-finite floating-point values to struct.pack, and makes a + second attempt to encode those values. + + Args: + wire_type: The field's wire type, for encoding tags. + format: The format string to pass to struct.pack(). + """ + + value_size = struct.calcsize(format) + if value_size == 4: + def EncodeNonFiniteOrRaise(write, value): + # Remember that the serialized form uses little-endian byte order. 
+ if value == _POS_INF: + write(b'\x00\x00\x80\x7F') + elif value == _NEG_INF: + write(b'\x00\x00\x80\xFF') + elif value != value: # NaN + write(b'\x00\x00\xC0\x7F') + else: + raise + elif value_size == 8: + def EncodeNonFiniteOrRaise(write, value): + if value == _POS_INF: + write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F') + elif value == _NEG_INF: + write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF') + elif value != value: # NaN + write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F') + else: + raise + else: + raise ValueError('Can\'t encode floating-point values that are ' + '%d bytes long (only 4 or 8)' % value_size) + + def SpecificEncoder(field_number, is_repeated, is_packed): + local_struct_pack = struct.pack + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + local_EncodeVarint(write, len(value) * value_size, deterministic) + for element in value: + # This try/except block is going to be faster than any code that + # we could write to check whether element is finite. + try: + write(local_struct_pack(format, element)) + except SystemError: + EncodeNonFiniteOrRaise(write, element) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, unused_deterministic=None): + for element in value: + write(tag_bytes) + try: + write(local_struct_pack(format, element)) + except SystemError: + EncodeNonFiniteOrRaise(write, element) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, unused_deterministic=None): + write(tag_bytes) + try: + write(local_struct_pack(format, value)) + except SystemError: + EncodeNonFiniteOrRaise(write, value) + return EncodeField + + return SpecificEncoder + + +# ==================================================================== +# Here we declare an encoder constructor for each field type. These work +# very similarly to sizer constructors, described earlier. + + +Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder( + wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize) + +UInt32Encoder = UInt64Encoder = _SimpleEncoder( + wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize) + +SInt32Encoder = SInt64Encoder = _ModifiedEncoder( + wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize, + wire_format.ZigZagEncode) + +# Note that Python conveniently guarantees that when using the '<' prefix on +# formats, they will also have the same size across all platforms (as opposed +# to without the prefix, where their sizes depend on the C compiler's basic +# type sizes). +Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, ' str + ValueType = int + + def __init__(self, enum_type): + """Inits EnumTypeWrapper with an EnumDescriptor.""" + self._enum_type = enum_type + self.DESCRIPTOR = enum_type # pylint: disable=invalid-name + + def Name(self, number): # pylint: disable=invalid-name + """Returns a string containing the name of an enum value.""" + try: + return self._enum_type.values_by_number[number].name + except KeyError: + pass # fall out to break exception chaining + + if not isinstance(number, int): + raise TypeError( + 'Enum value for {} must be an int, but got {} {!r}.'.format( + self._enum_type.name, type(number), number)) + else: + # repr here to handle the odd case when you pass in a boolean. 
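+        # (e.g. when the enum defines no value 1, Name(True) falls through
+        #  to here; {!r} then reports the argument as True rather than 1,
+        #  which keeps the error message readable.)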
+ raise ValueError('Enum {} has no name defined for value {!r}'.format( + self._enum_type.name, number)) + + def Value(self, name): # pylint: disable=invalid-name + """Returns the value corresponding to the given enum name.""" + try: + return self._enum_type.values_by_name[name].number + except KeyError: + pass # fall out to break exception chaining + raise ValueError('Enum {} has no value defined for name {!r}'.format( + self._enum_type.name, name)) + + def keys(self): + """Return a list of the string names in the enum. + + Returns: + A list of strs, in the order they were defined in the .proto file. + """ + + return [value_descriptor.name + for value_descriptor in self._enum_type.values] + + def values(self): + """Return a list of the integer values in the enum. + + Returns: + A list of ints, in the order they were defined in the .proto file. + """ + + return [value_descriptor.number + for value_descriptor in self._enum_type.values] + + def items(self): + """Return a list of the (name, value) pairs of the enum. + + Returns: + A list of (str, int) pairs, in the order they were defined + in the .proto file. + """ + return [(value_descriptor.name, value_descriptor.number) + for value_descriptor in self._enum_type.values] + + def __getattr__(self, name): + """Returns the value corresponding to the given enum name.""" + try: + return super( + EnumTypeWrapper, + self).__getattribute__('_enum_type').values_by_name[name].number + except KeyError: + pass # fall out to break exception chaining + raise AttributeError('Enum {} has no value defined for name {!r}'.format( + self._enum_type.name, name)) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/extension_dict.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/extension_dict.py new file mode 100644 index 0000000000..b346cf283e --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/extension_dict.py @@ -0,0 +1,213 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains _ExtensionDict class to represent extensions. +""" + +from google.protobuf.internal import type_checkers +from google.protobuf.descriptor import FieldDescriptor + + +def _VerifyExtensionHandle(message, extension_handle): + """Verify that the given extension handle is valid.""" + + if not isinstance(extension_handle, FieldDescriptor): + raise KeyError('HasExtension() expects an extension handle, got: %s' % + extension_handle) + + if not extension_handle.is_extension: + raise KeyError('"%s" is not an extension.' % extension_handle.full_name) + + if not extension_handle.containing_type: + raise KeyError('"%s" is missing a containing_type.' + % extension_handle.full_name) + + if extension_handle.containing_type is not message.DESCRIPTOR: + raise KeyError('Extension "%s" extends message type "%s", but this ' + 'message is of type "%s".' % + (extension_handle.full_name, + extension_handle.containing_type.full_name, + message.DESCRIPTOR.full_name)) + + +# TODO(robinson): Unify error handling of "unknown extension" crap. +# TODO(robinson): Support iteritems()-style iteration over all +# extensions with the "has" bits turned on? +class _ExtensionDict(object): + + """Dict-like container for Extension fields on proto instances. + + Note that in all cases we expect extension handles to be + FieldDescriptors. + """ + + def __init__(self, extended_message): + """ + Args: + extended_message: Message instance for which we are the Extensions dict. + """ + self._extended_message = extended_message + + def __getitem__(self, extension_handle): + """Returns the current value of the given extension handle.""" + + _VerifyExtensionHandle(self._extended_message, extension_handle) + + result = self._extended_message._fields.get(extension_handle) + if result is not None: + return result + + if extension_handle.label == FieldDescriptor.LABEL_REPEATED: + result = extension_handle._default_constructor(self._extended_message) + elif extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + message_type = extension_handle.message_type + if not hasattr(message_type, '_concrete_class'): + # pylint: disable=protected-access + self._extended_message._FACTORY.GetPrototype(message_type) + assert getattr(extension_handle.message_type, '_concrete_class', None), ( + 'Uninitialized concrete class found for field %r (message type %r)' + % (extension_handle.full_name, + extension_handle.message_type.full_name)) + result = extension_handle.message_type._concrete_class() + try: + result._SetListener(self._extended_message._listener_for_children) + except ReferenceError: + pass + else: + # Singular scalar -- just return the default without inserting into the + # dict. + return extension_handle.default_value + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. 
This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + result = self._extended_message._fields.setdefault( + extension_handle, result) + + return result + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + + my_fields = self._extended_message.ListFields() + other_fields = other._extended_message.ListFields() + + # Get rid of non-extension fields. + my_fields = [field for field in my_fields if field.is_extension] + other_fields = [field for field in other_fields if field.is_extension] + + return my_fields == other_fields + + def __ne__(self, other): + return not self == other + + def __len__(self): + fields = self._extended_message.ListFields() + # Get rid of non-extension fields. + extension_fields = [field for field in fields if field[0].is_extension] + return len(extension_fields) + + def __hash__(self): + raise TypeError('unhashable object') + + # Note that this is only meaningful for non-repeated, scalar extension + # fields. Note also that we may have to call _Modified() when we do + # successfully set a field this way, to set any necessary "has" bits in the + # ancestors of the extended message. + def __setitem__(self, extension_handle, value): + """If extension_handle specifies a non-repeated, scalar extension + field, sets the value of that field. + """ + + _VerifyExtensionHandle(self._extended_message, extension_handle) + + if (extension_handle.label == FieldDescriptor.LABEL_REPEATED or + extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE): + raise TypeError( + 'Cannot assign to extension "%s" because it is a repeated or ' + 'composite type.' % extension_handle.full_name) + + # It's slightly wasteful to lookup the type checker each time, + # but we expect this to be a vanishingly uncommon case anyway. + type_checker = type_checkers.GetTypeChecker(extension_handle) + # pylint: disable=protected-access + self._extended_message._fields[extension_handle] = ( + type_checker.CheckValue(value)) + self._extended_message._Modified() + + def __delitem__(self, extension_handle): + self._extended_message.ClearExtension(extension_handle) + + def _FindExtensionByName(self, name): + """Tries to find a known extension with the specified name. + + Args: + name: Extension full name. + + Returns: + Extension field descriptor. + """ + return self._extended_message._extensions_by_name.get(name, None) + + def _FindExtensionByNumber(self, number): + """Tries to find a known extension with the field number. + + Args: + number: Extension field number. + + Returns: + Extension field descriptor. 
+ """ + return self._extended_message._extensions_by_number.get(number, None) + + def __iter__(self): + # Return a generator over the populated extension fields + return (f[0] for f in self._extended_message.ListFields() + if f[0].is_extension) + + def __contains__(self, extension_handle): + _VerifyExtensionHandle(self._extended_message, extension_handle) + + if extension_handle not in self._extended_message._fields: + return False + + if extension_handle.label == FieldDescriptor.LABEL_REPEATED: + return bool(self._extended_message._fields.get(extension_handle)) + + if extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + value = self._extended_message._fields.get(extension_handle) + # pylint: disable=protected-access + return value is not None and value._is_present_in_parent + + return True diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/message_listener.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/message_listener.py new file mode 100644 index 0000000000..0fc255a774 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/message_listener.py @@ -0,0 +1,78 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Defines a listener interface for observing certain +state transitions on Message objects. + +Also defines a null implementation of this interface. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + + +class MessageListener(object): + + """Listens for modifications made to a message. Meant to be registered via + Message._SetListener(). + + Attributes: + dirty: If True, then calling Modified() would be a no-op. This can be + used to avoid these calls entirely in the common case. + """ + + def Modified(self): + """Called every time the message is modified in such a way that the parent + message may need to be updated. 
This currently means either: + (a) The message was modified for the first time, so the parent message + should henceforth mark the message as present. + (b) The message's cached byte size became dirty -- i.e. the message was + modified for the first time after a previous call to ByteSize(). + Therefore the parent should also mark its byte size as dirty. + Note that (a) implies (b), since new objects start out with a client cached + size (zero). However, we document (a) explicitly because it is important. + + Modified() will *only* be called in response to one of these two events -- + not every time the sub-message is modified. + + Note that if the listener's |dirty| attribute is true, then calling + Modified at the moment would be a no-op, so it can be skipped. Performance- + sensitive callers should check this attribute directly before calling since + it will be true most of the time. + """ + + raise NotImplementedError + + +class NullMessageListener(object): + + """No-op MessageListener implementation.""" + + def Modified(self): + pass diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/message_set_extensions_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/message_set_extensions_pb2.py new file mode 100644 index 0000000000..63651a3f19 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/message_set_extensions_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/internal/message_set_extensions.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n5google/protobuf/internal/message_set_extensions.proto\x12\x18google.protobuf.internal\"\x1e\n\x0eTestMessageSet*\x08\x08\x04\x10\xff\xff\xff\xff\x07:\x02\x08\x01\"\xa5\x01\n\x18TestMessageSetExtension1\x12\t\n\x01i\x18\x0f \x01(\x05\x32~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xab\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension1\"\xa7\x01\n\x18TestMessageSetExtension2\x12\x0b\n\x03str\x18\x19 \x01(\t2~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xca\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension2\"(\n\x18TestMessageSetExtension3\x12\x0c\n\x04text\x18# \x01(\t:\x7f\n\x16message_set_extension3\x12(.google.protobuf.internal.TestMessageSet\x18\xdf\xff\xf6. 
\x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.message_set_extensions_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + TestMessageSet.RegisterExtension(message_set_extension3) + TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION1.extensions_by_name['message_set_extension']) + TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION2.extensions_by_name['message_set_extension']) + + DESCRIPTOR._options = None + _TESTMESSAGESET._options = None + _TESTMESSAGESET._serialized_options = b'\010\001' + _TESTMESSAGESET._serialized_start=83 + _TESTMESSAGESET._serialized_end=113 + _TESTMESSAGESETEXTENSION1._serialized_start=116 + _TESTMESSAGESETEXTENSION1._serialized_end=281 + _TESTMESSAGESETEXTENSION2._serialized_start=284 + _TESTMESSAGESETEXTENSION2._serialized_end=451 + _TESTMESSAGESETEXTENSION3._serialized_start=453 + _TESTMESSAGESETEXTENSION3._serialized_end=493 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/missing_enum_values_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/missing_enum_values_pb2.py new file mode 100644 index 0000000000..5497083197 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/missing_enum_values_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/internal/missing_enum_values.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n2google/protobuf/internal/missing_enum_values.proto\x12\x1fgoogle.protobuf.python.internal\"\xc1\x02\n\x0eTestEnumValues\x12X\n\x14optional_nested_enum\x18\x01 \x01(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnum\x12X\n\x14repeated_nested_enum\x18\x02 \x03(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnum\x12Z\n\x12packed_nested_enum\x18\x03 \x03(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnumB\x02\x10\x01\"\x1f\n\nNestedEnum\x12\x08\n\x04ZERO\x10\x00\x12\x07\n\x03ONE\x10\x01\"\xd3\x02\n\x15TestMissingEnumValues\x12_\n\x14optional_nested_enum\x18\x01 \x01(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnum\x12_\n\x14repeated_nested_enum\x18\x02 \x03(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnum\x12\x61\n\x12packed_nested_enum\x18\x03 \x03(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnumB\x02\x10\x01\"\x15\n\nNestedEnum\x12\x07\n\x03TWO\x10\x02\"\x1b\n\nJustString\x12\r\n\x05\x64ummy\x18\x01 \x02(\t') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.missing_enum_values_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _TESTENUMVALUES.fields_by_name['packed_nested_enum']._options = None + _TESTENUMVALUES.fields_by_name['packed_nested_enum']._serialized_options = b'\020\001' + 
_TESTMISSINGENUMVALUES.fields_by_name['packed_nested_enum']._options = None + _TESTMISSINGENUMVALUES.fields_by_name['packed_nested_enum']._serialized_options = b'\020\001' + _TESTENUMVALUES._serialized_start=88 + _TESTENUMVALUES._serialized_end=409 + _TESTENUMVALUES_NESTEDENUM._serialized_start=378 + _TESTENUMVALUES_NESTEDENUM._serialized_end=409 + _TESTMISSINGENUMVALUES._serialized_start=412 + _TESTMISSINGENUMVALUES._serialized_end=751 + _TESTMISSINGENUMVALUES_NESTEDENUM._serialized_start=730 + _TESTMISSINGENUMVALUES_NESTEDENUM._serialized_end=751 + _JUSTSTRING._serialized_start=753 + _JUSTSTRING._serialized_end=780 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py new file mode 100644 index 0000000000..0953706bac --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/internal/more_extensions_dynamic.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf.internal import more_extensions_pb2 as google_dot_protobuf_dot_internal_dot_more__extensions__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6google/protobuf/internal/more_extensions_dynamic.proto\x12\x18google.protobuf.internal\x1a.google/protobuf/internal/more_extensions.proto\"\x1f\n\x12\x44ynamicMessageType\x12\t\n\x01\x61\x18\x01 \x01(\x05:J\n\x17\x64ynamic_int32_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x64 \x01(\x05:z\n\x19\x64ynamic_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x65 \x01(\x0b\x32,.google.protobuf.internal.DynamicMessageType:\x83\x01\n\"repeated_dynamic_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x66 \x03(\x0b\x32,.google.protobuf.internal.DynamicMessageType') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_extensions_dynamic_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(dynamic_int32_extension) + google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(dynamic_message_extension) + google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(repeated_dynamic_message_extension) + + DESCRIPTOR._options = None + _DYNAMICMESSAGETYPE._serialized_start=132 + _DYNAMICMESSAGETYPE._serialized_end=163 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/more_extensions_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/more_extensions_pb2.py new file mode 100644 index 0000000000..1cfa1b7c8b --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/more_extensions_pb2.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/internal/more_extensions.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.google/protobuf/internal/more_extensions.proto\x12\x18google.protobuf.internal\"\x99\x01\n\x0fTopLevelMessage\x12\x41\n\nsubmessage\x18\x01 \x01(\x0b\x32).google.protobuf.internal.ExtendedMessageB\x02(\x01\x12\x43\n\x0enested_message\x18\x02 \x01(\x0b\x32\'.google.protobuf.internal.NestedMessageB\x02(\x01\"R\n\rNestedMessage\x12\x41\n\nsubmessage\x18\x01 \x01(\x0b\x32).google.protobuf.internal.ExtendedMessageB\x02(\x01\"K\n\x0f\x45xtendedMessage\x12\x17\n\x0eoptional_int32\x18\xe9\x07 \x01(\x05\x12\x18\n\x0frepeated_string\x18\xea\x07 \x03(\t*\x05\x08\x01\x10\xe8\x07\"-\n\x0e\x46oreignMessage\x12\x1b\n\x13\x66oreign_message_int\x18\x01 \x01(\x05:I\n\x16optional_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x01 \x01(\x05:w\n\x1aoptional_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x02 \x01(\x0b\x32(.google.protobuf.internal.ForeignMessage:I\n\x16repeated_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x03 \x03(\x05:w\n\x1arepeated_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x04 \x03(\x0b\x32(.google.protobuf.internal.ForeignMessage') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_extensions_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + ExtendedMessage.RegisterExtension(optional_int_extension) + ExtendedMessage.RegisterExtension(optional_message_extension) + ExtendedMessage.RegisterExtension(repeated_int_extension) + ExtendedMessage.RegisterExtension(repeated_message_extension) + + DESCRIPTOR._options = None + _TOPLEVELMESSAGE.fields_by_name['submessage']._options = None + _TOPLEVELMESSAGE.fields_by_name['submessage']._serialized_options = b'(\001' + _TOPLEVELMESSAGE.fields_by_name['nested_message']._options = None + _TOPLEVELMESSAGE.fields_by_name['nested_message']._serialized_options = b'(\001' + _NESTEDMESSAGE.fields_by_name['submessage']._options = None + _NESTEDMESSAGE.fields_by_name['submessage']._serialized_options = b'(\001' + _TOPLEVELMESSAGE._serialized_start=77 + _TOPLEVELMESSAGE._serialized_end=230 + _NESTEDMESSAGE._serialized_start=232 + _NESTEDMESSAGE._serialized_end=314 + _EXTENDEDMESSAGE._serialized_start=316 + _EXTENDEDMESSAGE._serialized_end=391 + _FOREIGNMESSAGE._serialized_start=393 + _FOREIGNMESSAGE._serialized_end=438 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/more_messages_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/more_messages_pb2.py new file mode 100644 index 0000000000..d7f7115609 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/more_messages_pb2.py @@ -0,0 +1,556 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/internal/more_messages.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n,google/protobuf/internal/more_messages.proto\x12\x18google.protobuf.internal\"h\n\x10OutOfOrderFields\x12\x17\n\x0foptional_sint32\x18\x05 \x01(\x11\x12\x17\n\x0foptional_uint32\x18\x03 \x01(\r\x12\x16\n\x0eoptional_int32\x18\x01 \x01(\x05*\x04\x08\x04\x10\x05*\x04\x08\x02\x10\x03\"\xcd\x02\n\x05\x63lass\x12\x1b\n\tint_field\x18\x01 \x01(\x05R\x08json_int\x12\n\n\x02if\x18\x02 \x01(\x05\x12(\n\x02\x61s\x18\x03 \x01(\x0e\x32\x1c.google.protobuf.internal.is\x12\x30\n\nenum_field\x18\x04 \x01(\x0e\x32\x1c.google.protobuf.internal.is\x12>\n\x11nested_enum_field\x18\x05 \x01(\x0e\x32#.google.protobuf.internal.class.for\x12;\n\x0enested_message\x18\x06 \x01(\x0b\x32#.google.protobuf.internal.class.try\x1a\x1c\n\x03try\x12\r\n\x05\x66ield\x18\x01 \x01(\x05*\x06\x08\xe7\x07\x10\x90N\"\x1c\n\x03\x66or\x12\x0b\n\x07\x64\x65\x66\x61ult\x10\x00\x12\x08\n\x04True\x10\x01*\x06\x08\xe7\x07\x10\x90N\"?\n\x0b\x45xtendClass20\n\x06return\x12\x1f.google.protobuf.internal.class\x18\xea\x07 \x01(\x05\"~\n\x0fTestFullKeyword\x12:\n\x06\x66ield1\x18\x01 \x01(\x0b\x32*.google.protobuf.internal.OutOfOrderFields\x12/\n\x06\x66ield2\x18\x02 \x01(\x0b\x32\x1f.google.protobuf.internal.class\"\xa5\x0f\n\x11LotsNestedMessage\x1a\x04\n\x02\x42\x30\x1a\x04\n\x02\x42\x31\x1a\x04\n\x02\x42\x32\x1a\x04\n\x02\x42\x33\x1a\x04\n\x02\x42\x34\x1a\x04\n\x02\x42\x35\x1a\x04\n\x02\x42\x36\x1a\x04\n\x02\x42\x37\x1a\x04\n\x02\x42\x38\x1a\x04\n\x02\x42\x39\x1a\x05\n\x03\x42\x31\x30\x1a\x05\n\x03\x42\x31\x31\x1a\x05\n\x03\x42\x31\x32\x1a\x05\n\x03\x42\x31\x33\x1a\x05\n\x03\x42\x31\x34\x1a\x05\n\x03\x42\x31\x35\x1a\x05\n\x03\x42\x31\x36\x1a\x05\n\x03\x42\x31\x37\x1a\x05\n\x03\x42\x31\x38\x1a\x05\n\x03\x42\x31\x39\x1a\x05\n\x03\x42\x32\x30\x1a\x05\n\x03\x42\x32\x31\x1a\x05\n\x03\x42\x32\x32\x1a\x05\n\x03\x42\x32\x33\x1a\x05\n\x03\x42\x32\x34\x1a\x05\n\x03\x42\x32\x35\x1a\x05\n\x03\x42\x32\x36\x1a\x05\n\x03\x42\x32\x37\x1a\x05\n\x03\x42\x32\x38\x1a\x05\n\x03\x42\x32\x39\x1a\x05\n\x03\x42\x33\x30\x1a\x05\n\x03\x42\x33\x31\x1a\x05\n\x03\x42\x33\x32\x1a\x05\n\x03\x42\x33\x33\x1a\x05\n\x03\x42\x33\x34\x1a\x05\n\x03\x42\x33\x35\x1a\x05\n\x03\x42\x33\x36\x1a\x05\n\x03\x42\x33\x37\x1a\x05\n\x03\x42\x33\x38\x1a\x05\n\x03\x42\x33\x39\x1a\x05\n\x03\x42\x34\x30\x1a\x05\n\x03\x42\x34\x31\x1a\x05\n\x03\x42\x34\x32\x1a\x05\n\x03\x42\x34\x33\x1a\x05\n\x03\x42\x34\x34\x1a\x05\n\x03\x42\x34\x35\x1a\x05\n\x03\x42\x34\x36\x1a\x05\n\x03\x42\x34\x37\x1a\x05\n\x03\x42\x34\x38\x1a\x05\n\x03\x42\x34\x39\x1a\x05\n\x03\x42\x35\x30\x1a\x05\n\x03\x42\x35\x31\x1a\x05\n\x03\x42\x35\x32\x1a\x05\n\x03\x42\x35\x33\x1a\x05\n\x03\x42\x35\x34\x1a\x05\n\x03\x42\x35\x35\x1a\x05\n\x03\x42\x35\x36\x1a\x05\n\x03\x42\x35\x37\x1a\x05\n\x03\x42\x35\x38\x1a\x05\n\x03\x42\x35\x39\x1a\x05\n\x03\x42\x36\x30\x1a\x05\n\x03\x42\x36\x31\x1a\x05\n\x03\x42\x36\x32\x1a\x05\n\x03\x42\x36\x33\x1a\x05\n\x03\x42\x36\x34\x1a\x05\n\x03\x42\x36\x35\x1a\x05\n\x03\x42\x36\x36\x1a\x05\n\x03\x42\x36\x37\x1a\x05\n\x03\x42\x36\x38\x1a\x05\n\x03\x42\x36\x39\x1a\x05\n\x03\x42\x37\x30\x1a\x05\n\x03\x42\x37\x31\x1a\x05\n\x03\x42\x37\
x32\x1a\x05\n\x03\x42\x37\x33\x1a\x05\n\x03\x42\x37\x34\x1a\x05\n\x03\x42\x37\x35\x1a\x05\n\x03\x42\x37\x36\x1a\x05\n\x03\x42\x37\x37\x1a\x05\n\x03\x42\x37\x38\x1a\x05\n\x03\x42\x37\x39\x1a\x05\n\x03\x42\x38\x30\x1a\x05\n\x03\x42\x38\x31\x1a\x05\n\x03\x42\x38\x32\x1a\x05\n\x03\x42\x38\x33\x1a\x05\n\x03\x42\x38\x34\x1a\x05\n\x03\x42\x38\x35\x1a\x05\n\x03\x42\x38\x36\x1a\x05\n\x03\x42\x38\x37\x1a\x05\n\x03\x42\x38\x38\x1a\x05\n\x03\x42\x38\x39\x1a\x05\n\x03\x42\x39\x30\x1a\x05\n\x03\x42\x39\x31\x1a\x05\n\x03\x42\x39\x32\x1a\x05\n\x03\x42\x39\x33\x1a\x05\n\x03\x42\x39\x34\x1a\x05\n\x03\x42\x39\x35\x1a\x05\n\x03\x42\x39\x36\x1a\x05\n\x03\x42\x39\x37\x1a\x05\n\x03\x42\x39\x38\x1a\x05\n\x03\x42\x39\x39\x1a\x06\n\x04\x42\x31\x30\x30\x1a\x06\n\x04\x42\x31\x30\x31\x1a\x06\n\x04\x42\x31\x30\x32\x1a\x06\n\x04\x42\x31\x30\x33\x1a\x06\n\x04\x42\x31\x30\x34\x1a\x06\n\x04\x42\x31\x30\x35\x1a\x06\n\x04\x42\x31\x30\x36\x1a\x06\n\x04\x42\x31\x30\x37\x1a\x06\n\x04\x42\x31\x30\x38\x1a\x06\n\x04\x42\x31\x30\x39\x1a\x06\n\x04\x42\x31\x31\x30\x1a\x06\n\x04\x42\x31\x31\x31\x1a\x06\n\x04\x42\x31\x31\x32\x1a\x06\n\x04\x42\x31\x31\x33\x1a\x06\n\x04\x42\x31\x31\x34\x1a\x06\n\x04\x42\x31\x31\x35\x1a\x06\n\x04\x42\x31\x31\x36\x1a\x06\n\x04\x42\x31\x31\x37\x1a\x06\n\x04\x42\x31\x31\x38\x1a\x06\n\x04\x42\x31\x31\x39\x1a\x06\n\x04\x42\x31\x32\x30\x1a\x06\n\x04\x42\x31\x32\x31\x1a\x06\n\x04\x42\x31\x32\x32\x1a\x06\n\x04\x42\x31\x32\x33\x1a\x06\n\x04\x42\x31\x32\x34\x1a\x06\n\x04\x42\x31\x32\x35\x1a\x06\n\x04\x42\x31\x32\x36\x1a\x06\n\x04\x42\x31\x32\x37\x1a\x06\n\x04\x42\x31\x32\x38\x1a\x06\n\x04\x42\x31\x32\x39\x1a\x06\n\x04\x42\x31\x33\x30\x1a\x06\n\x04\x42\x31\x33\x31\x1a\x06\n\x04\x42\x31\x33\x32\x1a\x06\n\x04\x42\x31\x33\x33\x1a\x06\n\x04\x42\x31\x33\x34\x1a\x06\n\x04\x42\x31\x33\x35\x1a\x06\n\x04\x42\x31\x33\x36\x1a\x06\n\x04\x42\x31\x33\x37\x1a\x06\n\x04\x42\x31\x33\x38\x1a\x06\n\x04\x42\x31\x33\x39\x1a\x06\n\x04\x42\x31\x34\x30\x1a\x06\n\x04\x42\x31\x34\x31\x1a\x06\n\x04\x42\x31\x34\x32\x1a\x06\n\x04\x42\x31\x34\x33\x1a\x06\n\x04\x42\x31\x34\x34\x1a\x06\n\x04\x42\x31\x34\x35\x1a\x06\n\x04\x42\x31\x34\x36\x1a\x06\n\x04\x42\x31\x34\x37\x1a\x06\n\x04\x42\x31\x34\x38\x1a\x06\n\x04\x42\x31\x34\x39\x1a\x06\n\x04\x42\x31\x35\x30\x1a\x06\n\x04\x42\x31\x35\x31\x1a\x06\n\x04\x42\x31\x35\x32\x1a\x06\n\x04\x42\x31\x35\x33\x1a\x06\n\x04\x42\x31\x35\x34\x1a\x06\n\x04\x42\x31\x35\x35\x1a\x06\n\x04\x42\x31\x35\x36\x1a\x06\n\x04\x42\x31\x35\x37\x1a\x06\n\x04\x42\x31\x35\x38\x1a\x06\n\x04\x42\x31\x35\x39\x1a\x06\n\x04\x42\x31\x36\x30\x1a\x06\n\x04\x42\x31\x36\x31\x1a\x06\n\x04\x42\x31\x36\x32\x1a\x06\n\x04\x42\x31\x36\x33\x1a\x06\n\x04\x42\x31\x36\x34\x1a\x06\n\x04\x42\x31\x36\x35\x1a\x06\n\x04\x42\x31\x36\x36\x1a\x06\n\x04\x42\x31\x36\x37\x1a\x06\n\x04\x42\x31\x36\x38\x1a\x06\n\x04\x42\x31\x36\x39\x1a\x06\n\x04\x42\x31\x37\x30\x1a\x06\n\x04\x42\x31\x37\x31\x1a\x06\n\x04\x42\x31\x37\x32\x1a\x06\n\x04\x42\x31\x37\x33\x1a\x06\n\x04\x42\x31\x37\x34\x1a\x06\n\x04\x42\x31\x37\x35\x1a\x06\n\x04\x42\x31\x37\x36\x1a\x06\n\x04\x42\x31\x37\x37\x1a\x06\n\x04\x42\x31\x37\x38\x1a\x06\n\x04\x42\x31\x37\x39\x1a\x06\n\x04\x42\x31\x38\x30\x1a\x06\n\x04\x42\x31\x38\x31\x1a\x06\n\x04\x42\x31\x38\x32\x1a\x06\n\x04\x42\x31\x38\x33\x1a\x06\n\x04\x42\x31\x38\x34\x1a\x06\n\x04\x42\x31\x38\x35\x1a\x06\n\x04\x42\x31\x38\x36\x1a\x06\n\x04\x42\x31\x38\x37\x1a\x06\n\x04\x42\x31\x38\x38\x1a\x06\n\x04\x42\x31\x38\x39\x1a\x06\n\x04\x42\x31\x39\x30\x1a\x06\n\x04\x42\x31\x39\x31\x1a\x06\n\x04\x42\x31\x39\x32\x1a\x06\n\x04\x42\x31\x39\x33\x1a\x06\n\x04\x42\x31\x39\x34
\x1a\x06\n\x04\x42\x31\x39\x35\x1a\x06\n\x04\x42\x31\x39\x36\x1a\x06\n\x04\x42\x31\x39\x37\x1a\x06\n\x04\x42\x31\x39\x38\x1a\x06\n\x04\x42\x31\x39\x39\x1a\x06\n\x04\x42\x32\x30\x30\x1a\x06\n\x04\x42\x32\x30\x31\x1a\x06\n\x04\x42\x32\x30\x32\x1a\x06\n\x04\x42\x32\x30\x33\x1a\x06\n\x04\x42\x32\x30\x34\x1a\x06\n\x04\x42\x32\x30\x35\x1a\x06\n\x04\x42\x32\x30\x36\x1a\x06\n\x04\x42\x32\x30\x37\x1a\x06\n\x04\x42\x32\x30\x38\x1a\x06\n\x04\x42\x32\x30\x39\x1a\x06\n\x04\x42\x32\x31\x30\x1a\x06\n\x04\x42\x32\x31\x31\x1a\x06\n\x04\x42\x32\x31\x32\x1a\x06\n\x04\x42\x32\x31\x33\x1a\x06\n\x04\x42\x32\x31\x34\x1a\x06\n\x04\x42\x32\x31\x35\x1a\x06\n\x04\x42\x32\x31\x36\x1a\x06\n\x04\x42\x32\x31\x37\x1a\x06\n\x04\x42\x32\x31\x38\x1a\x06\n\x04\x42\x32\x31\x39\x1a\x06\n\x04\x42\x32\x32\x30\x1a\x06\n\x04\x42\x32\x32\x31\x1a\x06\n\x04\x42\x32\x32\x32\x1a\x06\n\x04\x42\x32\x32\x33\x1a\x06\n\x04\x42\x32\x32\x34\x1a\x06\n\x04\x42\x32\x32\x35\x1a\x06\n\x04\x42\x32\x32\x36\x1a\x06\n\x04\x42\x32\x32\x37\x1a\x06\n\x04\x42\x32\x32\x38\x1a\x06\n\x04\x42\x32\x32\x39\x1a\x06\n\x04\x42\x32\x33\x30\x1a\x06\n\x04\x42\x32\x33\x31\x1a\x06\n\x04\x42\x32\x33\x32\x1a\x06\n\x04\x42\x32\x33\x33\x1a\x06\n\x04\x42\x32\x33\x34\x1a\x06\n\x04\x42\x32\x33\x35\x1a\x06\n\x04\x42\x32\x33\x36\x1a\x06\n\x04\x42\x32\x33\x37\x1a\x06\n\x04\x42\x32\x33\x38\x1a\x06\n\x04\x42\x32\x33\x39\x1a\x06\n\x04\x42\x32\x34\x30\x1a\x06\n\x04\x42\x32\x34\x31\x1a\x06\n\x04\x42\x32\x34\x32\x1a\x06\n\x04\x42\x32\x34\x33\x1a\x06\n\x04\x42\x32\x34\x34\x1a\x06\n\x04\x42\x32\x34\x35\x1a\x06\n\x04\x42\x32\x34\x36\x1a\x06\n\x04\x42\x32\x34\x37\x1a\x06\n\x04\x42\x32\x34\x38\x1a\x06\n\x04\x42\x32\x34\x39\x1a\x06\n\x04\x42\x32\x35\x30\x1a\x06\n\x04\x42\x32\x35\x31\x1a\x06\n\x04\x42\x32\x35\x32\x1a\x06\n\x04\x42\x32\x35\x33\x1a\x06\n\x04\x42\x32\x35\x34\x1a\x06\n\x04\x42\x32\x35\x35*\x1b\n\x02is\x12\x0b\n\x07\x64\x65\x66\x61ult\x10\x00\x12\x08\n\x04\x65lse\x10\x01:C\n\x0foptional_uint64\x12*.google.protobuf.internal.OutOfOrderFields\x18\x04 \x01(\x04:B\n\x0eoptional_int64\x12*.google.protobuf.internal.OutOfOrderFields\x18\x02 \x01(\x03:2\n\x08\x63ontinue\x12\x1f.google.protobuf.internal.class\x18\xe9\x07 \x01(\x05:2\n\x04with\x12#.google.protobuf.internal.class.try\x18\xe9\x07 \x01(\x05') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_messages_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + OutOfOrderFields.RegisterExtension(optional_uint64) + OutOfOrderFields.RegisterExtension(optional_int64) + globals()['class'].RegisterExtension(globals()['continue']) + getattr(globals()['class'], 'try').RegisterExtension(globals()['with']) + globals()['class'].RegisterExtension(_EXTENDCLASS.extensions_by_name['return']) + + DESCRIPTOR._options = None + _IS._serialized_start=2669 + _IS._serialized_end=2696 + _OUTOFORDERFIELDS._serialized_start=74 + _OUTOFORDERFIELDS._serialized_end=178 + _CLASS._serialized_start=181 + _CLASS._serialized_end=514 + _CLASS_TRY._serialized_start=448 + _CLASS_TRY._serialized_end=476 + _CLASS_FOR._serialized_start=478 + _CLASS_FOR._serialized_end=506 + _EXTENDCLASS._serialized_start=516 + _EXTENDCLASS._serialized_end=579 + _TESTFULLKEYWORD._serialized_start=581 + _TESTFULLKEYWORD._serialized_end=707 + _LOTSNESTEDMESSAGE._serialized_start=710 + _LOTSNESTEDMESSAGE._serialized_end=2667 + _LOTSNESTEDMESSAGE_B0._serialized_start=731 + _LOTSNESTEDMESSAGE_B0._serialized_end=735 + _LOTSNESTEDMESSAGE_B1._serialized_start=737 + 
_LOTSNESTEDMESSAGE_B1._serialized_end=741 + _LOTSNESTEDMESSAGE_B2._serialized_start=743 + _LOTSNESTEDMESSAGE_B2._serialized_end=747 + _LOTSNESTEDMESSAGE_B3._serialized_start=749 + _LOTSNESTEDMESSAGE_B3._serialized_end=753 + _LOTSNESTEDMESSAGE_B4._serialized_start=755 + _LOTSNESTEDMESSAGE_B4._serialized_end=759 + _LOTSNESTEDMESSAGE_B5._serialized_start=761 + _LOTSNESTEDMESSAGE_B5._serialized_end=765 + _LOTSNESTEDMESSAGE_B6._serialized_start=767 + _LOTSNESTEDMESSAGE_B6._serialized_end=771 + _LOTSNESTEDMESSAGE_B7._serialized_start=773 + _LOTSNESTEDMESSAGE_B7._serialized_end=777 + _LOTSNESTEDMESSAGE_B8._serialized_start=779 + _LOTSNESTEDMESSAGE_B8._serialized_end=783 + _LOTSNESTEDMESSAGE_B9._serialized_start=785 + _LOTSNESTEDMESSAGE_B9._serialized_end=789 + _LOTSNESTEDMESSAGE_B10._serialized_start=791 + _LOTSNESTEDMESSAGE_B10._serialized_end=796 + _LOTSNESTEDMESSAGE_B11._serialized_start=798 + _LOTSNESTEDMESSAGE_B11._serialized_end=803 + _LOTSNESTEDMESSAGE_B12._serialized_start=805 + _LOTSNESTEDMESSAGE_B12._serialized_end=810 + _LOTSNESTEDMESSAGE_B13._serialized_start=812 + _LOTSNESTEDMESSAGE_B13._serialized_end=817 + _LOTSNESTEDMESSAGE_B14._serialized_start=819 + _LOTSNESTEDMESSAGE_B14._serialized_end=824 + _LOTSNESTEDMESSAGE_B15._serialized_start=826 + _LOTSNESTEDMESSAGE_B15._serialized_end=831 + _LOTSNESTEDMESSAGE_B16._serialized_start=833 + _LOTSNESTEDMESSAGE_B16._serialized_end=838 + _LOTSNESTEDMESSAGE_B17._serialized_start=840 + _LOTSNESTEDMESSAGE_B17._serialized_end=845 + _LOTSNESTEDMESSAGE_B18._serialized_start=847 + _LOTSNESTEDMESSAGE_B18._serialized_end=852 + _LOTSNESTEDMESSAGE_B19._serialized_start=854 + _LOTSNESTEDMESSAGE_B19._serialized_end=859 + _LOTSNESTEDMESSAGE_B20._serialized_start=861 + _LOTSNESTEDMESSAGE_B20._serialized_end=866 + _LOTSNESTEDMESSAGE_B21._serialized_start=868 + _LOTSNESTEDMESSAGE_B21._serialized_end=873 + _LOTSNESTEDMESSAGE_B22._serialized_start=875 + _LOTSNESTEDMESSAGE_B22._serialized_end=880 + _LOTSNESTEDMESSAGE_B23._serialized_start=882 + _LOTSNESTEDMESSAGE_B23._serialized_end=887 + _LOTSNESTEDMESSAGE_B24._serialized_start=889 + _LOTSNESTEDMESSAGE_B24._serialized_end=894 + _LOTSNESTEDMESSAGE_B25._serialized_start=896 + _LOTSNESTEDMESSAGE_B25._serialized_end=901 + _LOTSNESTEDMESSAGE_B26._serialized_start=903 + _LOTSNESTEDMESSAGE_B26._serialized_end=908 + _LOTSNESTEDMESSAGE_B27._serialized_start=910 + _LOTSNESTEDMESSAGE_B27._serialized_end=915 + _LOTSNESTEDMESSAGE_B28._serialized_start=917 + _LOTSNESTEDMESSAGE_B28._serialized_end=922 + _LOTSNESTEDMESSAGE_B29._serialized_start=924 + _LOTSNESTEDMESSAGE_B29._serialized_end=929 + _LOTSNESTEDMESSAGE_B30._serialized_start=931 + _LOTSNESTEDMESSAGE_B30._serialized_end=936 + _LOTSNESTEDMESSAGE_B31._serialized_start=938 + _LOTSNESTEDMESSAGE_B31._serialized_end=943 + _LOTSNESTEDMESSAGE_B32._serialized_start=945 + _LOTSNESTEDMESSAGE_B32._serialized_end=950 + _LOTSNESTEDMESSAGE_B33._serialized_start=952 + _LOTSNESTEDMESSAGE_B33._serialized_end=957 + _LOTSNESTEDMESSAGE_B34._serialized_start=959 + _LOTSNESTEDMESSAGE_B34._serialized_end=964 + _LOTSNESTEDMESSAGE_B35._serialized_start=966 + _LOTSNESTEDMESSAGE_B35._serialized_end=971 + _LOTSNESTEDMESSAGE_B36._serialized_start=973 + _LOTSNESTEDMESSAGE_B36._serialized_end=978 + _LOTSNESTEDMESSAGE_B37._serialized_start=980 + _LOTSNESTEDMESSAGE_B37._serialized_end=985 + _LOTSNESTEDMESSAGE_B38._serialized_start=987 + _LOTSNESTEDMESSAGE_B38._serialized_end=992 + _LOTSNESTEDMESSAGE_B39._serialized_start=994 + _LOTSNESTEDMESSAGE_B39._serialized_end=999 + 
_LOTSNESTEDMESSAGE_B40._serialized_start=1001 + _LOTSNESTEDMESSAGE_B40._serialized_end=1006 + _LOTSNESTEDMESSAGE_B41._serialized_start=1008 + _LOTSNESTEDMESSAGE_B41._serialized_end=1013 + _LOTSNESTEDMESSAGE_B42._serialized_start=1015 + _LOTSNESTEDMESSAGE_B42._serialized_end=1020 + _LOTSNESTEDMESSAGE_B43._serialized_start=1022 + _LOTSNESTEDMESSAGE_B43._serialized_end=1027 + _LOTSNESTEDMESSAGE_B44._serialized_start=1029 + _LOTSNESTEDMESSAGE_B44._serialized_end=1034 + _LOTSNESTEDMESSAGE_B45._serialized_start=1036 + _LOTSNESTEDMESSAGE_B45._serialized_end=1041 + _LOTSNESTEDMESSAGE_B46._serialized_start=1043 + _LOTSNESTEDMESSAGE_B46._serialized_end=1048 + _LOTSNESTEDMESSAGE_B47._serialized_start=1050 + _LOTSNESTEDMESSAGE_B47._serialized_end=1055 + _LOTSNESTEDMESSAGE_B48._serialized_start=1057 + _LOTSNESTEDMESSAGE_B48._serialized_end=1062 + _LOTSNESTEDMESSAGE_B49._serialized_start=1064 + _LOTSNESTEDMESSAGE_B49._serialized_end=1069 + _LOTSNESTEDMESSAGE_B50._serialized_start=1071 + _LOTSNESTEDMESSAGE_B50._serialized_end=1076 + _LOTSNESTEDMESSAGE_B51._serialized_start=1078 + _LOTSNESTEDMESSAGE_B51._serialized_end=1083 + _LOTSNESTEDMESSAGE_B52._serialized_start=1085 + _LOTSNESTEDMESSAGE_B52._serialized_end=1090 + _LOTSNESTEDMESSAGE_B53._serialized_start=1092 + _LOTSNESTEDMESSAGE_B53._serialized_end=1097 + _LOTSNESTEDMESSAGE_B54._serialized_start=1099 + _LOTSNESTEDMESSAGE_B54._serialized_end=1104 + _LOTSNESTEDMESSAGE_B55._serialized_start=1106 + _LOTSNESTEDMESSAGE_B55._serialized_end=1111 + _LOTSNESTEDMESSAGE_B56._serialized_start=1113 + _LOTSNESTEDMESSAGE_B56._serialized_end=1118 + _LOTSNESTEDMESSAGE_B57._serialized_start=1120 + _LOTSNESTEDMESSAGE_B57._serialized_end=1125 + _LOTSNESTEDMESSAGE_B58._serialized_start=1127 + _LOTSNESTEDMESSAGE_B58._serialized_end=1132 + _LOTSNESTEDMESSAGE_B59._serialized_start=1134 + _LOTSNESTEDMESSAGE_B59._serialized_end=1139 + _LOTSNESTEDMESSAGE_B60._serialized_start=1141 + _LOTSNESTEDMESSAGE_B60._serialized_end=1146 + _LOTSNESTEDMESSAGE_B61._serialized_start=1148 + _LOTSNESTEDMESSAGE_B61._serialized_end=1153 + _LOTSNESTEDMESSAGE_B62._serialized_start=1155 + _LOTSNESTEDMESSAGE_B62._serialized_end=1160 + _LOTSNESTEDMESSAGE_B63._serialized_start=1162 + _LOTSNESTEDMESSAGE_B63._serialized_end=1167 + _LOTSNESTEDMESSAGE_B64._serialized_start=1169 + _LOTSNESTEDMESSAGE_B64._serialized_end=1174 + _LOTSNESTEDMESSAGE_B65._serialized_start=1176 + _LOTSNESTEDMESSAGE_B65._serialized_end=1181 + _LOTSNESTEDMESSAGE_B66._serialized_start=1183 + _LOTSNESTEDMESSAGE_B66._serialized_end=1188 + _LOTSNESTEDMESSAGE_B67._serialized_start=1190 + _LOTSNESTEDMESSAGE_B67._serialized_end=1195 + _LOTSNESTEDMESSAGE_B68._serialized_start=1197 + _LOTSNESTEDMESSAGE_B68._serialized_end=1202 + _LOTSNESTEDMESSAGE_B69._serialized_start=1204 + _LOTSNESTEDMESSAGE_B69._serialized_end=1209 + _LOTSNESTEDMESSAGE_B70._serialized_start=1211 + _LOTSNESTEDMESSAGE_B70._serialized_end=1216 + _LOTSNESTEDMESSAGE_B71._serialized_start=1218 + _LOTSNESTEDMESSAGE_B71._serialized_end=1223 + _LOTSNESTEDMESSAGE_B72._serialized_start=1225 + _LOTSNESTEDMESSAGE_B72._serialized_end=1230 + _LOTSNESTEDMESSAGE_B73._serialized_start=1232 + _LOTSNESTEDMESSAGE_B73._serialized_end=1237 + _LOTSNESTEDMESSAGE_B74._serialized_start=1239 + _LOTSNESTEDMESSAGE_B74._serialized_end=1244 + _LOTSNESTEDMESSAGE_B75._serialized_start=1246 + _LOTSNESTEDMESSAGE_B75._serialized_end=1251 + _LOTSNESTEDMESSAGE_B76._serialized_start=1253 + _LOTSNESTEDMESSAGE_B76._serialized_end=1258 + _LOTSNESTEDMESSAGE_B77._serialized_start=1260 + 
_LOTSNESTEDMESSAGE_B77._serialized_end=1265 + _LOTSNESTEDMESSAGE_B78._serialized_start=1267 + _LOTSNESTEDMESSAGE_B78._serialized_end=1272 + _LOTSNESTEDMESSAGE_B79._serialized_start=1274 + _LOTSNESTEDMESSAGE_B79._serialized_end=1279 + _LOTSNESTEDMESSAGE_B80._serialized_start=1281 + _LOTSNESTEDMESSAGE_B80._serialized_end=1286 + _LOTSNESTEDMESSAGE_B81._serialized_start=1288 + _LOTSNESTEDMESSAGE_B81._serialized_end=1293 + _LOTSNESTEDMESSAGE_B82._serialized_start=1295 + _LOTSNESTEDMESSAGE_B82._serialized_end=1300 + _LOTSNESTEDMESSAGE_B83._serialized_start=1302 + _LOTSNESTEDMESSAGE_B83._serialized_end=1307 + _LOTSNESTEDMESSAGE_B84._serialized_start=1309 + _LOTSNESTEDMESSAGE_B84._serialized_end=1314 + _LOTSNESTEDMESSAGE_B85._serialized_start=1316 + _LOTSNESTEDMESSAGE_B85._serialized_end=1321 + _LOTSNESTEDMESSAGE_B86._serialized_start=1323 + _LOTSNESTEDMESSAGE_B86._serialized_end=1328 + _LOTSNESTEDMESSAGE_B87._serialized_start=1330 + _LOTSNESTEDMESSAGE_B87._serialized_end=1335 + _LOTSNESTEDMESSAGE_B88._serialized_start=1337 + _LOTSNESTEDMESSAGE_B88._serialized_end=1342 + _LOTSNESTEDMESSAGE_B89._serialized_start=1344 + _LOTSNESTEDMESSAGE_B89._serialized_end=1349 + _LOTSNESTEDMESSAGE_B90._serialized_start=1351 + _LOTSNESTEDMESSAGE_B90._serialized_end=1356 + _LOTSNESTEDMESSAGE_B91._serialized_start=1358 + _LOTSNESTEDMESSAGE_B91._serialized_end=1363 + _LOTSNESTEDMESSAGE_B92._serialized_start=1365 + _LOTSNESTEDMESSAGE_B92._serialized_end=1370 + _LOTSNESTEDMESSAGE_B93._serialized_start=1372 + _LOTSNESTEDMESSAGE_B93._serialized_end=1377 + _LOTSNESTEDMESSAGE_B94._serialized_start=1379 + _LOTSNESTEDMESSAGE_B94._serialized_end=1384 + _LOTSNESTEDMESSAGE_B95._serialized_start=1386 + _LOTSNESTEDMESSAGE_B95._serialized_end=1391 + _LOTSNESTEDMESSAGE_B96._serialized_start=1393 + _LOTSNESTEDMESSAGE_B96._serialized_end=1398 + _LOTSNESTEDMESSAGE_B97._serialized_start=1400 + _LOTSNESTEDMESSAGE_B97._serialized_end=1405 + _LOTSNESTEDMESSAGE_B98._serialized_start=1407 + _LOTSNESTEDMESSAGE_B98._serialized_end=1412 + _LOTSNESTEDMESSAGE_B99._serialized_start=1414 + _LOTSNESTEDMESSAGE_B99._serialized_end=1419 + _LOTSNESTEDMESSAGE_B100._serialized_start=1421 + _LOTSNESTEDMESSAGE_B100._serialized_end=1427 + _LOTSNESTEDMESSAGE_B101._serialized_start=1429 + _LOTSNESTEDMESSAGE_B101._serialized_end=1435 + _LOTSNESTEDMESSAGE_B102._serialized_start=1437 + _LOTSNESTEDMESSAGE_B102._serialized_end=1443 + _LOTSNESTEDMESSAGE_B103._serialized_start=1445 + _LOTSNESTEDMESSAGE_B103._serialized_end=1451 + _LOTSNESTEDMESSAGE_B104._serialized_start=1453 + _LOTSNESTEDMESSAGE_B104._serialized_end=1459 + _LOTSNESTEDMESSAGE_B105._serialized_start=1461 + _LOTSNESTEDMESSAGE_B105._serialized_end=1467 + _LOTSNESTEDMESSAGE_B106._serialized_start=1469 + _LOTSNESTEDMESSAGE_B106._serialized_end=1475 + _LOTSNESTEDMESSAGE_B107._serialized_start=1477 + _LOTSNESTEDMESSAGE_B107._serialized_end=1483 + _LOTSNESTEDMESSAGE_B108._serialized_start=1485 + _LOTSNESTEDMESSAGE_B108._serialized_end=1491 + _LOTSNESTEDMESSAGE_B109._serialized_start=1493 + _LOTSNESTEDMESSAGE_B109._serialized_end=1499 + _LOTSNESTEDMESSAGE_B110._serialized_start=1501 + _LOTSNESTEDMESSAGE_B110._serialized_end=1507 + _LOTSNESTEDMESSAGE_B111._serialized_start=1509 + _LOTSNESTEDMESSAGE_B111._serialized_end=1515 + _LOTSNESTEDMESSAGE_B112._serialized_start=1517 + _LOTSNESTEDMESSAGE_B112._serialized_end=1523 + _LOTSNESTEDMESSAGE_B113._serialized_start=1525 + _LOTSNESTEDMESSAGE_B113._serialized_end=1531 + _LOTSNESTEDMESSAGE_B114._serialized_start=1533 + _LOTSNESTEDMESSAGE_B114._serialized_end=1539 + 
_LOTSNESTEDMESSAGE_B115._serialized_start=1541 + _LOTSNESTEDMESSAGE_B115._serialized_end=1547 + _LOTSNESTEDMESSAGE_B116._serialized_start=1549 + _LOTSNESTEDMESSAGE_B116._serialized_end=1555 + _LOTSNESTEDMESSAGE_B117._serialized_start=1557 + _LOTSNESTEDMESSAGE_B117._serialized_end=1563 + _LOTSNESTEDMESSAGE_B118._serialized_start=1565 + _LOTSNESTEDMESSAGE_B118._serialized_end=1571 + _LOTSNESTEDMESSAGE_B119._serialized_start=1573 + _LOTSNESTEDMESSAGE_B119._serialized_end=1579 + _LOTSNESTEDMESSAGE_B120._serialized_start=1581 + _LOTSNESTEDMESSAGE_B120._serialized_end=1587 + _LOTSNESTEDMESSAGE_B121._serialized_start=1589 + _LOTSNESTEDMESSAGE_B121._serialized_end=1595 + _LOTSNESTEDMESSAGE_B122._serialized_start=1597 + _LOTSNESTEDMESSAGE_B122._serialized_end=1603 + _LOTSNESTEDMESSAGE_B123._serialized_start=1605 + _LOTSNESTEDMESSAGE_B123._serialized_end=1611 + _LOTSNESTEDMESSAGE_B124._serialized_start=1613 + _LOTSNESTEDMESSAGE_B124._serialized_end=1619 + _LOTSNESTEDMESSAGE_B125._serialized_start=1621 + _LOTSNESTEDMESSAGE_B125._serialized_end=1627 + _LOTSNESTEDMESSAGE_B126._serialized_start=1629 + _LOTSNESTEDMESSAGE_B126._serialized_end=1635 + _LOTSNESTEDMESSAGE_B127._serialized_start=1637 + _LOTSNESTEDMESSAGE_B127._serialized_end=1643 + _LOTSNESTEDMESSAGE_B128._serialized_start=1645 + _LOTSNESTEDMESSAGE_B128._serialized_end=1651 + _LOTSNESTEDMESSAGE_B129._serialized_start=1653 + _LOTSNESTEDMESSAGE_B129._serialized_end=1659 + _LOTSNESTEDMESSAGE_B130._serialized_start=1661 + _LOTSNESTEDMESSAGE_B130._serialized_end=1667 + _LOTSNESTEDMESSAGE_B131._serialized_start=1669 + _LOTSNESTEDMESSAGE_B131._serialized_end=1675 + _LOTSNESTEDMESSAGE_B132._serialized_start=1677 + _LOTSNESTEDMESSAGE_B132._serialized_end=1683 + _LOTSNESTEDMESSAGE_B133._serialized_start=1685 + _LOTSNESTEDMESSAGE_B133._serialized_end=1691 + _LOTSNESTEDMESSAGE_B134._serialized_start=1693 + _LOTSNESTEDMESSAGE_B134._serialized_end=1699 + _LOTSNESTEDMESSAGE_B135._serialized_start=1701 + _LOTSNESTEDMESSAGE_B135._serialized_end=1707 + _LOTSNESTEDMESSAGE_B136._serialized_start=1709 + _LOTSNESTEDMESSAGE_B136._serialized_end=1715 + _LOTSNESTEDMESSAGE_B137._serialized_start=1717 + _LOTSNESTEDMESSAGE_B137._serialized_end=1723 + _LOTSNESTEDMESSAGE_B138._serialized_start=1725 + _LOTSNESTEDMESSAGE_B138._serialized_end=1731 + _LOTSNESTEDMESSAGE_B139._serialized_start=1733 + _LOTSNESTEDMESSAGE_B139._serialized_end=1739 + _LOTSNESTEDMESSAGE_B140._serialized_start=1741 + _LOTSNESTEDMESSAGE_B140._serialized_end=1747 + _LOTSNESTEDMESSAGE_B141._serialized_start=1749 + _LOTSNESTEDMESSAGE_B141._serialized_end=1755 + _LOTSNESTEDMESSAGE_B142._serialized_start=1757 + _LOTSNESTEDMESSAGE_B142._serialized_end=1763 + _LOTSNESTEDMESSAGE_B143._serialized_start=1765 + _LOTSNESTEDMESSAGE_B143._serialized_end=1771 + _LOTSNESTEDMESSAGE_B144._serialized_start=1773 + _LOTSNESTEDMESSAGE_B144._serialized_end=1779 + _LOTSNESTEDMESSAGE_B145._serialized_start=1781 + _LOTSNESTEDMESSAGE_B145._serialized_end=1787 + _LOTSNESTEDMESSAGE_B146._serialized_start=1789 + _LOTSNESTEDMESSAGE_B146._serialized_end=1795 + _LOTSNESTEDMESSAGE_B147._serialized_start=1797 + _LOTSNESTEDMESSAGE_B147._serialized_end=1803 + _LOTSNESTEDMESSAGE_B148._serialized_start=1805 + _LOTSNESTEDMESSAGE_B148._serialized_end=1811 + _LOTSNESTEDMESSAGE_B149._serialized_start=1813 + _LOTSNESTEDMESSAGE_B149._serialized_end=1819 + _LOTSNESTEDMESSAGE_B150._serialized_start=1821 + _LOTSNESTEDMESSAGE_B150._serialized_end=1827 + _LOTSNESTEDMESSAGE_B151._serialized_start=1829 + _LOTSNESTEDMESSAGE_B151._serialized_end=1835 + 
_LOTSNESTEDMESSAGE_B152._serialized_start=1837 + _LOTSNESTEDMESSAGE_B152._serialized_end=1843 + _LOTSNESTEDMESSAGE_B153._serialized_start=1845 + _LOTSNESTEDMESSAGE_B153._serialized_end=1851 + _LOTSNESTEDMESSAGE_B154._serialized_start=1853 + _LOTSNESTEDMESSAGE_B154._serialized_end=1859 + _LOTSNESTEDMESSAGE_B155._serialized_start=1861 + _LOTSNESTEDMESSAGE_B155._serialized_end=1867 + _LOTSNESTEDMESSAGE_B156._serialized_start=1869 + _LOTSNESTEDMESSAGE_B156._serialized_end=1875 + _LOTSNESTEDMESSAGE_B157._serialized_start=1877 + _LOTSNESTEDMESSAGE_B157._serialized_end=1883 + _LOTSNESTEDMESSAGE_B158._serialized_start=1885 + _LOTSNESTEDMESSAGE_B158._serialized_end=1891 + _LOTSNESTEDMESSAGE_B159._serialized_start=1893 + _LOTSNESTEDMESSAGE_B159._serialized_end=1899 + _LOTSNESTEDMESSAGE_B160._serialized_start=1901 + _LOTSNESTEDMESSAGE_B160._serialized_end=1907 + _LOTSNESTEDMESSAGE_B161._serialized_start=1909 + _LOTSNESTEDMESSAGE_B161._serialized_end=1915 + _LOTSNESTEDMESSAGE_B162._serialized_start=1917 + _LOTSNESTEDMESSAGE_B162._serialized_end=1923 + _LOTSNESTEDMESSAGE_B163._serialized_start=1925 + _LOTSNESTEDMESSAGE_B163._serialized_end=1931 + _LOTSNESTEDMESSAGE_B164._serialized_start=1933 + _LOTSNESTEDMESSAGE_B164._serialized_end=1939 + _LOTSNESTEDMESSAGE_B165._serialized_start=1941 + _LOTSNESTEDMESSAGE_B165._serialized_end=1947 + _LOTSNESTEDMESSAGE_B166._serialized_start=1949 + _LOTSNESTEDMESSAGE_B166._serialized_end=1955 + _LOTSNESTEDMESSAGE_B167._serialized_start=1957 + _LOTSNESTEDMESSAGE_B167._serialized_end=1963 + _LOTSNESTEDMESSAGE_B168._serialized_start=1965 + _LOTSNESTEDMESSAGE_B168._serialized_end=1971 + _LOTSNESTEDMESSAGE_B169._serialized_start=1973 + _LOTSNESTEDMESSAGE_B169._serialized_end=1979 + _LOTSNESTEDMESSAGE_B170._serialized_start=1981 + _LOTSNESTEDMESSAGE_B170._serialized_end=1987 + _LOTSNESTEDMESSAGE_B171._serialized_start=1989 + _LOTSNESTEDMESSAGE_B171._serialized_end=1995 + _LOTSNESTEDMESSAGE_B172._serialized_start=1997 + _LOTSNESTEDMESSAGE_B172._serialized_end=2003 + _LOTSNESTEDMESSAGE_B173._serialized_start=2005 + _LOTSNESTEDMESSAGE_B173._serialized_end=2011 + _LOTSNESTEDMESSAGE_B174._serialized_start=2013 + _LOTSNESTEDMESSAGE_B174._serialized_end=2019 + _LOTSNESTEDMESSAGE_B175._serialized_start=2021 + _LOTSNESTEDMESSAGE_B175._serialized_end=2027 + _LOTSNESTEDMESSAGE_B176._serialized_start=2029 + _LOTSNESTEDMESSAGE_B176._serialized_end=2035 + _LOTSNESTEDMESSAGE_B177._serialized_start=2037 + _LOTSNESTEDMESSAGE_B177._serialized_end=2043 + _LOTSNESTEDMESSAGE_B178._serialized_start=2045 + _LOTSNESTEDMESSAGE_B178._serialized_end=2051 + _LOTSNESTEDMESSAGE_B179._serialized_start=2053 + _LOTSNESTEDMESSAGE_B179._serialized_end=2059 + _LOTSNESTEDMESSAGE_B180._serialized_start=2061 + _LOTSNESTEDMESSAGE_B180._serialized_end=2067 + _LOTSNESTEDMESSAGE_B181._serialized_start=2069 + _LOTSNESTEDMESSAGE_B181._serialized_end=2075 + _LOTSNESTEDMESSAGE_B182._serialized_start=2077 + _LOTSNESTEDMESSAGE_B182._serialized_end=2083 + _LOTSNESTEDMESSAGE_B183._serialized_start=2085 + _LOTSNESTEDMESSAGE_B183._serialized_end=2091 + _LOTSNESTEDMESSAGE_B184._serialized_start=2093 + _LOTSNESTEDMESSAGE_B184._serialized_end=2099 + _LOTSNESTEDMESSAGE_B185._serialized_start=2101 + _LOTSNESTEDMESSAGE_B185._serialized_end=2107 + _LOTSNESTEDMESSAGE_B186._serialized_start=2109 + _LOTSNESTEDMESSAGE_B186._serialized_end=2115 + _LOTSNESTEDMESSAGE_B187._serialized_start=2117 + _LOTSNESTEDMESSAGE_B187._serialized_end=2123 + _LOTSNESTEDMESSAGE_B188._serialized_start=2125 + _LOTSNESTEDMESSAGE_B188._serialized_end=2131 + 
_LOTSNESTEDMESSAGE_B189._serialized_start=2133 + _LOTSNESTEDMESSAGE_B189._serialized_end=2139 + _LOTSNESTEDMESSAGE_B190._serialized_start=2141 + _LOTSNESTEDMESSAGE_B190._serialized_end=2147 + _LOTSNESTEDMESSAGE_B191._serialized_start=2149 + _LOTSNESTEDMESSAGE_B191._serialized_end=2155 + _LOTSNESTEDMESSAGE_B192._serialized_start=2157 + _LOTSNESTEDMESSAGE_B192._serialized_end=2163 + _LOTSNESTEDMESSAGE_B193._serialized_start=2165 + _LOTSNESTEDMESSAGE_B193._serialized_end=2171 + _LOTSNESTEDMESSAGE_B194._serialized_start=2173 + _LOTSNESTEDMESSAGE_B194._serialized_end=2179 + _LOTSNESTEDMESSAGE_B195._serialized_start=2181 + _LOTSNESTEDMESSAGE_B195._serialized_end=2187 + _LOTSNESTEDMESSAGE_B196._serialized_start=2189 + _LOTSNESTEDMESSAGE_B196._serialized_end=2195 + _LOTSNESTEDMESSAGE_B197._serialized_start=2197 + _LOTSNESTEDMESSAGE_B197._serialized_end=2203 + _LOTSNESTEDMESSAGE_B198._serialized_start=2205 + _LOTSNESTEDMESSAGE_B198._serialized_end=2211 + _LOTSNESTEDMESSAGE_B199._serialized_start=2213 + _LOTSNESTEDMESSAGE_B199._serialized_end=2219 + _LOTSNESTEDMESSAGE_B200._serialized_start=2221 + _LOTSNESTEDMESSAGE_B200._serialized_end=2227 + _LOTSNESTEDMESSAGE_B201._serialized_start=2229 + _LOTSNESTEDMESSAGE_B201._serialized_end=2235 + _LOTSNESTEDMESSAGE_B202._serialized_start=2237 + _LOTSNESTEDMESSAGE_B202._serialized_end=2243 + _LOTSNESTEDMESSAGE_B203._serialized_start=2245 + _LOTSNESTEDMESSAGE_B203._serialized_end=2251 + _LOTSNESTEDMESSAGE_B204._serialized_start=2253 + _LOTSNESTEDMESSAGE_B204._serialized_end=2259 + _LOTSNESTEDMESSAGE_B205._serialized_start=2261 + _LOTSNESTEDMESSAGE_B205._serialized_end=2267 + _LOTSNESTEDMESSAGE_B206._serialized_start=2269 + _LOTSNESTEDMESSAGE_B206._serialized_end=2275 + _LOTSNESTEDMESSAGE_B207._serialized_start=2277 + _LOTSNESTEDMESSAGE_B207._serialized_end=2283 + _LOTSNESTEDMESSAGE_B208._serialized_start=2285 + _LOTSNESTEDMESSAGE_B208._serialized_end=2291 + _LOTSNESTEDMESSAGE_B209._serialized_start=2293 + _LOTSNESTEDMESSAGE_B209._serialized_end=2299 + _LOTSNESTEDMESSAGE_B210._serialized_start=2301 + _LOTSNESTEDMESSAGE_B210._serialized_end=2307 + _LOTSNESTEDMESSAGE_B211._serialized_start=2309 + _LOTSNESTEDMESSAGE_B211._serialized_end=2315 + _LOTSNESTEDMESSAGE_B212._serialized_start=2317 + _LOTSNESTEDMESSAGE_B212._serialized_end=2323 + _LOTSNESTEDMESSAGE_B213._serialized_start=2325 + _LOTSNESTEDMESSAGE_B213._serialized_end=2331 + _LOTSNESTEDMESSAGE_B214._serialized_start=2333 + _LOTSNESTEDMESSAGE_B214._serialized_end=2339 + _LOTSNESTEDMESSAGE_B215._serialized_start=2341 + _LOTSNESTEDMESSAGE_B215._serialized_end=2347 + _LOTSNESTEDMESSAGE_B216._serialized_start=2349 + _LOTSNESTEDMESSAGE_B216._serialized_end=2355 + _LOTSNESTEDMESSAGE_B217._serialized_start=2357 + _LOTSNESTEDMESSAGE_B217._serialized_end=2363 + _LOTSNESTEDMESSAGE_B218._serialized_start=2365 + _LOTSNESTEDMESSAGE_B218._serialized_end=2371 + _LOTSNESTEDMESSAGE_B219._serialized_start=2373 + _LOTSNESTEDMESSAGE_B219._serialized_end=2379 + _LOTSNESTEDMESSAGE_B220._serialized_start=2381 + _LOTSNESTEDMESSAGE_B220._serialized_end=2387 + _LOTSNESTEDMESSAGE_B221._serialized_start=2389 + _LOTSNESTEDMESSAGE_B221._serialized_end=2395 + _LOTSNESTEDMESSAGE_B222._serialized_start=2397 + _LOTSNESTEDMESSAGE_B222._serialized_end=2403 + _LOTSNESTEDMESSAGE_B223._serialized_start=2405 + _LOTSNESTEDMESSAGE_B223._serialized_end=2411 + _LOTSNESTEDMESSAGE_B224._serialized_start=2413 + _LOTSNESTEDMESSAGE_B224._serialized_end=2419 + _LOTSNESTEDMESSAGE_B225._serialized_start=2421 + _LOTSNESTEDMESSAGE_B225._serialized_end=2427 + 
_LOTSNESTEDMESSAGE_B226._serialized_start=2429 + _LOTSNESTEDMESSAGE_B226._serialized_end=2435 + _LOTSNESTEDMESSAGE_B227._serialized_start=2437 + _LOTSNESTEDMESSAGE_B227._serialized_end=2443 + _LOTSNESTEDMESSAGE_B228._serialized_start=2445 + _LOTSNESTEDMESSAGE_B228._serialized_end=2451 + _LOTSNESTEDMESSAGE_B229._serialized_start=2453 + _LOTSNESTEDMESSAGE_B229._serialized_end=2459 + _LOTSNESTEDMESSAGE_B230._serialized_start=2461 + _LOTSNESTEDMESSAGE_B230._serialized_end=2467 + _LOTSNESTEDMESSAGE_B231._serialized_start=2469 + _LOTSNESTEDMESSAGE_B231._serialized_end=2475 + _LOTSNESTEDMESSAGE_B232._serialized_start=2477 + _LOTSNESTEDMESSAGE_B232._serialized_end=2483 + _LOTSNESTEDMESSAGE_B233._serialized_start=2485 + _LOTSNESTEDMESSAGE_B233._serialized_end=2491 + _LOTSNESTEDMESSAGE_B234._serialized_start=2493 + _LOTSNESTEDMESSAGE_B234._serialized_end=2499 + _LOTSNESTEDMESSAGE_B235._serialized_start=2501 + _LOTSNESTEDMESSAGE_B235._serialized_end=2507 + _LOTSNESTEDMESSAGE_B236._serialized_start=2509 + _LOTSNESTEDMESSAGE_B236._serialized_end=2515 + _LOTSNESTEDMESSAGE_B237._serialized_start=2517 + _LOTSNESTEDMESSAGE_B237._serialized_end=2523 + _LOTSNESTEDMESSAGE_B238._serialized_start=2525 + _LOTSNESTEDMESSAGE_B238._serialized_end=2531 + _LOTSNESTEDMESSAGE_B239._serialized_start=2533 + _LOTSNESTEDMESSAGE_B239._serialized_end=2539 + _LOTSNESTEDMESSAGE_B240._serialized_start=2541 + _LOTSNESTEDMESSAGE_B240._serialized_end=2547 + _LOTSNESTEDMESSAGE_B241._serialized_start=2549 + _LOTSNESTEDMESSAGE_B241._serialized_end=2555 + _LOTSNESTEDMESSAGE_B242._serialized_start=2557 + _LOTSNESTEDMESSAGE_B242._serialized_end=2563 + _LOTSNESTEDMESSAGE_B243._serialized_start=2565 + _LOTSNESTEDMESSAGE_B243._serialized_end=2571 + _LOTSNESTEDMESSAGE_B244._serialized_start=2573 + _LOTSNESTEDMESSAGE_B244._serialized_end=2579 + _LOTSNESTEDMESSAGE_B245._serialized_start=2581 + _LOTSNESTEDMESSAGE_B245._serialized_end=2587 + _LOTSNESTEDMESSAGE_B246._serialized_start=2589 + _LOTSNESTEDMESSAGE_B246._serialized_end=2595 + _LOTSNESTEDMESSAGE_B247._serialized_start=2597 + _LOTSNESTEDMESSAGE_B247._serialized_end=2603 + _LOTSNESTEDMESSAGE_B248._serialized_start=2605 + _LOTSNESTEDMESSAGE_B248._serialized_end=2611 + _LOTSNESTEDMESSAGE_B249._serialized_start=2613 + _LOTSNESTEDMESSAGE_B249._serialized_end=2619 + _LOTSNESTEDMESSAGE_B250._serialized_start=2621 + _LOTSNESTEDMESSAGE_B250._serialized_end=2627 + _LOTSNESTEDMESSAGE_B251._serialized_start=2629 + _LOTSNESTEDMESSAGE_B251._serialized_end=2635 + _LOTSNESTEDMESSAGE_B252._serialized_start=2637 + _LOTSNESTEDMESSAGE_B252._serialized_end=2643 + _LOTSNESTEDMESSAGE_B253._serialized_start=2645 + _LOTSNESTEDMESSAGE_B253._serialized_end=2651 + _LOTSNESTEDMESSAGE_B254._serialized_start=2653 + _LOTSNESTEDMESSAGE_B254._serialized_end=2659 + _LOTSNESTEDMESSAGE_B255._serialized_start=2661 + _LOTSNESTEDMESSAGE_B255._serialized_end=2667 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/no_package_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/no_package_pb2.py new file mode 100644 index 0000000000..d46dee080a --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/no_package_pb2.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/internal/no_package.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)google/protobuf/internal/no_package.proto\";\n\x10NoPackageMessage\x12\'\n\x0fno_package_enum\x18\x01 \x01(\x0e\x32\x0e.NoPackageEnum*?\n\rNoPackageEnum\x12\x16\n\x12NO_PACKAGE_VALUE_0\x10\x00\x12\x16\n\x12NO_PACKAGE_VALUE_1\x10\x01') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.no_package_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _NOPACKAGEENUM._serialized_start=106 + _NOPACKAGEENUM._serialized_end=169 + _NOPACKAGEMESSAGE._serialized_start=45 + _NOPACKAGEMESSAGE._serialized_end=104 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/python_message.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/python_message.py new file mode 100644 index 0000000000..2921d5cb6e --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/python_message.py @@ -0,0 +1,1539 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This code is meant to work on Python 2.4 and above only. +# +# TODO(robinson): Helpers for verbose, common checks like seeing if a +# descriptor's cpp_type is CPPTYPE_MESSAGE. + +"""Contains a metaclass and helper functions used to create +protocol message classes from Descriptor objects at runtime. + +Recall that a metaclass is the "type" of a class. +(A class is to a metaclass what an instance is to a class.) 
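# (A minimal, self-contained sketch of that class/metaclass relationship,
#  using only built-ins; nothing protobuf-specific is assumed here.)
class ExampleMeta(type):
  pass

class Example(metaclass=ExampleMeta):
  pass

instance = Example()
assert type(instance) is Example      # an instance's "type" is its class
assert type(Example) is ExampleMeta   # a class's "type" is its metaclass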
+ +In this case, we use the GeneratedProtocolMessageType metaclass +to inject all the useful functionality into the classes +output by the protocol compiler at compile-time. + +The upshot of all this is that the real implementation +details for ALL pure-Python protocol buffers are *here in +this file*. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +from io import BytesIO +import struct +import sys +import weakref + +# We use "as" to avoid name collisions with variables. +from google.protobuf.internal import api_implementation +from google.protobuf.internal import containers +from google.protobuf.internal import decoder +from google.protobuf.internal import encoder +from google.protobuf.internal import enum_type_wrapper +from google.protobuf.internal import extension_dict +from google.protobuf.internal import message_listener as message_listener_mod +from google.protobuf.internal import type_checkers +from google.protobuf.internal import well_known_types +from google.protobuf.internal import wire_format +from google.protobuf import descriptor as descriptor_mod +from google.protobuf import message as message_mod +from google.protobuf import text_format + +_FieldDescriptor = descriptor_mod.FieldDescriptor +_AnyFullTypeName = 'google.protobuf.Any' +_ExtensionDict = extension_dict._ExtensionDict + +class GeneratedProtocolMessageType(type): + + """Metaclass for protocol message classes created at runtime from Descriptors. + + We add implementations for all methods described in the Message class. We + also create properties to allow getting/setting all fields in the protocol + message. Finally, we create slots to prevent users from accidentally + "setting" nonexistent fields in the protocol message, which then wouldn't get + serialized / deserialized properly. + + The protocol compiler currently uses this metaclass to create protocol + message classes at runtime. Clients can also manually create their own + classes at runtime, as in this example: + + mydescriptor = Descriptor(.....) + factory = symbol_database.Default() + factory.pool.AddDescriptor(mydescriptor) + MyProtoClass = factory.GetPrototype(mydescriptor) + myproto_instance = MyProtoClass() + myproto.foo_field = 23 + ... + """ + + # Must be consistent with the protocol-compiler code in + # proto2/compiler/internal/generator.*. + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __new__(cls, name, bases, dictionary): + """Custom allocation for runtime-generated class types. + + We override __new__ because this is apparently the only place + where we can meaningfully set __slots__ on the class we're creating(?). + (The interplay between metaclasses and slots is not very well-documented). + + Args: + name: Name of the class (ignored, but required by the + metaclass protocol). + bases: Base classes of the class we're constructing. + (Should be message.Message). We ignore this field, but + it's required by the metaclass protocol + dictionary: The class dictionary of the class we're + constructing. dictionary[_DESCRIPTOR_KEY] must contain + a Descriptor object describing this protocol message + type. + + Returns: + Newly-allocated class. + + Raises: + RuntimeError: Generated code only work with python cpp extension. + """ + descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] + + if isinstance(descriptor, str): + raise RuntimeError('The generated code only work with python cpp ' + 'extension, but it is using pure python runtime.') + + # If a concrete class already exists for this descriptor, don't try to + # create another. 
Doing so will break any messages that already exist with + # the existing class. + # + # The C++ implementation appears to have its own internal `PyMessageFactory` + # to achieve similar results. + # + # This most commonly happens in `text_format.py` when using descriptors from + # a custom pool; it calls symbol_database.Global().getPrototype() on a + # descriptor which already has an existing concrete class. + new_class = getattr(descriptor, '_concrete_class', None) + if new_class: + return new_class + + if descriptor.full_name in well_known_types.WKTBASES: + bases += (well_known_types.WKTBASES[descriptor.full_name],) + _AddClassAttributesForNestedExtensions(descriptor, dictionary) + _AddSlots(descriptor, dictionary) + + superclass = super(GeneratedProtocolMessageType, cls) + new_class = superclass.__new__(cls, name, bases, dictionary) + return new_class + + def __init__(cls, name, bases, dictionary): + """Here we perform the majority of our work on the class. + We add enum getters, an __init__ method, implementations + of all Message methods, and properties for all fields + in the protocol type. + + Args: + name: Name of the class (ignored, but required by the + metaclass protocol). + bases: Base classes of the class we're constructing. + (Should be message.Message). We ignore this field, but + it's required by the metaclass protocol + dictionary: The class dictionary of the class we're + constructing. dictionary[_DESCRIPTOR_KEY] must contain + a Descriptor object describing this protocol message + type. + """ + descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] + + # If this is an _existing_ class looked up via `_concrete_class` in the + # __new__ method above, then we don't need to re-initialize anything. + existing_class = getattr(descriptor, '_concrete_class', None) + if existing_class: + assert existing_class is cls, ( + 'Duplicate `GeneratedProtocolMessageType` created for descriptor %r' + % (descriptor.full_name)) + return + + cls._decoders_by_tag = {} + if (descriptor.has_options and + descriptor.GetOptions().message_set_wire_format): + cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = ( + decoder.MessageSetItemDecoder(descriptor), None) + + # Attach stuff to each FieldDescriptor for quick lookup later on. + for field in descriptor.fields: + _AttachFieldHelpers(cls, field) + + descriptor._concrete_class = cls # pylint: disable=protected-access + _AddEnumValues(descriptor, cls) + _AddInitMethod(descriptor, cls) + _AddPropertiesForFields(descriptor, cls) + _AddPropertiesForExtensions(descriptor, cls) + _AddStaticMethods(cls) + _AddMessageMethods(descriptor, cls) + _AddPrivateHelperMethods(descriptor, cls) + + superclass = super(GeneratedProtocolMessageType, cls) + superclass.__init__(name, bases, dictionary) + + +# Stateless helpers for GeneratedProtocolMessageType below. +# Outside clients should not access these directly. +# +# I opted not to make any of these methods on the metaclass, to make it more +# clear that I'm not really using any state there and to keep clients from +# thinking that they have direct access to these construction helpers. + + +def _PropertyName(proto_field_name): + """Returns the name of the public property attribute which + clients can use to get and (in some cases) set the value + of a protocol message field. + + Args: + proto_field_name: The protocol message field name, exactly + as it appears (or would appear) in a .proto file. + """ + # TODO(robinson): Escape Python keywords (e.g., yield), and test this support. 
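# A hedged sketch of the runtime path the class docstring above describes:
# look a descriptor up in the default pool and ask the symbol database for its
# concrete class. Because __new__ reuses descriptor._concrete_class, repeated
# lookups hand back the same class object. (timestamp_pb2 is only used as a
# convenient, already-generated descriptor source from the standard protobuf
# distribution; it is not part of this vendored tree.)
from google.protobuf import descriptor_pool, symbol_database, timestamp_pb2

pool = descriptor_pool.Default()
factory = symbol_database.Default()
desc = pool.FindMessageTypeByName('google.protobuf.Timestamp')
MsgClass = factory.GetPrototype(desc)
assert MsgClass is timestamp_pb2.Timestamp      # existing concrete class reused
assert factory.GetPrototype(desc) is MsgClass   # no duplicate class is created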
+ # nnorwitz makes my day by writing: + # """ + # FYI. See the keyword module in the stdlib. This could be as simple as: + # + # if keyword.iskeyword(proto_field_name): + # return proto_field_name + "_" + # return proto_field_name + # """ + # Kenton says: The above is a BAD IDEA. People rely on being able to use + # getattr() and setattr() to reflectively manipulate field values. If we + # rename the properties, then every such user has to also make sure to apply + # the same transformation. Note that currently if you name a field "yield", + # you can still access it just fine using getattr/setattr -- it's not even + # that cumbersome to do so. + # TODO(kenton): Remove this method entirely if/when everyone agrees with my + # position. + return proto_field_name + + +def _AddSlots(message_descriptor, dictionary): + """Adds a __slots__ entry to dictionary, containing the names of all valid + attributes for this message type. + + Args: + message_descriptor: A Descriptor instance describing this message type. + dictionary: Class dictionary to which we'll add a '__slots__' entry. + """ + dictionary['__slots__'] = ['_cached_byte_size', + '_cached_byte_size_dirty', + '_fields', + '_unknown_fields', + '_unknown_field_set', + '_is_present_in_parent', + '_listener', + '_listener_for_children', + '__weakref__', + '_oneofs'] + + +def _IsMessageSetExtension(field): + return (field.is_extension and + field.containing_type.has_options and + field.containing_type.GetOptions().message_set_wire_format and + field.type == _FieldDescriptor.TYPE_MESSAGE and + field.label == _FieldDescriptor.LABEL_OPTIONAL) + + +def _IsMapField(field): + return (field.type == _FieldDescriptor.TYPE_MESSAGE and + field.message_type.has_options and + field.message_type.GetOptions().map_entry) + + +def _IsMessageMapField(field): + value_type = field.message_type.fields_by_name['value'] + return value_type.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE + + +def _AttachFieldHelpers(cls, field_descriptor): + is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED) + is_packable = (is_repeated and + wire_format.IsTypePackable(field_descriptor.type)) + is_proto3 = field_descriptor.containing_type.syntax == 'proto3' + if not is_packable: + is_packed = False + elif field_descriptor.containing_type.syntax == 'proto2': + is_packed = (field_descriptor.has_options and + field_descriptor.GetOptions().packed) + else: + has_packed_false = (field_descriptor.has_options and + field_descriptor.GetOptions().HasField('packed') and + field_descriptor.GetOptions().packed == False) + is_packed = not has_packed_false + is_map_entry = _IsMapField(field_descriptor) + + if is_map_entry: + field_encoder = encoder.MapEncoder(field_descriptor) + sizer = encoder.MapSizer(field_descriptor, + _IsMessageMapField(field_descriptor)) + elif _IsMessageSetExtension(field_descriptor): + field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number) + sizer = encoder.MessageSetItemSizer(field_descriptor.number) + else: + field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type]( + field_descriptor.number, is_repeated, is_packed) + sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type]( + field_descriptor.number, is_repeated, is_packed) + + field_descriptor._encoder = field_encoder + field_descriptor._sizer = sizer + field_descriptor._default_constructor = _DefaultValueConstructorForField( + field_descriptor) + + def AddDecoder(wiretype, is_packed): + tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype) + decode_type = 
field_descriptor.type + if (decode_type == _FieldDescriptor.TYPE_ENUM and + type_checkers.SupportsOpenEnums(field_descriptor)): + decode_type = _FieldDescriptor.TYPE_INT32 + + oneof_descriptor = None + clear_if_default = False + if field_descriptor.containing_oneof is not None: + oneof_descriptor = field_descriptor + elif (is_proto3 and not is_repeated and + field_descriptor.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE): + clear_if_default = True + + if is_map_entry: + is_message_map = _IsMessageMapField(field_descriptor) + + field_decoder = decoder.MapDecoder( + field_descriptor, _GetInitializeDefaultForMap(field_descriptor), + is_message_map) + elif decode_type == _FieldDescriptor.TYPE_STRING: + field_decoder = decoder.StringDecoder( + field_descriptor.number, is_repeated, is_packed, + field_descriptor, field_descriptor._default_constructor, + clear_if_default) + elif field_descriptor.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + field_decoder = type_checkers.TYPE_TO_DECODER[decode_type]( + field_descriptor.number, is_repeated, is_packed, + field_descriptor, field_descriptor._default_constructor) + else: + field_decoder = type_checkers.TYPE_TO_DECODER[decode_type]( + field_descriptor.number, is_repeated, is_packed, + # pylint: disable=protected-access + field_descriptor, field_descriptor._default_constructor, + clear_if_default) + + cls._decoders_by_tag[tag_bytes] = (field_decoder, oneof_descriptor) + + AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type], + False) + + if is_repeated and wire_format.IsTypePackable(field_descriptor.type): + # To support wire compatibility of adding packed = true, add a decoder for + # packed values regardless of the field's options. + AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True) + + +def _AddClassAttributesForNestedExtensions(descriptor, dictionary): + extensions = descriptor.extensions_by_name + for extension_name, extension_field in extensions.items(): + assert extension_name not in dictionary + dictionary[extension_name] = extension_field + + +def _AddEnumValues(descriptor, cls): + """Sets class-level attributes for all enum fields defined in this message. + + Also exporting a class-level object that can name enum values. + + Args: + descriptor: Descriptor object for this message type. + cls: Class we're constructing for this message type. + """ + for enum_type in descriptor.enum_types: + setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type)) + for enum_value in enum_type.values: + setattr(cls, enum_value.name, enum_value.number) + + +def _GetInitializeDefaultForMap(field): + if field.label != _FieldDescriptor.LABEL_REPEATED: + raise ValueError('map_entry set on non-repeated field %s' % ( + field.name)) + fields_by_name = field.message_type.fields_by_name + key_checker = type_checkers.GetTypeChecker(fields_by_name['key']) + + value_field = fields_by_name['value'] + if _IsMessageMapField(field): + def MakeMessageMapDefault(message): + return containers.MessageMap( + message._listener_for_children, value_field.message_type, key_checker, + field.message_type) + return MakeMessageMapDefault + else: + value_checker = type_checkers.GetTypeChecker(value_field) + def MakePrimitiveMapDefault(message): + return containers.ScalarMap( + message._listener_for_children, key_checker, value_checker, + field.message_type) + return MakePrimitiveMapDefault + +def _DefaultValueConstructorForField(field): + """Returns a function which returns a default value for a field. 
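# The enum plumbing above (_AddEnumValues) is what gives every generated class
# both plain integer constants and an EnumTypeWrapper that maps names to
# numbers and back. A small illustration with a standard generated type whose
# nested enum is called Type (descriptor_pb2 is used only as a stand-in):
from google.protobuf import descriptor_pb2

FDP = descriptor_pb2.FieldDescriptorProto
assert FDP.Type.Value('TYPE_INT32') == FDP.TYPE_INT32   # class-level constant
assert FDP.Type.Name(FDP.TYPE_INT32) == 'TYPE_INT32'    # wrapper resolves names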
+ + Args: + field: FieldDescriptor object for this field. + + The returned function has one argument: + message: Message instance containing this field, or a weakref proxy + of same. + + That function in turn returns a default value for this field. The default + value may refer back to |message| via a weak reference. + """ + + if _IsMapField(field): + return _GetInitializeDefaultForMap(field) + + if field.label == _FieldDescriptor.LABEL_REPEATED: + if field.has_default_value and field.default_value != []: + raise ValueError('Repeated field default value not empty list: %s' % ( + field.default_value)) + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + # We can't look at _concrete_class yet since it might not have + # been set. (Depends on order in which we initialize the classes). + message_type = field.message_type + def MakeRepeatedMessageDefault(message): + return containers.RepeatedCompositeFieldContainer( + message._listener_for_children, field.message_type) + return MakeRepeatedMessageDefault + else: + type_checker = type_checkers.GetTypeChecker(field) + def MakeRepeatedScalarDefault(message): + return containers.RepeatedScalarFieldContainer( + message._listener_for_children, type_checker) + return MakeRepeatedScalarDefault + + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + # _concrete_class may not yet be initialized. + message_type = field.message_type + def MakeSubMessageDefault(message): + assert getattr(message_type, '_concrete_class', None), ( + 'Uninitialized concrete class found for field %r (message type %r)' + % (field.full_name, message_type.full_name)) + result = message_type._concrete_class() + result._SetListener( + _OneofListener(message, field) + if field.containing_oneof is not None + else message._listener_for_children) + return result + return MakeSubMessageDefault + + def MakeScalarDefault(message): + # TODO(protobuf-team): This may be broken since there may not be + # default_value. Combine with has_default_value somehow. + return field.default_value + return MakeScalarDefault + + +def _ReraiseTypeErrorWithFieldName(message_name, field_name): + """Re-raise the currently-handled TypeError with the field name added.""" + exc = sys.exc_info()[1] + if len(exc.args) == 1 and type(exc) is TypeError: + # simple TypeError; add field name to exception message + exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name)) + + # re-raise possibly-amended exception with original traceback: + raise exc.with_traceback(sys.exc_info()[2]) + + +def _AddInitMethod(message_descriptor, cls): + """Adds an __init__ method to cls.""" + + def _GetIntegerEnumValue(enum_type, value): + """Convert a string or integer enum value to an integer. + + If the value is a string, it is converted to the enum value in + enum_type with the same name. If the value is not a string, it's + returned as-is. (No conversion or bounds-checking is done.) + """ + if isinstance(value, str): + try: + return enum_type.values_by_name[value].number + except KeyError: + raise ValueError('Enum type %s: unknown label "%s"' % ( + enum_type.full_name, value)) + return value + + def init(self, **kwargs): + self._cached_byte_size = 0 + self._cached_byte_size_dirty = len(kwargs) > 0 + self._fields = {} + # Contains a mapping from oneof field descriptors to the descriptor + # of the currently set field in that oneof field. + self._oneofs = {} + + # _unknown_fields is () when empty for efficiency, and will be turned into + # a list if fields are added. 
+ self._unknown_fields = () + # _unknown_field_set is None when empty for efficiency, and will be + # turned into UnknownFieldSet struct if fields are added. + self._unknown_field_set = None # pylint: disable=protected-access + self._is_present_in_parent = False + self._listener = message_listener_mod.NullMessageListener() + self._listener_for_children = _Listener(self) + for field_name, field_value in kwargs.items(): + field = _GetFieldByName(message_descriptor, field_name) + if field is None: + raise TypeError('%s() got an unexpected keyword argument "%s"' % + (message_descriptor.name, field_name)) + if field_value is None: + # field=None is the same as no field at all. + continue + if field.label == _FieldDescriptor.LABEL_REPEATED: + copy = field._default_constructor(self) + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite + if _IsMapField(field): + if _IsMessageMapField(field): + for key in field_value: + copy[key].MergeFrom(field_value[key]) + else: + copy.update(field_value) + else: + for val in field_value: + if isinstance(val, dict): + copy.add(**val) + else: + copy.add().MergeFrom(val) + else: # Scalar + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + field_value = [_GetIntegerEnumValue(field.enum_type, val) + for val in field_value] + copy.extend(field_value) + self._fields[field] = copy + elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + copy = field._default_constructor(self) + new_val = field_value + if isinstance(field_value, dict): + new_val = field.message_type._concrete_class(**field_value) + try: + copy.MergeFrom(new_val) + except TypeError: + _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) + self._fields[field] = copy + else: + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + field_value = _GetIntegerEnumValue(field.enum_type, field_value) + try: + setattr(self, field_name, field_value) + except TypeError: + _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) + + init.__module__ = None + init.__doc__ = None + cls.__init__ = init + + +def _GetFieldByName(message_descriptor, field_name): + """Returns a field descriptor by field name. + + Args: + message_descriptor: A Descriptor describing all fields in message. + field_name: The name of the field to retrieve. + Returns: + The field descriptor associated with the field name. + """ + try: + return message_descriptor.fields_by_name[field_name] + except KeyError: + raise ValueError('Protocol message %s has no "%s" field.' % + (message_descriptor.name, field_name)) + + +def _AddPropertiesForFields(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + for field in descriptor.fields: + _AddPropertiesForField(field, cls) + + if descriptor.is_extendable: + # _ExtensionDict is just an adaptor with no state so we allocate a new one + # every time it is accessed. + cls.Extensions = property(lambda self: _ExtensionDict(self)) + + +def _AddPropertiesForField(field, cls): + """Adds a public property for a protocol message field. + Clients can use this property to get and (in the case + of non-repeated scalar fields) directly set the value + of a protocol message field. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + # Catch it if we add other types that we should + # handle specially here. 
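# The generated __init__ above accepts plain scalars, iterables for repeated
# fields, dicts for sub-messages, enum labels as strings, and treats None the
# same as leaving a field unset. For example, with descriptor_pb2:
from google.protobuf import descriptor_pb2

proto = descriptor_pb2.DescriptorProto(
    name='Thing',
    field=[dict(name='id', number=1, type='TYPE_INT32')],  # dict -> FieldDescriptorProto
)
assert proto.field[0].type == descriptor_pb2.FieldDescriptorProto.TYPE_INT32
assert descriptor_pb2.DescriptorProto(name=None).name == ''  # None means "not given"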
+ assert _FieldDescriptor.MAX_CPPTYPE == 10 + + constant_name = field.name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, field.number) + + if field.label == _FieldDescriptor.LABEL_REPEATED: + _AddPropertiesForRepeatedField(field, cls) + elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + _AddPropertiesForNonRepeatedCompositeField(field, cls) + else: + _AddPropertiesForNonRepeatedScalarField(field, cls) + + +class _FieldProperty(property): + __slots__ = ('DESCRIPTOR',) + + def __init__(self, descriptor, getter, setter, doc): + property.__init__(self, getter, setter, doc=doc) + self.DESCRIPTOR = descriptor + + +def _AddPropertiesForRepeatedField(field, cls): + """Adds a public property for a "repeated" protocol message field. Clients + can use this property to get the value of the field, which will be either a + RepeatedScalarFieldContainer or RepeatedCompositeFieldContainer (see + below). + + Note that when clients add values to these containers, we perform + type-checking in the case of repeated scalar fields, and we also set any + necessary "has" bits as a side-effect. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + + def getter(self): + field_value = self._fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + field_value = self._fields.setdefault(field, field_value) + return field_value + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + # We define a setter just so we can throw an exception with a more + # helpful error message. + def setter(self, new_value): + raise AttributeError('Assignment not allowed to repeated field ' + '"%s" in protocol message object.' % proto_field_name) + + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForNonRepeatedScalarField(field, cls): + """Adds a public property for a nonrepeated, scalar protocol message field. + Clients can use this property to get and directly set the value of the field. + Note that when the client sets the value of a field by using this property, + all necessary "has" bits are set as a side-effect, and we also perform + type-checking. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + type_checker = type_checkers.GetTypeChecker(field) + default_value = field.default_value + is_proto3 = field.containing_type.syntax == 'proto3' + + def getter(self): + # TODO(protobuf-team): This may be broken since there may not be + # default_value. Combine with has_default_value somehow. + return self._fields.get(field, default_value) + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' 
% proto_field_name + + clear_when_set_to_default = is_proto3 and not field.containing_oneof + + def field_setter(self, new_value): + # pylint: disable=protected-access + # Testing the value for truthiness captures all of the proto3 defaults + # (0, 0.0, enum 0, and False). + try: + new_value = type_checker.CheckValue(new_value) + except TypeError as e: + raise TypeError( + 'Cannot set %s to %.1024r: %s' % (field.full_name, new_value, e)) + if clear_when_set_to_default and not new_value: + self._fields.pop(field, None) + else: + self._fields[field] = new_value + # Check _cached_byte_size_dirty inline to improve performance, since scalar + # setters are called frequently. + if not self._cached_byte_size_dirty: + self._Modified() + + if field.containing_oneof: + def setter(self, new_value): + field_setter(self, new_value) + self._UpdateOneofState(field) + else: + setter = field_setter + + setter.__module__ = None + setter.__doc__ = 'Setter for %s.' % proto_field_name + + # Add a property to encapsulate the getter/setter. + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForNonRepeatedCompositeField(field, cls): + """Adds a public property for a nonrepeated, composite protocol message field. + A composite field is a "group" or "message" field. + + Clients can use this property to get the value of the field, but cannot + assign to the property directly. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + # TODO(robinson): Remove duplication with similar method + # for non-repeated scalars. + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + + def getter(self): + field_value = self._fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + field_value = self._fields.setdefault(field, field_value) + return field_value + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + # We define a setter just so we can throw an exception with a more + # helpful error message. + def setter(self, new_value): + raise AttributeError('Assignment not allowed to composite field ' + '"%s" in protocol message object.' % proto_field_name) + + # Add a property to encapsulate the getter. + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForExtensions(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + extensions = descriptor.extensions_by_name + for extension_name, extension_field in extensions.items(): + constant_name = extension_name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, extension_field.number) + + # TODO(amauryfa): Migrate all users of these attributes to functions like + # pool.FindExtensionByNumber(descriptor). + if descriptor.file is not None: + # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available. 
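# As the property helpers above enforce, scalar fields are type-checked on
# assignment, while message-typed fields can only be mutated in place or
# replaced via CopyFrom; direct assignment raises AttributeError. For instance:
from google.protobuf import descriptor_pb2

proto = descriptor_pb2.FileDescriptorProto()
proto.options.java_package = 'com.example'        # child created lazily by the getter
assert proto.HasField('options')
try:
  proto.options = descriptor_pb2.FileOptions()    # rejected by the composite setter
except AttributeError:
  pass
proto.options.CopyFrom(descriptor_pb2.FileOptions())  # the supported replacement path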
+ pool = descriptor.file.pool + cls._extensions_by_number = pool._extensions_by_number[descriptor] + cls._extensions_by_name = pool._extensions_by_name[descriptor] + +def _AddStaticMethods(cls): + # TODO(robinson): This probably needs to be thread-safe(?) + def RegisterExtension(extension_handle): + extension_handle.containing_type = cls.DESCRIPTOR + # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available. + # pylint: disable=protected-access + cls.DESCRIPTOR.file.pool._AddExtensionDescriptor(extension_handle) + _AttachFieldHelpers(cls, extension_handle) + cls.RegisterExtension = staticmethod(RegisterExtension) + + def FromString(s): + message = cls() + message.MergeFromString(s) + return message + cls.FromString = staticmethod(FromString) + + +def _IsPresent(item): + """Given a (FieldDescriptor, value) tuple from _fields, return true if the + value should be included in the list returned by ListFields().""" + + if item[0].label == _FieldDescriptor.LABEL_REPEATED: + return bool(item[1]) + elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + return item[1]._is_present_in_parent + else: + return True + + +def _AddListFieldsMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def ListFields(self): + all_fields = [item for item in self._fields.items() if _IsPresent(item)] + all_fields.sort(key = lambda item: item[0].number) + return all_fields + + cls.ListFields = ListFields + +_PROTO3_ERROR_TEMPLATE = \ + ('Protocol message %s has no non-repeated submessage field "%s" ' + 'nor marked as optional') +_PROTO2_ERROR_TEMPLATE = 'Protocol message %s has no non-repeated field "%s"' + +def _AddHasFieldMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + is_proto3 = (message_descriptor.syntax == "proto3") + error_msg = _PROTO3_ERROR_TEMPLATE if is_proto3 else _PROTO2_ERROR_TEMPLATE + + hassable_fields = {} + for field in message_descriptor.fields: + if field.label == _FieldDescriptor.LABEL_REPEATED: + continue + # For proto3, only submessages and fields inside a oneof have presence. + if (is_proto3 and field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE and + not field.containing_oneof): + continue + hassable_fields[field.name] = field + + # Has methods are supported for oneof descriptors. + for oneof in message_descriptor.oneofs: + hassable_fields[oneof.name] = oneof + + def HasField(self, field_name): + try: + field = hassable_fields[field_name] + except KeyError: + raise ValueError(error_msg % (message_descriptor.full_name, field_name)) + + if isinstance(field, descriptor_mod.OneofDescriptor): + try: + return HasField(self, self._oneofs[field].name) + except KeyError: + return False + else: + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + value = self._fields.get(field) + return value is not None and value._is_present_in_parent + else: + return field in self._fields + + cls.HasField = HasField + + +def _AddClearFieldMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def ClearField(self, field_name): + try: + field = message_descriptor.fields_by_name[field_name] + except KeyError: + try: + field = message_descriptor.oneofs_by_name[field_name] + if field in self._oneofs: + field = self._oneofs[field] + else: + return + except KeyError: + raise ValueError('Protocol message %s has no "%s" field.' % + (message_descriptor.name, field_name)) + + if field in self._fields: + # To match the C++ implementation, we need to invalidate iterators + # for map fields when ClearField() happens. 
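# HasField(), as assembled above, covers singular message fields, proto2
# scalars, and oneof names; asking about a repeated field raises ValueError,
# and ClearField() accepts either a field name or a oneof name. For example:
from google.protobuf import descriptor_pb2

fdp = descriptor_pb2.FieldDescriptorProto(number=3)
assert fdp.HasField('number')
fdp.ClearField('number')
assert not fdp.HasField('number') and fdp.number == 0   # back to the default
try:
  descriptor_pb2.DescriptorProto().HasField('field')     # 'field' is repeated
except ValueError:
  pass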
+ if hasattr(self._fields[field], 'InvalidateIterators'): + self._fields[field].InvalidateIterators() + + # Note: If the field is a sub-message, its listener will still point + # at us. That's fine, because the worst than can happen is that it + # will call _Modified() and invalidate our byte size. Big deal. + del self._fields[field] + + if self._oneofs.get(field.containing_oneof, None) is field: + del self._oneofs[field.containing_oneof] + + # Always call _Modified() -- even if nothing was changed, this is + # a mutating method, and thus calling it should cause the field to become + # present in the parent message. + self._Modified() + + cls.ClearField = ClearField + + +def _AddClearExtensionMethod(cls): + """Helper for _AddMessageMethods().""" + def ClearExtension(self, extension_handle): + extension_dict._VerifyExtensionHandle(self, extension_handle) + + # Similar to ClearField(), above. + if extension_handle in self._fields: + del self._fields[extension_handle] + self._Modified() + cls.ClearExtension = ClearExtension + + +def _AddHasExtensionMethod(cls): + """Helper for _AddMessageMethods().""" + def HasExtension(self, extension_handle): + extension_dict._VerifyExtensionHandle(self, extension_handle) + if extension_handle.label == _FieldDescriptor.LABEL_REPEATED: + raise KeyError('"%s" is repeated.' % extension_handle.full_name) + + if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + value = self._fields.get(extension_handle) + return value is not None and value._is_present_in_parent + else: + return extension_handle in self._fields + cls.HasExtension = HasExtension + +def _InternalUnpackAny(msg): + """Unpacks Any message and returns the unpacked message. + + This internal method is different from public Any Unpack method which takes + the target message as argument. _InternalUnpackAny method does not have + target message type and need to find the message type in descriptor pool. + + Args: + msg: An Any message to be unpacked. + + Returns: + The unpacked message. + """ + # TODO(amauryfa): Don't use the factory of generated messages. + # To make Any work with custom factories, use the message factory of the + # parent message. + # pylint: disable=g-import-not-at-top + from google.protobuf import symbol_database + factory = symbol_database.Default() + + type_url = msg.type_url + + if not type_url: + return None + + # TODO(haberman): For now we just strip the hostname. Better logic will be + # required. + type_name = type_url.split('/')[-1] + descriptor = factory.pool.FindMessageTypeByName(type_name) + + if descriptor is None: + return None + + message_class = factory.GetPrototype(descriptor) + message = message_class() + + message.ParseFromString(msg.value) + return message + + +def _AddEqualsMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __eq__(self, other): + if (not isinstance(other, message_mod.Message) or + other.DESCRIPTOR != self.DESCRIPTOR): + return False + + if self is other: + return True + + if self.DESCRIPTOR.full_name == _AnyFullTypeName: + any_a = _InternalUnpackAny(self) + any_b = _InternalUnpackAny(other) + if any_a and any_b: + return any_a == any_b + + if not self.ListFields() == other.ListFields(): + return False + + # TODO(jieluo): Fix UnknownFieldSet to consider MessageSet extensions, + # then use it for the comparison. 
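# __eq__ as installed above compares messages field by field (plus any unknown
# fields); google.protobuf.Any values additionally compare by their unpacked
# payload when both sides can be resolved. A short sketch with the Any helpers:
from google.protobuf import any_pb2, duration_pb2

a, b = any_pb2.Any(), any_pb2.Any()
a.Pack(duration_pb2.Duration(seconds=5))
b.Pack(duration_pb2.Duration(seconds=5))
assert a == b              # structural equality, not identity
assert a is not b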
+ unknown_fields = list(self._unknown_fields) + unknown_fields.sort() + other_unknown_fields = list(other._unknown_fields) + other_unknown_fields.sort() + return unknown_fields == other_unknown_fields + + cls.__eq__ = __eq__ + + +def _AddStrMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __str__(self): + return text_format.MessageToString(self) + cls.__str__ = __str__ + + +def _AddReprMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __repr__(self): + return text_format.MessageToString(self) + cls.__repr__ = __repr__ + + +def _AddUnicodeMethod(unused_message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def __unicode__(self): + return text_format.MessageToString(self, as_utf8=True).decode('utf-8') + cls.__unicode__ = __unicode__ + + +def _BytesForNonRepeatedElement(value, field_number, field_type): + """Returns the number of bytes needed to serialize a non-repeated element. + The returned byte count includes space for tag information and any + other additional space associated with serializing value. + + Args: + value: Value we're serializing. + field_number: Field number of this value. (Since the field number + is stored as part of a varint-encoded tag, this has an impact + on the total bytes required to serialize the value). + field_type: The type of the field. One of the TYPE_* constants + within FieldDescriptor. + """ + try: + fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type] + return fn(field_number, value) + except KeyError: + raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) + + +def _AddByteSizeMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def ByteSize(self): + if not self._cached_byte_size_dirty: + return self._cached_byte_size + + size = 0 + descriptor = self.DESCRIPTOR + if descriptor.GetOptions().map_entry: + # Fields of map entry should always be serialized. + size = descriptor.fields_by_name['key']._sizer(self.key) + size += descriptor.fields_by_name['value']._sizer(self.value) + else: + for field_descriptor, field_value in self.ListFields(): + size += field_descriptor._sizer(field_value) + for tag_bytes, value_bytes in self._unknown_fields: + size += len(tag_bytes) + len(value_bytes) + + self._cached_byte_size = size + self._cached_byte_size_dirty = False + self._listener_for_children.dirty = False + return size + + cls.ByteSize = ByteSize + + +def _AddSerializeToStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def SerializeToString(self, **kwargs): + # Check if the message has all of its required fields set. 
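# ByteSize() caches its result until the message is next modified, and
# SerializeToString() is SerializePartialToString() plus the required-field
# check (only the former can raise EncodeError). A quick consistency check:
from google.protobuf import descriptor_pb2

msg = descriptor_pb2.FieldDescriptorProto(name='id', number=1)
data = msg.SerializeToString()
assert msg.ByteSize() == len(data)               # size of the wire encoding
assert msg.SerializePartialToString() == data    # identical bytes here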
+ if not self.IsInitialized(): + raise message_mod.EncodeError( + 'Message %s is missing required fields: %s' % ( + self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors()))) + return self.SerializePartialToString(**kwargs) + cls.SerializeToString = SerializeToString + + +def _AddSerializePartialToStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def SerializePartialToString(self, **kwargs): + out = BytesIO() + self._InternalSerialize(out.write, **kwargs) + return out.getvalue() + cls.SerializePartialToString = SerializePartialToString + + def InternalSerialize(self, write_bytes, deterministic=None): + if deterministic is None: + deterministic = ( + api_implementation.IsPythonDefaultSerializationDeterministic()) + else: + deterministic = bool(deterministic) + + descriptor = self.DESCRIPTOR + if descriptor.GetOptions().map_entry: + # Fields of map entry should always be serialized. + descriptor.fields_by_name['key']._encoder( + write_bytes, self.key, deterministic) + descriptor.fields_by_name['value']._encoder( + write_bytes, self.value, deterministic) + else: + for field_descriptor, field_value in self.ListFields(): + field_descriptor._encoder(write_bytes, field_value, deterministic) + for tag_bytes, value_bytes in self._unknown_fields: + write_bytes(tag_bytes) + write_bytes(value_bytes) + cls._InternalSerialize = InternalSerialize + + +def _AddMergeFromStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def MergeFromString(self, serialized): + serialized = memoryview(serialized) + length = len(serialized) + try: + if self._InternalParse(serialized, 0, length) != length: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise message_mod.DecodeError('Unexpected end-group tag.') + except (IndexError, TypeError): + # Now ord(buf[p:p+1]) == ord('') gets TypeError. + raise message_mod.DecodeError('Truncated message.') + except struct.error as e: + raise message_mod.DecodeError(e) + return length # Return this for legacy reasons. + cls.MergeFromString = MergeFromString + + local_ReadTag = decoder.ReadTag + local_SkipField = decoder.SkipField + decoders_by_tag = cls._decoders_by_tag + + def InternalParse(self, buffer, pos, end): + """Create a message from serialized bytes. + + Args: + self: Message, instance of the proto message object. + buffer: memoryview of the serialized data. + pos: int, position to start in the serialized data. + end: int, end position of the serialized data. + + Returns: + Message object. + """ + # Guard against internal misuse, since this function is called internally + # quite extensively, and its easy to accidentally pass bytes. 
+ assert isinstance(buffer, memoryview) + self._Modified() + field_dict = self._fields + # pylint: disable=protected-access + unknown_field_set = self._unknown_field_set + while pos != end: + (tag_bytes, new_pos) = local_ReadTag(buffer, pos) + field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None)) + if field_decoder is None: + if not self._unknown_fields: # pylint: disable=protected-access + self._unknown_fields = [] # pylint: disable=protected-access + if unknown_field_set is None: + # pylint: disable=protected-access + self._unknown_field_set = containers.UnknownFieldSet() + # pylint: disable=protected-access + unknown_field_set = self._unknown_field_set + # pylint: disable=protected-access + (tag, _) = decoder._DecodeVarint(tag_bytes, 0) + field_number, wire_type = wire_format.UnpackTag(tag) + if field_number == 0: + raise message_mod.DecodeError('Field number 0 is illegal.') + # TODO(jieluo): remove old_pos. + old_pos = new_pos + (data, new_pos) = decoder._DecodeUnknownField( + buffer, new_pos, wire_type) # pylint: disable=protected-access + if new_pos == -1: + return pos + # pylint: disable=protected-access + unknown_field_set._add(field_number, wire_type, data) + # TODO(jieluo): remove _unknown_fields. + new_pos = local_SkipField(buffer, old_pos, end, tag_bytes) + if new_pos == -1: + return pos + self._unknown_fields.append( + (tag_bytes, buffer[old_pos:new_pos].tobytes())) + pos = new_pos + else: + pos = field_decoder(buffer, new_pos, end, self, field_dict) + if field_desc: + self._UpdateOneofState(field_desc) + return pos + cls._InternalParse = InternalParse + + +def _AddIsInitializedMethod(message_descriptor, cls): + """Adds the IsInitialized and FindInitializationError methods to the + protocol message class.""" + + required_fields = [field for field in message_descriptor.fields + if field.label == _FieldDescriptor.LABEL_REQUIRED] + + def IsInitialized(self, errors=None): + """Checks if all required fields of a message are set. + + Args: + errors: A list which, if provided, will be populated with the field + paths of all missing required fields. + + Returns: + True iff the specified message has all required fields set. + """ + + # Performance is critical so we avoid HasField() and ListFields(). + + for field in required_fields: + if (field not in self._fields or + (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and + not self._fields[field]._is_present_in_parent)): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + + for field, value in list(self._fields.items()): # dict can change size! + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if field.label == _FieldDescriptor.LABEL_REPEATED: + if (field.message_type.has_options and + field.message_type.GetOptions().map_entry): + continue + for element in value: + if not element.IsInitialized(): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + elif value._is_present_in_parent and not value.IsInitialized(): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + + return True + + cls.IsInitialized = IsInitialized + + def FindInitializationErrors(self): + """Finds required fields which are not initialized. + + Returns: + A list of strings. Each string is a path to an uninitialized field from + the top-level message, e.g. "foo.bar[5].baz". 
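# MergeFromString(), wired up above, returns the number of bytes consumed and
# raises DecodeError on truncated input; ParseFromString() (from the Message
# base class) is simply Clear() followed by MergeFromString(). For example:
from google.protobuf import descriptor_pb2, message as message_mod

src = descriptor_pb2.FieldDescriptorProto(name='id', number=1)
data = src.SerializeToString()
dst = descriptor_pb2.FieldDescriptorProto()
assert dst.MergeFromString(data) == len(data)
assert dst == src
try:
  descriptor_pb2.FieldDescriptorProto().MergeFromString(data[:-1])
except message_mod.DecodeError:
  pass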
+ """ + + errors = [] # simplify things + + for field in required_fields: + if not self.HasField(field.name): + errors.append(field.name) + + for field, value in self.ListFields(): + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if field.is_extension: + name = '(%s)' % field.full_name + else: + name = field.name + + if _IsMapField(field): + if _IsMessageMapField(field): + for key in value: + element = value[key] + prefix = '%s[%s].' % (name, key) + sub_errors = element.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + else: + # ScalarMaps can't have any initialization errors. + pass + elif field.label == _FieldDescriptor.LABEL_REPEATED: + for i in range(len(value)): + element = value[i] + prefix = '%s[%d].' % (name, i) + sub_errors = element.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + else: + prefix = name + '.' + sub_errors = value.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + + return errors + + cls.FindInitializationErrors = FindInitializationErrors + + +def _FullyQualifiedClassName(klass): + module = klass.__module__ + name = getattr(klass, '__qualname__', klass.__name__) + if module in (None, 'builtins', '__builtin__'): + return name + return module + '.' + name + + +def _AddMergeFromMethod(cls): + LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED + CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE + + def MergeFrom(self, msg): + if not isinstance(msg, cls): + raise TypeError( + 'Parameter to MergeFrom() must be instance of same class: ' + 'expected %s got %s.' % (_FullyQualifiedClassName(cls), + _FullyQualifiedClassName(msg.__class__))) + + assert msg is not self + self._Modified() + + fields = self._fields + + for field, value in msg._fields.items(): + if field.label == LABEL_REPEATED: + field_value = fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + fields[field] = field_value + field_value.MergeFrom(value) + elif field.cpp_type == CPPTYPE_MESSAGE: + if value._is_present_in_parent: + field_value = fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + fields[field] = field_value + field_value.MergeFrom(value) + else: + self._fields[field] = value + if field.containing_oneof: + self._UpdateOneofState(field) + + if msg._unknown_fields: + if not self._unknown_fields: + self._unknown_fields = [] + self._unknown_fields.extend(msg._unknown_fields) + # pylint: disable=protected-access + if self._unknown_field_set is None: + self._unknown_field_set = containers.UnknownFieldSet() + self._unknown_field_set._extend(msg._unknown_field_set) + + cls.MergeFrom = MergeFrom + + +def _AddWhichOneofMethod(message_descriptor, cls): + def WhichOneof(self, oneof_name): + """Returns the name of the currently set field inside a oneof, or None.""" + try: + field = message_descriptor.oneofs_by_name[oneof_name] + except KeyError: + raise ValueError( + 'Protocol message has no oneof "%s" field.' % oneof_name) + + nested_field = self._oneofs.get(field, None) + if nested_field is not None and self.HasField(nested_field.name): + return nested_field.name + else: + return None + + cls.WhichOneof = WhichOneof + + +def _Clear(self): + # Clear fields. 
+ self._fields = {} + self._unknown_fields = () + # pylint: disable=protected-access + if self._unknown_field_set is not None: + self._unknown_field_set._clear() + self._unknown_field_set = None + + self._oneofs = {} + self._Modified() + + +def _UnknownFields(self): + if self._unknown_field_set is None: # pylint: disable=protected-access + # pylint: disable=protected-access + self._unknown_field_set = containers.UnknownFieldSet() + return self._unknown_field_set # pylint: disable=protected-access + + +def _DiscardUnknownFields(self): + self._unknown_fields = [] + self._unknown_field_set = None # pylint: disable=protected-access + for field, value in self.ListFields(): + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if _IsMapField(field): + if _IsMessageMapField(field): + for key in value: + value[key].DiscardUnknownFields() + elif field.label == _FieldDescriptor.LABEL_REPEATED: + for sub_message in value: + sub_message.DiscardUnknownFields() + else: + value.DiscardUnknownFields() + + +def _SetListener(self, listener): + if listener is None: + self._listener = message_listener_mod.NullMessageListener() + else: + self._listener = listener + + +def _AddMessageMethods(message_descriptor, cls): + """Adds implementations of all Message methods to cls.""" + _AddListFieldsMethod(message_descriptor, cls) + _AddHasFieldMethod(message_descriptor, cls) + _AddClearFieldMethod(message_descriptor, cls) + if message_descriptor.is_extendable: + _AddClearExtensionMethod(cls) + _AddHasExtensionMethod(cls) + _AddEqualsMethod(message_descriptor, cls) + _AddStrMethod(message_descriptor, cls) + _AddReprMethod(message_descriptor, cls) + _AddUnicodeMethod(message_descriptor, cls) + _AddByteSizeMethod(message_descriptor, cls) + _AddSerializeToStringMethod(message_descriptor, cls) + _AddSerializePartialToStringMethod(message_descriptor, cls) + _AddMergeFromStringMethod(message_descriptor, cls) + _AddIsInitializedMethod(message_descriptor, cls) + _AddMergeFromMethod(cls) + _AddWhichOneofMethod(message_descriptor, cls) + # Adds methods which do not depend on cls. + cls.Clear = _Clear + cls.UnknownFields = _UnknownFields + cls.DiscardUnknownFields = _DiscardUnknownFields + cls._SetListener = _SetListener + + +def _AddPrivateHelperMethods(message_descriptor, cls): + """Adds implementation of private helper methods to cls.""" + + def Modified(self): + """Sets the _cached_byte_size_dirty bit to true, + and propagates this to our listener iff this was a state change. + """ + + # Note: Some callers check _cached_byte_size_dirty before calling + # _Modified() as an extra optimization. So, if this method is ever + # changed such that it does stuff even when _cached_byte_size_dirty is + # already true, the callers need to be updated. + if not self._cached_byte_size_dirty: + self._cached_byte_size_dirty = True + self._listener_for_children.dirty = True + self._is_present_in_parent = True + self._listener.Modified() + + def _UpdateOneofState(self, field): + """Sets field as the active field in its containing oneof. + + Will also delete currently active field in the oneof, if it is different + from the argument. Does not mark the message as modified. 
+ """ + other_field = self._oneofs.setdefault(field.containing_oneof, field) + if other_field is not field: + del self._fields[other_field] + self._oneofs[field.containing_oneof] = field + + cls._Modified = Modified + cls.SetInParent = Modified + cls._UpdateOneofState = _UpdateOneofState + + +class _Listener(object): + + """MessageListener implementation that a parent message registers with its + child message. + + In order to support semantics like: + + foo.bar.baz.qux = 23 + assert foo.HasField('bar') + + ...child objects must have back references to their parents. + This helper class is at the heart of this support. + """ + + def __init__(self, parent_message): + """Args: + parent_message: The message whose _Modified() method we should call when + we receive Modified() messages. + """ + # This listener establishes a back reference from a child (contained) object + # to its parent (containing) object. We make this a weak reference to avoid + # creating cyclic garbage when the client finishes with the 'parent' object + # in the tree. + if isinstance(parent_message, weakref.ProxyType): + self._parent_message_weakref = parent_message + else: + self._parent_message_weakref = weakref.proxy(parent_message) + + # As an optimization, we also indicate directly on the listener whether + # or not the parent message is dirty. This way we can avoid traversing + # up the tree in the common case. + self.dirty = False + + def Modified(self): + if self.dirty: + return + try: + # Propagate the signal to our parents iff this is the first field set. + self._parent_message_weakref._Modified() + except ReferenceError: + # We can get here if a client has kept a reference to a child object, + # and is now setting a field on it, but the child's parent has been + # garbage-collected. This is not an error. + pass + + +class _OneofListener(_Listener): + """Special listener implementation for setting composite oneof fields.""" + + def __init__(self, parent_message, field): + """Args: + parent_message: The message whose _Modified() method we should call when + we receive Modified() messages. + field: The descriptor of the field being set in the parent message. + """ + super(_OneofListener, self).__init__(parent_message) + self._field = field + + def Modified(self): + """Also updates the state of the containing oneof in the parent message.""" + try: + self._parent_message_weakref._UpdateOneofState(self._field) + super(_OneofListener, self).Modified() + except ReferenceError: + pass diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/type_checkers.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/type_checkers.py new file mode 100644 index 0000000000..a53e71fe8e --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/type_checkers.py @@ -0,0 +1,435 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides type checking routines. + +This module defines type checking utilities in the forms of dictionaries: + +VALUE_CHECKERS: A dictionary of field types and a value validation object. +TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing + function. +TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization + function. +FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their + corresponding wire types. +TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization + function. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import ctypes +import numbers + +from google.protobuf.internal import decoder +from google.protobuf.internal import encoder +from google.protobuf.internal import wire_format +from google.protobuf import descriptor + +_FieldDescriptor = descriptor.FieldDescriptor + + +def TruncateToFourByteFloat(original): + return ctypes.c_float(original).value + + +def ToShortestFloat(original): + """Returns the shortest float that has same value in wire.""" + # All 4 byte floats have between 6 and 9 significant digits, so we + # start with 6 as the lower bound. + # It has to be iterative because use '.9g' directly can not get rid + # of the noises for most values. For example if set a float_field=0.9 + # use '.9g' will print 0.899999976. + precision = 6 + rounded = float('{0:.{1}g}'.format(original, precision)) + while TruncateToFourByteFloat(rounded) != original: + precision += 1 + rounded = float('{0:.{1}g}'.format(original, precision)) + return rounded + + +def SupportsOpenEnums(field_descriptor): + return field_descriptor.containing_type.syntax == 'proto3' + + +def GetTypeChecker(field): + """Returns a type checker for a message field of the specified types. + + Args: + field: FieldDescriptor object for this field. + + Returns: + An instance of TypeChecker which can be used to verify the types + of values assigned to a field of the specified type. + """ + if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and + field.type == _FieldDescriptor.TYPE_STRING): + return UnicodeValueChecker() + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + if SupportsOpenEnums(field): + # When open enums are supported, any int32 can be assigned. + return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32] + else: + return EnumValueChecker(field.enum_type) + return _VALUE_CHECKERS[field.cpp_type] + + +# None of the typecheckers below make any attempt to guard against people +# subclassing builtin types and doing weird things. 
We're not trying to +# protect against malicious clients here, just people accidentally shooting +# themselves in the foot in obvious ways. +class TypeChecker(object): + + """Type checker used to catch type errors as early as possible + when the client is setting scalar fields in protocol messages. + """ + + def __init__(self, *acceptable_types): + self._acceptable_types = acceptable_types + + def CheckValue(self, proposed_value): + """Type check the provided value and return it. + + The returned value might have been normalized to another type. + """ + if not isinstance(proposed_value, self._acceptable_types): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), self._acceptable_types)) + raise TypeError(message) + return proposed_value + + +class TypeCheckerWithDefault(TypeChecker): + + def __init__(self, default_value, *acceptable_types): + TypeChecker.__init__(self, *acceptable_types) + self._default_value = default_value + + def DefaultValue(self): + return self._default_value + + +class BoolValueChecker(object): + """Type checker used for bool fields.""" + + def CheckValue(self, proposed_value): + if not hasattr(proposed_value, '__index__') or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (bool, int))) + raise TypeError(message) + return bool(proposed_value) + + def DefaultValue(self): + return False + + +# IntValueChecker and its subclasses perform integer type-checks +# and bounds-checks. +class IntValueChecker(object): + + """Checker used for integer fields. Performs type-check and range check.""" + + def CheckValue(self, proposed_value): + if not hasattr(proposed_value, '__index__') or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (int,))) + raise TypeError(message) + + if not self._MIN <= int(proposed_value) <= self._MAX: + raise ValueError('Value out of range: %d' % proposed_value) + # We force all values to int to make alternate implementations where the + # distinction is more significant (e.g. the C++ implementation) simpler. + proposed_value = int(proposed_value) + return proposed_value + + def DefaultValue(self): + return 0 + + +class EnumValueChecker(object): + + """Checker used for enum fields. Performs type-check and range check.""" + + def __init__(self, enum_type): + self._enum_type = enum_type + + def CheckValue(self, proposed_value): + if not isinstance(proposed_value, numbers.Integral): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (int,))) + raise TypeError(message) + if int(proposed_value) not in self._enum_type.values_by_number: + raise ValueError('Unknown enum value: %d' % proposed_value) + return proposed_value + + def DefaultValue(self): + return self._enum_type.values[0].number + + +class UnicodeValueChecker(object): + + """Checker used for string fields. + + Always returns a unicode value, even if the input is of type str. + """ + + def CheckValue(self, proposed_value): + if not isinstance(proposed_value, (bytes, str)): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (bytes, str))) + raise TypeError(message) + + # If the value is of type 'bytes' make sure that it is valid UTF-8 data. 
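+    # Decoding here also converts accepted bytes input into a str, so
+    # CheckValue() always hands text back to the caller.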
+ if isinstance(proposed_value, bytes): + try: + proposed_value = proposed_value.decode('utf-8') + except UnicodeDecodeError: + raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 ' + 'encoding. Non-UTF-8 strings must be converted to ' + 'unicode objects before being added.' % + (proposed_value)) + else: + try: + proposed_value.encode('utf8') + except UnicodeEncodeError: + raise ValueError('%.1024r isn\'t a valid unicode string and ' + 'can\'t be encoded in UTF-8.'% + (proposed_value)) + + return proposed_value + + def DefaultValue(self): + return u"" + + +class Int32ValueChecker(IntValueChecker): + # We're sure to use ints instead of longs here since comparison may be more + # efficient. + _MIN = -2147483648 + _MAX = 2147483647 + + +class Uint32ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 32) - 1 + + +class Int64ValueChecker(IntValueChecker): + _MIN = -(1 << 63) + _MAX = (1 << 63) - 1 + + +class Uint64ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 64) - 1 + + +# The max 4 bytes float is about 3.4028234663852886e+38 +_FLOAT_MAX = float.fromhex('0x1.fffffep+127') +_FLOAT_MIN = -_FLOAT_MAX +_INF = float('inf') +_NEG_INF = float('-inf') + + +class DoubleValueChecker(object): + """Checker used for double fields. + + Performs type-check and range check. + """ + + def CheckValue(self, proposed_value): + """Check and convert proposed_value to float.""" + if (not hasattr(proposed_value, '__float__') and + not hasattr(proposed_value, '__index__')) or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: int, float' % + (proposed_value, type(proposed_value))) + raise TypeError(message) + return float(proposed_value) + + def DefaultValue(self): + return 0.0 + + +class FloatValueChecker(DoubleValueChecker): + """Checker used for float fields. + + Performs type-check and range check. + + Values exceeding a 32-bit float will be converted to inf/-inf. + """ + + def CheckValue(self, proposed_value): + """Check and convert proposed_value to float.""" + converted_value = super().CheckValue(proposed_value) + # This inf rounding matches the C++ proto SafeDoubleToFloat logic. + if converted_value > _FLOAT_MAX: + return _INF + if converted_value < _FLOAT_MIN: + return _NEG_INF + + return TruncateToFourByteFloat(converted_value) + +# Type-checkers for all scalar CPPTYPEs. +_VALUE_CHECKERS = { + _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(), + _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(), + _FieldDescriptor.CPPTYPE_DOUBLE: DoubleValueChecker(), + _FieldDescriptor.CPPTYPE_FLOAT: FloatValueChecker(), + _FieldDescriptor.CPPTYPE_BOOL: BoolValueChecker(), + _FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes), +} + + +# Map from field type to a function F, such that F(field_num, value) +# gives the total byte size for a value of the given type. This +# byte size includes tag information and any other additional space +# associated with serializing "value". 
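+# For example, TYPE_TO_BYTE_SIZE_FN[_FieldDescriptor.TYPE_INT32](1, 150)
+# returns 3: one byte for the tag of field number 1 plus two varint bytes
+# for the value 150.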
+TYPE_TO_BYTE_SIZE_FN = { + _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize, + _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize, + _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize, + _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize, + _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize, + _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize, + _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize, + _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize, + _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize, + _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize, + _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize, + _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize, + _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize, + _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize, + _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize, + _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize, + _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize, + _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize + } + + +# Maps from field types to encoder constructors. +TYPE_TO_ENCODER = { + _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder, + _FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder, + _FieldDescriptor.TYPE_INT64: encoder.Int64Encoder, + _FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder, + _FieldDescriptor.TYPE_INT32: encoder.Int32Encoder, + _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder, + _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder, + _FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder, + _FieldDescriptor.TYPE_STRING: encoder.StringEncoder, + _FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder, + _FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder, + _FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder, + _FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder, + _FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder, + _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder, + _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder, + _FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder, + _FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder, + } + + +# Maps from field types to sizer constructors. +TYPE_TO_SIZER = { + _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer, + _FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer, + _FieldDescriptor.TYPE_INT64: encoder.Int64Sizer, + _FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer, + _FieldDescriptor.TYPE_INT32: encoder.Int32Sizer, + _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer, + _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer, + _FieldDescriptor.TYPE_BOOL: encoder.BoolSizer, + _FieldDescriptor.TYPE_STRING: encoder.StringSizer, + _FieldDescriptor.TYPE_GROUP: encoder.GroupSizer, + _FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer, + _FieldDescriptor.TYPE_BYTES: encoder.BytesSizer, + _FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer, + _FieldDescriptor.TYPE_ENUM: encoder.EnumSizer, + _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer, + _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer, + _FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer, + _FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer, + } + + +# Maps from field type to a decoder constructor. 
+TYPE_TO_DECODER = { + _FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder, + _FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder, + _FieldDescriptor.TYPE_INT64: decoder.Int64Decoder, + _FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder, + _FieldDescriptor.TYPE_INT32: decoder.Int32Decoder, + _FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder, + _FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder, + _FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder, + _FieldDescriptor.TYPE_STRING: decoder.StringDecoder, + _FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder, + _FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder, + _FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder, + _FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder, + _FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder, + _FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder, + _FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder, + _FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder, + _FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder, + } + +# Maps from field type to expected wiretype. +FIELD_TYPE_TO_WIRE_TYPE = { + _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_STRING: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP, + _FieldDescriptor.TYPE_MESSAGE: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_BYTES: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT, + } diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/well_known_types.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/well_known_types.py new file mode 100644 index 0000000000..b581ab750a --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/well_known_types.py @@ -0,0 +1,878 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains well known classes. + +This files defines well known classes which need extra maintenance including: + - Any + - Duration + - FieldMask + - Struct + - Timestamp +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + +import calendar +import collections.abc +import datetime + +from google.protobuf.descriptor import FieldDescriptor + +_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S' +_NANOS_PER_SECOND = 1000000000 +_NANOS_PER_MILLISECOND = 1000000 +_NANOS_PER_MICROSECOND = 1000 +_MILLIS_PER_SECOND = 1000 +_MICROS_PER_SECOND = 1000000 +_SECONDS_PER_DAY = 24 * 3600 +_DURATION_SECONDS_MAX = 315576000000 + + +class Any(object): + """Class for Any Message type.""" + + __slots__ = () + + def Pack(self, msg, type_url_prefix='type.googleapis.com/', + deterministic=None): + """Packs the specified message into current Any message.""" + if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/': + self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) + else: + self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) + self.value = msg.SerializeToString(deterministic=deterministic) + + def Unpack(self, msg): + """Unpacks the current Any message into specified message.""" + descriptor = msg.DESCRIPTOR + if not self.Is(descriptor): + return False + msg.ParseFromString(self.value) + return True + + def TypeName(self): + """Returns the protobuf type name of the inner message.""" + # Only last part is to be used: b/25630112 + return self.type_url.split('/')[-1] + + def Is(self, descriptor): + """Checks if this Any represents the given protobuf type.""" + return '/' in self.type_url and self.TypeName() == descriptor.full_name + + +_EPOCH_DATETIME_NAIVE = datetime.datetime.utcfromtimestamp(0) +_EPOCH_DATETIME_AWARE = datetime.datetime.fromtimestamp( + 0, tz=datetime.timezone.utc) + + +class Timestamp(object): + """Class for Timestamp message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts Timestamp to RFC 3339 date string format. + + Returns: + A string converted from timestamp. The string is always Z-normalized + and uses 3, 6 or 9 fractional digits as required to represent the + exact time. Example of the return format: '1972-01-01T10:00:20.021Z' + """ + nanos = self.nanos % _NANOS_PER_SECOND + total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND + seconds = total_sec % _SECONDS_PER_DAY + days = (total_sec - seconds) // _SECONDS_PER_DAY + dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(days, seconds) + + result = dt.isoformat() + if (nanos % 1e9) == 0: + # If there are 0 fractional digits, the fractional + # point '.' should be omitted when serializing. + return result + 'Z' + if (nanos % 1e6) == 0: + # Serialize 3 fractional digits. 
+ return result + '.%03dZ' % (nanos / 1e6) + if (nanos % 1e3) == 0: + # Serialize 6 fractional digits. + return result + '.%06dZ' % (nanos / 1e3) + # Serialize 9 fractional digits. + return result + '.%09dZ' % nanos + + def FromJsonString(self, value): + """Parse a RFC 3339 date string format to Timestamp. + + Args: + value: A date string. Any fractional digits (or none) and any offset are + accepted as long as they fit into nano-seconds precision. + Example of accepted format: '1972-01-01T10:00:20.021-05:00' + + Raises: + ValueError: On parsing problems. + """ + if not isinstance(value, str): + raise ValueError('Timestamp JSON value not a string: {!r}'.format(value)) + timezone_offset = value.find('Z') + if timezone_offset == -1: + timezone_offset = value.find('+') + if timezone_offset == -1: + timezone_offset = value.rfind('-') + if timezone_offset == -1: + raise ValueError( + 'Failed to parse timestamp: missing valid timezone offset.') + time_value = value[0:timezone_offset] + # Parse datetime and nanos. + point_position = time_value.find('.') + if point_position == -1: + second_value = time_value + nano_value = '' + else: + second_value = time_value[:point_position] + nano_value = time_value[point_position + 1:] + if 't' in second_value: + raise ValueError( + 'time data \'{0}\' does not match format \'%Y-%m-%dT%H:%M:%S\', ' + 'lowercase \'t\' is not accepted'.format(second_value)) + date_object = datetime.datetime.strptime(second_value, _TIMESTAMPFOMAT) + td = date_object - datetime.datetime(1970, 1, 1) + seconds = td.seconds + td.days * _SECONDS_PER_DAY + if len(nano_value) > 9: + raise ValueError( + 'Failed to parse Timestamp: nanos {0} more than ' + '9 fractional digits.'.format(nano_value)) + if nano_value: + nanos = round(float('0.' + nano_value) * 1e9) + else: + nanos = 0 + # Parse timezone offsets. 
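+    # The wall-clock time parsed above is local to the given offset, so it is
+    # converted back to UTC below: positive offsets are subtracted and
+    # negative offsets are added.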
+ if value[timezone_offset] == 'Z': + if len(value) != timezone_offset + 1: + raise ValueError('Failed to parse timestamp: invalid trailing' + ' data {0}.'.format(value)) + else: + timezone = value[timezone_offset:] + pos = timezone.find(':') + if pos == -1: + raise ValueError( + 'Invalid timezone offset value: {0}.'.format(timezone)) + if timezone[0] == '+': + seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 + else: + seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 + # Set seconds and nanos + self.seconds = int(seconds) + self.nanos = int(nanos) + + def GetCurrentTime(self): + """Get the current UTC into Timestamp.""" + self.FromDatetime(datetime.datetime.utcnow()) + + def ToNanoseconds(self): + """Converts Timestamp to nanoseconds since epoch.""" + return self.seconds * _NANOS_PER_SECOND + self.nanos + + def ToMicroseconds(self): + """Converts Timestamp to microseconds since epoch.""" + return (self.seconds * _MICROS_PER_SECOND + + self.nanos // _NANOS_PER_MICROSECOND) + + def ToMilliseconds(self): + """Converts Timestamp to milliseconds since epoch.""" + return (self.seconds * _MILLIS_PER_SECOND + + self.nanos // _NANOS_PER_MILLISECOND) + + def ToSeconds(self): + """Converts Timestamp to seconds since epoch.""" + return self.seconds + + def FromNanoseconds(self, nanos): + """Converts nanoseconds since epoch to Timestamp.""" + self.seconds = nanos // _NANOS_PER_SECOND + self.nanos = nanos % _NANOS_PER_SECOND + + def FromMicroseconds(self, micros): + """Converts microseconds since epoch to Timestamp.""" + self.seconds = micros // _MICROS_PER_SECOND + self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND + + def FromMilliseconds(self, millis): + """Converts milliseconds since epoch to Timestamp.""" + self.seconds = millis // _MILLIS_PER_SECOND + self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND + + def FromSeconds(self, seconds): + """Converts seconds since epoch to Timestamp.""" + self.seconds = seconds + self.nanos = 0 + + def ToDatetime(self, tzinfo=None): + """Converts Timestamp to a datetime. + + Args: + tzinfo: A datetime.tzinfo subclass; defaults to None. + + Returns: + If tzinfo is None, returns a timezone-naive UTC datetime (with no timezone + information, i.e. not aware that it's UTC). + + Otherwise, returns a timezone-aware datetime in the input timezone. + """ + delta = datetime.timedelta( + seconds=self.seconds, + microseconds=_RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)) + if tzinfo is None: + return _EPOCH_DATETIME_NAIVE + delta + else: + return _EPOCH_DATETIME_AWARE.astimezone(tzinfo) + delta + + def FromDatetime(self, dt): + """Converts datetime to Timestamp. + + Args: + dt: A datetime. If it's timezone-naive, it's assumed to be in UTC. + """ + # Using this guide: http://wiki.python.org/moin/WorkingWithTime + # And this conversion guide: http://docs.python.org/library/time.html + + # Turn the date parameter into a tuple (struct_time) that can then be + # manipulated into a long value of seconds. During the conversion from + # struct_time to long, the source date in UTC, and so it follows that the + # correct transformation is calendar.timegm() + self.seconds = calendar.timegm(dt.utctimetuple()) + self.nanos = dt.microsecond * _NANOS_PER_MICROSECOND + + +class Duration(object): + """Class for Duration message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts Duration to string format. + + Returns: + A string converted from self. 
The string format will contains + 3, 6, or 9 fractional digits depending on the precision required to + represent the exact Duration value. For example: "1s", "1.010s", + "1.000000100s", "-3.100s" + """ + _CheckDurationValid(self.seconds, self.nanos) + if self.seconds < 0 or self.nanos < 0: + result = '-' + seconds = - self.seconds + int((0 - self.nanos) // 1e9) + nanos = (0 - self.nanos) % 1e9 + else: + result = '' + seconds = self.seconds + int(self.nanos // 1e9) + nanos = self.nanos % 1e9 + result += '%d' % seconds + if (nanos % 1e9) == 0: + # If there are 0 fractional digits, the fractional + # point '.' should be omitted when serializing. + return result + 's' + if (nanos % 1e6) == 0: + # Serialize 3 fractional digits. + return result + '.%03ds' % (nanos / 1e6) + if (nanos % 1e3) == 0: + # Serialize 6 fractional digits. + return result + '.%06ds' % (nanos / 1e3) + # Serialize 9 fractional digits. + return result + '.%09ds' % nanos + + def FromJsonString(self, value): + """Converts a string to Duration. + + Args: + value: A string to be converted. The string must end with 's'. Any + fractional digits (or none) are accepted as long as they fit into + precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s + + Raises: + ValueError: On parsing problems. + """ + if not isinstance(value, str): + raise ValueError('Duration JSON value not a string: {!r}'.format(value)) + if len(value) < 1 or value[-1] != 's': + raise ValueError( + 'Duration must end with letter "s": {0}.'.format(value)) + try: + pos = value.find('.') + if pos == -1: + seconds = int(value[:-1]) + nanos = 0 + else: + seconds = int(value[:pos]) + if value[0] == '-': + nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9)) + else: + nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9)) + _CheckDurationValid(seconds, nanos) + self.seconds = seconds + self.nanos = nanos + except ValueError as e: + raise ValueError( + 'Couldn\'t parse duration: {0} : {1}.'.format(value, e)) + + def ToNanoseconds(self): + """Converts a Duration to nanoseconds.""" + return self.seconds * _NANOS_PER_SECOND + self.nanos + + def ToMicroseconds(self): + """Converts a Duration to microseconds.""" + micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND) + return self.seconds * _MICROS_PER_SECOND + micros + + def ToMilliseconds(self): + """Converts a Duration to milliseconds.""" + millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND) + return self.seconds * _MILLIS_PER_SECOND + millis + + def ToSeconds(self): + """Converts a Duration to seconds.""" + return self.seconds + + def FromNanoseconds(self, nanos): + """Converts nanoseconds to Duration.""" + self._NormalizeDuration(nanos // _NANOS_PER_SECOND, + nanos % _NANOS_PER_SECOND) + + def FromMicroseconds(self, micros): + """Converts microseconds to Duration.""" + self._NormalizeDuration( + micros // _MICROS_PER_SECOND, + (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND) + + def FromMilliseconds(self, millis): + """Converts milliseconds to Duration.""" + self._NormalizeDuration( + millis // _MILLIS_PER_SECOND, + (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND) + + def FromSeconds(self, seconds): + """Converts seconds to Duration.""" + self.seconds = seconds + self.nanos = 0 + + def ToTimedelta(self): + """Converts Duration to timedelta.""" + return datetime.timedelta( + seconds=self.seconds, microseconds=_RoundTowardZero( + self.nanos, _NANOS_PER_MICROSECOND)) + + def FromTimedelta(self, td): + """Converts timedelta to Duration.""" + 
self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY, + td.microseconds * _NANOS_PER_MICROSECOND) + + def _NormalizeDuration(self, seconds, nanos): + """Set Duration by seconds and nanos.""" + # Force nanos to be negative if the duration is negative. + if seconds < 0 and nanos > 0: + seconds += 1 + nanos -= _NANOS_PER_SECOND + self.seconds = seconds + self.nanos = nanos + + +def _CheckDurationValid(seconds, nanos): + if seconds < -_DURATION_SECONDS_MAX or seconds > _DURATION_SECONDS_MAX: + raise ValueError( + 'Duration is not valid: Seconds {0} must be in range ' + '[-315576000000, 315576000000].'.format(seconds)) + if nanos <= -_NANOS_PER_SECOND or nanos >= _NANOS_PER_SECOND: + raise ValueError( + 'Duration is not valid: Nanos {0} must be in range ' + '[-999999999, 999999999].'.format(nanos)) + if (nanos < 0 and seconds > 0) or (nanos > 0 and seconds < 0): + raise ValueError( + 'Duration is not valid: Sign mismatch.') + + +def _RoundTowardZero(value, divider): + """Truncates the remainder part after division.""" + # For some languages, the sign of the remainder is implementation + # dependent if any of the operands is negative. Here we enforce + # "rounded toward zero" semantics. For example, for (-5) / 2 an + # implementation may give -3 as the result with the remainder being + # 1. This function ensures we always return -2 (closer to zero). + result = value // divider + remainder = value % divider + if result < 0 and remainder > 0: + return result + 1 + else: + return result + + +class FieldMask(object): + """Class for FieldMask message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts FieldMask to string according to proto3 JSON spec.""" + camelcase_paths = [] + for path in self.paths: + camelcase_paths.append(_SnakeCaseToCamelCase(path)) + return ','.join(camelcase_paths) + + def FromJsonString(self, value): + """Converts string to FieldMask according to proto3 JSON spec.""" + if not isinstance(value, str): + raise ValueError('FieldMask JSON value not a string: {!r}'.format(value)) + self.Clear() + if value: + for path in value.split(','): + self.paths.append(_CamelCaseToSnakeCase(path)) + + def IsValidForDescriptor(self, message_descriptor): + """Checks whether the FieldMask is valid for Message Descriptor.""" + for path in self.paths: + if not _IsValidPath(message_descriptor, path): + return False + return True + + def AllFieldsFromDescriptor(self, message_descriptor): + """Gets all direct fields of Message Descriptor to FieldMask.""" + self.Clear() + for field in message_descriptor.fields: + self.paths.append(field.name) + + def CanonicalFormFromMask(self, mask): + """Converts a FieldMask to the canonical form. + + Removes paths that are covered by another path. For example, + "foo.bar" is covered by "foo" and will be removed if "foo" + is also in the FieldMask. Then sorts all paths in alphabetical order. + + Args: + mask: The original FieldMask to be converted. 
+ """ + tree = _FieldMaskTree(mask) + tree.ToFieldMask(self) + + def Union(self, mask1, mask2): + """Merges mask1 and mask2 into this FieldMask.""" + _CheckFieldMaskMessage(mask1) + _CheckFieldMaskMessage(mask2) + tree = _FieldMaskTree(mask1) + tree.MergeFromFieldMask(mask2) + tree.ToFieldMask(self) + + def Intersect(self, mask1, mask2): + """Intersects mask1 and mask2 into this FieldMask.""" + _CheckFieldMaskMessage(mask1) + _CheckFieldMaskMessage(mask2) + tree = _FieldMaskTree(mask1) + intersection = _FieldMaskTree() + for path in mask2.paths: + tree.IntersectPath(path, intersection) + intersection.ToFieldMask(self) + + def MergeMessage( + self, source, destination, + replace_message_field=False, replace_repeated_field=False): + """Merges fields specified in FieldMask from source to destination. + + Args: + source: Source message. + destination: The destination message to be merged into. + replace_message_field: Replace message field if True. Merge message + field if False. + replace_repeated_field: Replace repeated field if True. Append + elements of repeated field if False. + """ + tree = _FieldMaskTree(self) + tree.MergeMessage( + source, destination, replace_message_field, replace_repeated_field) + + +def _IsValidPath(message_descriptor, path): + """Checks whether the path is valid for Message Descriptor.""" + parts = path.split('.') + last = parts.pop() + for name in parts: + field = message_descriptor.fields_by_name.get(name) + if (field is None or + field.label == FieldDescriptor.LABEL_REPEATED or + field.type != FieldDescriptor.TYPE_MESSAGE): + return False + message_descriptor = field.message_type + return last in message_descriptor.fields_by_name + + +def _CheckFieldMaskMessage(message): + """Raises ValueError if message is not a FieldMask.""" + message_descriptor = message.DESCRIPTOR + if (message_descriptor.name != 'FieldMask' or + message_descriptor.file.name != 'google/protobuf/field_mask.proto'): + raise ValueError('Message {0} is not a FieldMask.'.format( + message_descriptor.full_name)) + + +def _SnakeCaseToCamelCase(path_name): + """Converts a path name from snake_case to camelCase.""" + result = [] + after_underscore = False + for c in path_name: + if c.isupper(): + raise ValueError( + 'Fail to print FieldMask to Json string: Path name ' + '{0} must not contain uppercase letters.'.format(path_name)) + if after_underscore: + if c.islower(): + result.append(c.upper()) + after_underscore = False + else: + raise ValueError( + 'Fail to print FieldMask to Json string: The ' + 'character after a "_" must be a lowercase letter ' + 'in path name {0}.'.format(path_name)) + elif c == '_': + after_underscore = True + else: + result += c + + if after_underscore: + raise ValueError('Fail to print FieldMask to Json string: Trailing "_" ' + 'in path name {0}.'.format(path_name)) + return ''.join(result) + + +def _CamelCaseToSnakeCase(path_name): + """Converts a field name from camelCase to snake_case.""" + result = [] + for c in path_name: + if c == '_': + raise ValueError('Fail to parse FieldMask: Path name ' + '{0} must not contain "_"s.'.format(path_name)) + if c.isupper(): + result += '_' + result += c.lower() + else: + result += c + return ''.join(result) + + +class _FieldMaskTree(object): + """Represents a FieldMask in a tree structure. + + For example, given a FieldMask "foo.bar,foo.baz,bar.baz", + the FieldMaskTree will be: + [_root] -+- foo -+- bar + | | + | +- baz + | + +- bar --- baz + In the tree, each leaf node represents a field path. 
+ """ + + __slots__ = ('_root',) + + def __init__(self, field_mask=None): + """Initializes the tree by FieldMask.""" + self._root = {} + if field_mask: + self.MergeFromFieldMask(field_mask) + + def MergeFromFieldMask(self, field_mask): + """Merges a FieldMask to the tree.""" + for path in field_mask.paths: + self.AddPath(path) + + def AddPath(self, path): + """Adds a field path into the tree. + + If the field path to add is a sub-path of an existing field path + in the tree (i.e., a leaf node), it means the tree already matches + the given path so nothing will be added to the tree. If the path + matches an existing non-leaf node in the tree, that non-leaf node + will be turned into a leaf node with all its children removed because + the path matches all the node's children. Otherwise, a new path will + be added. + + Args: + path: The field path to add. + """ + node = self._root + for name in path.split('.'): + if name not in node: + node[name] = {} + elif not node[name]: + # Pre-existing empty node implies we already have this entire tree. + return + node = node[name] + # Remove any sub-trees we might have had. + node.clear() + + def ToFieldMask(self, field_mask): + """Converts the tree to a FieldMask.""" + field_mask.Clear() + _AddFieldPaths(self._root, '', field_mask) + + def IntersectPath(self, path, intersection): + """Calculates the intersection part of a field path with this tree. + + Args: + path: The field path to calculates. + intersection: The out tree to record the intersection part. + """ + node = self._root + for name in path.split('.'): + if name not in node: + return + elif not node[name]: + intersection.AddPath(path) + return + node = node[name] + intersection.AddLeafNodes(path, node) + + def AddLeafNodes(self, prefix, node): + """Adds leaf nodes begin with prefix to this tree.""" + if not node: + self.AddPath(prefix) + for name in node: + child_path = prefix + '.' + name + self.AddLeafNodes(child_path, node[name]) + + def MergeMessage( + self, source, destination, + replace_message, replace_repeated): + """Merge all fields specified by this tree from source to destination.""" + _MergeMessage( + self._root, source, destination, replace_message, replace_repeated) + + +def _StrConvert(value): + """Converts value to str if it is not.""" + # This file is imported by c extension and some methods like ClearField + # requires string for the field name. py2/py3 has different text + # type and may use unicode. + if not isinstance(value, str): + return value.encode('utf-8') + return value + + +def _MergeMessage( + node, source, destination, replace_message, replace_repeated): + """Merge all fields specified by a sub-tree from source to destination.""" + source_descriptor = source.DESCRIPTOR + for name in node: + child = node[name] + field = source_descriptor.fields_by_name[name] + if field is None: + raise ValueError('Error: Can\'t find field {0} in message {1}.'.format( + name, source_descriptor.full_name)) + if child: + # Sub-paths are only allowed for singular message fields. 
+ if (field.label == FieldDescriptor.LABEL_REPEATED or + field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE): + raise ValueError('Error: Field {0} in message {1} is not a singular ' + 'message field and cannot have sub-fields.'.format( + name, source_descriptor.full_name)) + if source.HasField(name): + _MergeMessage( + child, getattr(source, name), getattr(destination, name), + replace_message, replace_repeated) + continue + if field.label == FieldDescriptor.LABEL_REPEATED: + if replace_repeated: + destination.ClearField(_StrConvert(name)) + repeated_source = getattr(source, name) + repeated_destination = getattr(destination, name) + repeated_destination.MergeFrom(repeated_source) + else: + if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + if replace_message: + destination.ClearField(_StrConvert(name)) + if source.HasField(name): + getattr(destination, name).MergeFrom(getattr(source, name)) + else: + setattr(destination, name, getattr(source, name)) + + +def _AddFieldPaths(node, prefix, field_mask): + """Adds the field paths descended from node to field_mask.""" + if not node and prefix: + field_mask.paths.append(prefix) + return + for name in sorted(node): + if prefix: + child_path = prefix + '.' + name + else: + child_path = name + _AddFieldPaths(node[name], child_path, field_mask) + + +def _SetStructValue(struct_value, value): + if value is None: + struct_value.null_value = 0 + elif isinstance(value, bool): + # Note: this check must come before the number check because in Python + # True and False are also considered numbers. + struct_value.bool_value = value + elif isinstance(value, str): + struct_value.string_value = value + elif isinstance(value, (int, float)): + struct_value.number_value = value + elif isinstance(value, (dict, Struct)): + struct_value.struct_value.Clear() + struct_value.struct_value.update(value) + elif isinstance(value, (list, ListValue)): + struct_value.list_value.Clear() + struct_value.list_value.extend(value) + else: + raise ValueError('Unexpected type') + + +def _GetStructValue(struct_value): + which = struct_value.WhichOneof('kind') + if which == 'struct_value': + return struct_value.struct_value + elif which == 'null_value': + return None + elif which == 'number_value': + return struct_value.number_value + elif which == 'string_value': + return struct_value.string_value + elif which == 'bool_value': + return struct_value.bool_value + elif which == 'list_value': + return struct_value.list_value + elif which is None: + raise ValueError('Value not set') + + +class Struct(object): + """Class for Struct message type.""" + + __slots__ = () + + def __getitem__(self, key): + return _GetStructValue(self.fields[key]) + + def __contains__(self, item): + return item in self.fields + + def __setitem__(self, key, value): + _SetStructValue(self.fields[key], value) + + def __delitem__(self, key): + del self.fields[key] + + def __len__(self): + return len(self.fields) + + def __iter__(self): + return iter(self.fields) + + def keys(self): # pylint: disable=invalid-name + return self.fields.keys() + + def values(self): # pylint: disable=invalid-name + return [self[key] for key in self] + + def items(self): # pylint: disable=invalid-name + return [(key, self[key]) for key in self] + + def get_or_create_list(self, key): + """Returns a list for this key, creating if it didn't exist already.""" + if not self.fields[key].HasField('list_value'): + # Clear will mark list_value modified which will indeed create a list. 
+ self.fields[key].list_value.Clear() + return self.fields[key].list_value + + def get_or_create_struct(self, key): + """Returns a struct for this key, creating if it didn't exist already.""" + if not self.fields[key].HasField('struct_value'): + # Clear will mark struct_value modified which will indeed create a struct. + self.fields[key].struct_value.Clear() + return self.fields[key].struct_value + + def update(self, dictionary): # pylint: disable=invalid-name + for key, value in dictionary.items(): + _SetStructValue(self.fields[key], value) + +collections.abc.MutableMapping.register(Struct) + + +class ListValue(object): + """Class for ListValue message type.""" + + __slots__ = () + + def __len__(self): + return len(self.values) + + def append(self, value): + _SetStructValue(self.values.add(), value) + + def extend(self, elem_seq): + for value in elem_seq: + self.append(value) + + def __getitem__(self, index): + """Retrieves item by the specified index.""" + return _GetStructValue(self.values.__getitem__(index)) + + def __setitem__(self, index, value): + _SetStructValue(self.values.__getitem__(index), value) + + def __delitem__(self, key): + del self.values[key] + + def items(self): + for i in range(len(self)): + yield self[i] + + def add_struct(self): + """Appends and returns a struct value as the next value in the list.""" + struct_value = self.values.add().struct_value + # Clear will mark struct_value modified which will indeed create a struct. + struct_value.Clear() + return struct_value + + def add_list(self): + """Appends and returns a list value as the next value in the list.""" + list_value = self.values.add().list_value + # Clear will mark list_value modified which will indeed create a list. + list_value.Clear() + return list_value + +collections.abc.MutableSequence.register(ListValue) + + +WKTBASES = { + 'google.protobuf.Any': Any, + 'google.protobuf.Duration': Duration, + 'google.protobuf.FieldMask': FieldMask, + 'google.protobuf.ListValue': ListValue, + 'google.protobuf.Struct': Struct, + 'google.protobuf.Timestamp': Timestamp, +} diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/wire_format.py b/openpype/hosts/nuke/vendor/google/protobuf/internal/wire_format.py new file mode 100644 index 0000000000..883f525585 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/internal/wire_format.py @@ -0,0 +1,268 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Constants and static functions to support protocol buffer wire format."""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import struct
+from google.protobuf import descriptor
+from google.protobuf import message
+
+
+TAG_TYPE_BITS = 3  # Number of bits used to hold type info in a proto tag.
+TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1  # 0x7
+
+# These numbers identify the wire type of a protocol buffer value.
+# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
+# tag-and-type to store one of these WIRETYPE_* constants.
+# These values must match WireType enum in google/protobuf/wire_format.h.
+WIRETYPE_VARINT = 0
+WIRETYPE_FIXED64 = 1
+WIRETYPE_LENGTH_DELIMITED = 2
+WIRETYPE_START_GROUP = 3
+WIRETYPE_END_GROUP = 4
+WIRETYPE_FIXED32 = 5
+_WIRETYPE_MAX = 5
+
+
+# Bounds for various integer types.
+INT32_MAX = int((1 << 31) - 1)
+INT32_MIN = int(-(1 << 31))
+UINT32_MAX = (1 << 32) - 1
+
+INT64_MAX = (1 << 63) - 1
+INT64_MIN = -(1 << 63)
+UINT64_MAX = (1 << 64) - 1
+
+# "struct" format strings that will encode/decode the specified formats.
+FORMAT_UINT32_LITTLE_ENDIAN = '<I'
+FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
+FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
+FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
+
+
+# We'll have to provide alternate implementations of AppendLittleEndian*() on
+# any architectures where these checks fail.
+if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
+  raise AssertionError('Format "I" is not a 32-bit number.')
+if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
+  raise AssertionError('Format "Q" is not a 64-bit number.')
+
+
+def PackTag(field_number, wire_type):
+  """Returns an unsigned 32-bit integer that encodes the field number and
+  wire type information in standard protocol message wire format.
+
+  Args:
+    field_number: Expected to be an integer in the range [1, 1 << 29)
+    wire_type: One of the WIRETYPE_* constants.
+  """
+  if not 0 <= wire_type <= _WIRETYPE_MAX:
+    raise message.EncodeError('Unknown wire type: %d' % wire_type)
+  return (field_number << TAG_TYPE_BITS) | wire_type
+
+
+def UnpackTag(tag):
+  """The inverse of PackTag().  Given an unsigned 32-bit number,
+  returns a (field_number, wire_type) tuple.
+  """
+  return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
+
+
+def ZigZagEncode(value):
+  """ZigZag Transform: Encodes signed integers so that they can be
+  effectively used with varint encoding. See wire_format.h for
+  more details.
+  """
+  if value >= 0:
+    return value << 1
+  return (value << 1) ^ (~0)
+
+
+def ZigZagDecode(value):
+  """Inverse of ZigZagEncode()."""
+  if not value & 0x1:
+    return value >> 1
+  return (value >> 1) ^ (~0)
+
+
+
+# The *ByteSize() functions below return the number of bytes required to
+# serialize "field number + type" information and then serialize the value.
+
+
+def Int32ByteSize(field_number, int32):
+  return Int64ByteSize(field_number, int32)
+
+
+def Int32ByteSizeNoTag(int32):
+  return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
+
+
+def Int64ByteSize(field_number, int64):
+  # Have to convert to uint before calling UInt64ByteSize().
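+  # Masking with 0xffffffffffffffff reinterprets a negative int64 as its
+  # two's-complement uint64 value, so negative numbers are correctly sized
+  # as ten varint bytes.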
+ return UInt64ByteSize(field_number, 0xffffffffffffffff & int64) + + +def UInt32ByteSize(field_number, uint32): + return UInt64ByteSize(field_number, uint32) + + +def UInt64ByteSize(field_number, uint64): + return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64) + + +def SInt32ByteSize(field_number, int32): + return UInt32ByteSize(field_number, ZigZagEncode(int32)) + + +def SInt64ByteSize(field_number, int64): + return UInt64ByteSize(field_number, ZigZagEncode(int64)) + + +def Fixed32ByteSize(field_number, fixed32): + return TagByteSize(field_number) + 4 + + +def Fixed64ByteSize(field_number, fixed64): + return TagByteSize(field_number) + 8 + + +def SFixed32ByteSize(field_number, sfixed32): + return TagByteSize(field_number) + 4 + + +def SFixed64ByteSize(field_number, sfixed64): + return TagByteSize(field_number) + 8 + + +def FloatByteSize(field_number, flt): + return TagByteSize(field_number) + 4 + + +def DoubleByteSize(field_number, double): + return TagByteSize(field_number) + 8 + + +def BoolByteSize(field_number, b): + return TagByteSize(field_number) + 1 + + +def EnumByteSize(field_number, enum): + return UInt32ByteSize(field_number, enum) + + +def StringByteSize(field_number, string): + return BytesByteSize(field_number, string.encode('utf-8')) + + +def BytesByteSize(field_number, b): + return (TagByteSize(field_number) + + _VarUInt64ByteSizeNoTag(len(b)) + + len(b)) + + +def GroupByteSize(field_number, message): + return (2 * TagByteSize(field_number) # START and END group. + + message.ByteSize()) + + +def MessageByteSize(field_number, message): + return (TagByteSize(field_number) + + _VarUInt64ByteSizeNoTag(message.ByteSize()) + + message.ByteSize()) + + +def MessageSetItemByteSize(field_number, msg): + # First compute the sizes of the tags. + # There are 2 tags for the beginning and ending of the repeated group, that + # is field number 1, one with field number 2 (type_id) and one with field + # number 3 (message). + total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3)) + + # Add the number of bytes for type_id. + total_size += _VarUInt64ByteSizeNoTag(field_number) + + message_size = msg.ByteSize() + + # The number of bytes for encoding the length of the message. + total_size += _VarUInt64ByteSizeNoTag(message_size) + + # The size of the message. + total_size += message_size + return total_size + + +def TagByteSize(field_number): + """Returns the bytes required to serialize a tag with this field number.""" + # Just pass in type 0, since the type won't affect the tag+type size. + return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0)) + + +# Private helper function for the *ByteSize() functions above. + +def _VarUInt64ByteSizeNoTag(uint64): + """Returns the number of bytes required to serialize a single varint + using boundary value comparisons. (unrolled loop optimization -WPierce) + uint64 must be unsigned. 
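+
+  Each varint byte carries 7 payload bits, so the comparisons below step
+  through the boundaries 2**7 - 1, 2**14 - 1, ..., 2**63 - 1.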
+ """ + if uint64 <= 0x7f: return 1 + if uint64 <= 0x3fff: return 2 + if uint64 <= 0x1fffff: return 3 + if uint64 <= 0xfffffff: return 4 + if uint64 <= 0x7ffffffff: return 5 + if uint64 <= 0x3ffffffffff: return 6 + if uint64 <= 0x1ffffffffffff: return 7 + if uint64 <= 0xffffffffffffff: return 8 + if uint64 <= 0x7fffffffffffffff: return 9 + if uint64 > UINT64_MAX: + raise message.EncodeError('Value out of range: %d' % uint64) + return 10 + + +NON_PACKABLE_TYPES = ( + descriptor.FieldDescriptor.TYPE_STRING, + descriptor.FieldDescriptor.TYPE_GROUP, + descriptor.FieldDescriptor.TYPE_MESSAGE, + descriptor.FieldDescriptor.TYPE_BYTES +) + + +def IsTypePackable(field_type): + """Return true iff packable = true is valid for fields of this type. + + Args: + field_type: a FieldDescriptor::Type value. + + Returns: + True iff fields of this type are packable. + """ + return field_type not in NON_PACKABLE_TYPES diff --git a/openpype/hosts/nuke/vendor/google/protobuf/json_format.py b/openpype/hosts/nuke/vendor/google/protobuf/json_format.py new file mode 100644 index 0000000000..5024ed89d7 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/json_format.py @@ -0,0 +1,912 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains routines for printing protocol messages in JSON format. + +Simple usage example: + + # Create a proto object and serialize it to a json format string. + message = my_proto_pb2.MyMessage(foo='bar') + json_string = json_format.MessageToJson(message) + + # Parse a json format string to proto object. 
+ message = json_format.Parse(json_string, my_proto_pb2.MyMessage()) +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + + +import base64 +from collections import OrderedDict +import json +import math +from operator import methodcaller +import re +import sys + +from google.protobuf.internal import type_checkers +from google.protobuf import descriptor +from google.protobuf import symbol_database + + +_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S' +_INT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT32, + descriptor.FieldDescriptor.CPPTYPE_UINT32, + descriptor.FieldDescriptor.CPPTYPE_INT64, + descriptor.FieldDescriptor.CPPTYPE_UINT64]) +_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64, + descriptor.FieldDescriptor.CPPTYPE_UINT64]) +_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT, + descriptor.FieldDescriptor.CPPTYPE_DOUBLE]) +_INFINITY = 'Infinity' +_NEG_INFINITY = '-Infinity' +_NAN = 'NaN' + +_UNPAIRED_SURROGATE_PATTERN = re.compile( + u'[\ud800-\udbff](?![\udc00-\udfff])|(? self.max_recursion_depth: + raise ParseError('Message too deep. Max recursion depth is {0}'.format( + self.max_recursion_depth)) + message_descriptor = message.DESCRIPTOR + full_name = message_descriptor.full_name + if not path: + path = message_descriptor.name + if _IsWrapperMessage(message_descriptor): + self._ConvertWrapperMessage(value, message, path) + elif full_name in _WKTJSONMETHODS: + methodcaller(_WKTJSONMETHODS[full_name][1], value, message, path)(self) + else: + self._ConvertFieldValuePair(value, message, path) + self.recursion_depth -= 1 + + def _ConvertFieldValuePair(self, js, message, path): + """Convert field value pairs into regular message. + + Args: + js: A JSON object to convert the field value pairs. + message: A regular protocol message to record the data. + path: parent path to log parse error info. + + Raises: + ParseError: In case of problems converting. + """ + names = [] + message_descriptor = message.DESCRIPTOR + fields_by_json_name = dict((f.json_name, f) + for f in message_descriptor.fields) + for name in js: + try: + field = fields_by_json_name.get(name, None) + if not field: + field = message_descriptor.fields_by_name.get(name, None) + if not field and _VALID_EXTENSION_NAME.match(name): + if not message_descriptor.is_extendable: + raise ParseError( + 'Message type {0} does not have extensions at {1}'.format( + message_descriptor.full_name, path)) + identifier = name[1:-1] # strip [] brackets + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(identifier) + # pylint: enable=protected-access + if not field: + # Try looking for extension by the message type name, dropping the + # field name following the final . separator in full_name. + identifier = '.'.join(identifier.split('.')[:-1]) + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(identifier) + # pylint: enable=protected-access + if not field: + if self.ignore_unknown_fields: + continue + raise ParseError( + ('Message type "{0}" has no field named "{1}" at "{2}".\n' + ' Available Fields(except extensions): "{3}"').format( + message_descriptor.full_name, name, path, + [f.json_name for f in message_descriptor.fields])) + if name in names: + raise ParseError('Message type "{0}" should not have multiple ' + '"{1}" fields at "{2}".'.format( + message.DESCRIPTOR.full_name, name, path)) + names.append(name) + value = js[name] + # Check no other oneof field is parsed. 
+ if field.containing_oneof is not None and value is not None: + oneof_name = field.containing_oneof.name + if oneof_name in names: + raise ParseError('Message type "{0}" should not have multiple ' + '"{1}" oneof fields at "{2}".'.format( + message.DESCRIPTOR.full_name, oneof_name, + path)) + names.append(oneof_name) + + if value is None: + if (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE + and field.message_type.full_name == 'google.protobuf.Value'): + sub_message = getattr(message, field.name) + sub_message.null_value = 0 + elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM + and field.enum_type.full_name == 'google.protobuf.NullValue'): + setattr(message, field.name, 0) + else: + message.ClearField(field.name) + continue + + # Parse field value. + if _IsMapEntry(field): + message.ClearField(field.name) + self._ConvertMapFieldValue(value, message, field, + '{0}.{1}'.format(path, name)) + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + message.ClearField(field.name) + if not isinstance(value, list): + raise ParseError('repeated field {0} must be in [] which is ' + '{1} at {2}'.format(name, value, path)) + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + # Repeated message field. + for index, item in enumerate(value): + sub_message = getattr(message, field.name).add() + # None is a null_value in Value. + if (item is None and + sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'): + raise ParseError('null is not allowed to be used as an element' + ' in a repeated field at {0}.{1}[{2}]'.format( + path, name, index)) + self.ConvertMessage(item, sub_message, + '{0}.{1}[{2}]'.format(path, name, index)) + else: + # Repeated scalar field. + for index, item in enumerate(value): + if item is None: + raise ParseError('null is not allowed to be used as an element' + ' in a repeated field at {0}.{1}[{2}]'.format( + path, name, index)) + getattr(message, field.name).append( + _ConvertScalarFieldValue( + item, field, '{0}.{1}[{2}]'.format(path, name, index))) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + if field.is_extension: + sub_message = message.Extensions[field] + else: + sub_message = getattr(message, field.name) + sub_message.SetInParent() + self.ConvertMessage(value, sub_message, '{0}.{1}'.format(path, name)) + else: + if field.is_extension: + message.Extensions[field] = _ConvertScalarFieldValue( + value, field, '{0}.{1}'.format(path, name)) + else: + setattr( + message, field.name, + _ConvertScalarFieldValue(value, field, + '{0}.{1}'.format(path, name))) + except ParseError as e: + if field and field.containing_oneof is None: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + else: + raise ParseError(str(e)) + except ValueError as e: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + except TypeError as e: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + + def _ConvertAnyMessage(self, value, message, path): + """Convert a JSON representation into Any message.""" + if isinstance(value, dict) and not value: + return + try: + type_url = value['@type'] + except KeyError: + raise ParseError( + '@type is missing when parsing any message at {0}'.format(path)) + + try: + sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool) + except TypeError as e: + raise ParseError('{0} at {1}'.format(e, path)) + message_descriptor = sub_message.DESCRIPTOR + full_name = message_descriptor.full_name + if 
_IsWrapperMessage(message_descriptor): + self._ConvertWrapperMessage(value['value'], sub_message, + '{0}.value'.format(path)) + elif full_name in _WKTJSONMETHODS: + methodcaller(_WKTJSONMETHODS[full_name][1], value['value'], sub_message, + '{0}.value'.format(path))( + self) + else: + del value['@type'] + self._ConvertFieldValuePair(value, sub_message, path) + value['@type'] = type_url + # Sets Any message + message.value = sub_message.SerializeToString() + message.type_url = type_url + + def _ConvertGenericMessage(self, value, message, path): + """Convert a JSON representation into message with FromJsonString.""" + # Duration, Timestamp, FieldMask have a FromJsonString method to do the + # conversion. Users can also call the method directly. + try: + message.FromJsonString(value) + except ValueError as e: + raise ParseError('{0} at {1}'.format(e, path)) + + def _ConvertValueMessage(self, value, message, path): + """Convert a JSON representation into Value message.""" + if isinstance(value, dict): + self._ConvertStructMessage(value, message.struct_value, path) + elif isinstance(value, list): + self._ConvertListValueMessage(value, message.list_value, path) + elif value is None: + message.null_value = 0 + elif isinstance(value, bool): + message.bool_value = value + elif isinstance(value, str): + message.string_value = value + elif isinstance(value, _INT_OR_FLOAT): + message.number_value = value + else: + raise ParseError('Value {0} has unexpected type {1} at {2}'.format( + value, type(value), path)) + + def _ConvertListValueMessage(self, value, message, path): + """Convert a JSON representation into ListValue message.""" + if not isinstance(value, list): + raise ParseError('ListValue must be in [] which is {0} at {1}'.format( + value, path)) + message.ClearField('values') + for index, item in enumerate(value): + self._ConvertValueMessage(item, message.values.add(), + '{0}[{1}]'.format(path, index)) + + def _ConvertStructMessage(self, value, message, path): + """Convert a JSON representation into Struct message.""" + if not isinstance(value, dict): + raise ParseError('Struct must be in a dict which is {0} at {1}'.format( + value, path)) + # Clear will mark the struct as modified so it will be created even if + # there are no values. + message.Clear() + for key in value: + self._ConvertValueMessage(value[key], message.fields[key], + '{0}.{1}'.format(path, key)) + return + + def _ConvertWrapperMessage(self, value, message, path): + """Convert a JSON representation into Wrapper message.""" + field = message.DESCRIPTOR.fields_by_name['value'] + setattr( + message, 'value', + _ConvertScalarFieldValue(value, field, path='{0}.value'.format(path))) + + def _ConvertMapFieldValue(self, value, message, field, path): + """Convert map field value for a message map field. + + Args: + value: A JSON object to convert the map field value. + message: A protocol message to record the converted data. + field: The descriptor of the map field to be converted. + path: parent path to log parse error info. + + Raises: + ParseError: In case of convert problems. 
+ """ + if not isinstance(value, dict): + raise ParseError( + 'Map field {0} must be in a dict which is {1} at {2}'.format( + field.name, value, path)) + key_field = field.message_type.fields_by_name['key'] + value_field = field.message_type.fields_by_name['value'] + for key in value: + key_value = _ConvertScalarFieldValue(key, key_field, + '{0}.key'.format(path), True) + if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + self.ConvertMessage(value[key], + getattr(message, field.name)[key_value], + '{0}[{1}]'.format(path, key_value)) + else: + getattr(message, field.name)[key_value] = _ConvertScalarFieldValue( + value[key], value_field, path='{0}[{1}]'.format(path, key_value)) + + +def _ConvertScalarFieldValue(value, field, path, require_str=False): + """Convert a single scalar field value. + + Args: + value: A scalar value to convert the scalar field value. + field: The descriptor of the field to convert. + path: parent path to log parse error info. + require_str: If True, the field value must be a str. + + Returns: + The converted scalar field value + + Raises: + ParseError: In case of convert problems. + """ + try: + if field.cpp_type in _INT_TYPES: + return _ConvertInteger(value) + elif field.cpp_type in _FLOAT_TYPES: + return _ConvertFloat(value, field) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + return _ConvertBool(value, require_str) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + if isinstance(value, str): + encoded = value.encode('utf-8') + else: + encoded = value + # Add extra padding '=' + padded_value = encoded + b'=' * (4 - len(encoded) % 4) + return base64.urlsafe_b64decode(padded_value) + else: + # Checking for unpaired surrogates appears to be unreliable, + # depending on the specific Python version, so we check manually. + if _UNPAIRED_SURROGATE_PATTERN.search(value): + raise ParseError('Unpaired surrogate') + return value + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + # Convert an enum value. + enum_value = field.enum_type.values_by_name.get(value, None) + if enum_value is None: + try: + number = int(value) + enum_value = field.enum_type.values_by_number.get(number, None) + except ValueError: + raise ParseError('Invalid enum value {0} for enum type {1}'.format( + value, field.enum_type.full_name)) + if enum_value is None: + if field.file.syntax == 'proto3': + # Proto3 accepts unknown enums. + return number + raise ParseError('Invalid enum value {0} for enum type {1}'.format( + value, field.enum_type.full_name)) + return enum_value.number + except ParseError as e: + raise ParseError('{0} at {1}'.format(e, path)) + + +def _ConvertInteger(value): + """Convert an integer. + + Args: + value: A scalar value to convert. + + Returns: + The integer value. + + Raises: + ParseError: If an integer couldn't be consumed. 
+ """ + if isinstance(value, float) and not value.is_integer(): + raise ParseError('Couldn\'t parse integer: {0}'.format(value)) + + if isinstance(value, str) and value.find(' ') != -1: + raise ParseError('Couldn\'t parse integer: "{0}"'.format(value)) + + if isinstance(value, bool): + raise ParseError('Bool value {0} is not acceptable for ' + 'integer field'.format(value)) + + return int(value) + + +def _ConvertFloat(value, field): + """Convert an floating point number.""" + if isinstance(value, float): + if math.isnan(value): + raise ParseError('Couldn\'t parse NaN, use quoted "NaN" instead') + if math.isinf(value): + if value > 0: + raise ParseError('Couldn\'t parse Infinity or value too large, ' + 'use quoted "Infinity" instead') + else: + raise ParseError('Couldn\'t parse -Infinity or value too small, ' + 'use quoted "-Infinity" instead') + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: + # pylint: disable=protected-access + if value > type_checkers._FLOAT_MAX: + raise ParseError('Float value too large') + # pylint: disable=protected-access + if value < type_checkers._FLOAT_MIN: + raise ParseError('Float value too small') + if value == 'nan': + raise ParseError('Couldn\'t parse float "nan", use "NaN" instead') + try: + # Assume Python compatible syntax. + return float(value) + except ValueError: + # Check alternative spellings. + if value == _NEG_INFINITY: + return float('-inf') + elif value == _INFINITY: + return float('inf') + elif value == _NAN: + return float('nan') + else: + raise ParseError('Couldn\'t parse float: {0}'.format(value)) + + +def _ConvertBool(value, require_str): + """Convert a boolean value. + + Args: + value: A scalar value to convert. + require_str: If True, value must be a str. + + Returns: + The bool parsed. + + Raises: + ParseError: If a boolean value couldn't be consumed. + """ + if require_str: + if value == 'true': + return True + elif value == 'false': + return False + else: + raise ParseError('Expected "true" or "false", not {0}'.format(value)) + + if not isinstance(value, bool): + raise ParseError('Expected true or false without quotes') + return value + +_WKTJSONMETHODS = { + 'google.protobuf.Any': ['_AnyMessageToJsonObject', + '_ConvertAnyMessage'], + 'google.protobuf.Duration': ['_GenericMessageToJsonObject', + '_ConvertGenericMessage'], + 'google.protobuf.FieldMask': ['_GenericMessageToJsonObject', + '_ConvertGenericMessage'], + 'google.protobuf.ListValue': ['_ListValueMessageToJsonObject', + '_ConvertListValueMessage'], + 'google.protobuf.Struct': ['_StructMessageToJsonObject', + '_ConvertStructMessage'], + 'google.protobuf.Timestamp': ['_GenericMessageToJsonObject', + '_ConvertGenericMessage'], + 'google.protobuf.Value': ['_ValueMessageToJsonObject', + '_ConvertValueMessage'] +} diff --git a/openpype/hosts/nuke/vendor/google/protobuf/message.py b/openpype/hosts/nuke/vendor/google/protobuf/message.py new file mode 100644 index 0000000000..76c6802f70 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/message.py @@ -0,0 +1,424 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# TODO(robinson): We should just make these methods all "pure-virtual" and move +# all implementation out, into reflection.py for now. + + +"""Contains an abstract base class for protocol messages.""" + +__author__ = 'robinson@google.com (Will Robinson)' + +class Error(Exception): + """Base error type for this module.""" + pass + + +class DecodeError(Error): + """Exception raised when deserializing messages.""" + pass + + +class EncodeError(Error): + """Exception raised when serializing messages.""" + pass + + +class Message(object): + + """Abstract base class for protocol messages. + + Protocol message classes are almost always generated by the protocol + compiler. These generated types subclass Message and implement the methods + shown below. + """ + + # TODO(robinson): Link to an HTML document here. + + # TODO(robinson): Document that instances of this class will also + # have an Extensions attribute with __getitem__ and __setitem__. + # Again, not sure how to best convey this. + + # TODO(robinson): Document that the class must also have a static + # RegisterExtension(extension_field) method. + # Not sure how to best express at this point. + + # TODO(robinson): Document these fields and methods. + + __slots__ = [] + + #: The :class:`google.protobuf.descriptor.Descriptor` for this message type. + DESCRIPTOR = None + + def __deepcopy__(self, memo=None): + clone = type(self)() + clone.MergeFrom(self) + return clone + + def __eq__(self, other_msg): + """Recursively compares two messages by value and structure.""" + raise NotImplementedError + + def __ne__(self, other_msg): + # Can't just say self != other_msg, since that would infinitely recurse. :) + return not self == other_msg + + def __hash__(self): + raise TypeError('unhashable object') + + def __str__(self): + """Outputs a human-readable representation of the message.""" + raise NotImplementedError + + def __unicode__(self): + """Outputs a human-readable representation of the message.""" + raise NotImplementedError + + def MergeFrom(self, other_msg): + """Merges the contents of the specified message into current message. + + This method merges the contents of the specified message into the current + message. Singular fields that are set in the specified message overwrite + the corresponding fields in the current message. Repeated fields are + appended. 
Singular sub-messages and groups are recursively merged. + + Args: + other_msg (Message): A message to merge into the current message. + """ + raise NotImplementedError + + def CopyFrom(self, other_msg): + """Copies the content of the specified message into the current message. + + The method clears the current message and then merges the specified + message using MergeFrom. + + Args: + other_msg (Message): A message to copy into the current one. + """ + if self is other_msg: + return + self.Clear() + self.MergeFrom(other_msg) + + def Clear(self): + """Clears all data that was set in the message.""" + raise NotImplementedError + + def SetInParent(self): + """Mark this as present in the parent. + + This normally happens automatically when you assign a field of a + sub-message, but sometimes you want to make the sub-message + present while keeping it empty. If you find yourself using this, + you may want to reconsider your design. + """ + raise NotImplementedError + + def IsInitialized(self): + """Checks if the message is initialized. + + Returns: + bool: The method returns True if the message is initialized (i.e. all of + its required fields are set). + """ + raise NotImplementedError + + # TODO(robinson): MergeFromString() should probably return None and be + # implemented in terms of a helper that returns the # of bytes read. Our + # deserialization routines would use the helper when recursively + # deserializing, but the end user would almost always just want the no-return + # MergeFromString(). + + def MergeFromString(self, serialized): + """Merges serialized protocol buffer data into this message. + + When we find a field in `serialized` that is already present + in this message: + + - If it's a "repeated" field, we append to the end of our list. + - Else, if it's a scalar, we overwrite our field. + - Else, (it's a nonrepeated composite), we recursively merge + into the existing composite. + + Args: + serialized (bytes): Any object that allows us to call + ``memoryview(serialized)`` to access a string of bytes using the + buffer interface. + + Returns: + int: The number of bytes read from `serialized`. + For non-group messages, this will always be `len(serialized)`, + but for messages which are actually groups, this will + generally be less than `len(serialized)`, since we must + stop when we reach an ``END_GROUP`` tag. Note that if + we *do* stop because of an ``END_GROUP`` tag, the number + of bytes returned does not include the bytes + for the ``END_GROUP`` tag information. + + Raises: + DecodeError: if the input cannot be parsed. + """ + # TODO(robinson): Document handling of unknown fields. + # TODO(robinson): When we switch to a helper, this will return None. + raise NotImplementedError + + def ParseFromString(self, serialized): + """Parse serialized protocol buffer data into this message. + + Like :func:`MergeFromString()`, except we clear the object first. + + Raises: + message.DecodeError if the input cannot be parsed. + """ + self.Clear() + return self.MergeFromString(serialized) + + def SerializeToString(self, **kwargs): + """Serializes the protocol message to a binary string. + + Keyword Args: + deterministic (bool): If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + A binary string representation of the message if all of the required + fields in the message are set (i.e. the message is initialized). + + Raises: + EncodeError: if the message isn't initialized (see :func:`IsInitialized`). 
+ """ + raise NotImplementedError + + def SerializePartialToString(self, **kwargs): + """Serializes the protocol message to a binary string. + + This method is similar to SerializeToString but doesn't check if the + message is initialized. + + Keyword Args: + deterministic (bool): If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + bytes: A serialized representation of the partial message. + """ + raise NotImplementedError + + # TODO(robinson): Decide whether we like these better + # than auto-generated has_foo() and clear_foo() methods + # on the instances themselves. This way is less consistent + # with C++, but it makes reflection-type access easier and + # reduces the number of magically autogenerated things. + # + # TODO(robinson): Be sure to document (and test) exactly + # which field names are accepted here. Are we case-sensitive? + # What do we do with fields that share names with Python keywords + # like 'lambda' and 'yield'? + # + # nnorwitz says: + # """ + # Typically (in python), an underscore is appended to names that are + # keywords. So they would become lambda_ or yield_. + # """ + def ListFields(self): + """Returns a list of (FieldDescriptor, value) tuples for present fields. + + A message field is non-empty if HasField() would return true. A singular + primitive field is non-empty if HasField() would return true in proto2 or it + is non zero in proto3. A repeated field is non-empty if it contains at least + one element. The fields are ordered by field number. + + Returns: + list[tuple(FieldDescriptor, value)]: field descriptors and values + for all fields in the message which are not empty. The values vary by + field type. + """ + raise NotImplementedError + + def HasField(self, field_name): + """Checks if a certain field is set for the message. + + For a oneof group, checks if any field inside is set. Note that if the + field_name is not defined in the message descriptor, :exc:`ValueError` will + be raised. + + Args: + field_name (str): The name of the field to check for presence. + + Returns: + bool: Whether a value has been set for the named field. + + Raises: + ValueError: if the `field_name` is not a member of this message. + """ + raise NotImplementedError + + def ClearField(self, field_name): + """Clears the contents of a given field. + + Inside a oneof group, clears the field set. If the name neither refers to a + defined field or oneof group, :exc:`ValueError` is raised. + + Args: + field_name (str): The name of the field to check for presence. + + Raises: + ValueError: if the `field_name` is not a member of this message. + """ + raise NotImplementedError + + def WhichOneof(self, oneof_group): + """Returns the name of the field that is set inside a oneof group. + + If no field is set, returns None. + + Args: + oneof_group (str): the name of the oneof group to check. + + Returns: + str or None: The name of the group that is set, or None. + + Raises: + ValueError: no group with the given name exists + """ + raise NotImplementedError + + def HasExtension(self, extension_handle): + """Checks if a certain extension is present for this message. + + Extensions are retrieved using the :attr:`Extensions` mapping (if present). + + Args: + extension_handle: The handle for the extension to check. + + Returns: + bool: Whether the extension is present for this message. + + Raises: + KeyError: if the extension is repeated. 
Similar to repeated fields, + there is no separate notion of presence: a "not present" repeated + extension is an empty list. + """ + raise NotImplementedError + + def ClearExtension(self, extension_handle): + """Clears the contents of a given extension. + + Args: + extension_handle: The handle for the extension to clear. + """ + raise NotImplementedError + + def UnknownFields(self): + """Returns the UnknownFieldSet. + + Returns: + UnknownFieldSet: The unknown fields stored in this message. + """ + raise NotImplementedError + + def DiscardUnknownFields(self): + """Clears all fields in the :class:`UnknownFieldSet`. + + This operation is recursive for nested message. + """ + raise NotImplementedError + + def ByteSize(self): + """Returns the serialized size of this message. + + Recursively calls ByteSize() on all contained messages. + + Returns: + int: The number of bytes required to serialize this message. + """ + raise NotImplementedError + + @classmethod + def FromString(cls, s): + raise NotImplementedError + + @staticmethod + def RegisterExtension(extension_handle): + raise NotImplementedError + + def _SetListener(self, message_listener): + """Internal method used by the protocol message implementation. + Clients should not call this directly. + + Sets a listener that this message will call on certain state transitions. + + The purpose of this method is to register back-edges from children to + parents at runtime, for the purpose of setting "has" bits and + byte-size-dirty bits in the parent and ancestor objects whenever a child or + descendant object is modified. + + If the client wants to disconnect this Message from the object tree, she + explicitly sets callback to None. + + If message_listener is None, unregisters any existing listener. Otherwise, + message_listener must implement the MessageListener interface in + internal/message_listener.py, and we discard any listener registered + via a previous _SetListener() call. + """ + raise NotImplementedError + + def __getstate__(self): + """Support the pickle protocol.""" + return dict(serialized=self.SerializePartialToString()) + + def __setstate__(self, state): + """Support the pickle protocol.""" + self.__init__() + serialized = state['serialized'] + # On Python 3, using encoding='latin1' is required for unpickling + # protos pickled by Python 2. + if not isinstance(serialized, bytes): + serialized = serialized.encode('latin1') + self.ParseFromString(serialized) + + def __reduce__(self): + message_descriptor = self.DESCRIPTOR + if message_descriptor.containing_type is None: + return type(self), (), self.__getstate__() + # the message type must be nested. + # Python does not pickle nested classes; use the symbol_database on the + # receiving end. + container = message_descriptor + return (_InternalConstructMessage, (container.full_name,), + self.__getstate__()) + + +def _InternalConstructMessage(full_name): + """Constructs a nested message.""" + from google.protobuf import symbol_database # pylint:disable=g-import-not-at-top + + return symbol_database.Default().GetSymbol(full_name)() diff --git a/openpype/hosts/nuke/vendor/google/protobuf/message_factory.py b/openpype/hosts/nuke/vendor/google/protobuf/message_factory.py new file mode 100644 index 0000000000..3656fa6874 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/message_factory.py @@ -0,0 +1,185 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides a factory class for generating dynamic messages. + +The easiest way to use this class is if you have access to the FileDescriptor +protos containing the messages you want to create you can just do the following: + +message_classes = message_factory.GetMessages(iterable_of_file_descriptors) +my_proto_instance = message_classes['some.proto.package.MessageName']() +""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +from google.protobuf.internal import api_implementation +from google.protobuf import descriptor_pool +from google.protobuf import message + +if api_implementation.Type() == 'cpp': + from google.protobuf.pyext import cpp_message as message_impl +else: + from google.protobuf.internal import python_message as message_impl + + +# The type of all Message classes. +_GENERATED_PROTOCOL_MESSAGE_TYPE = message_impl.GeneratedProtocolMessageType + + +class MessageFactory(object): + """Factory for creating Proto2 messages from descriptors in a pool.""" + + def __init__(self, pool=None): + """Initializes a new factory.""" + self.pool = pool or descriptor_pool.DescriptorPool() + + # local cache of all classes built from protobuf descriptors + self._classes = {} + + def GetPrototype(self, descriptor): + """Obtains a proto2 message class based on the passed in descriptor. + + Passing a descriptor with a fully qualified name matching a previous + invocation will cause the same class to be returned. + + Args: + descriptor: The descriptor to build from. + + Returns: + A class describing the passed in descriptor. + """ + if descriptor not in self._classes: + result_class = self.CreatePrototype(descriptor) + # The assignment to _classes is redundant for the base implementation, but + # might avoid confusion in cases where CreatePrototype gets overridden and + # does not call the base implementation. 
+ self._classes[descriptor] = result_class + return result_class + return self._classes[descriptor] + + def CreatePrototype(self, descriptor): + """Builds a proto2 message class based on the passed in descriptor. + + Don't call this function directly, it always creates a new class. Call + GetPrototype() instead. This method is meant to be overridden in subblasses + to perform additional operations on the newly constructed class. + + Args: + descriptor: The descriptor to build from. + + Returns: + A class describing the passed in descriptor. + """ + descriptor_name = descriptor.name + result_class = _GENERATED_PROTOCOL_MESSAGE_TYPE( + descriptor_name, + (message.Message,), + { + 'DESCRIPTOR': descriptor, + # If module not set, it wrongly points to message_factory module. + '__module__': None, + }) + result_class._FACTORY = self # pylint: disable=protected-access + # Assign in _classes before doing recursive calls to avoid infinite + # recursion. + self._classes[descriptor] = result_class + for field in descriptor.fields: + if field.message_type: + self.GetPrototype(field.message_type) + for extension in result_class.DESCRIPTOR.extensions: + if extension.containing_type not in self._classes: + self.GetPrototype(extension.containing_type) + extended_class = self._classes[extension.containing_type] + extended_class.RegisterExtension(extension) + return result_class + + def GetMessages(self, files): + """Gets all the messages from a specified file. + + This will find and resolve dependencies, failing if the descriptor + pool cannot satisfy them. + + Args: + files: The file names to extract messages from. + + Returns: + A dictionary mapping proto names to the message classes. This will include + any dependent messages as well as any messages defined in the same file as + a specified message. + """ + result = {} + for file_name in files: + file_desc = self.pool.FindFileByName(file_name) + for desc in file_desc.message_types_by_name.values(): + result[desc.full_name] = self.GetPrototype(desc) + + # While the extension FieldDescriptors are created by the descriptor pool, + # the python classes created in the factory need them to be registered + # explicitly, which is done below. + # + # The call to RegisterExtension will specifically check if the + # extension was already registered on the object and either + # ignore the registration if the original was the same, or raise + # an error if they were different. + + for extension in file_desc.extensions_by_name.values(): + if extension.containing_type not in self._classes: + self.GetPrototype(extension.containing_type) + extended_class = self._classes[extension.containing_type] + extended_class.RegisterExtension(extension) + return result + + +_FACTORY = MessageFactory() + + +def GetMessages(file_protos): + """Builds a dictionary of all the messages available in a set of files. + + Args: + file_protos: Iterable of FileDescriptorProto to build messages out of. + + Returns: + A dictionary mapping proto names to the message classes. This will include + any dependent messages as well as any messages defined in the same file as + a specified message. + """ + # The cpp implementation of the protocol buffer library requires to add the + # message in topological order of the dependency graph. + file_by_name = {file_proto.name: file_proto for file_proto in file_protos} + def _AddFile(file_proto): + for dependency in file_proto.dependency: + if dependency in file_by_name: + # Remove from elements to be visited, in order to cut cycles. 
+ _AddFile(file_by_name.pop(dependency)) + _FACTORY.pool.Add(file_proto) + while file_by_name: + _AddFile(file_by_name.popitem()[1]) + return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos]) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/proto_builder.py b/openpype/hosts/nuke/vendor/google/protobuf/proto_builder.py new file mode 100644 index 0000000000..a4667ce63e --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/proto_builder.py @@ -0,0 +1,134 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Dynamic Protobuf class creator.""" + +from collections import OrderedDict +import hashlib +import os + +from google.protobuf import descriptor_pb2 +from google.protobuf import descriptor +from google.protobuf import message_factory + + +def _GetMessageFromFactory(factory, full_name): + """Get a proto class from the MessageFactory by name. + + Args: + factory: a MessageFactory instance. + full_name: str, the fully qualified name of the proto type. + Returns: + A class, for the type identified by full_name. + Raises: + KeyError, if the proto is not found in the factory's descriptor pool. + """ + proto_descriptor = factory.pool.FindMessageTypeByName(full_name) + proto_cls = factory.GetPrototype(proto_descriptor) + return proto_cls + + +def MakeSimpleProtoClass(fields, full_name=None, pool=None): + """Create a Protobuf class whose fields are basic types. + + Note: this doesn't validate field names! + + Args: + fields: dict of {name: field_type} mappings for each field in the proto. If + this is an OrderedDict the order will be maintained, otherwise the + fields will be sorted by name. + full_name: optional str, the fully-qualified name of the proto type. + pool: optional DescriptorPool instance. + Returns: + a class, the new protobuf class with a FileDescriptor. 
+ """ + factory = message_factory.MessageFactory(pool=pool) + + if full_name is not None: + try: + proto_cls = _GetMessageFromFactory(factory, full_name) + return proto_cls + except KeyError: + # The factory's DescriptorPool doesn't know about this class yet. + pass + + # Get a list of (name, field_type) tuples from the fields dict. If fields was + # an OrderedDict we keep the order, but otherwise we sort the field to ensure + # consistent ordering. + field_items = fields.items() + if not isinstance(fields, OrderedDict): + field_items = sorted(field_items) + + # Use a consistent file name that is unlikely to conflict with any imported + # proto files. + fields_hash = hashlib.sha1() + for f_name, f_type in field_items: + fields_hash.update(f_name.encode('utf-8')) + fields_hash.update(str(f_type).encode('utf-8')) + proto_file_name = fields_hash.hexdigest() + '.proto' + + # If the proto is anonymous, use the same hash to name it. + if full_name is None: + full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' + + fields_hash.hexdigest()) + try: + proto_cls = _GetMessageFromFactory(factory, full_name) + return proto_cls + except KeyError: + # The factory's DescriptorPool doesn't know about this class yet. + pass + + # This is the first time we see this proto: add a new descriptor to the pool. + factory.pool.Add( + _MakeFileDescriptorProto(proto_file_name, full_name, field_items)) + return _GetMessageFromFactory(factory, full_name) + + +def _MakeFileDescriptorProto(proto_file_name, full_name, field_items): + """Populate FileDescriptorProto for MessageFactory's DescriptorPool.""" + package, name = full_name.rsplit('.', 1) + file_proto = descriptor_pb2.FileDescriptorProto() + file_proto.name = os.path.join(package.replace('.', '/'), proto_file_name) + file_proto.package = package + desc_proto = file_proto.message_type.add() + desc_proto.name = name + for f_number, (f_name, f_type) in enumerate(field_items, 1): + field_proto = desc_proto.field.add() + field_proto.name = f_name + # # If the number falls in the reserved range, reassign it to the correct + # # number after the range. + if f_number >= descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER: + f_number += ( + descriptor.FieldDescriptor.LAST_RESERVED_FIELD_NUMBER - + descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER + 1) + field_proto.number = f_number + field_proto.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL + field_proto.type = f_type + return file_proto diff --git a/openpype/hosts/nuke/vendor/google/protobuf/pyext/__init__.py b/openpype/hosts/nuke/vendor/google/protobuf/pyext/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/vendor/google/protobuf/pyext/cpp_message.py b/openpype/hosts/nuke/vendor/google/protobuf/pyext/cpp_message.py new file mode 100644 index 0000000000..fc8eb32d79 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/pyext/cpp_message.py @@ -0,0 +1,65 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Protocol message implementation hooks for C++ implementation. + +Contains helper functions used to create protocol message classes from +Descriptor objects at runtime backed by the protocol buffer C++ API. +""" + +__author__ = 'tibell@google.com (Johan Tibell)' + +from google.protobuf.pyext import _message + + +class GeneratedProtocolMessageType(_message.MessageMeta): + + """Metaclass for protocol message classes created at runtime from Descriptors. + + The protocol compiler currently uses this metaclass to create protocol + message classes at runtime. Clients can also manually create their own + classes at runtime, as in this example: + + mydescriptor = Descriptor(.....) + factory = symbol_database.Default() + factory.pool.AddDescriptor(mydescriptor) + MyProtoClass = factory.GetPrototype(mydescriptor) + myproto_instance = MyProtoClass() + myproto.foo_field = 23 + ... + + The above example will not work for nested types. If you wish to include them, + use reflection.MakeClass() instead of manually instantiating the class in + order to create the appropriate class structure. + """ + + # Must be consistent with the protocol-compiler code in + # proto2/compiler/internal/generator.*. + _DESCRIPTOR_KEY = 'DESCRIPTOR' diff --git a/openpype/hosts/nuke/vendor/google/protobuf/pyext/python_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/pyext/python_pb2.py new file mode 100644 index 0000000000..2c6ecf4c98 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/pyext/python_pb2.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/pyext/python.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\"google/protobuf/pyext/python.proto\x12\x1fgoogle.protobuf.python.internal\"\xbc\x02\n\x0cTestAllTypes\x12\\\n\x17repeated_nested_message\x18\x01 \x03(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessage\x12\\\n\x17optional_nested_message\x18\x02 \x01(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessage\x12\x16\n\x0eoptional_int32\x18\x03 \x01(\x05\x1aX\n\rNestedMessage\x12\n\n\x02\x62\x62\x18\x01 \x01(\x05\x12;\n\x02\x63\x63\x18\x02 \x01(\x0b\x32/.google.protobuf.python.internal.ForeignMessage\"&\n\x0e\x46oreignMessage\x12\t\n\x01\x63\x18\x01 \x01(\x05\x12\t\n\x01\x64\x18\x02 \x03(\x05\"\x1d\n\x11TestAllExtensions*\x08\x08\x01\x10\x80\x80\x80\x80\x02:\x9a\x01\n!optional_nested_message_extension\x12\x32.google.protobuf.python.internal.TestAllExtensions\x18\x01 \x01(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessage:\x9a\x01\n!repeated_nested_message_extension\x12\x32.google.protobuf.python.internal.TestAllExtensions\x18\x02 \x03(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessageB\x02H\x01') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.pyext.python_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + TestAllExtensions.RegisterExtension(optional_nested_message_extension) + TestAllExtensions.RegisterExtension(repeated_nested_message_extension) + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'H\001' + _TESTALLTYPES._serialized_start=72 + _TESTALLTYPES._serialized_end=388 + _TESTALLTYPES_NESTEDMESSAGE._serialized_start=300 + _TESTALLTYPES_NESTEDMESSAGE._serialized_end=388 + _FOREIGNMESSAGE._serialized_start=390 + _FOREIGNMESSAGE._serialized_end=428 + _TESTALLEXTENSIONS._serialized_start=430 + _TESTALLEXTENSIONS._serialized_end=459 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/reflection.py b/openpype/hosts/nuke/vendor/google/protobuf/reflection.py new file mode 100644 index 0000000000..81e18859a8 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/reflection.py @@ -0,0 +1,95 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This code is meant to work on Python 2.4 and above only. + +"""Contains a metaclass and helper functions used to create +protocol message classes from Descriptor objects at runtime. + +Recall that a metaclass is the "type" of a class. +(A class is to a metaclass what an instance is to a class.) + +In this case, we use the GeneratedProtocolMessageType metaclass +to inject all the useful functionality into the classes +output by the protocol compiler at compile-time. + +The upshot of all this is that the real implementation +details for ALL pure-Python protocol buffers are *here in +this file*. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + + +from google.protobuf import message_factory +from google.protobuf import symbol_database + +# The type of all Message classes. +# Part of the public interface, but normally only used by message factories. +GeneratedProtocolMessageType = message_factory._GENERATED_PROTOCOL_MESSAGE_TYPE + +MESSAGE_CLASS_CACHE = {} + + +# Deprecated. Please NEVER use reflection.ParseMessage(). +def ParseMessage(descriptor, byte_str): + """Generate a new Message instance from this Descriptor and a byte string. + + DEPRECATED: ParseMessage is deprecated because it is using MakeClass(). + Please use MessageFactory.GetPrototype() instead. + + Args: + descriptor: Protobuf Descriptor object + byte_str: Serialized protocol buffer byte string + + Returns: + Newly created protobuf Message object. + """ + result_class = MakeClass(descriptor) + new_msg = result_class() + new_msg.ParseFromString(byte_str) + return new_msg + + +# Deprecated. Please NEVER use reflection.MakeClass(). +def MakeClass(descriptor): + """Construct a class object for a protobuf described by descriptor. + + DEPRECATED: use MessageFactory.GetPrototype() instead. + + Args: + descriptor: A descriptor.Descriptor object describing the protobuf. + Returns: + The Message class object described by the descriptor. + """ + # Original implementation leads to duplicate message classes, which won't play + # well with extensions. Message factory info is also missing. + # Redirect to message_factory. + return symbol_database.Default().GetPrototype(descriptor) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/service.py b/openpype/hosts/nuke/vendor/google/protobuf/service.py new file mode 100644 index 0000000000..5625246324 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/service.py @@ -0,0 +1,228 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""DEPRECATED: Declares the RPC service interfaces. + +This module declares the abstract interfaces underlying proto2 RPC +services. These are intended to be independent of any particular RPC +implementation, so that proto2 services can be used on top of a variety +of implementations. Starting with version 2.3.0, RPC implementations should +not try to build on these, but should instead provide code generator plugins +which generate code specific to the particular RPC implementation. This way +the generated code can be more appropriate for the implementation in use +and can avoid unnecessary layers of indirection. +""" + +__author__ = 'petar@google.com (Petar Petrov)' + + +class RpcException(Exception): + """Exception raised on failed blocking RPC method call.""" + pass + + +class Service(object): + + """Abstract base interface for protocol-buffer-based RPC services. + + Services themselves are abstract classes (implemented either by servers or as + stubs), but they subclass this base interface. The methods of this + interface can be used to call the methods of the service without knowing + its exact type at compile time (analogous to the Message interface). + """ + + def GetDescriptor(): + """Retrieves this service's descriptor.""" + raise NotImplementedError + + def CallMethod(self, method_descriptor, rpc_controller, + request, done): + """Calls a method of the service specified by method_descriptor. + + If "done" is None then the call is blocking and the response + message will be returned directly. Otherwise the call is asynchronous + and "done" will later be called with the response value. + + In the blocking case, RpcException will be raised on error. + + Preconditions: + + * method_descriptor.service == GetDescriptor + * request is of the exact same classes as returned by + GetRequestClass(method). + * After the call has started, the request must not be modified. + * "rpc_controller" is of the correct type for the RPC implementation being + used by this Service. 
For stubs, the "correct type" depends on the + RpcChannel which the stub is using. + + Postconditions: + + * "done" will be called when the method is complete. This may be + before CallMethod() returns or it may be at some point in the future. + * If the RPC failed, the response value passed to "done" will be None. + Further details about the failure can be found by querying the + RpcController. + """ + raise NotImplementedError + + def GetRequestClass(self, method_descriptor): + """Returns the class of the request message for the specified method. + + CallMethod() requires that the request is of a particular subclass of + Message. GetRequestClass() gets the default instance of this required + type. + + Example: + method = service.GetDescriptor().FindMethodByName("Foo") + request = stub.GetRequestClass(method)() + request.ParseFromString(input) + service.CallMethod(method, request, callback) + """ + raise NotImplementedError + + def GetResponseClass(self, method_descriptor): + """Returns the class of the response message for the specified method. + + This method isn't really needed, as the RpcChannel's CallMethod constructs + the response protocol message. It's provided anyway in case it is useful + for the caller to know the response type in advance. + """ + raise NotImplementedError + + +class RpcController(object): + + """An RpcController mediates a single method call. + + The primary purpose of the controller is to provide a way to manipulate + settings specific to the RPC implementation and to find out about RPC-level + errors. The methods provided by the RpcController interface are intended + to be a "least common denominator" set of features which we expect all + implementations to support. Specific implementations may provide more + advanced features (e.g. deadline propagation). + """ + + # Client-side methods below + + def Reset(self): + """Resets the RpcController to its initial state. + + After the RpcController has been reset, it may be reused in + a new call. Must not be called while an RPC is in progress. + """ + raise NotImplementedError + + def Failed(self): + """Returns true if the call failed. + + After a call has finished, returns true if the call failed. The possible + reasons for failure depend on the RPC implementation. Failed() must not + be called before a call has finished. If Failed() returns true, the + contents of the response message are undefined. + """ + raise NotImplementedError + + def ErrorText(self): + """If Failed is true, returns a human-readable description of the error.""" + raise NotImplementedError + + def StartCancel(self): + """Initiate cancellation. + + Advises the RPC system that the caller desires that the RPC call be + canceled. The RPC system may cancel it immediately, may wait awhile and + then cancel it, or may not even cancel the call at all. If the call is + canceled, the "done" callback will still be called and the RpcController + will indicate that the call failed at that time. + """ + raise NotImplementedError + + # Server-side methods below + + def SetFailed(self, reason): + """Sets a failure reason. + + Causes Failed() to return true on the client side. "reason" will be + incorporated into the message returned by ErrorText(). If you find + you need to return machine-readable information about failures, you + should incorporate it into your response protocol buffer and should + NOT call SetFailed(). + """ + raise NotImplementedError + + def IsCanceled(self): + """Checks if the client cancelled the RPC. 
+ + If true, indicates that the client canceled the RPC, so the server may + as well give up on replying to it. The server should still call the + final "done" callback. + """ + raise NotImplementedError + + def NotifyOnCancel(self, callback): + """Sets a callback to invoke on cancel. + + Asks that the given callback be called when the RPC is canceled. The + callback will always be called exactly once. If the RPC completes without + being canceled, the callback will be called after completion. If the RPC + has already been canceled when NotifyOnCancel() is called, the callback + will be called immediately. + + NotifyOnCancel() must be called no more than once per request. + """ + raise NotImplementedError + + +class RpcChannel(object): + + """Abstract interface for an RPC channel. + + An RpcChannel represents a communication line to a service which can be used + to call that service's methods. The service may be running on another + machine. Normally, you should not use an RpcChannel directly, but instead + construct a stub {@link Service} wrapping it. Example: + + Example: + RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234") + RpcController controller = rpcImpl.Controller() + MyService service = MyService_Stub(channel) + service.MyMethod(controller, request, callback) + """ + + def CallMethod(self, method_descriptor, rpc_controller, + request, response_class, done): + """Calls the method identified by the descriptor. + + Call the given method of the remote service. The signature of this + procedure looks the same as Service.CallMethod(), but the requirements + are less strict in one important way: the request object doesn't have to + be of any specific class as long as its descriptor is method.input_type. + """ + raise NotImplementedError diff --git a/openpype/hosts/nuke/vendor/google/protobuf/service_reflection.py b/openpype/hosts/nuke/vendor/google/protobuf/service_reflection.py new file mode 100644 index 0000000000..f82ab7145a --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/service_reflection.py @@ -0,0 +1,295 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains metaclasses used to create protocol service and service stub +classes from ServiceDescriptor objects at runtime. + +The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to +inject all useful functionality into the classes output by the protocol +compiler at compile-time. +""" + +__author__ = 'petar@google.com (Petar Petrov)' + + +class GeneratedServiceType(type): + + """Metaclass for service classes created at runtime from ServiceDescriptors. + + Implementations for all methods described in the Service class are added here + by this class. We also create properties to allow getting/setting all fields + in the protocol message. + + The protocol compiler currently uses this metaclass to create protocol service + classes at runtime. Clients can also manually create their own classes at + runtime, as in this example:: + + mydescriptor = ServiceDescriptor(.....) + class MyProtoService(service.Service): + __metaclass__ = GeneratedServiceType + DESCRIPTOR = mydescriptor + myservice_instance = MyProtoService() + # ... + """ + + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __init__(cls, name, bases, dictionary): + """Creates a message service class. + + Args: + name: Name of the class (ignored, but required by the metaclass + protocol). + bases: Base classes of the class being constructed. + dictionary: The class dictionary of the class being constructed. + dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object + describing this protocol service type. + """ + # Don't do anything if this class doesn't have a descriptor. This happens + # when a service class is subclassed. + if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary: + return + + descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY] + service_builder = _ServiceBuilder(descriptor) + service_builder.BuildService(cls) + cls.DESCRIPTOR = descriptor + + +class GeneratedServiceStubType(GeneratedServiceType): + + """Metaclass for service stubs created at runtime from ServiceDescriptors. + + This class has similar responsibilities as GeneratedServiceType, except that + it creates the service stub classes. + """ + + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __init__(cls, name, bases, dictionary): + """Creates a message service stub class. + + Args: + name: Name of the class (ignored, here). + bases: Base classes of the class being constructed. + dictionary: The class dictionary of the class being constructed. + dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object + describing this protocol service type. + """ + super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary) + # Don't do anything if this class doesn't have a descriptor. This happens + # when a service stub is subclassed. 
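+    # For classes that do carry a DESCRIPTOR, the _ServiceStubBuilder below
+    # attaches an __init__ that stores the given RpcChannel on the stub and
+    # generates one stub method per service method, each of which delegates to
+    # rpc_channel.CallMethod() with the method's response class.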
+ if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary: + return + + descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY] + service_stub_builder = _ServiceStubBuilder(descriptor) + service_stub_builder.BuildServiceStub(cls) + + +class _ServiceBuilder(object): + + """This class constructs a protocol service class using a service descriptor. + + Given a service descriptor, this class constructs a class that represents + the specified service descriptor. One service builder instance constructs + exactly one service class. That means all instances of that class share the + same builder. + """ + + def __init__(self, service_descriptor): + """Initializes an instance of the service class builder. + + Args: + service_descriptor: ServiceDescriptor to use when constructing the + service class. + """ + self.descriptor = service_descriptor + + def BuildService(builder, cls): + """Constructs the service class. + + Args: + cls: The class that will be constructed. + """ + + # CallMethod needs to operate with an instance of the Service class. This + # internal wrapper function exists only to be able to pass the service + # instance to the method that does the real CallMethod work. + # Making sure to use exact argument names from the abstract interface in + # service.py to match the type signature + def _WrapCallMethod(self, method_descriptor, rpc_controller, request, done): + return builder._CallMethod(self, method_descriptor, rpc_controller, + request, done) + + def _WrapGetRequestClass(self, method_descriptor): + return builder._GetRequestClass(method_descriptor) + + def _WrapGetResponseClass(self, method_descriptor): + return builder._GetResponseClass(method_descriptor) + + builder.cls = cls + cls.CallMethod = _WrapCallMethod + cls.GetDescriptor = staticmethod(lambda: builder.descriptor) + cls.GetDescriptor.__doc__ = 'Returns the service descriptor.' + cls.GetRequestClass = _WrapGetRequestClass + cls.GetResponseClass = _WrapGetResponseClass + for method in builder.descriptor.methods: + setattr(cls, method.name, builder._GenerateNonImplementedMethod(method)) + + def _CallMethod(self, srvc, method_descriptor, + rpc_controller, request, callback): + """Calls the method described by a given method descriptor. + + Args: + srvc: Instance of the service for which this method is called. + method_descriptor: Descriptor that represent the method to call. + rpc_controller: RPC controller to use for this method's execution. + request: Request protocol message. + callback: A callback to invoke after the method has completed. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'CallMethod() given method descriptor for wrong service type.') + method = getattr(srvc, method_descriptor.name) + return method(rpc_controller, request, callback) + + def _GetRequestClass(self, method_descriptor): + """Returns the class of the request protocol message. + + Args: + method_descriptor: Descriptor of the method for which to return the + request protocol message class. + + Returns: + A class that represents the input protocol message of the specified + method. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'GetRequestClass() given method descriptor for wrong service type.') + return method_descriptor.input_type._concrete_class + + def _GetResponseClass(self, method_descriptor): + """Returns the class of the response protocol message. 
+ + Args: + method_descriptor: Descriptor of the method for which to return the + response protocol message class. + + Returns: + A class that represents the output protocol message of the specified + method. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'GetResponseClass() given method descriptor for wrong service type.') + return method_descriptor.output_type._concrete_class + + def _GenerateNonImplementedMethod(self, method): + """Generates and returns a method that can be set for a service methods. + + Args: + method: Descriptor of the service method for which a method is to be + generated. + + Returns: + A method that can be added to the service class. + """ + return lambda inst, rpc_controller, request, callback: ( + self._NonImplementedMethod(method.name, rpc_controller, callback)) + + def _NonImplementedMethod(self, method_name, rpc_controller, callback): + """The body of all methods in the generated service class. + + Args: + method_name: Name of the method being executed. + rpc_controller: RPC controller used to execute this method. + callback: A callback which will be invoked when the method finishes. + """ + rpc_controller.SetFailed('Method %s not implemented.' % method_name) + callback(None) + + +class _ServiceStubBuilder(object): + + """Constructs a protocol service stub class using a service descriptor. + + Given a service descriptor, this class constructs a suitable stub class. + A stub is just a type-safe wrapper around an RpcChannel which emulates a + local implementation of the service. + + One service stub builder instance constructs exactly one class. It means all + instances of that class share the same service stub builder. + """ + + def __init__(self, service_descriptor): + """Initializes an instance of the service stub class builder. + + Args: + service_descriptor: ServiceDescriptor to use when constructing the + stub class. + """ + self.descriptor = service_descriptor + + def BuildServiceStub(self, cls): + """Constructs the stub class. + + Args: + cls: The class that will be constructed. + """ + + def _ServiceStubInit(stub, rpc_channel): + stub.rpc_channel = rpc_channel + self.cls = cls + cls.__init__ = _ServiceStubInit + for method in self.descriptor.methods: + setattr(cls, method.name, self._GenerateStubMethod(method)) + + def _GenerateStubMethod(self, method): + return (lambda inst, rpc_controller, request, callback=None: + self._StubMethod(inst, method, rpc_controller, request, callback)) + + def _StubMethod(self, stub, method_descriptor, + rpc_controller, request, callback): + """The body of all service methods in the generated stub class. + + Args: + stub: Stub instance. + method_descriptor: Descriptor of the invoked method. + rpc_controller: Rpc controller to execute the method. + request: Request protocol message. + callback: A callback to execute when the method finishes. + Returns: + Response message (in case of blocking call). + """ + return stub.rpc_channel.CallMethod( + method_descriptor, rpc_controller, request, + method_descriptor.output_type._concrete_class, callback) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/source_context_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/source_context_pb2.py new file mode 100644 index 0000000000..30cca2e06e --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/source_context_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/source_context.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$google/protobuf/source_context.proto\x12\x0fgoogle.protobuf\"\"\n\rSourceContext\x12\x11\n\tfile_name\x18\x01 \x01(\tB\x8a\x01\n\x13\x63om.google.protobufB\x12SourceContextProtoP\x01Z6google.golang.org/protobuf/types/known/sourcecontextpb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.source_context_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\022SourceContextProtoP\001Z6google.golang.org/protobuf/types/known/sourcecontextpb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _SOURCECONTEXT._serialized_start=57 + _SOURCECONTEXT._serialized_end=91 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/struct_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/struct_pb2.py new file mode 100644 index 0000000000..149728ca08 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/struct_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/struct.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x84\x01\n\x06Struct\x12\x33\n\x06\x66ields\x18\x01 \x03(\x0b\x32#.google.protobuf.Struct.FieldsEntry\x1a\x45\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value:\x02\x38\x01\"\xea\x01\n\x05Value\x12\x30\n\nnull_value\x18\x01 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x12\x16\n\x0cnumber_value\x18\x02 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x12\x14\n\nbool_value\x18\x04 \x01(\x08H\x00\x12/\n\x0cstruct_value\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructH\x00\x12\x30\n\nlist_value\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x42\x06\n\x04kind\"3\n\tListValue\x12&\n\x06values\x18\x01 \x03(\x0b\x32\x16.google.protobuf.Value*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00\x42\x7f\n\x13\x63om.google.protobufB\x0bStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.struct_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = 
b'\n\023com.google.protobufB\013StructProtoP\001Z/google.golang.org/protobuf/types/known/structpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _STRUCT_FIELDSENTRY._options = None + _STRUCT_FIELDSENTRY._serialized_options = b'8\001' + _NULLVALUE._serialized_start=474 + _NULLVALUE._serialized_end=501 + _STRUCT._serialized_start=50 + _STRUCT._serialized_end=182 + _STRUCT_FIELDSENTRY._serialized_start=113 + _STRUCT_FIELDSENTRY._serialized_end=182 + _VALUE._serialized_start=185 + _VALUE._serialized_end=419 + _LISTVALUE._serialized_start=421 + _LISTVALUE._serialized_end=472 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/symbol_database.py b/openpype/hosts/nuke/vendor/google/protobuf/symbol_database.py new file mode 100644 index 0000000000..fdcf8cf06c --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/symbol_database.py @@ -0,0 +1,194 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A database of Python protocol buffer generated symbols. + +SymbolDatabase is the MessageFactory for messages generated at compile time, +and makes it easy to create new instances of a registered type, given only the +type's protocol buffer symbol name. + +Example usage:: + + db = symbol_database.SymbolDatabase() + + # Register symbols of interest, from one or multiple files. 
+ db.RegisterFileDescriptor(my_proto_pb2.DESCRIPTOR) + db.RegisterMessage(my_proto_pb2.MyMessage) + db.RegisterEnumDescriptor(my_proto_pb2.MyEnum.DESCRIPTOR) + + # The database can be used as a MessageFactory, to generate types based on + # their name: + types = db.GetMessages(['my_proto.proto']) + my_message_instance = types['MyMessage']() + + # The database's underlying descriptor pool can be queried, so it's not + # necessary to know a type's filename to be able to generate it: + filename = db.pool.FindFileContainingSymbol('MyMessage') + my_message_instance = db.GetMessages([filename])['MyMessage']() + + # This functionality is also provided directly via a convenience method: + my_message_instance = db.GetSymbol('MyMessage')() +""" + + +from google.protobuf.internal import api_implementation +from google.protobuf import descriptor_pool +from google.protobuf import message_factory + + +class SymbolDatabase(message_factory.MessageFactory): + """A database of Python generated symbols.""" + + def RegisterMessage(self, message): + """Registers the given message type in the local database. + + Calls to GetSymbol() and GetMessages() will return messages registered here. + + Args: + message: A :class:`google.protobuf.message.Message` subclass (or + instance); its descriptor will be registered. + + Returns: + The provided message. + """ + + desc = message.DESCRIPTOR + self._classes[desc] = message + self.RegisterMessageDescriptor(desc) + return message + + def RegisterMessageDescriptor(self, message_descriptor): + """Registers the given message descriptor in the local database. + + Args: + message_descriptor (Descriptor): the message descriptor to add. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddDescriptor(message_descriptor) + + def RegisterEnumDescriptor(self, enum_descriptor): + """Registers the given enum descriptor in the local database. + + Args: + enum_descriptor (EnumDescriptor): The enum descriptor to register. + + Returns: + EnumDescriptor: The provided descriptor. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddEnumDescriptor(enum_descriptor) + return enum_descriptor + + def RegisterServiceDescriptor(self, service_descriptor): + """Registers the given service descriptor in the local database. + + Args: + service_descriptor (ServiceDescriptor): the service descriptor to + register. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddServiceDescriptor(service_descriptor) + + def RegisterFileDescriptor(self, file_descriptor): + """Registers the given file descriptor in the local database. + + Args: + file_descriptor (FileDescriptor): The file descriptor to register. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._InternalAddFileDescriptor(file_descriptor) + + def GetSymbol(self, symbol): + """Tries to find a symbol in the local database. + + Currently, this method only returns message.Message instances, however, if + may be extended in future to support other symbol types. + + Args: + symbol (str): a protocol buffer symbol. + + Returns: + A Python class corresponding to the symbol. + + Raises: + KeyError: if the symbol could not be found. + """ + + return self._classes[self.pool.FindMessageTypeByName(symbol)] + + def GetMessages(self, files): + # TODO(amauryfa): Fix the differences with MessageFactory. + """Gets all registered messages from a specified file. 
+ + Only messages already created and registered will be returned; (this is the + case for imported _pb2 modules) + But unlike MessageFactory, this version also returns already defined nested + messages, but does not register any message extensions. + + Args: + files (list[str]): The file names to extract messages from. + + Returns: + A dictionary mapping proto names to the message classes. + + Raises: + KeyError: if a file could not be found. + """ + + def _GetAllMessages(desc): + """Walk a message Descriptor and recursively yields all message names.""" + yield desc + for msg_desc in desc.nested_types: + for nested_desc in _GetAllMessages(msg_desc): + yield nested_desc + + result = {} + for file_name in files: + file_desc = self.pool.FindFileByName(file_name) + for msg_desc in file_desc.message_types_by_name.values(): + for desc in _GetAllMessages(msg_desc): + try: + result[desc.full_name] = self._classes[desc] + except KeyError: + # This descriptor has no registered class, skip it. + pass + return result + + +_DEFAULT = SymbolDatabase(pool=descriptor_pool.Default()) + + +def Default(): + """Returns the default SymbolDatabase.""" + return _DEFAULT diff --git a/openpype/hosts/nuke/vendor/google/protobuf/text_encoding.py b/openpype/hosts/nuke/vendor/google/protobuf/text_encoding.py new file mode 100644 index 0000000000..759cf11f62 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/text_encoding.py @@ -0,0 +1,110 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Encoding related utilities.""" +import re + +_cescape_chr_to_symbol_map = {} +_cescape_chr_to_symbol_map[9] = r'\t' # optional escape +_cescape_chr_to_symbol_map[10] = r'\n' # optional escape +_cescape_chr_to_symbol_map[13] = r'\r' # optional escape +_cescape_chr_to_symbol_map[34] = r'\"' # necessary escape +_cescape_chr_to_symbol_map[39] = r"\'" # optional escape +_cescape_chr_to_symbol_map[92] = r'\\' # necessary escape + +# Lookup table for unicode +_cescape_unicode_to_str = [chr(i) for i in range(0, 256)] +for byte, string in _cescape_chr_to_symbol_map.items(): + _cescape_unicode_to_str[byte] = string + +# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32) +_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] + + [chr(i) for i in range(32, 127)] + + [r'\%03o' % i for i in range(127, 256)]) +for byte, string in _cescape_chr_to_symbol_map.items(): + _cescape_byte_to_str[byte] = string +del byte, string + + +def CEscape(text, as_utf8): + # type: (...) -> str + """Escape a bytes string for use in an text protocol buffer. + + Args: + text: A byte string to be escaped. + as_utf8: Specifies if result may contain non-ASCII characters. + In Python 3 this allows unescaped non-ASCII Unicode characters. + In Python 2 the return value will be valid UTF-8 rather than only ASCII. + Returns: + Escaped string (str). + """ + # Python's text.encode() 'string_escape' or 'unicode_escape' codecs do not + # satisfy our needs; they encodes unprintable characters using two-digit hex + # escapes whereas our C++ unescaping function allows hex escapes to be any + # length. So, "\0011".encode('string_escape') ends up being "\\x011", which + # will be decoded in C++ as a single-character string with char code 0x11. + text_is_unicode = isinstance(text, str) + if as_utf8 and text_is_unicode: + # We're already unicode, no processing beyond control char escapes. + return text.translate(_cescape_chr_to_symbol_map) + ord_ = ord if text_is_unicode else lambda x: x # bytes iterate as ints. + if as_utf8: + return ''.join(_cescape_unicode_to_str[ord_(c)] for c in text) + return ''.join(_cescape_byte_to_str[ord_(c)] for c in text) + + +_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])') + + +def CUnescape(text): + # type: (str) -> bytes + """Unescape a text string with C-style escape sequences to UTF-8 bytes. + + Args: + text: The data to parse in a str. + Returns: + A byte string. + """ + + def ReplaceHex(m): + # Only replace the match if the number of leading back slashes is odd. i.e. + # the slash itself is not escaped. + if len(m.group(1)) & 1: + return m.group(1) + 'x0' + m.group(2) + return m.group(0) + + # This is required because the 'string_escape' encoding doesn't + # allow single-digit hex escapes (like '\xf'). + result = _CUNESCAPE_HEX.sub(ReplaceHex, text) + + return (result.encode('utf-8') # Make it bytes to allow decode. + .decode('unicode_escape') + # Make it bytes again to return the proper type. + .encode('raw_unicode_escape')) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/text_format.py b/openpype/hosts/nuke/vendor/google/protobuf/text_format.py new file mode 100644 index 0000000000..412385c26f --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/text_format.py @@ -0,0 +1,1795 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains routines for printing protocol messages in text format. + +Simple usage example:: + + # Create a proto object and serialize it to a text proto string. + message = my_proto_pb2.MyMessage(foo='bar') + text_proto = text_format.MessageToString(message) + + # Parse a text proto string. + message = text_format.Parse(text_proto, my_proto_pb2.MyMessage()) +""" + +__author__ = 'kenton@google.com (Kenton Varda)' + +# TODO(b/129989314) Import thread contention leads to test failures. 
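+# The two `encodings` imports below eagerly load the codec modules behind the
+# 'raw_unicode_escape' and 'unicode_escape' codecs used by
+# text_encoding.CUnescape(); preloading them here appears to be the workaround
+# for the import-time contention noted in the TODO above.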
+import encodings.raw_unicode_escape # pylint: disable=unused-import +import encodings.unicode_escape # pylint: disable=unused-import +import io +import math +import re + +from google.protobuf.internal import decoder +from google.protobuf.internal import type_checkers +from google.protobuf import descriptor +from google.protobuf import text_encoding + +# pylint: disable=g-import-not-at-top +__all__ = ['MessageToString', 'Parse', 'PrintMessage', 'PrintField', + 'PrintFieldValue', 'Merge', 'MessageToBytes'] + +_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(), + type_checkers.Int32ValueChecker(), + type_checkers.Uint64ValueChecker(), + type_checkers.Int64ValueChecker()) +_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?$', re.IGNORECASE) +_FLOAT_NAN = re.compile('nanf?$', re.IGNORECASE) +_QUOTES = frozenset(("'", '"')) +_ANY_FULL_TYPE_NAME = 'google.protobuf.Any' + + +class Error(Exception): + """Top-level module error for text_format.""" + + +class ParseError(Error): + """Thrown in case of text parsing or tokenizing error.""" + + def __init__(self, message=None, line=None, column=None): + if message is not None and line is not None: + loc = str(line) + if column is not None: + loc += ':{0}'.format(column) + message = '{0} : {1}'.format(loc, message) + if message is not None: + super(ParseError, self).__init__(message) + else: + super(ParseError, self).__init__() + self._line = line + self._column = column + + def GetLine(self): + return self._line + + def GetColumn(self): + return self._column + + +class TextWriter(object): + + def __init__(self, as_utf8): + self._writer = io.StringIO() + + def write(self, val): + return self._writer.write(val) + + def close(self): + return self._writer.close() + + def getvalue(self): + return self._writer.getvalue() + + +def MessageToString( + message, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + indent=0, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + # type: (...) -> str + """Convert protobuf message to text format. + + Double values can be formatted compactly with 15 digits of + precision (which is the most that IEEE 754 "double" can guarantee) + using double_format='.15g'. To ensure that converting to text and back to a + proto will result in an identical value, double_format='.17g' should be used. + + Args: + message: The protocol buffers message. + as_utf8: Return unescaped Unicode for non-ASCII characters. + In Python 3 actual Unicode characters may appear as is in strings. + In Python 2 the return value will be valid UTF-8 rather than only ASCII. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, fields of a proto message will be printed using + the order defined in source code instead of the field number, extensions + will be printed at the end of the message and their relative order is + determined by the extension number. By default, use the field number + order. + float_format (str): If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest float + that has same value in wire will be printed. Also affect double field + if double_format is not set but float_format is set. 
+ double_format (str): If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, use ``str()`` + use_field_number: If True, print field numbers instead of names. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + indent (int): The initial indent level, in terms of spaces, for pretty + print. + message_formatter (function(message, indent, as_one_line) -> unicode|None): + Custom formatter for selected sub-messages (usually based on message + type). Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if the + field is a proto message. + + Returns: + str: A string of the text formatted protocol buffer message. + """ + out = TextWriter(as_utf8) + printer = _Printer( + out, + indent, + as_utf8, + as_one_line, + use_short_repeated_primitives, + pointy_brackets, + use_index_order, + float_format, + double_format, + use_field_number, + descriptor_pool, + message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintMessage(message) + result = out.getvalue() + out.close() + if as_one_line: + return result.rstrip() + return result + + +def MessageToBytes(message, **kwargs): + # type: (...) -> bytes + """Convert protobuf message to encoded text format. See MessageToString.""" + text = MessageToString(message, **kwargs) + if isinstance(text, bytes): + return text + codec = 'utf-8' if kwargs.get('as_utf8') else 'ascii' + return text.encode(codec) + + +def _IsMapEntry(field): + return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.message_type.has_options and + field.message_type.GetOptions().map_entry) + + +def PrintMessage(message, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + printer = _Printer( + out=out, indent=indent, as_utf8=as_utf8, + as_one_line=as_one_line, + use_short_repeated_primitives=use_short_repeated_primitives, + pointy_brackets=pointy_brackets, + use_index_order=use_index_order, + float_format=float_format, + double_format=double_format, + use_field_number=use_field_number, + descriptor_pool=descriptor_pool, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintMessage(message) + + +def PrintField(field, + value, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Print a single field name/value pair.""" + printer = _Printer(out, indent, as_utf8, as_one_line, + use_short_repeated_primitives, pointy_brackets, + use_index_order, float_format, double_format, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintField(field, value) + + +def PrintFieldValue(field, + value, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, 
+ float_format=None, + double_format=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Print a single field value (not including name).""" + printer = _Printer(out, indent, as_utf8, as_one_line, + use_short_repeated_primitives, pointy_brackets, + use_index_order, float_format, double_format, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintFieldValue(field, value) + + +def _BuildMessageFromTypeName(type_name, descriptor_pool): + """Returns a protobuf message instance. + + Args: + type_name: Fully-qualified protobuf message type name string. + descriptor_pool: DescriptorPool instance. + + Returns: + A Message instance of type matching type_name, or None if the a Descriptor + wasn't found matching type_name. + """ + # pylint: disable=g-import-not-at-top + if descriptor_pool is None: + from google.protobuf import descriptor_pool as pool_mod + descriptor_pool = pool_mod.Default() + from google.protobuf import symbol_database + database = symbol_database.Default() + try: + message_descriptor = descriptor_pool.FindMessageTypeByName(type_name) + except KeyError: + return None + message_type = database.GetPrototype(message_descriptor) + return message_type() + + +# These values must match WireType enum in google/protobuf/wire_format.h. +WIRETYPE_LENGTH_DELIMITED = 2 +WIRETYPE_START_GROUP = 3 + + +class _Printer(object): + """Text format printer for protocol message.""" + + def __init__( + self, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Initialize the Printer. + + Double values can be formatted compactly with 15 digits of precision + (which is the most that IEEE 754 "double" can guarantee) using + double_format='.15g'. To ensure that converting to text and back to a proto + will result in an identical value, double_format='.17g' should be used. + + Args: + out: To record the text format result. + indent: The initial indent level for pretty print. + as_utf8: Return unescaped Unicode for non-ASCII characters. + In Python 3 actual Unicode characters may appear as is in strings. + In Python 2 the return value will be valid UTF-8 rather than ASCII. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, print fields of a proto message using the order + defined in source code instead of the field number. By default, use the + field number order. + float_format: If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest + float that has same value in wire will be printed. Also affect double + field if double_format is not set but float_format is set. + double_format: If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, str() is used. + use_field_number: If True, print field numbers instead of names. + descriptor_pool: A DescriptorPool used to resolve Any types. 
+ message_formatter: A function(message, indent, as_one_line): unicode|None + to custom format selected sub-messages (usually based on message type). + Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if + the field is a proto message. + """ + self.out = out + self.indent = indent + self.as_utf8 = as_utf8 + self.as_one_line = as_one_line + self.use_short_repeated_primitives = use_short_repeated_primitives + self.pointy_brackets = pointy_brackets + self.use_index_order = use_index_order + self.float_format = float_format + if double_format is not None: + self.double_format = double_format + else: + self.double_format = float_format + self.use_field_number = use_field_number + self.descriptor_pool = descriptor_pool + self.message_formatter = message_formatter + self.print_unknown_fields = print_unknown_fields + self.force_colon = force_colon + + def _TryPrintAsAnyMessage(self, message): + """Serializes if message is a google.protobuf.Any field.""" + if '/' not in message.type_url: + return False + packed_message = _BuildMessageFromTypeName(message.TypeName(), + self.descriptor_pool) + if packed_message: + packed_message.MergeFromString(message.value) + colon = ':' if self.force_colon else '' + self.out.write('%s[%s]%s ' % (self.indent * ' ', message.type_url, colon)) + self._PrintMessageFieldValue(packed_message) + self.out.write(' ' if self.as_one_line else '\n') + return True + else: + return False + + def _TryCustomFormatMessage(self, message): + formatted = self.message_formatter(message, self.indent, self.as_one_line) + if formatted is None: + return False + + out = self.out + out.write(' ' * self.indent) + out.write(formatted) + out.write(' ' if self.as_one_line else '\n') + return True + + def PrintMessage(self, message): + """Convert protobuf message to text format. + + Args: + message: The protocol buffers message. + """ + if self.message_formatter and self._TryCustomFormatMessage(message): + return + if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and + self._TryPrintAsAnyMessage(message)): + return + fields = message.ListFields() + if self.use_index_order: + fields.sort( + key=lambda x: x[0].number if x[0].is_extension else x[0].index) + for field, value in fields: + if _IsMapEntry(field): + for key in sorted(value): + # This is slow for maps with submessage entries because it copies the + # entire tree. Unfortunately this would take significant refactoring + # of this file to work around. + # + # TODO(haberman): refactor and optimize if this becomes an issue. 
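+          # Each map item is materialized as a synthetic key/value entry
+          # message and printed as if it were an element of a repeated
+          # message field.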
+ entry_submsg = value.GetEntryClass()(key=key, value=value[key]) + self.PrintField(field, entry_submsg) + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if (self.use_short_repeated_primitives + and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE + and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_STRING): + self._PrintShortRepeatedPrimitivesValue(field, value) + else: + for element in value: + self.PrintField(field, element) + else: + self.PrintField(field, value) + + if self.print_unknown_fields: + self._PrintUnknownFields(message.UnknownFields()) + + def _PrintUnknownFields(self, unknown_fields): + """Print unknown fields.""" + out = self.out + for field in unknown_fields: + out.write(' ' * self.indent) + out.write(str(field.field_number)) + if field.wire_type == WIRETYPE_START_GROUP: + if self.as_one_line: + out.write(' { ') + else: + out.write(' {\n') + self.indent += 2 + + self._PrintUnknownFields(field.data) + + if self.as_one_line: + out.write('} ') + else: + self.indent -= 2 + out.write(' ' * self.indent + '}\n') + elif field.wire_type == WIRETYPE_LENGTH_DELIMITED: + try: + # If this field is parseable as a Message, it is probably + # an embedded message. + # pylint: disable=protected-access + (embedded_unknown_message, pos) = decoder._DecodeUnknownFieldSet( + memoryview(field.data), 0, len(field.data)) + except Exception: # pylint: disable=broad-except + pos = 0 + + if pos == len(field.data): + if self.as_one_line: + out.write(' { ') + else: + out.write(' {\n') + self.indent += 2 + + self._PrintUnknownFields(embedded_unknown_message) + + if self.as_one_line: + out.write('} ') + else: + self.indent -= 2 + out.write(' ' * self.indent + '}\n') + else: + # A string or bytes field. self.as_utf8 may not work. + out.write(': \"') + out.write(text_encoding.CEscape(field.data, False)) + out.write('\" ' if self.as_one_line else '\"\n') + else: + # varint, fixed32, fixed64 + out.write(': ') + out.write(str(field.data)) + out.write(' ' if self.as_one_line else '\n') + + def _PrintFieldName(self, field): + """Print field name.""" + out = self.out + out.write(' ' * self.indent) + if self.use_field_number: + out.write(str(field.number)) + else: + if field.is_extension: + out.write('[') + if (field.containing_type.GetOptions().message_set_wire_format and + field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL): + out.write(field.message_type.full_name) + else: + out.write(field.full_name) + out.write(']') + elif field.type == descriptor.FieldDescriptor.TYPE_GROUP: + # For groups, use the capitalized name. + out.write(field.message_type.name) + else: + out.write(field.name) + + if (self.force_colon or + field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE): + # The colon is optional in this case, but our cross-language golden files + # don't include it. Here, the colon is only included if force_colon is + # set to True + out.write(':') + + def PrintField(self, field, value): + """Print a single field name/value pair.""" + self._PrintFieldName(field) + self.out.write(' ') + self.PrintFieldValue(field, value) + self.out.write(' ' if self.as_one_line else '\n') + + def _PrintShortRepeatedPrimitivesValue(self, field, value): + """"Prints short repeated primitives value.""" + # Note: this is called only when value has at least one element. 
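+    # Emits the whole repeated field as a single bracketed list, e.g.
+    # `values: [1, 2, 3]`, instead of repeating the field name per element.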
+ self._PrintFieldName(field) + self.out.write(' [') + for i in range(len(value) - 1): + self.PrintFieldValue(field, value[i]) + self.out.write(', ') + self.PrintFieldValue(field, value[-1]) + self.out.write(']') + self.out.write(' ' if self.as_one_line else '\n') + + def _PrintMessageFieldValue(self, value): + if self.pointy_brackets: + openb = '<' + closeb = '>' + else: + openb = '{' + closeb = '}' + + if self.as_one_line: + self.out.write('%s ' % openb) + self.PrintMessage(value) + self.out.write(closeb) + else: + self.out.write('%s\n' % openb) + self.indent += 2 + self.PrintMessage(value) + self.indent -= 2 + self.out.write(' ' * self.indent + closeb) + + def PrintFieldValue(self, field, value): + """Print a single field value (not including name). + + For repeated fields, the value should be a single element. + + Args: + field: The descriptor of the field to be printed. + value: The value of the field. + """ + out = self.out + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + self._PrintMessageFieldValue(value) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + enum_value = field.enum_type.values_by_number.get(value, None) + if enum_value is not None: + out.write(enum_value.name) + else: + out.write(str(value)) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + out.write('\"') + if isinstance(value, str) and not self.as_utf8: + out_value = value.encode('utf-8') + else: + out_value = value + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + # We always need to escape all binary data in TYPE_BYTES fields. + out_as_utf8 = False + else: + out_as_utf8 = self.as_utf8 + out.write(text_encoding.CEscape(out_value, out_as_utf8)) + out.write('\"') + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + if value: + out.write('true') + else: + out.write('false') + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: + if self.float_format is not None: + out.write('{1:{0}}'.format(self.float_format, value)) + else: + if math.isnan(value): + out.write(str(value)) + else: + out.write(str(type_checkers.ToShortestFloat(value))) + elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_DOUBLE and + self.double_format is not None): + out.write('{1:{0}}'.format(self.double_format, value)) + else: + out.write(str(value)) + + +def Parse(text, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + NOTE: for historical reasons this function does not clear the input + message. This is different from what the binary msg.ParseFrom(...) does. + If text contains a field already set in message, the value is appended if the + field is repeated. Otherwise, an error is raised. + + Example:: + + a = MyProto() + a.repeated_field.append('test') + b = MyProto() + + # Repeated fields are combined + text_format.Parse(repr(a), b) + text_format.Parse(repr(a), b) # repeated_field contains ["test", "test"] + + # Non-repeated fields cannot be overwritten + a.singular_field = 1 + b.singular_field = 2 + text_format.Parse(repr(a), b) # ParseError + + # Binary version: + b.ParseFromString(a.SerializeToString()) # repeated_field is now "test" + + Caller is responsible for clearing the message as needed. + + Args: + text (str): Message text representation. + message (Message): A protocol buffer message to merge into. 
+ allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + Message: The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + return ParseLines(text.split(b'\n' if isinstance(text, bytes) else u'\n'), + message, + allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + + +def Merge(text, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + Like Parse(), but allows repeated values for a non-repeated field, and uses + the last one. This means any non-repeated, top-level fields specified in text + replace those in the message. + + Args: + text (str): Message text representation. + message (Message): A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + Message: The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + return MergeLines( + text.split(b'\n' if isinstance(text, bytes) else u'\n'), + message, + allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + + +def ParseLines(lines, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + See Parse() for caveats. + + Args: + lines: An iterable of lines of a message's text representation. + message: A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool: A DescriptorPool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + parser = _Parser(allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + return parser.ParseLines(lines, message) + + +def MergeLines(lines, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + See Merge() for more details. + + Args: + lines: An iterable of lines of a message's text representation. + message: A protocol buffer message to merge into. 
+ allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool: A DescriptorPool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + parser = _Parser(allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + return parser.MergeLines(lines, message) + + +class _Parser(object): + """Text format parser for protocol message.""" + + def __init__(self, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + self.allow_unknown_extension = allow_unknown_extension + self.allow_field_number = allow_field_number + self.descriptor_pool = descriptor_pool + self.allow_unknown_field = allow_unknown_field + + def ParseLines(self, lines, message): + """Parses a text representation of a protocol message into a message.""" + self._allow_multiple_scalars = False + self._ParseOrMerge(lines, message) + return message + + def MergeLines(self, lines, message): + """Merges a text representation of a protocol message into a message.""" + self._allow_multiple_scalars = True + self._ParseOrMerge(lines, message) + return message + + def _ParseOrMerge(self, lines, message): + """Converts a text representation of a protocol message into a message. + + Args: + lines: Lines of a message's text representation. + message: A protocol buffer message to merge into. + + Raises: + ParseError: On text parsing problems. + """ + # Tokenize expects native str lines. + str_lines = ( + line if isinstance(line, str) else line.decode('utf-8') + for line in lines) + tokenizer = Tokenizer(str_lines) + while not tokenizer.AtEnd(): + self._MergeField(tokenizer, message) + + def _MergeField(self, tokenizer, message): + """Merges a single protocol message field into a message. + + Args: + tokenizer: A tokenizer to parse the field name and values. + message: A protocol message to record the data. + + Raises: + ParseError: In case of text parsing problems. + """ + message_descriptor = message.DESCRIPTOR + if (message_descriptor.full_name == _ANY_FULL_TYPE_NAME and + tokenizer.TryConsume('[')): + type_url_prefix, packed_type_name = self._ConsumeAnyTypeUrl(tokenizer) + tokenizer.Consume(']') + tokenizer.TryConsume(':') + if tokenizer.TryConsume('<'): + expanded_any_end_token = '>' + else: + tokenizer.Consume('{') + expanded_any_end_token = '}' + expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name, + self.descriptor_pool) + if not expanded_any_sub_message: + raise ParseError('Type %s not found in descriptor pool' % + packed_type_name) + while not tokenizer.TryConsume(expanded_any_end_token): + if tokenizer.AtEnd(): + raise tokenizer.ParseErrorPreviousToken('Expected "%s".' 
% + (expanded_any_end_token,)) + self._MergeField(tokenizer, expanded_any_sub_message) + deterministic = False + + message.Pack(expanded_any_sub_message, + type_url_prefix=type_url_prefix, + deterministic=deterministic) + return + + if tokenizer.TryConsume('['): + name = [tokenizer.ConsumeIdentifier()] + while tokenizer.TryConsume('.'): + name.append(tokenizer.ConsumeIdentifier()) + name = '.'.join(name) + + if not message_descriptor.is_extendable: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" does not have extensions.' % + message_descriptor.full_name) + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(name) + # pylint: enable=protected-access + + + if not field: + if self.allow_unknown_extension: + field = None + else: + raise tokenizer.ParseErrorPreviousToken( + 'Extension "%s" not registered. ' + 'Did you import the _pb2 module which defines it? ' + 'If you are trying to place the extension in the MessageSet ' + 'field of another message that is in an Any or MessageSet field, ' + 'that message\'s _pb2 module must be imported as well' % name) + elif message_descriptor != field.containing_type: + raise tokenizer.ParseErrorPreviousToken( + 'Extension "%s" does not extend message type "%s".' % + (name, message_descriptor.full_name)) + + tokenizer.Consume(']') + + else: + name = tokenizer.ConsumeIdentifierOrNumber() + if self.allow_field_number and name.isdigit(): + number = ParseInteger(name, True, True) + field = message_descriptor.fields_by_number.get(number, None) + if not field and message_descriptor.is_extendable: + field = message.Extensions._FindExtensionByNumber(number) + else: + field = message_descriptor.fields_by_name.get(name, None) + + # Group names are expected to be capitalized as they appear in the + # .proto file, which actually matches their type names, not their field + # names. + if not field: + field = message_descriptor.fields_by_name.get(name.lower(), None) + if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP: + field = None + + if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and + field.message_type.name != name): + field = None + + if not field and not self.allow_unknown_field: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" has no field named "%s".' % + (message_descriptor.full_name, name)) + + if field: + if not self._allow_multiple_scalars and field.containing_oneof: + # Check if there's a different field set in this oneof. + # Note that we ignore the case if the same field was set before, and we + # apply _allow_multiple_scalars to non-scalar fields as well. + which_oneof = message.WhichOneof(field.containing_oneof.name) + if which_oneof is not None and which_oneof != field.name: + raise tokenizer.ParseErrorPreviousToken( + 'Field "%s" is specified along with field "%s", another member ' + 'of oneof "%s" for message type "%s".' % + (field.name, which_oneof, field.containing_oneof.name, + message_descriptor.full_name)) + + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + tokenizer.TryConsume(':') + merger = self._MergeMessageField + else: + tokenizer.Consume(':') + merger = self._MergeScalarField + + if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and + tokenizer.TryConsume('[')): + # Short repeated format, e.g. 
"foo: [1, 2, 3]" + if not tokenizer.TryConsume(']'): + while True: + merger(tokenizer, message, field) + if tokenizer.TryConsume(']'): + break + tokenizer.Consume(',') + + else: + merger(tokenizer, message, field) + + else: # Proto field is unknown. + assert (self.allow_unknown_extension or self.allow_unknown_field) + _SkipFieldContents(tokenizer) + + # For historical reasons, fields may optionally be separated by commas or + # semicolons. + if not tokenizer.TryConsume(','): + tokenizer.TryConsume(';') + + + def _ConsumeAnyTypeUrl(self, tokenizer): + """Consumes a google.protobuf.Any type URL and returns the type name.""" + # Consume "type.googleapis.com/". + prefix = [tokenizer.ConsumeIdentifier()] + tokenizer.Consume('.') + prefix.append(tokenizer.ConsumeIdentifier()) + tokenizer.Consume('.') + prefix.append(tokenizer.ConsumeIdentifier()) + tokenizer.Consume('/') + # Consume the fully-qualified type name. + name = [tokenizer.ConsumeIdentifier()] + while tokenizer.TryConsume('.'): + name.append(tokenizer.ConsumeIdentifier()) + return '.'.join(prefix), '.'.join(name) + + def _MergeMessageField(self, tokenizer, message, field): + """Merges a single scalar field into a message. + + Args: + tokenizer: A tokenizer to parse the field value. + message: The message of which field is a member. + field: The descriptor of the field to be merged. + + Raises: + ParseError: In case of text parsing problems. + """ + is_map_entry = _IsMapEntry(field) + + if tokenizer.TryConsume('<'): + end_token = '>' + else: + tokenizer.Consume('{') + end_token = '}' + + if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if field.is_extension: + sub_message = message.Extensions[field].add() + elif is_map_entry: + sub_message = getattr(message, field.name).GetEntryClass()() + else: + sub_message = getattr(message, field.name).add() + else: + if field.is_extension: + if (not self._allow_multiple_scalars and + message.HasExtension(field)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" extensions.' % + (message.DESCRIPTOR.full_name, field.full_name)) + sub_message = message.Extensions[field] + else: + # Also apply _allow_multiple_scalars to message field. + # TODO(jieluo): Change to _allow_singular_overwrites. + if (not self._allow_multiple_scalars and + message.HasField(field.name)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" fields.' % + (message.DESCRIPTOR.full_name, field.name)) + sub_message = getattr(message, field.name) + sub_message.SetInParent() + + while not tokenizer.TryConsume(end_token): + if tokenizer.AtEnd(): + raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,)) + self._MergeField(tokenizer, sub_message) + + if is_map_entry: + value_cpptype = field.message_type.fields_by_name['value'].cpp_type + if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + value = getattr(message, field.name)[sub_message.key] + value.CopyFrom(sub_message.value) + else: + getattr(message, field.name)[sub_message.key] = sub_message.value + + @staticmethod + def _IsProto3Syntax(message): + message_descriptor = message.DESCRIPTOR + return (hasattr(message_descriptor, 'syntax') and + message_descriptor.syntax == 'proto3') + + def _MergeScalarField(self, tokenizer, message, field): + """Merges a single scalar field into a message. + + Args: + tokenizer: A tokenizer to parse the field value. + message: A protocol message to record the data. + field: The descriptor of the field to be merged. 
+ + Raises: + ParseError: In case of text parsing problems. + RuntimeError: On runtime errors. + """ + _ = self.allow_unknown_extension + value = None + + if field.type in (descriptor.FieldDescriptor.TYPE_INT32, + descriptor.FieldDescriptor.TYPE_SINT32, + descriptor.FieldDescriptor.TYPE_SFIXED32): + value = _ConsumeInt32(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_INT64, + descriptor.FieldDescriptor.TYPE_SINT64, + descriptor.FieldDescriptor.TYPE_SFIXED64): + value = _ConsumeInt64(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32, + descriptor.FieldDescriptor.TYPE_FIXED32): + value = _ConsumeUint32(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64, + descriptor.FieldDescriptor.TYPE_FIXED64): + value = _ConsumeUint64(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT, + descriptor.FieldDescriptor.TYPE_DOUBLE): + value = tokenizer.ConsumeFloat() + elif field.type == descriptor.FieldDescriptor.TYPE_BOOL: + value = tokenizer.ConsumeBool() + elif field.type == descriptor.FieldDescriptor.TYPE_STRING: + value = tokenizer.ConsumeString() + elif field.type == descriptor.FieldDescriptor.TYPE_BYTES: + value = tokenizer.ConsumeByteString() + elif field.type == descriptor.FieldDescriptor.TYPE_ENUM: + value = tokenizer.ConsumeEnum(field) + else: + raise RuntimeError('Unknown field type %d' % field.type) + + if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if field.is_extension: + message.Extensions[field].append(value) + else: + getattr(message, field.name).append(value) + else: + if field.is_extension: + if (not self._allow_multiple_scalars and + not self._IsProto3Syntax(message) and + message.HasExtension(field)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" extensions.' % + (message.DESCRIPTOR.full_name, field.full_name)) + else: + message.Extensions[field] = value + else: + duplicate_error = False + if not self._allow_multiple_scalars: + if self._IsProto3Syntax(message): + # Proto3 doesn't represent presence so we try best effort to check + # multiple scalars by compare to default values. + duplicate_error = bool(getattr(message, field.name)) + else: + duplicate_error = message.HasField(field.name) + + if duplicate_error: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" fields.' % + (message.DESCRIPTOR.full_name, field.name)) + else: + setattr(message, field.name, value) + + +def _SkipFieldContents(tokenizer): + """Skips over contents (value or message) of a field. + + Args: + tokenizer: A tokenizer to parse the field name and values. + """ + # Try to guess the type of this field. + # If this field is not a message, there should be a ":" between the + # field name and the field value and also the field value should not + # start with "{" or "<" which indicates the beginning of a message body. + # If there is no ":" or there is a "{" or "<" after ":", this field has + # to be a message or the input is ill-formed. + if tokenizer.TryConsume(':') and not tokenizer.LookingAt( + '{') and not tokenizer.LookingAt('<'): + _SkipFieldValue(tokenizer) + else: + _SkipFieldMessage(tokenizer) + + +def _SkipField(tokenizer): + """Skips over a complete field (name and value/message). + + Args: + tokenizer: A tokenizer to parse the field name and values. + """ + if tokenizer.TryConsume('['): + # Consume extension name. 
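+    # e.g. for "[my.pkg.ext]" (a hypothetical extension name) this consumes
+    # the dotted identifier piece by piece, then the closing "]" below.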
+ tokenizer.ConsumeIdentifier() + while tokenizer.TryConsume('.'): + tokenizer.ConsumeIdentifier() + tokenizer.Consume(']') + else: + tokenizer.ConsumeIdentifierOrNumber() + + _SkipFieldContents(tokenizer) + + # For historical reasons, fields may optionally be separated by commas or + # semicolons. + if not tokenizer.TryConsume(','): + tokenizer.TryConsume(';') + + +def _SkipFieldMessage(tokenizer): + """Skips over a field message. + + Args: + tokenizer: A tokenizer to parse the field name and values. + """ + + if tokenizer.TryConsume('<'): + delimiter = '>' + else: + tokenizer.Consume('{') + delimiter = '}' + + while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'): + _SkipField(tokenizer) + + tokenizer.Consume(delimiter) + + +def _SkipFieldValue(tokenizer): + """Skips over a field value. + + Args: + tokenizer: A tokenizer to parse the field name and values. + + Raises: + ParseError: In case an invalid field value is found. + """ + # String/bytes tokens can come in multiple adjacent string literals. + # If we can consume one, consume as many as we can. + if tokenizer.TryConsumeByteString(): + while tokenizer.TryConsumeByteString(): + pass + return + + if (not tokenizer.TryConsumeIdentifier() and + not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and + not tokenizer.TryConsumeFloat()): + raise ParseError('Invalid field value: ' + tokenizer.token) + + +class Tokenizer(object): + """Protocol buffer text representation tokenizer. + + This class handles the lower level string parsing by splitting it into + meaningful tokens. + + It was directly ported from the Java protocol buffer API. + """ + + _WHITESPACE = re.compile(r'\s+') + _COMMENT = re.compile(r'(\s*#.*$)', re.MULTILINE) + _WHITESPACE_OR_COMMENT = re.compile(r'(\s|(#.*$))+', re.MULTILINE) + _TOKEN = re.compile('|'.join([ + r'[a-zA-Z_][0-9a-zA-Z_+-]*', # an identifier + r'([0-9+-]|(\.[0-9]))[0-9a-zA-Z_.+-]*', # a number + ] + [ # quoted str for each quote mark + # Avoid backtracking! https://stackoverflow.com/a/844267 + r'{qt}[^{qt}\n\\]*((\\.)+[^{qt}\n\\]*)*({qt}|\\?$)'.format(qt=mark) + for mark in _QUOTES + ])) + + _IDENTIFIER = re.compile(r'[^\d\W]\w*') + _IDENTIFIER_OR_NUMBER = re.compile(r'\w+') + + def __init__(self, lines, skip_comments=True): + self._position = 0 + self._line = -1 + self._column = 0 + self._token_start = None + self.token = '' + self._lines = iter(lines) + self._current_line = '' + self._previous_line = 0 + self._previous_column = 0 + self._more_lines = True + self._skip_comments = skip_comments + self._whitespace_pattern = (skip_comments and self._WHITESPACE_OR_COMMENT + or self._WHITESPACE) + self._SkipWhitespace() + self.NextToken() + + def LookingAt(self, token): + return self.token == token + + def AtEnd(self): + """Checks the end of the text was reached. + + Returns: + True iff the end was reached. + """ + return not self.token + + def _PopLine(self): + while len(self._current_line) <= self._column: + try: + self._current_line = next(self._lines) + except StopIteration: + self._current_line = '' + self._more_lines = False + return + else: + self._line += 1 + self._column = 0 + + def _SkipWhitespace(self): + while True: + self._PopLine() + match = self._whitespace_pattern.match(self._current_line, self._column) + if not match: + break + length = len(match.group(0)) + self._column += length + + def TryConsume(self, token): + """Tries to consume a given piece of text. + + Args: + token: Text to consume. + + Returns: + True iff the text was consumed. 
+ """ + if self.token == token: + self.NextToken() + return True + return False + + def Consume(self, token): + """Consumes a piece of text. + + Args: + token: Text to consume. + + Raises: + ParseError: If the text couldn't be consumed. + """ + if not self.TryConsume(token): + raise self.ParseError('Expected "%s".' % token) + + def ConsumeComment(self): + result = self.token + if not self._COMMENT.match(result): + raise self.ParseError('Expected comment.') + self.NextToken() + return result + + def ConsumeCommentOrTrailingComment(self): + """Consumes a comment, returns a 2-tuple (trailing bool, comment str).""" + + # Tokenizer initializes _previous_line and _previous_column to 0. As the + # tokenizer starts, it looks like there is a previous token on the line. + just_started = self._line == 0 and self._column == 0 + + before_parsing = self._previous_line + comment = self.ConsumeComment() + + # A trailing comment is a comment on the same line than the previous token. + trailing = (self._previous_line == before_parsing + and not just_started) + + return trailing, comment + + def TryConsumeIdentifier(self): + try: + self.ConsumeIdentifier() + return True + except ParseError: + return False + + def ConsumeIdentifier(self): + """Consumes protocol message field identifier. + + Returns: + Identifier string. + + Raises: + ParseError: If an identifier couldn't be consumed. + """ + result = self.token + if not self._IDENTIFIER.match(result): + raise self.ParseError('Expected identifier.') + self.NextToken() + return result + + def TryConsumeIdentifierOrNumber(self): + try: + self.ConsumeIdentifierOrNumber() + return True + except ParseError: + return False + + def ConsumeIdentifierOrNumber(self): + """Consumes protocol message field identifier. + + Returns: + Identifier string. + + Raises: + ParseError: If an identifier couldn't be consumed. + """ + result = self.token + if not self._IDENTIFIER_OR_NUMBER.match(result): + raise self.ParseError('Expected identifier or number, got %s.' % result) + self.NextToken() + return result + + def TryConsumeInteger(self): + try: + self.ConsumeInteger() + return True + except ParseError: + return False + + def ConsumeInteger(self): + """Consumes an integer number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an integer couldn't be consumed. + """ + try: + result = _ParseAbstractInteger(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def TryConsumeFloat(self): + try: + self.ConsumeFloat() + return True + except ParseError: + return False + + def ConsumeFloat(self): + """Consumes an floating point number. + + Returns: + The number parsed. + + Raises: + ParseError: If a floating point number couldn't be consumed. + """ + try: + result = ParseFloat(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ConsumeBool(self): + """Consumes a boolean value. + + Returns: + The bool parsed. + + Raises: + ParseError: If a boolean value couldn't be consumed. + """ + try: + result = ParseBool(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def TryConsumeByteString(self): + try: + self.ConsumeByteString() + return True + except ParseError: + return False + + def ConsumeString(self): + """Consumes a string value. + + Returns: + The string parsed. + + Raises: + ParseError: If a string value couldn't be consumed. 
+ """ + the_bytes = self.ConsumeByteString() + try: + return str(the_bytes, 'utf-8') + except UnicodeDecodeError as e: + raise self._StringParseError(e) + + def ConsumeByteString(self): + """Consumes a byte array value. + + Returns: + The array parsed (as a string). + + Raises: + ParseError: If a byte array value couldn't be consumed. + """ + the_list = [self._ConsumeSingleByteString()] + while self.token and self.token[0] in _QUOTES: + the_list.append(self._ConsumeSingleByteString()) + return b''.join(the_list) + + def _ConsumeSingleByteString(self): + """Consume one token of a string literal. + + String literals (whether bytes or text) can come in multiple adjacent + tokens which are automatically concatenated, like in C or Python. This + method only consumes one token. + + Returns: + The token parsed. + Raises: + ParseError: When the wrong format data is found. + """ + text = self.token + if len(text) < 1 or text[0] not in _QUOTES: + raise self.ParseError('Expected string but found: %r' % (text,)) + + if len(text) < 2 or text[-1] != text[0]: + raise self.ParseError('String missing ending quote: %r' % (text,)) + + try: + result = text_encoding.CUnescape(text[1:-1]) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ConsumeEnum(self, field): + try: + result = ParseEnum(field, self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ParseErrorPreviousToken(self, message): + """Creates and *returns* a ParseError for the previously read token. + + Args: + message: A message to set for the exception. + + Returns: + A ParseError instance. + """ + return ParseError(message, self._previous_line + 1, + self._previous_column + 1) + + def ParseError(self, message): + """Creates and *returns* a ParseError for the current token.""" + return ParseError('\'' + self._current_line + '\': ' + message, + self._line + 1, self._column + 1) + + def _StringParseError(self, e): + return self.ParseError('Couldn\'t parse string: ' + str(e)) + + def NextToken(self): + """Reads the next meaningful token.""" + self._previous_line = self._line + self._previous_column = self._column + + self._column += len(self.token) + self._SkipWhitespace() + + if not self._more_lines: + self.token = '' + return + + match = self._TOKEN.match(self._current_line, self._column) + if not match and not self._skip_comments: + match = self._COMMENT.match(self._current_line, self._column) + if match: + token = match.group(0) + self.token = token + else: + self.token = self._current_line[self._column] + +# Aliased so it can still be accessed by current visibility violators. +# TODO(dbarnett): Migrate violators to textformat_tokenizer. +_Tokenizer = Tokenizer # pylint: disable=invalid-name + + +def _ConsumeInt32(tokenizer): + """Consumes a signed 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If a signed 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=True, is_long=False) + + +def _ConsumeUint32(tokenizer): + """Consumes an unsigned 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an unsigned 32bit integer couldn't be consumed. 
+ """ + return _ConsumeInteger(tokenizer, is_signed=False, is_long=False) + + +def _TryConsumeInt64(tokenizer): + try: + _ConsumeInt64(tokenizer) + return True + except ParseError: + return False + + +def _ConsumeInt64(tokenizer): + """Consumes a signed 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If a signed 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=True, is_long=True) + + +def _TryConsumeUint64(tokenizer): + try: + _ConsumeUint64(tokenizer) + return True + except ParseError: + return False + + +def _ConsumeUint64(tokenizer): + """Consumes an unsigned 64bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an unsigned 64bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=False, is_long=True) + + +def _ConsumeInteger(tokenizer, is_signed=False, is_long=False): + """Consumes an integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + is_signed: True if a signed integer must be parsed. + is_long: True if a long integer must be parsed. + + Returns: + The integer parsed. + + Raises: + ParseError: If an integer with given characteristics couldn't be consumed. + """ + try: + result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long) + except ValueError as e: + raise tokenizer.ParseError(str(e)) + tokenizer.NextToken() + return result + + +def ParseInteger(text, is_signed=False, is_long=False): + """Parses an integer. + + Args: + text: The text to parse. + is_signed: True if a signed integer must be parsed. + is_long: True if a long integer must be parsed. + + Returns: + The integer value. + + Raises: + ValueError: Thrown Iff the text is not a valid integer. + """ + # Do the actual parsing. Exception handling is propagated to caller. + result = _ParseAbstractInteger(text) + + # Check if the integer is sane. Exceptions handled by callers. + checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] + checker.CheckValue(result) + return result + + +def _ParseAbstractInteger(text): + """Parses an integer without checking size/signedness. + + Args: + text: The text to parse. + + Returns: + The integer value. + + Raises: + ValueError: Thrown Iff the text is not a valid integer. + """ + # Do the actual parsing. Exception handling is propagated to caller. + orig_text = text + c_octal_match = re.match(r'(-?)0(\d+)$', text) + if c_octal_match: + # Python 3 no longer supports 0755 octal syntax without the 'o', so + # we always use the '0o' prefix for multi-digit numbers starting with 0. + text = c_octal_match.group(1) + '0o' + c_octal_match.group(2) + try: + return int(text, 0) + except ValueError: + raise ValueError('Couldn\'t parse integer: %s' % orig_text) + + +def ParseFloat(text): + """Parse a floating point number. + + Args: + text: Text to parse. + + Returns: + The number parsed. + + Raises: + ValueError: If a floating point number couldn't be parsed. + """ + try: + # Assume Python compatible syntax. + return float(text) + except ValueError: + # Check alternative spellings. 
+ if _FLOAT_INFINITY.match(text): + if text[0] == '-': + return float('-inf') + else: + return float('inf') + elif _FLOAT_NAN.match(text): + return float('nan') + else: + # assume '1.0f' format + try: + return float(text.rstrip('f')) + except ValueError: + raise ValueError('Couldn\'t parse float: %s' % text) + + +def ParseBool(text): + """Parse a boolean value. + + Args: + text: Text to parse. + + Returns: + Boolean values parsed + + Raises: + ValueError: If text is not a valid boolean. + """ + if text in ('true', 't', '1', 'True'): + return True + elif text in ('false', 'f', '0', 'False'): + return False + else: + raise ValueError('Expected "true" or "false".') + + +def ParseEnum(field, value): + """Parse an enum value. + + The value can be specified by a number (the enum value), or by + a string literal (the enum name). + + Args: + field: Enum field descriptor. + value: String value. + + Returns: + Enum value number. + + Raises: + ValueError: If the enum value could not be parsed. + """ + enum_descriptor = field.enum_type + try: + number = int(value, 0) + except ValueError: + # Identifier. + enum_value = enum_descriptor.values_by_name.get(value, None) + if enum_value is None: + raise ValueError('Enum type "%s" has no value named %s.' % + (enum_descriptor.full_name, value)) + else: + # Numeric value. + if hasattr(field.file, 'syntax'): + # Attribute is checked for compatibility. + if field.file.syntax == 'proto3': + # Proto3 accept numeric unknown enums. + return number + enum_value = enum_descriptor.values_by_number.get(number, None) + if enum_value is None: + raise ValueError('Enum type "%s" has no value with number %d.' % + (enum_descriptor.full_name, number)) + return enum_value.number diff --git a/openpype/hosts/nuke/vendor/google/protobuf/timestamp_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/timestamp_pb2.py new file mode 100644 index 0000000000..558d496941 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/timestamp_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/timestamp.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\"+\n\tTimestamp\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x85\x01\n\x13\x63om.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.timestamp_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\016TimestampProtoP\001Z2google.golang.org/protobuf/types/known/timestamppb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _TIMESTAMP._serialized_start=52 + _TIMESTAMP._serialized_end=95 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/type_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/type_pb2.py new file mode 100644 index 0000000000..19903fb6b4 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/type_pb2.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/type.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1agoogle/protobuf/type.proto\x12\x0fgoogle.protobuf\x1a\x19google/protobuf/any.proto\x1a$google/protobuf/source_context.proto\"\xd7\x01\n\x04Type\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Field\x12\x0e\n\x06oneofs\x18\x03 \x03(\t\x12(\n\x07options\x18\x04 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x06 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x05\n\x05\x46ield\x12)\n\x04kind\x18\x01 \x01(\x0e\x32\x1b.google.protobuf.Field.Kind\x12\x37\n\x0b\x63\x61rdinality\x18\x02 \x01(\x0e\x32\".google.protobuf.Field.Cardinality\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x10\n\x08type_url\x18\x06 \x01(\t\x12\x13\n\x0boneof_index\x18\x07 \x01(\x05\x12\x0e\n\x06packed\x18\x08 \x01(\x08\x12(\n\x07options\x18\t \x03(\x0b\x32\x17.google.protobuf.Option\x12\x11\n\tjson_name\x18\n \x01(\t\x12\x15\n\rdefault_value\x18\x0b 
\x01(\t\"\xc8\x02\n\x04Kind\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"t\n\x0b\x43\x61rdinality\x12\x17\n\x13\x43\x41RDINALITY_UNKNOWN\x10\x00\x12\x18\n\x14\x43\x41RDINALITY_OPTIONAL\x10\x01\x12\x18\n\x14\x43\x41RDINALITY_REQUIRED\x10\x02\x12\x18\n\x14\x43\x41RDINALITY_REPEATED\x10\x03\"\xce\x01\n\x04\x45num\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\tenumvalue\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.EnumValue\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x05 \x01(\x0e\x32\x17.google.protobuf.Syntax\"S\n\tEnumValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\";\n\x06Option\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any*.\n\x06Syntax\x12\x11\n\rSYNTAX_PROTO2\x10\x00\x12\x11\n\rSYNTAX_PROTO3\x10\x01\x42{\n\x13\x63om.google.protobufB\tTypeProtoP\x01Z-google.golang.org/protobuf/types/known/typepb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.type_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\tTypeProtoP\001Z-google.golang.org/protobuf/types/known/typepb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _SYNTAX._serialized_start=1413 + _SYNTAX._serialized_end=1459 + _TYPE._serialized_start=113 + _TYPE._serialized_end=328 + _FIELD._serialized_start=331 + _FIELD._serialized_end=1056 + _FIELD_KIND._serialized_start=610 + _FIELD_KIND._serialized_end=938 + _FIELD_CARDINALITY._serialized_start=940 + _FIELD_CARDINALITY._serialized_end=1056 + _ENUM._serialized_start=1059 + _ENUM._serialized_end=1265 + _ENUMVALUE._serialized_start=1267 + _ENUMVALUE._serialized_end=1350 + _OPTION._serialized_start=1352 + _OPTION._serialized_end=1411 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/util/__init__.py b/openpype/hosts/nuke/vendor/google/protobuf/util/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/vendor/google/protobuf/util/json_format_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/util/json_format_pb2.py new file mode 100644 index 0000000000..66a5836c82 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/util/json_format_pb2.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/util/json_format.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&google/protobuf/util/json_format.proto\x12\x11protobuf_unittest\"\x89\x01\n\x13TestFlagsAndStrings\x12\t\n\x01\x41\x18\x01 \x02(\x05\x12K\n\rrepeatedgroup\x18\x02 \x03(\n24.protobuf_unittest.TestFlagsAndStrings.RepeatedGroup\x1a\x1a\n\rRepeatedGroup\x12\t\n\x01\x66\x18\x03 \x02(\t\"!\n\x14TestBase64ByteArrays\x12\t\n\x01\x61\x18\x01 \x02(\x0c\"G\n\x12TestJavaScriptJSON\x12\t\n\x01\x61\x18\x01 \x01(\x05\x12\r\n\x05\x66inal\x18\x02 \x01(\x02\x12\n\n\x02in\x18\x03 \x01(\t\x12\x0b\n\x03Var\x18\x04 \x01(\t\"Q\n\x18TestJavaScriptOrderJSON1\x12\t\n\x01\x64\x18\x01 \x01(\x05\x12\t\n\x01\x63\x18\x02 \x01(\x05\x12\t\n\x01x\x18\x03 \x01(\x08\x12\t\n\x01\x62\x18\x04 \x01(\x05\x12\t\n\x01\x61\x18\x05 \x01(\x05\"\x89\x01\n\x18TestJavaScriptOrderJSON2\x12\t\n\x01\x64\x18\x01 \x01(\x05\x12\t\n\x01\x63\x18\x02 \x01(\x05\x12\t\n\x01x\x18\x03 \x01(\x08\x12\t\n\x01\x62\x18\x04 \x01(\x05\x12\t\n\x01\x61\x18\x05 \x01(\x05\x12\x36\n\x01z\x18\x06 \x03(\x0b\x32+.protobuf_unittest.TestJavaScriptOrderJSON1\"$\n\x0cTestLargeInt\x12\t\n\x01\x61\x18\x01 \x02(\x03\x12\t\n\x01\x62\x18\x02 \x02(\x04\"\xa0\x01\n\x0bTestNumbers\x12\x30\n\x01\x61\x18\x01 \x01(\x0e\x32%.protobuf_unittest.TestNumbers.MyType\x12\t\n\x01\x62\x18\x02 \x01(\x05\x12\t\n\x01\x63\x18\x03 \x01(\x02\x12\t\n\x01\x64\x18\x04 \x01(\x08\x12\t\n\x01\x65\x18\x05 \x01(\x01\x12\t\n\x01\x66\x18\x06 \x01(\r\"(\n\x06MyType\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07WARNING\x10\x01\x12\t\n\x05\x45RROR\x10\x02\"T\n\rTestCamelCase\x12\x14\n\x0cnormal_field\x18\x01 \x01(\t\x12\x15\n\rCAPITAL_FIELD\x18\x02 \x01(\x05\x12\x16\n\x0e\x43\x61melCaseField\x18\x03 \x01(\x05\"|\n\x0bTestBoolMap\x12=\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32+.protobuf_unittest.TestBoolMap.BoolMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"O\n\rTestRecursion\x12\r\n\x05value\x18\x01 \x01(\x05\x12/\n\x05\x63hild\x18\x02 \x01(\x0b\x32 .protobuf_unittest.TestRecursion\"\x86\x01\n\rTestStringMap\x12\x43\n\nstring_map\x18\x01 \x03(\x0b\x32/.protobuf_unittest.TestStringMap.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc4\x01\n\x14TestStringSerializer\x12\x15\n\rscalar_string\x18\x01 \x01(\t\x12\x17\n\x0frepeated_string\x18\x02 \x03(\t\x12J\n\nstring_map\x18\x03 \x03(\x0b\x32\x36.protobuf_unittest.TestStringSerializer.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"$\n\x18TestMessageWithExtension*\x08\x08\x64\x10\x80\x80\x80\x80\x02\"z\n\rTestExtension\x12\r\n\x05value\x18\x01 \x01(\t2Z\n\x03\x65xt\x12+.protobuf_unittest.TestMessageWithExtension\x18\x64 \x01(\x0b\x32 .protobuf_unittest.TestExtension\"Q\n\x14TestDefaultEnumValue\x12\x39\n\nenum_value\x18\x01 \x01(\x0e\x32\x1c.protobuf_unittest.EnumValue:\x07\x44\x45\x46\x41ULT*2\n\tEnumValue\x12\x0c\n\x08PROTOCOL\x10\x00\x12\n\n\x06\x42UFFER\x10\x01\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x02') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) 
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.util.json_format_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + TestMessageWithExtension.RegisterExtension(_TESTEXTENSION.extensions_by_name['ext']) + + DESCRIPTOR._options = None + _TESTBOOLMAP_BOOLMAPENTRY._options = None + _TESTBOOLMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGMAP_STRINGMAPENTRY._options = None + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._options = None + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_options = b'8\001' + _ENUMVALUE._serialized_start=1607 + _ENUMVALUE._serialized_end=1657 + _TESTFLAGSANDSTRINGS._serialized_start=62 + _TESTFLAGSANDSTRINGS._serialized_end=199 + _TESTFLAGSANDSTRINGS_REPEATEDGROUP._serialized_start=173 + _TESTFLAGSANDSTRINGS_REPEATEDGROUP._serialized_end=199 + _TESTBASE64BYTEARRAYS._serialized_start=201 + _TESTBASE64BYTEARRAYS._serialized_end=234 + _TESTJAVASCRIPTJSON._serialized_start=236 + _TESTJAVASCRIPTJSON._serialized_end=307 + _TESTJAVASCRIPTORDERJSON1._serialized_start=309 + _TESTJAVASCRIPTORDERJSON1._serialized_end=390 + _TESTJAVASCRIPTORDERJSON2._serialized_start=393 + _TESTJAVASCRIPTORDERJSON2._serialized_end=530 + _TESTLARGEINT._serialized_start=532 + _TESTLARGEINT._serialized_end=568 + _TESTNUMBERS._serialized_start=571 + _TESTNUMBERS._serialized_end=731 + _TESTNUMBERS_MYTYPE._serialized_start=691 + _TESTNUMBERS_MYTYPE._serialized_end=731 + _TESTCAMELCASE._serialized_start=733 + _TESTCAMELCASE._serialized_end=817 + _TESTBOOLMAP._serialized_start=819 + _TESTBOOLMAP._serialized_end=943 + _TESTBOOLMAP_BOOLMAPENTRY._serialized_start=897 + _TESTBOOLMAP_BOOLMAPENTRY._serialized_end=943 + _TESTRECURSION._serialized_start=945 + _TESTRECURSION._serialized_end=1024 + _TESTSTRINGMAP._serialized_start=1027 + _TESTSTRINGMAP._serialized_end=1161 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_start=1113 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_end=1161 + _TESTSTRINGSERIALIZER._serialized_start=1164 + _TESTSTRINGSERIALIZER._serialized_end=1360 + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_start=1113 + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_end=1161 + _TESTMESSAGEWITHEXTENSION._serialized_start=1362 + _TESTMESSAGEWITHEXTENSION._serialized_end=1398 + _TESTEXTENSION._serialized_start=1400 + _TESTEXTENSION._serialized_end=1522 + _TESTDEFAULTENUMVALUE._serialized_start=1524 + _TESTDEFAULTENUMVALUE._serialized_end=1605 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/util/json_format_proto3_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/util/json_format_proto3_pb2.py new file mode 100644 index 0000000000..5498deafa9 --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/util/json_format_proto3_pb2.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/util/json_format_proto3.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 +from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.protobuf import unittest_pb2 as google_dot_protobuf_dot_unittest__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n-google/protobuf/util/json_format_proto3.proto\x12\x06proto3\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1egoogle/protobuf/unittest.proto\"\x1c\n\x0bMessageType\x12\r\n\x05value\x18\x01 \x01(\x05\"\x94\x05\n\x0bTestMessage\x12\x12\n\nbool_value\x18\x01 \x01(\x08\x12\x13\n\x0bint32_value\x18\x02 \x01(\x05\x12\x13\n\x0bint64_value\x18\x03 \x01(\x03\x12\x14\n\x0cuint32_value\x18\x04 \x01(\r\x12\x14\n\x0cuint64_value\x18\x05 \x01(\x04\x12\x13\n\x0b\x66loat_value\x18\x06 \x01(\x02\x12\x14\n\x0c\x64ouble_value\x18\x07 \x01(\x01\x12\x14\n\x0cstring_value\x18\x08 \x01(\t\x12\x13\n\x0b\x62ytes_value\x18\t \x01(\x0c\x12$\n\nenum_value\x18\n \x01(\x0e\x32\x10.proto3.EnumType\x12*\n\rmessage_value\x18\x0b \x01(\x0b\x32\x13.proto3.MessageType\x12\x1b\n\x13repeated_bool_value\x18\x15 \x03(\x08\x12\x1c\n\x14repeated_int32_value\x18\x16 \x03(\x05\x12\x1c\n\x14repeated_int64_value\x18\x17 \x03(\x03\x12\x1d\n\x15repeated_uint32_value\x18\x18 \x03(\r\x12\x1d\n\x15repeated_uint64_value\x18\x19 \x03(\x04\x12\x1c\n\x14repeated_float_value\x18\x1a \x03(\x02\x12\x1d\n\x15repeated_double_value\x18\x1b \x03(\x01\x12\x1d\n\x15repeated_string_value\x18\x1c \x03(\t\x12\x1c\n\x14repeated_bytes_value\x18\x1d \x03(\x0c\x12-\n\x13repeated_enum_value\x18\x1e \x03(\x0e\x32\x10.proto3.EnumType\x12\x33\n\x16repeated_message_value\x18\x1f \x03(\x0b\x32\x13.proto3.MessageType\"\x8c\x02\n\tTestOneof\x12\x1b\n\x11oneof_int32_value\x18\x01 \x01(\x05H\x00\x12\x1c\n\x12oneof_string_value\x18\x02 \x01(\tH\x00\x12\x1b\n\x11oneof_bytes_value\x18\x03 \x01(\x0cH\x00\x12,\n\x10oneof_enum_value\x18\x04 \x01(\x0e\x32\x10.proto3.EnumTypeH\x00\x12\x32\n\x13oneof_message_value\x18\x05 \x01(\x0b\x32\x13.proto3.MessageTypeH\x00\x12\x36\n\x10oneof_null_value\x18\x06 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x42\r\n\x0boneof_value\"\xe1\x04\n\x07TestMap\x12.\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32\x1c.proto3.TestMap.BoolMapEntry\x12\x30\n\tint32_map\x18\x02 \x03(\x0b\x32\x1d.proto3.TestMap.Int32MapEntry\x12\x30\n\tint64_map\x18\x03 \x03(\x0b\x32\x1d.proto3.TestMap.Int64MapEntry\x12\x32\n\nuint32_map\x18\x04 \x03(\x0b\x32\x1e.proto3.TestMap.Uint32MapEntry\x12\x32\n\nuint64_map\x18\x05 \x03(\x0b\x32\x1e.proto3.TestMap.Uint64MapEntry\x12\x32\n\nstring_map\x18\x06 
\x03(\x0b\x32\x1e.proto3.TestMap.StringMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\x85\x06\n\rTestNestedMap\x12\x34\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32\".proto3.TestNestedMap.BoolMapEntry\x12\x36\n\tint32_map\x18\x02 \x03(\x0b\x32#.proto3.TestNestedMap.Int32MapEntry\x12\x36\n\tint64_map\x18\x03 \x03(\x0b\x32#.proto3.TestNestedMap.Int64MapEntry\x12\x38\n\nuint32_map\x18\x04 \x03(\x0b\x32$.proto3.TestNestedMap.Uint32MapEntry\x12\x38\n\nuint64_map\x18\x05 \x03(\x0b\x32$.proto3.TestNestedMap.Uint64MapEntry\x12\x38\n\nstring_map\x18\x06 \x03(\x0b\x32$.proto3.TestNestedMap.StringMapEntry\x12\x32\n\x07map_map\x18\x07 \x03(\x0b\x32!.proto3.TestNestedMap.MapMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x44\n\x0bMapMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.proto3.TestNestedMap:\x02\x38\x01\"{\n\rTestStringMap\x12\x38\n\nstring_map\x18\x01 \x03(\x0b\x32$.proto3.TestStringMap.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xee\x07\n\x0bTestWrapper\x12.\n\nbool_value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\x0bint32_value\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x30\n\x0bint64_value\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x32\n\x0cuint32_value\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x32\n\x0cuint64_value\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x30\n\x0b\x66loat_value\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.FloatValue\x12\x32\n\x0c\x64ouble_value\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x32\n\x0cstring_value\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0b\x62ytes_value\x18\t \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x37\n\x13repeated_bool_value\x18\x0b \x03(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x14repeated_int32_value\x18\x0c \x03(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x39\n\x14repeated_int64_value\x18\r \x03(\x0b\x32\x1b.google.protobuf.Int64Value\x12;\n\x15repeated_uint32_value\x18\x0e \x03(\x0b\x32\x1c.google.protobuf.UInt32Value\x12;\n\x15repeated_uint64_value\x18\x0f \x03(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x39\n\x14repeated_float_value\x18\x10 
\x03(\x0b\x32\x1b.google.protobuf.FloatValue\x12;\n\x15repeated_double_value\x18\x11 \x03(\x0b\x32\x1c.google.protobuf.DoubleValue\x12;\n\x15repeated_string_value\x18\x12 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x14repeated_bytes_value\x18\x13 \x03(\x0b\x32\x1b.google.protobuf.BytesValue\"n\n\rTestTimestamp\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.Timestamp\"k\n\x0cTestDuration\x12(\n\x05value\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x31\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x19.google.protobuf.Duration\":\n\rTestFieldMask\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"e\n\nTestStruct\x12&\n\x05value\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12/\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x17.google.protobuf.Struct\"\\\n\x07TestAny\x12#\n\x05value\x18\x01 \x01(\x0b\x32\x14.google.protobuf.Any\x12,\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x14.google.protobuf.Any\"b\n\tTestValue\x12%\n\x05value\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Value\x12.\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Value\"n\n\rTestListValue\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.ListValue\x12\x32\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.ListValue\"\x89\x01\n\rTestBoolValue\x12\x12\n\nbool_value\x18\x01 \x01(\x08\x12\x34\n\x08\x62ool_map\x18\x02 \x03(\x0b\x32\".proto3.TestBoolValue.BoolMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"+\n\x12TestCustomJsonName\x12\x15\n\x05value\x18\x01 \x01(\x05R\x06@value\"J\n\x0eTestExtensions\x12\x38\n\nextensions\x18\x01 \x01(\x0b\x32$.protobuf_unittest.TestAllExtensions\"\x84\x01\n\rTestEnumValue\x12%\n\x0b\x65num_value1\x18\x01 \x01(\x0e\x32\x10.proto3.EnumType\x12%\n\x0b\x65num_value2\x18\x02 \x01(\x0e\x32\x10.proto3.EnumType\x12%\n\x0b\x65num_value3\x18\x03 \x01(\x0e\x32\x10.proto3.EnumType*\x1c\n\x08\x45numType\x12\x07\n\x03\x46OO\x10\x00\x12\x07\n\x03\x42\x41R\x10\x01\x42,\n\x18\x63om.google.protobuf.utilB\x10JsonFormatProto3b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.util.json_format_proto3_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030com.google.protobuf.utilB\020JsonFormatProto3' + _TESTMAP_BOOLMAPENTRY._options = None + _TESTMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTMAP_INT32MAPENTRY._options = None + _TESTMAP_INT32MAPENTRY._serialized_options = b'8\001' + _TESTMAP_INT64MAPENTRY._options = None + _TESTMAP_INT64MAPENTRY._serialized_options = b'8\001' + _TESTMAP_UINT32MAPENTRY._options = None + _TESTMAP_UINT32MAPENTRY._serialized_options = b'8\001' + _TESTMAP_UINT64MAPENTRY._options = None + _TESTMAP_UINT64MAPENTRY._serialized_options = b'8\001' + _TESTMAP_STRINGMAPENTRY._options = None + _TESTMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_BOOLMAPENTRY._options = None + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_INT32MAPENTRY._options = None + _TESTNESTEDMAP_INT32MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_INT64MAPENTRY._options = None + _TESTNESTEDMAP_INT64MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_UINT32MAPENTRY._options = None + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_options = b'8\001' + 
_TESTNESTEDMAP_UINT64MAPENTRY._options = None + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_STRINGMAPENTRY._options = None + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_MAPMAPENTRY._options = None + _TESTNESTEDMAP_MAPMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGMAP_STRINGMAPENTRY._options = None + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTBOOLVALUE_BOOLMAPENTRY._options = None + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_options = b'8\001' + _ENUMTYPE._serialized_start=4849 + _ENUMTYPE._serialized_end=4877 + _MESSAGETYPE._serialized_start=277 + _MESSAGETYPE._serialized_end=305 + _TESTMESSAGE._serialized_start=308 + _TESTMESSAGE._serialized_end=968 + _TESTONEOF._serialized_start=971 + _TESTONEOF._serialized_end=1239 + _TESTMAP._serialized_start=1242 + _TESTMAP._serialized_end=1851 + _TESTMAP_BOOLMAPENTRY._serialized_start=1557 + _TESTMAP_BOOLMAPENTRY._serialized_end=1603 + _TESTMAP_INT32MAPENTRY._serialized_start=1605 + _TESTMAP_INT32MAPENTRY._serialized_end=1652 + _TESTMAP_INT64MAPENTRY._serialized_start=1654 + _TESTMAP_INT64MAPENTRY._serialized_end=1701 + _TESTMAP_UINT32MAPENTRY._serialized_start=1703 + _TESTMAP_UINT32MAPENTRY._serialized_end=1751 + _TESTMAP_UINT64MAPENTRY._serialized_start=1753 + _TESTMAP_UINT64MAPENTRY._serialized_end=1801 + _TESTMAP_STRINGMAPENTRY._serialized_start=1803 + _TESTMAP_STRINGMAPENTRY._serialized_end=1851 + _TESTNESTEDMAP._serialized_start=1854 + _TESTNESTEDMAP._serialized_end=2627 + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_start=1557 + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_end=1603 + _TESTNESTEDMAP_INT32MAPENTRY._serialized_start=1605 + _TESTNESTEDMAP_INT32MAPENTRY._serialized_end=1652 + _TESTNESTEDMAP_INT64MAPENTRY._serialized_start=1654 + _TESTNESTEDMAP_INT64MAPENTRY._serialized_end=1701 + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_start=1703 + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_end=1751 + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_start=1753 + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_end=1801 + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_start=1803 + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_end=1851 + _TESTNESTEDMAP_MAPMAPENTRY._serialized_start=2559 + _TESTNESTEDMAP_MAPMAPENTRY._serialized_end=2627 + _TESTSTRINGMAP._serialized_start=2629 + _TESTSTRINGMAP._serialized_end=2752 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_start=2704 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_end=2752 + _TESTWRAPPER._serialized_start=2755 + _TESTWRAPPER._serialized_end=3761 + _TESTTIMESTAMP._serialized_start=3763 + _TESTTIMESTAMP._serialized_end=3873 + _TESTDURATION._serialized_start=3875 + _TESTDURATION._serialized_end=3982 + _TESTFIELDMASK._serialized_start=3984 + _TESTFIELDMASK._serialized_end=4042 + _TESTSTRUCT._serialized_start=4044 + _TESTSTRUCT._serialized_end=4145 + _TESTANY._serialized_start=4147 + _TESTANY._serialized_end=4239 + _TESTVALUE._serialized_start=4241 + _TESTVALUE._serialized_end=4339 + _TESTLISTVALUE._serialized_start=4341 + _TESTLISTVALUE._serialized_end=4451 + _TESTBOOLVALUE._serialized_start=4454 + _TESTBOOLVALUE._serialized_end=4591 + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_start=1557 + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_end=1603 + _TESTCUSTOMJSONNAME._serialized_start=4593 + _TESTCUSTOMJSONNAME._serialized_end=4636 + _TESTEXTENSIONS._serialized_start=4638 + _TESTEXTENSIONS._serialized_end=4712 + _TESTENUMVALUE._serialized_start=4715 + _TESTENUMVALUE._serialized_end=4847 +# @@protoc_insertion_point(module_scope) diff --git 
a/openpype/hosts/nuke/vendor/google/protobuf/wrappers_pb2.py b/openpype/hosts/nuke/vendor/google/protobuf/wrappers_pb2.py new file mode 100644 index 0000000000..e49eb4c15d --- /dev/null +++ b/openpype/hosts/nuke/vendor/google/protobuf/wrappers_pb2.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/wrappers.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"\x1c\n\x0b\x44oubleValue\x12\r\n\x05value\x18\x01 \x01(\x01\"\x1b\n\nFloatValue\x12\r\n\x05value\x18\x01 \x01(\x02\"\x1b\n\nInt64Value\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1c\n\x0bUInt64Value\x12\r\n\x05value\x18\x01 \x01(\x04\"\x1b\n\nInt32Value\x12\r\n\x05value\x18\x01 \x01(\x05\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1b\n\nBytesValue\x12\r\n\x05value\x18\x01 \x01(\x0c\x42\x83\x01\n\x13\x63om.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.wrappers_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\rWrappersProtoP\001Z1google.golang.org/protobuf/types/known/wrapperspb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _DOUBLEVALUE._serialized_start=51 + _DOUBLEVALUE._serialized_end=79 + _FLOATVALUE._serialized_start=81 + _FLOATVALUE._serialized_end=108 + _INT64VALUE._serialized_start=110 + _INT64VALUE._serialized_end=137 + _UINT64VALUE._serialized_start=139 + _UINT64VALUE._serialized_end=167 + _INT32VALUE._serialized_start=169 + _INT32VALUE._serialized_end=196 + _UINT32VALUE._serialized_start=198 + _UINT32VALUE._serialized_end=226 + _BOOLVALUE._serialized_start=228 + _BOOLVALUE._serialized_end=254 + _STRINGVALUE._serialized_start=256 + _STRINGVALUE._serialized_end=284 + _BYTESVALUE._serialized_start=286 + _BYTESVALUE._serialized_end=313 +# @@protoc_insertion_point(module_scope) diff --git a/openpype/hosts/photoshop/__init__.py b/openpype/hosts/photoshop/__init__.py index a91e0a65ff..773f73d624 100644 --- a/openpype/hosts/photoshop/__init__.py +++ b/openpype/hosts/photoshop/__init__.py @@ -1,9 +1,10 @@ -def add_implementation_envs(env, _app): - """Modify environments to contain all required for implementation.""" - defaults = { - "OPENPYPE_LOG_NO_COLORS": "True", - "WEBSOCKET_URL": "ws://localhost:8099/ws/" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value +from .addon import ( + PhotoshopAddon, + PHOTOSHOP_HOST_DIR, +) + + +__all__ = ( + "PhotoshopAddon", + "PHOTOSHOP_HOST_DIR", +) diff --git a/openpype/hosts/photoshop/addon.py b/openpype/hosts/photoshop/addon.py new file mode 100644 index 0000000000..965a545ac5 --- /dev/null +++ b/openpype/hosts/photoshop/addon.py @@ -0,0 +1,25 @@ 
+import os +from openpype.modules import OpenPypeModule, IHostAddon + +PHOTOSHOP_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class PhotoshopAddon(OpenPypeModule, IHostAddon): + name = "photoshop" + host_name = "photoshop" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + """Modify environments to contain all required for implementation.""" + defaults = { + "OPENPYPE_LOG_NO_COLORS": "True", + "WEBSOCKET_URL": "ws://localhost:8099/ws/" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_workfile_extensions(self): + return [".psd", ".psb"] diff --git a/openpype/hosts/photoshop/api/README.md b/openpype/hosts/photoshop/api/README.md index 80792a4da0..4a36746cb2 100644 --- a/openpype/hosts/photoshop/api/README.md +++ b/openpype/hosts/photoshop/api/README.md @@ -127,11 +127,11 @@ class CollectInstances(pyblish.api.ContextPlugin): ```python import os -import openpype.api -from avalon import photoshop +from openpype.pipeline import publish +from openpype.hosts.photoshop import api as photoshop -class ExtractImage(openpype.api.Extractor): +class ExtractImage(publish.Extractor): """Produce a flattened image file from instance This plug-in takes into account only the layers in the group. diff --git a/openpype/hosts/photoshop/api/launch_logic.py b/openpype/hosts/photoshop/api/launch_logic.py index 0bbb19523d..1f0203dca6 100644 --- a/openpype/hosts/photoshop/api/launch_logic.py +++ b/openpype/hosts/photoshop/api/launch_logic.py @@ -10,7 +10,7 @@ from wsrpc_aiohttp import ( from Qt import QtCore -from openpype.api import Logger +from openpype.lib import Logger from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools from openpype.tools.adobe_webserver.app import WebServerTool diff --git a/openpype/hosts/photoshop/api/lib.py b/openpype/hosts/photoshop/api/lib.py index 2f57d64464..221b4314e6 100644 --- a/openpype/hosts/photoshop/api/lib.py +++ b/openpype/hosts/photoshop/api/lib.py @@ -5,11 +5,10 @@ import traceback from Qt import QtWidgets -from openpype.api import Logger +from openpype.lib import env_value_to_bool, Logger +from openpype.modules import ModulesManager from openpype.pipeline import install_host from openpype.tools.utils import host_tools -from openpype.lib.remote_publish import headless_publish -from openpype.lib import env_value_to_bool from .launch_logic import ProcessLauncher, stub @@ -35,8 +34,10 @@ def main(*subprocess_args): launcher.start() if env_value_to_bool("HEADLESS_PUBLISH"): + manager = ModulesManager() + webpublisher_addon = manager["webpublisher"] launcher.execute_in_main_thread( - headless_publish, + webpublisher_addon.headless_publish, log, "ClosePS", os.environ.get("IS_TEST") @@ -63,10 +64,15 @@ def maintained_selection(): @contextlib.contextmanager -def maintained_visibility(): - """Maintain visibility during context.""" +def maintained_visibility(layers=None): + """Maintain visibility during context. 
+ + Args: + layers (list) of PSItem (used for caching) + """ visibility = {} - layers = stub().get_layers() + if not layers: + layers = stub().get_layers() for layer in layers: visibility[layer.id] = layer.visible try: diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py index 20a6e3169f..9f6fc0983c 100644 --- a/openpype/hosts/photoshop/api/pipeline.py +++ b/openpype/hosts/photoshop/api/pipeline.py @@ -1,11 +1,9 @@ import os from Qt import QtWidgets -from bson.objectid import ObjectId import pyblish.api -from openpype.api import Logger -from openpype.lib import register_event_callback +from openpype.lib import register_event_callback, Logger from openpype.pipeline import ( legacy_io, register_loader_plugin_path, @@ -13,16 +11,15 @@ from openpype.pipeline import ( deregister_loader_plugin_path, deregister_creator_plugin_path, AVALON_CONTAINER_ID, - registered_host, ) -import openpype.hosts.photoshop +from openpype.pipeline.load import any_outdated_containers +from openpype.hosts.photoshop import PHOTOSHOP_HOST_DIR from . import lib log = Logger.get_logger(__name__) -HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.photoshop.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PLUGINS_DIR = os.path.join(PHOTOSHOP_HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") @@ -30,7 +27,7 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") def check_inventory(): - if not lib.any_outdated(): + if not any_outdated_containers(): return # Warn about outdated containers. diff --git a/openpype/hosts/photoshop/api/workio.py b/openpype/hosts/photoshop/api/workio.py index 951c5dbfff..35b44d6070 100644 --- a/openpype/hosts/photoshop/api/workio.py +++ b/openpype/hosts/photoshop/api/workio.py @@ -1,7 +1,6 @@ """Host API required Work Files tool""" import os -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS from . import lib @@ -14,7 +13,7 @@ def _active_document(): def file_extensions(): - return HOST_WORKFILE_EXTENSIONS["photoshop"] + return [".psd", ".psb"] def has_unsaved_changes(): diff --git a/openpype/hosts/photoshop/api/ws_stub.py b/openpype/hosts/photoshop/api/ws_stub.py index b49bf1c73f..2c4d0ad5fc 100644 --- a/openpype/hosts/photoshop/api/ws_stub.py +++ b/openpype/hosts/photoshop/api/ws_stub.py @@ -229,10 +229,11 @@ class PhotoshopServerStub: return self._get_layers_in_layers(parent_ids) - def get_layers_in_layers_ids(self, layers_ids): + def get_layers_in_layers_ids(self, layers_ids, layers=None): """Return all layers that belong to layers (might be groups). 
Args: + layers_ids layers : Returns: @@ -240,10 +241,13 @@ class PhotoshopServerStub: """ parent_ids = set(layers_ids) - return self._get_layers_in_layers(parent_ids) + return self._get_layers_in_layers(parent_ids, layers) - def _get_layers_in_layers(self, parent_ids): - all_layers = self.get_layers() + def _get_layers_in_layers(self, parent_ids, layers=None): + if not layers: + layers = self.get_layers() + + all_layers = layers ret = [] for layer in all_layers: @@ -394,14 +398,17 @@ class PhotoshopServerStub: self.hide_all_others_layers_ids(extract_ids) - def hide_all_others_layers_ids(self, extract_ids): + def hide_all_others_layers_ids(self, extract_ids, layers=None): """hides all layers that are not part of the list or that are not children of this list Args: extract_ids (list): list of integer that should be visible + layers (list) of PSItem (used for caching) """ - for layer in self.get_layers(): + if not layers: + layers = self.get_layers() + for layer in layers: if layer.visible and layer.id not in extract_ids: self.set_visible(layer.id, False) diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index f15068b031..2cfbfa8778 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -1,3 +1,5 @@ +import re + from openpype.hosts.photoshop import api from openpype.lib import BoolDef from openpype.pipeline import ( @@ -5,6 +7,8 @@ from openpype.pipeline import ( CreatedInstance, legacy_io ) +from openpype.lib import prepare_template_data +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS class ImageCreator(Creator): @@ -38,17 +42,24 @@ class ImageCreator(Creator): top_level_selected_items = stub.get_selected_layers() if pre_create_data.get("use_selection"): only_single_item_selected = len(top_level_selected_items) == 1 - for selected_item in top_level_selected_items: - if ( - only_single_item_selected or - pre_create_data.get("create_multiple")): + if ( + only_single_item_selected or + pre_create_data.get("create_multiple")): + for selected_item in top_level_selected_items: if selected_item.group: groups_to_create.append(selected_item) else: top_layers_to_wrap.append(selected_item) - else: - group = stub.group_selected_layers(subset_name_from_ui) - groups_to_create.append(group) + else: + group = stub.group_selected_layers(subset_name_from_ui) + groups_to_create.append(group) + else: + stub.select_layers(stub.get_layers()) + try: + group = stub.group_selected_layers(subset_name_from_ui) + except: + raise ValueError("Cannot group locked Background layer!") + groups_to_create.append(group) if not groups_to_create and not top_layers_to_wrap: group = stub.create_group(subset_name_from_ui) @@ -60,6 +71,7 @@ class ImageCreator(Creator): group = stub.group_selected_layers(layer.name) groups_to_create.append(group) + layer_name = '' creating_multiple_groups = len(groups_to_create) > 1 for group in groups_to_create: subset_name = subset_name_from_ui # reset to name from creator UI @@ -67,8 +79,16 @@ class ImageCreator(Creator): created_group_name = self._clean_highlights(stub, group.name) if creating_multiple_groups: - # concatenate with layer name to differentiate subsets - subset_name += group.name.title().replace(" ", "") + layer_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + group.name + ) + if "{layer}" not in subset_name.lower(): + subset_name += "{Layer}" + + layer_fill =
prepare_template_data({"layer": layer_name}) + subset_name = subset_name.format(**layer_fill) if group.long_name: for directory in group.long_name[::-1]: @@ -143,3 +163,6 @@ class ImageCreator(Creator): def _clean_highlights(self, stub, item): return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON, '') + @classmethod + def get_dynamic_data(cls, *args, **kwargs): + return {"layer": "{layer}"} diff --git a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py index 9736471a26..2792a775e0 100644 --- a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py @@ -1,7 +1,12 @@ +import re + from Qt import QtWidgets from openpype.pipeline import create from openpype.hosts.photoshop import api as photoshop +from openpype.lib import prepare_template_data +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS + class CreateImage(create.LegacyCreator): """Image folder for publish.""" @@ -75,6 +80,7 @@ class CreateImage(create.LegacyCreator): groups.append(group) creator_subset_name = self.data["subset"] + layer_name = '' for group in groups: long_names = [] group.name = group.name.replace(stub.PUBLISH_ICON, ''). \ @@ -82,7 +88,16 @@ class CreateImage(create.LegacyCreator): subset_name = creator_subset_name if len(groups) > 1: - subset_name += group.name.title().replace(" ", "") + layer_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + group.name + ) + if "{layer}" not in subset_name.lower(): + subset_name += "{Layer}" + + layer_fill = prepare_template_data({"layer": layer_name}) + subset_name = subset_name.format(**layer_fill) if group.long_name: for directory in group.long_name[::-1]: @@ -98,3 +113,7 @@ class CreateImage(create.LegacyCreator): # reusing existing group, need to rename afterwards if not create_group: stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name) + + @classmethod + def get_dynamic_data(cls, *args, **kwargs): + return {"layer": "{layer}"} diff --git a/openpype/hosts/photoshop/plugins/create/workfile_creator.py b/openpype/hosts/photoshop/plugins/create/workfile_creator.py index 875a9b8a94..e79d16d154 100644 --- a/openpype/hosts/photoshop/plugins/create/workfile_creator.py +++ b/openpype/hosts/photoshop/plugins/create/workfile_creator.py @@ -1,4 +1,5 @@ import openpype.hosts.photoshop.api as api +from openpype.client import get_asset_by_name from openpype.pipeline import ( AutoCreator, CreatedInstance, @@ -10,6 +11,8 @@ class PSWorkfileCreator(AutoCreator): identifier = "workfile" family = "workfile" + default_variant = "Main" + def get_instance_attr_defs(self): return [] @@ -34,26 +37,24 @@ class PSWorkfileCreator(AutoCreator): existing_instance = instance break - variant = '' project_name = legacy_io.Session["AVALON_PROJECT"] asset_name = legacy_io.Session["AVALON_ASSET"] task_name = legacy_io.Session["AVALON_TASK"] host_name = legacy_io.Session["AVALON_APP"] if existing_instance is None: - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name + self.default_variant, task_name, asset_doc, + project_name, host_name ) data = { "asset": asset_name, "task": task_name, - "variant": variant + "variant": self.default_variant } data.update(self.get_dynamic_data( - variant, task_name, asset_doc, project_name, host_name + 
self.default_variant, task_name, asset_doc, + project_name, host_name )) new_instance = CreatedInstance( @@ -67,12 +68,11 @@ class PSWorkfileCreator(AutoCreator): existing_instance["asset"] != asset_name or existing_instance["task"] != task_name ): - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name + self.default_variant, task_name, asset_doc, + project_name, host_name ) existing_instance["asset"] = asset_name existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py index 448493d370..5d50a78914 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py @@ -17,11 +17,11 @@ import os import pyblish.api -from openpype.lib.plugin_tools import ( - parse_json, - get_batch_asset_task_info -) from openpype.pipeline import legacy_io +from openpype_modules.webpublisher.lib import ( + get_batch_asset_task_info, + parse_json +) class CollectBatchData(pyblish.api.ContextPlugin): @@ -39,6 +39,9 @@ class CollectBatchData(pyblish.api.ContextPlugin): def process(self, context): self.log.info("CollectBatchData") batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") + if os.environ.get("IS_TEST"): + self.log.debug("Automatic testing, no batch data, skipping") + return assert batch_dir, ( "Missing `OPENPYPE_PUBLISH_DATA`") diff --git a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py index ae025fc61d..c157c932fd 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py @@ -9,14 +9,22 @@ from openpype.settings import get_project_settings class CollectColorCodedInstances(pyblish.api.ContextPlugin): - """Creates instances for configured color code of a layer. + """Creates instances for layers marked by configurable color. Used in remote publishing when artists marks publishable layers by color- - coding. + coding. Top level layers (group) must be marked by specific color to be + published as an instance of 'image' family. Can add group for all publishable layers to allow creation of flattened image. (Cannot contain special background layer as it cannot be grouped!) + Based on value `create_flatten_image` from Settings: + - "yes": create flattened 'image' subset of all publishable layers + create + 'image' subset per publishable layer + - "only": create ONLY flattened 'image' subset of all publishable layers + - "no": do not create flattened 'image' subset at all, + only separate subsets per marked layer. 
+ Identifier: id (str): "pyblish.avalon.instance" """ @@ -32,8 +40,7 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): # TODO check if could be set globally, probably doesn't make sense when # flattened template cannot subset_template_name = "" - create_flatten_image = False - # probably not possible to configure this globally + create_flatten_image = "no" flatten_subset_template = "" def process(self, context): @@ -62,6 +69,7 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): publishable_layers = [] created_instances = [] + family_from_settings = None for layer in layers: self.log.debug("Layer:: {}".format(layer)) if layer.parents: @@ -80,11 +88,14 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): self.log.debug("!!! Not found family or template, skip") continue + if not family_from_settings: + family_from_settings = resolved_family + fill_pairs = { "variant": variant, "family": resolved_family, "task": task_name, - "layer": layer.name + "layer": layer.clean_name } subset = resolved_subset_template.format( @@ -98,13 +109,16 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): "Subset {} already created, skipping.".format(subset)) continue - instance = self._create_instance(context, layer, resolved_family, - asset_name, subset, task_name) + if self.create_flatten_image != "flatten_only": + instance = self._create_instance(context, layer, + resolved_family, + asset_name, subset, task_name) + created_instances.append(instance) + existing_subset_names.append(subset) publishable_layers.append(layer) - created_instances.append(instance) - if self.create_flatten_image and publishable_layers: + if self.create_flatten_image != "no" and publishable_layers: self.log.debug("create_flatten_image") if not self.flatten_subset_template: self.log.warning("No template for flatten image") @@ -116,7 +130,7 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): first_layer = publishable_layers[0] # dummy layer first_layer.name = subset - family = created_instances[0].data["family"] # inherit family + family = family_from_settings # inherit family instance = self._create_instance(context, first_layer, family, asset_name, subset, task_name) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_published_version.py b/openpype/hosts/photoshop/plugins/publish/collect_published_version.py new file mode 100644 index 0000000000..2502689e4b --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/collect_published_version.py @@ -0,0 +1,55 @@ +"""Collects published version of workfile and increments it. + +For synchronization of published image and workfile version it is required +to store workfile version from workfile file name in context.data["version"]. +In remote publishing this name is unreliable (artist might not follow naming +convention etc.), last published workfile version for particular workfile +subset is used instead. + +This plugin runs only in remote publishing (eg. Webpublisher). 
+ +Requires: + context.data["assetEntity"] + +Provides: + context["version"] - incremented latest published workfile version +""" + +import pyblish.api + +from openpype.client import get_last_version_by_subset_name + + +class CollectPublishedVersion(pyblish.api.ContextPlugin): + """Collects published version of workfile and increments it.""" + + order = pyblish.api.CollectorOrder + 0.190 + label = "Collect published version" + hosts = ["photoshop"] + targets = ["remotepublish"] + + def process(self, context): + workfile_subset_name = None + for instance in context: + if instance.data["family"] == "workfile": + workfile_subset_name = instance.data["subset"] + break + + if not workfile_subset_name: + self.log.warning("No workfile instance found, " + "synchronization of version will not work.") + return + + project_name = context.data["projectName"] + asset_doc = context.data["assetEntity"] + asset_id = asset_doc["_id"] + + version_doc = get_last_version_by_subset_name(project_name, + workfile_subset_name, + asset_id) + version_int = 1 + if version_doc: + version_int += int(version_doc["name"]) + + self.log.debug(f"Setting {version_int} to context.") + context.data["version"] = version_int diff --git a/openpype/hosts/photoshop/plugins/publish/collect_review.py b/openpype/hosts/photoshop/plugins/publish/collect_review.py index 2ea5503f3f..7e598a8250 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_review.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_review.py @@ -10,7 +10,7 @@ import os import pyblish.api -from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline.create import get_subset_name class CollectReview(pyblish.api.ContextPlugin): @@ -25,15 +25,18 @@ class CollectReview(pyblish.api.ContextPlugin): hosts = ["photoshop"] order = pyblish.api.CollectorOrder + 0.1 + publish = True + def process(self, context): family = "review" - subset = get_subset_name_with_asset_doc( + subset = get_subset_name( family, context.data.get("variant", ''), context.data["anatomyData"]["task"]["name"], context.data["assetEntity"], context.data["anatomyData"]["project"]["name"], - host_name=context.data["hostName"] + host_name=context.data["hostName"], + project_settings=context.data["project_settings"] ) instance = context.create_instance(subset) @@ -44,5 +47,6 @@ class CollectReview(pyblish.api.ContextPlugin): "family": family, "families": [], "representations": [], - "asset": os.environ["AVALON_ASSET"] + "asset": os.environ["AVALON_ASSET"], + "publish": self.publish }) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_version.py b/openpype/hosts/photoshop/plugins/publish/collect_version.py new file mode 100644 index 0000000000..cda71d8643 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/collect_version.py @@ -0,0 +1,29 @@ +import pyblish.api + + +class CollectVersion(pyblish.api.InstancePlugin): + """Collect version for publishable instances. + + Used to synchronize version from workfile to all publishable instances: + - image (manually created or color coded) + - review + - workfile + + Dev comment: + Explicit collector created to control this from single place and not from + 3 different. + + Workfile set here explicitly as version might to be forced from latest + 1 + because of Webpublisher. + (This plugin must run after CollectPublishedVersion!) 
+ """ + order = pyblish.api.CollectorOrder + 0.200 + label = 'Collect Version' + + hosts = ["photoshop"] + families = ["image", "review", "workfile"] + + def process(self, instance): + workfile_version = instance.context.data["version"] + self.log.debug(f"Applying version {workfile_version}") + instance.data["version"] = workfile_version diff --git a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py index e4f0a07b34..9a5aad5569 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py @@ -1,7 +1,7 @@ import os import pyblish.api -from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline.create import get_subset_name class CollectWorkfile(pyblish.api.ContextPlugin): @@ -11,6 +11,8 @@ class CollectWorkfile(pyblish.api.ContextPlugin): label = "Collect Workfile" hosts = ["photoshop"] + default_variant = "Main" + def process(self, context): existing_instance = None for instance in context: @@ -20,13 +22,16 @@ class CollectWorkfile(pyblish.api.ContextPlugin): break family = "workfile" - subset = get_subset_name_with_asset_doc( + # context.data["variant"] might come only from collect_batch_data + variant = context.data.get("variant") or self.default_variant + subset = get_subset_name( family, - "", + variant, context.data["anatomyData"]["task"]["name"], context.data["assetEntity"], context.data["anatomyData"]["project"]["name"], - host_name=context.data["hostName"] + host_name=context.data["hostName"], + project_settings=context.data["project_settings"] ) file_path = context.data["currentFile"] diff --git a/openpype/hosts/photoshop/plugins/publish/extract_image.py b/openpype/hosts/photoshop/plugins/publish/extract_image.py index a133e33409..cdb28c742d 100644 --- a/openpype/hosts/photoshop/plugins/publish/extract_image.py +++ b/openpype/hosts/photoshop/plugins/publish/extract_image.py @@ -1,61 +1,99 @@ import os -import openpype.api +import pyblish.api +from openpype.pipeline import publish from openpype.hosts.photoshop import api as photoshop -class ExtractImage(openpype.api.Extractor): - """Produce a flattened image file from instance +class ExtractImage(pyblish.api.ContextPlugin): + """Extract all layers (groups) marked for publish. - This plug-in takes into account only the layers in the group. + Usually publishable instance is created as a wrapper of layer(s). For each + publishable instance so many images as there is 'formats' is created. + + Logic tries to hide/unhide layers minimum times. + + Called once for all publishable instances. 
""" + order = publish.Extractor.order - 0.48 label = "Extract Image" hosts = ["photoshop"] + families = ["image", "background"] formats = ["png", "jpg"] - def process(self, instance): - staging_dir = self.staging_dir(instance) - self.log.info("Outputting image to {}".format(staging_dir)) - - # Perform extraction + def process(self, context): stub = photoshop.stub() - files = {} + hidden_layer_ids = set() + + all_layers = stub.get_layers() + for layer in all_layers: + if not layer.visible: + hidden_layer_ids.add(layer.id) + stub.hide_all_others_layers_ids([], layers=all_layers) + with photoshop.maintained_selection(): - self.log.info("Extracting %s" % str(list(instance))) - with photoshop.maintained_visibility(): - ids = set() - layer = instance.data.get("layer") - if layer: - ids.add(layer.id) - add_ids = instance.data.pop("ids", None) - if add_ids: - ids.update(set(add_ids)) - extract_ids = set([ll.id for ll in stub. - get_layers_in_layers_ids(ids)]) - stub.hide_all_others_layers_ids(extract_ids) + with photoshop.maintained_visibility(layers=all_layers): + for instance in context: + if instance.data["family"] not in self.families: + continue - file_basename = os.path.splitext( - stub.get_active_document_name() - )[0] - for extension in self.formats: - _filename = "{}.{}".format(file_basename, extension) - files[extension] = _filename + staging_dir = self.staging_dir(instance) + self.log.info("Outputting image to {}".format(staging_dir)) - full_filename = os.path.join(staging_dir, _filename) - stub.saveAs(full_filename, extension, True) - self.log.info(f"Extracted: {extension}") + # Perform extraction + files = {} + ids = set() + layer = instance.data.get("layer") + if layer: + ids.add(layer.id) + add_ids = instance.data.pop("ids", None) + if add_ids: + ids.update(set(add_ids)) + extract_ids = set([ll.id for ll in stub. 
+ get_layers_in_layers_ids(ids, all_layers) + if ll.id not in hidden_layer_ids]) - representations = [] - for extension, filename in files.items(): - representations.append({ - "name": extension, - "ext": extension, - "files": filename, - "stagingDir": staging_dir - }) - instance.data["representations"] = representations - instance.data["stagingDir"] = staging_dir + for extracted_id in extract_ids: + stub.set_visible(extracted_id, True) - self.log.info(f"Extracted {instance} to {staging_dir}") + file_basename = os.path.splitext( + stub.get_active_document_name() + )[0] + for extension in self.formats: + _filename = "{}.{}".format(file_basename, + extension) + files[extension] = _filename + + full_filename = os.path.join(staging_dir, + _filename) + stub.saveAs(full_filename, extension, True) + self.log.info(f"Extracted: {extension}") + + representations = [] + for extension, filename in files.items(): + representations.append({ + "name": extension, + "ext": extension, + "files": filename, + "stagingDir": staging_dir + }) + instance.data["representations"] = representations + instance.data["stagingDir"] = staging_dir + + self.log.info(f"Extracted {instance} to {staging_dir}") + + for extracted_id in extract_ids: + stub.set_visible(extracted_id, False) + + def staging_dir(self, instance): + """Provide a temporary directory in which to store extracted files + + Upon calling this method the staging directory is stored inside + the instance.data['stagingDir'] + """ + + from openpype.pipeline.publish import get_instance_staging_dir + + return get_instance_staging_dir(instance) diff --git a/openpype/hosts/photoshop/plugins/publish/extract_review.py b/openpype/hosts/photoshop/plugins/publish/extract_review.py index d076610ead..01022ce0b2 100644 --- a/openpype/hosts/photoshop/plugins/publish/extract_review.py +++ b/openpype/hosts/photoshop/plugins/publish/extract_review.py @@ -1,17 +1,28 @@ import os import shutil +from PIL import Image -import openpype.api -import openpype.lib +from openpype.lib import ( + run_subprocess, + get_ffmpeg_tool_path, +) +from openpype.pipeline import publish from openpype.hosts.photoshop import api as photoshop -class ExtractReview(openpype.api.Extractor): +class ExtractReview(publish.Extractor): """ - Produce a flattened or sequence image file from all 'image' instances. + Produce a flattened or sequence image files from all 'image' instances. If no 'image' instance is created, it produces flattened image from all visible layers. + + It creates review, thumbnail and mov representations. + + 'review' family could be used in other steps as a reference, as it + contains flattened image by default. (Eg. artist could load this + review as a single item and see full image. In most cases 'image' + family is separated by layers to better usage in animation or comp.) 
""" label = "Extract Review" @@ -22,6 +33,7 @@ class ExtractReview(openpype.api.Extractor): jpg_options = None mov_options = None make_image_sequence = None + max_downscale_size = 8192 def process(self, instance): staging_dir = self.staging_dir(instance) @@ -37,7 +49,7 @@ class ExtractReview(openpype.api.Extractor): if self.make_image_sequence and len(layers) > 1: self.log.info("Extract layers to image sequence.") - img_list = self._saves_sequences_layers(staging_dir, layers) + img_list = self._save_sequence_images(staging_dir, layers) instance.data["representations"].append({ "name": "jpg", @@ -49,108 +61,146 @@ class ExtractReview(openpype.api.Extractor): "stagingDir": staging_dir, "tags": self.jpg_options['tags'], }) - + processed_img_names = img_list else: self.log.info("Extract layers to flatten image.") - img_list = self._saves_flattened_layers(staging_dir, layers) + img_list = self._save_flatten_image(staging_dir, layers) instance.data["representations"].append({ "name": "jpg", "ext": "jpg", - "files": img_list, + "files": img_list, # cannot be [] for single frame "stagingDir": staging_dir, "tags": self.jpg_options['tags'] }) + processed_img_names = [img_list] - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") instance.data["stagingDir"] = staging_dir - # Generate thumbnail. - thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") - self.log.info(f"Generate thumbnail {thumbnail_path}") - args = [ - ffmpeg_path, - "-y", - "-i", os.path.join(staging_dir, self.output_seq_filename), - "-vf", "scale=300:-1", - "-vframes", "1", - thumbnail_path - ] - output = openpype.lib.run_subprocess(args) + source_files_pattern = os.path.join(staging_dir, + self.output_seq_filename) + source_files_pattern = self._check_and_resize(processed_img_names, + source_files_pattern, + staging_dir) + self._generate_thumbnail(ffmpeg_path, instance, source_files_pattern, + staging_dir) - instance.data["representations"].append({ - "name": "thumbnail", - "ext": "jpg", - "files": os.path.basename(thumbnail_path), - "stagingDir": staging_dir, - "tags": ["thumbnail"] - }) + no_of_frames = len(processed_img_names) + if no_of_frames > 1: + self._generate_mov(ffmpeg_path, instance, fps, no_of_frames, + source_files_pattern, staging_dir) + self.log.info(f"Extracted {instance} to {staging_dir}") + + def _generate_mov(self, ffmpeg_path, instance, fps, no_of_frames, + source_files_pattern, staging_dir): + """Generates .mov to upload to Ftrack. + + Args: + ffmpeg_path (str): path to ffmpeg + instance (Pyblish Instance) + fps (str) + no_of_frames (int): + source_files_pattern (str): name of source file + staging_dir (str): temporary location to store thumbnail + Updates: + instance - adds representation portion + """ # Generate mov. 
mov_path = os.path.join(staging_dir, "review.mov") self.log.info(f"Generate mov review: {mov_path}") - img_number = len(img_list) args = [ ffmpeg_path, "-y", - "-i", os.path.join(staging_dir, self.output_seq_filename), + "-i", source_files_pattern, "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2", - "-vframes", str(img_number), + "-vframes", str(no_of_frames), mov_path ] - output = openpype.lib.run_subprocess(args) - self.log.debug(output) + self.log.debug("mov args:: {}".format(args)) + _output = run_subprocess(args) instance.data["representations"].append({ "name": "mov", "ext": "mov", "files": os.path.basename(mov_path), "stagingDir": staging_dir, "frameStart": 1, - "frameEnd": img_number, + "frameEnd": no_of_frames, "fps": fps, "preview": True, "tags": self.mov_options['tags'] }) - # Required for extract_review plugin (L222 onwards). - instance.data["frameStart"] = 1 - instance.data["frameEnd"] = img_number - instance.data["fps"] = 25 + def _generate_thumbnail(self, ffmpeg_path, instance, source_files_pattern, + staging_dir): + """Generates scaled down thumbnail and adds it as representation. - self.log.info(f"Extracted {instance} to {staging_dir}") + Args: + ffmpeg_path (str): path to ffmpeg + instance (Pyblish Instance) + source_files_pattern (str): name of source file + staging_dir (str): temporary location to store thumbnail + Updates: + instance - adds representation portion + """ + # Generate thumbnail + thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") + self.log.info(f"Generate thumbnail {thumbnail_path}") + args = [ + ffmpeg_path, + "-y", + "-i", source_files_pattern, + "-vf", "scale=300:-1", + "-vframes", "1", + thumbnail_path + ] + self.log.debug("thumbnail args:: {}".format(args)) + _output = run_subprocess(args) + instance.data["representations"].append({ + "name": "thumbnail", + "ext": "jpg", + "outputName": "thumb", + "files": os.path.basename(thumbnail_path), + "stagingDir": staging_dir, + "tags": ["thumbnail", "delete"] + }) - def _get_image_path_from_instances(self, instance): - img_list = [] + def _check_and_resize(self, processed_img_names, source_files_pattern, + staging_dir): + """Check if saved image could be used in ffmpeg. - for instance in sorted(instance.context): - if instance.data["family"] != "image": - continue + Ffmpeg has max size 16384x16384. Saved image(s) must be resized to be + used as a source for thumbnail or review mov. + """ + Image.MAX_IMAGE_PIXELS = None + first_url = os.path.join(staging_dir, processed_img_names[0]) + with Image.open(first_url) as im: + width, height = im.size - for rep in instance.data["representations"]: - img_path = os.path.join( - rep["stagingDir"], - rep["files"] - ) - img_list.append(img_path) + if width > self.max_downscale_size or height > self.max_downscale_size: + resized_dir = os.path.join(staging_dir, "resized") + os.mkdir(resized_dir) + source_files_pattern = os.path.join(resized_dir, + self.output_seq_filename) + for file_name in processed_img_names: + source_url = os.path.join(staging_dir, file_name) + with Image.open(source_url) as res_img: + # 'thumbnail' automatically keeps aspect ratio + res_img.thumbnail((self.max_downscale_size, + self.max_downscale_size), + Image.ANTIALIAS) + res_img.save(os.path.join(resized_dir, file_name)) - return img_list - - def _copy_image_to_staging_dir(self, staging_dir, img_list): - copy_files = [] - for i, img_src in enumerate(img_list): - img_filename = self.output_seq_filename % i - img_dst = os.path.join(staging_dir, img_filename) - - self.log.debug( - "Copying file .. 
{} -> {}".format(img_src, img_dst) - ) - shutil.copy(img_src, img_dst) - copy_files.append(img_filename) - - return copy_files + return source_files_pattern def _get_layers_from_image_instances(self, instance): + """Collect all layers from 'instance'. + + Returns: + (list) of PSItem + """ layers = [] for image_instance in instance.context: if image_instance.data["family"] != "image": @@ -162,7 +212,12 @@ class ExtractReview(openpype.api.Extractor): return sorted(layers) - def _saves_flattened_layers(self, staging_dir, layers): + def _save_flatten_image(self, staging_dir, layers): + """Creates flat image from 'layers' into 'staging_dir'. + + Returns: + (str): path to new image + """ img_filename = self.output_seq_filename % 0 output_image_path = os.path.join(staging_dir, img_filename) stub = photoshop.stub() @@ -176,7 +231,13 @@ class ExtractReview(openpype.api.Extractor): return img_filename - def _saves_sequences_layers(self, staging_dir, layers): + def _save_sequence_images(self, staging_dir, layers): + """Creates separate flat images from 'layers' into 'staging_dir'. + + Used as source for multi frames .mov to review at once. + Returns: + (list): paths to new images + """ stub = photoshop.stub() list_img_filename = [] diff --git a/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py b/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py index 03086f389f..aa900fec9f 100644 --- a/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py +++ b/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py @@ -1,11 +1,11 @@ -import openpype.api +from openpype.pipeline import publish from openpype.hosts.photoshop import api as photoshop -class ExtractSaveScene(openpype.api.Extractor): +class ExtractSaveScene(publish.Extractor): """Save scene before extraction.""" - order = openpype.api.Extractor.order - 0.49 + order = publish.Extractor.order - 0.49 label = "Extract Save Scene" hosts = ["photoshop"] families = ["workfile"] diff --git a/openpype/hosts/photoshop/plugins/publish/increment_workfile.py b/openpype/hosts/photoshop/plugins/publish/increment_workfile.py index 92132c393b..665dd67fc5 100644 --- a/openpype/hosts/photoshop/plugins/publish/increment_workfile.py +++ b/openpype/hosts/photoshop/plugins/publish/increment_workfile.py @@ -1,6 +1,6 @@ import os import pyblish.api -from openpype.action import get_errored_plugins_from_data +from openpype.pipeline.publish import get_errored_plugins_from_context from openpype.lib import version_up from openpype.hosts.photoshop import api as photoshop @@ -19,7 +19,7 @@ class IncrementWorkfile(pyblish.api.InstancePlugin): optional = True def process(self, instance): - errored_plugins = get_errored_plugins_from_data(instance.context) + errored_plugins = get_errored_plugins_from_context(instance.context) if errored_plugins: raise RuntimeError( "Skipping incrementing current file because publishing failed." 
diff --git a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py index b65f9d259f..2609f7a8cf 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py @@ -1,7 +1,7 @@ import pyblish.api -import openpype.api from openpype.pipeline import legacy_io +from openpype.pipeline.publish import ValidateContentsOrder from openpype.hosts.photoshop import api as photoshop @@ -45,7 +45,7 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin): label = "Validate Instance Asset" hosts = ["photoshop"] actions = [ValidateInstanceAssetRepair] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder def process(self, instance): instance_asset = instance.data["asset"] diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/openpype/hosts/photoshop/plugins/publish/validate_naming.py index b53f4e8198..0665aff9d0 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_naming.py @@ -1,9 +1,13 @@ import re import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError + from openpype.hosts.photoshop import api as photoshop +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateNamingRepair(pyblish.api.Action): @@ -50,6 +54,13 @@ class ValidateNamingRepair(pyblish.api.Action): subset_name = re.sub(invalid_chars, replace_char, instance.data["subset"]) + # format from Tool Creator + subset_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + subset_name + ) + layer_meta["subset"] = subset_name stub.imprint(instance_id, layer_meta) @@ -64,7 +75,7 @@ class ValidateNaming(pyblish.api.InstancePlugin): label = "Validate Naming" hosts = ["photoshop"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["image"] actions = [ValidateNamingRepair] diff --git a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py b/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py index 01f2323157..78e84729ce 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py @@ -1,7 +1,9 @@ import collections import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateSubsetUniqueness(pyblish.api.ContextPlugin): @@ -11,7 +13,7 @@ class ValidateSubsetUniqueness(pyblish.api.ContextPlugin): label = "Validate Subset Uniqueness" hosts = ["photoshop"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["image"] def process(self, context): diff --git a/openpype/hosts/resolve/README.markdown b/openpype/hosts/resolve/README.markdown index 8c9f72fb0c..a8bb071e7e 100644 --- a/openpype/hosts/resolve/README.markdown +++ b/openpype/hosts/resolve/README.markdown @@ -1,22 +1,24 @@ -#### Basic setup +## Basic setup -- Install [latest DaVinci 
Resolve](https://sw.blackmagicdesign.com/DaVinciResolve/v16.2.8/DaVinci_Resolve_Studio_16.2.8_Windows.zip?Key-Pair-Id=APKAJTKA3ZJMJRQITVEA&Signature=EcFuwQFKHZIBu2zDj5LTCQaQDXcKOjhZY7Fs07WGw24xdDqfwuALOyKu+EVzDX2Tik0cWDunYyV0r7hzp+mHmczp9XP4YaQXHdyhD/2BGWDgiMsiTQbNkBgbfy5MsAMFY8FHCl724Rxm8ke1foWeUVyt/Cdkil+ay+9sL72yFhaSV16sncko1jCIlCZeMkHhbzqPwyRuqLGmxmp8ey9KgBhI3wGFFPN201VMaV+RHrpX+KAfaR6p6dwo3FrPbRHK9TvMI1RA/1lJ3fVtrkDW69LImIKAWmIxgcStUxR9/taqLOD66FNiflHd1tufHv3FBa9iYQsjb3VLMPx7OCwLyg==&Expires=1608308139) -- add absolute path to ffmpeg into openpype settings - ![image](https://user-images.githubusercontent.com/40640033/102630786-43294f00-414d-11eb-98de-f0ae51f62077.png) -- install Python 3.6 into `%LOCALAPPDATA%/Programs/Python/Python36` (only respected path by Resolve) -- install OpenTimelineIO for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move built files from `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/`. I was building it on Win10 machine with Visual Studio Community 2019 and +- Actually supported version is up to v18 +- install Python 3.6.2 (latest tested v17) or up to 3.9.13 (latest tested on v18) +- pip install PySide2: + - Python 3.9.*: open terminal and go to python.exe directory, then `python -m pip install PySide2` +- pip install OpenTimelineIO: + - Python 3.9.*: open terminal and go to python.exe directory, then `python -m pip install OpenTimelineIO` + - Python 3.6: open terminal and go to python.exe directory, then `python -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move built files from `./Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `./Lib/site-packages/opentimelineio/`. I was building it on Win10 machine with Visual Studio Community 2019 and ![image](https://user-images.githubusercontent.com/40640033/102792588-ffcb1c80-43a8-11eb-9c6b-bf2114ed578e.png) with installed CMake in PATH. -- install PySide2 for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install PySide2` - make sure Resolve Fusion (Fusion Tab/menu/Fusion/Fusion Settings) is set to Python 3.6 ![image](https://user-images.githubusercontent.com/40640033/102631545-280b0f00-414e-11eb-89fc-98ac268d209d.png) +- Open OpenPype **Tray/Admin/Studio settings** > `applications/resolve/environment` and add Python3 path to `RESOLVE_PYTHON3_HOME` platform related. -#### Editorial setup +## Editorial setup This is how it looks on my testing project timeline ![image](https://user-images.githubusercontent.com/40640033/102637638-96ec6600-4156-11eb-9656-6e8e3ce4baf8.png) Notice I had renamed tracks to `main` (holding metadata markers) and `review` used for generating review data with ffmpeg confersion to jpg sequence. -1. you need to start OpenPype menu from Resolve/EditTab/Menu/Workspace/Scripts/**__OpenPype_Menu__** +1. you need to start OpenPype menu from Resolve/EditTab/Menu/Workspace/Scripts/Comp/**__OpenPype_Menu__** 2. then select any clips in `main` track and change their color to `Chocolate` 3. in OpenPype Menu select `Create` 4. 
in Creator select `Create Publishable Clip [New]` (temporary name) diff --git a/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_down.txt b/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_down.txt deleted file mode 100644 index 139b66bc24..0000000000 --- a/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_down.txt +++ /dev/null @@ -1,189 +0,0 @@ -Updated as of 08 March 2019 - --------------------------- -In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import modules for scripting access (DaVinciResolve.py) and some representative examples. - -Overview --------- - -As with Blackmagic Design Fusion scripts, user scripts written in Lua and Python programming languages are supported. By default, scripts can be invoked from the Console window in the Fusion page, or via command line. This permission can be changed in Resolve Preferences, to be only from Console, or to be invoked from the local network. Please be aware of the security implications when allowing scripting access from outside of the Resolve application. - - -Using a script --------------- -DaVinci Resolve needs to be running for a script to be invoked. - -For a Resolve script to be executed from an external folder, the script needs to know of the API location. -You may need to set the these environment variables to allow for your Python installation to pick up the appropriate dependencies as shown below: - - Mac OS X: - RESOLVE_SCRIPT_API="/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting/" - RESOLVE_SCRIPT_LIB="/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so" - PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/" - - Windows: - RESOLVE_SCRIPT_API="%PROGRAMDATA%\\Blackmagic Design\\DaVinci Resolve\\Support\\Developer\\Scripting\\" - RESOLVE_SCRIPT_LIB="C:\\Program Files\\Blackmagic Design\\DaVinci Resolve\\fusionscript.dll" - PYTHONPATH="%PYTHONPATH%;%RESOLVE_SCRIPT_API%\\Modules\\" - - Linux: - RESOLVE_SCRIPT_API="/opt/resolve/Developer/Scripting/" - RESOLVE_SCRIPT_LIB="/opt/resolve/libs/Fusion/fusionscript.so" - PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/" - (Note: For standard ISO Linux installations, the path above may need to be modified to refer to /home/resolve instead of /opt/resolve) - -As with Fusion scripts, Resolve scripts can also be invoked via the menu and the Console. - -On startup, DaVinci Resolve scans the Utility Scripts directory and enumerates the scripts found in the Script application menu. Placing your script in this folder and invoking it from this menu is the easiest way to use scripts. The Utility Scripts folder is located in: - Mac OS X: /Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp/ - Windows: %APPDATA%\Blackmagic Design\DaVinci Resolve\Fusion\Scripts\Comp\ - Linux: /opt/resolve/Fusion/Scripts/Comp/ (or /home/resolve/Fusion/Scripts/Comp/ depending on installation) - -The interactive Console window allows for an easy way to execute simple scripting commands, to query or modify properties, and to test scripts. The console accepts commands in Python 2.7, Python 3.6 and Lua and evaluates and executes them immediately. For more information on how to use the Console, please refer to the DaVinci Resolve User Manual. 
- -This example Python script creates a simple project: - #!/usr/bin/env python - import DaVinciResolveScript as dvr_script - resolve = dvr_script.scriptapp("Resolve") - fusion = resolve.Fusion() - projectManager = resolve.GetProjectManager() - projectManager.CreateProject("Hello World") - -The resolve object is the fundamental starting point for scripting via Resolve. As a native object, it can be inspected for further scriptable properties - using table iteration and `getmetatable` in Lua and dir, help etc in Python (among other methods). A notable scriptable object above is fusion - it allows access to all existing Fusion scripting functionality. - -Running DaVinci Resolve in headless mode ----------------------------------------- - -DaVinci Resolve can be launched in a headless mode without the user interface using the -nogui command line option. When DaVinci Resolve is launched using this option, the user interface is disabled. However, the various scripting APIs will continue to work as expected. - -Basic Resolve API ------------------ - -Some commonly used API functions are described below (*). As with the resolve object, each object is inspectable for properties and functions. - - -Resolve - Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts. - GetMediaStorage() --> MediaStorage # Returns media storage object to query and act on media locations. - GetProjectManager() --> ProjectManager # Returns project manager object for currently open database. - OpenPage(pageName) --> None # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "edit", "fusion", "color", "fairlight", "deliver"). -ProjectManager - CreateProject(projectName) --> Project # Creates and returns a project if projectName (text) is unique, and None if it is not. - LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (text) if there is a match found, and None if there is no matching Project. - GetCurrentProject() --> Project # Returns the currently loaded Resolve project. - SaveProject() --> Bool # Saves the currently loaded project with its own name. Returns True if successful. - CreateFolder(folderName) --> Bool # Creates a folder if folderName (text) is unique. - GetProjectsInCurrentFolder() --> [project names...] # Returns an array of project names in current folder. - GetFoldersInCurrentFolder() --> [folder names...] # Returns an array of folder names in current folder. - GotoRootFolder() --> Bool # Opens root folder in database. - GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent. - OpenFolder(folderName) --> Bool # Opens folder under given name. - ImportProject(filePath) --> Bool # Imports a project under given file path. Returns true in case of success. - ExportProject(projectName, filePath) --> Bool # Exports a project based on given name into provided file path. Returns true in case of success. - RestoreProject(filePath) --> Bool # Restores a project under given backup file path. Returns true in case of success. -Project - GetMediaPool() --> MediaPool # Returns the Media Pool object. - GetTimelineCount() --> int # Returns the number of timelines currently present in the project. - GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount() - GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline. 
- SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful. - GetName() --> string # Returns project name. - SetName(projectName) --> Bool # Sets project name if given projectname (text) is unique. - GetPresets() --> [presets...] # Returns a table of presets and their information. - SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project. - GetRenderJobs() --> [render jobs...] # Returns a table of render jobs and their information. - GetRenderPresets() --> [presets...] # Returns a table of render presets and their information. - StartRendering(index1, index2, ...) --> Bool # Starts rendering for given render jobs based on their indices. If no parameter is given rendering would start for all render jobs. - StartRendering([idxs...]) --> Bool # Starts rendering for given render jobs based on their indices. If no parameter is given rendering would start for all render jobs. - StopRendering() --> None # Stops rendering for all render jobs. - IsRenderingInProgress() --> Bool # Returns true is rendering is in progress. - AddRenderJob() --> Bool # Adds render job to render queue. - DeleteRenderJobByIndex(idx) --> Bool # Deletes render job based on given job index (int). - DeleteAllRenderJobs() --> Bool # Deletes all render jobs. - LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (text) exists. - SaveAsNewRenderPreset(presetName) --> Bool # Creates a new render preset by given name if presetName(text) is unique. - SetRenderSettings([settings map]) --> Bool # Sets given settings for rendering. Settings map is a map, keys of map are: "SelectAllFrames", "MarkIn", "MarkOut", "TargetDir", "CustomName". - GetRenderJobStatus(idx) --> [status info] # Returns job status and completion rendering percentage of the job by given job index (int). - GetSetting(settingName) --> string # Returns setting value by given settingName (string) if the setting exist. With empty settingName the function returns a full list of settings. - SetSetting(settingName, settingValue) --> Bool # Sets project setting base on given name (string) and value (string). - GetRenderFormats() --> [render formats...]# Returns a list of available render formats. - GetRenderCodecs(renderFormat) --> [render codecs...] # Returns a list of available codecs for given render format (string). - GetCurrentRenderFormatAndCodec() --> [format, codec] # Returns currently selected render format and render codec. - SetCurrentRenderFormatAndCodec(format, codec) --> Bool # Sets given render format (string) and render codec (string) as options for rendering. -MediaStorage - GetMountedVolumes() --> [paths...] # Returns an array of folder paths corresponding to mounted volumes displayed in Resolve’s Media Storage. - GetSubFolders(folderPath) --> [paths...] # Returns an array of folder paths in the given absolute folder path. - GetFiles(folderPath) --> [paths...] # Returns an array of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries. - RevealInStorage(path) --> None # Expands and displays a given file/folder path in Resolve’s Media Storage. - AddItemsToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Store into current Media Pool folder. Input is one or more file/folder paths. - AddItemsToMediaPool([items...]) --> [clips...] 
# Adds specified file/folder paths from Media Store into current Media Pool folder. Input is an array of file/folder paths. -MediaPool - GetRootFolder() --> Folder # Returns the root Folder of Media Pool - AddSubFolder(folder, name) --> Folder # Adds a new subfolder under specified Folder object with the given name. - CreateEmptyTimeline(name) --> Timeline # Adds a new timeline with given name. - AppendToTimeline(clip1, clip2...) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful. - AppendToTimeline([clips]) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful. - CreateTimelineFromClips(name, clip1, clip2, ...)--> Timeline # Creates a new timeline with specified name, and appends the specified MediaPoolItem objects. - CreateTimelineFromClips(name, [clips]) --> Timeline # Creates a new timeline with specified name, and appends the specified MediaPoolItem objects. - ImportTimelineFromFile(filePath) --> Timeline # Creates timeline based on parameters within given file. - GetCurrentFolder() --> Folder # Returns currently selected Folder. - SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder. -Folder - GetClips() --> [clips...] # Returns a list of clips (items) within the folder. - GetName() --> string # Returns user-defined name of the folder. - GetSubFolders() --> [folders...] # Returns a list of subfolders in the folder. -MediaPoolItem - GetMetadata(metadataType) --> [[types],[values]] # Returns a value of metadataType. If parameter is not specified returns all set metadata parameters. - SetMetadata(metadataType, metadataValue) --> Bool # Sets metadata by given type and value. Returns True if successful. - GetMediaId() --> string # Returns a unique ID name related to MediaPoolItem. - AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information. - GetMarkers() --> [markers...] # Returns a list of all markers and their information. - AddFlag(color) --> Bool # Adds a flag with given color (text). - GetFlags() --> [colors...] # Returns a list of flag colors assigned to the item. - GetClipColor() --> string # Returns an item color as a string. - GetClipProperty(propertyName) --> [[types],[values]] # Returns property value related to the item based on given propertyName (string). if propertyName is empty then it returns a full list of properties. - SetClipProperty(propertyName, propertyValue) --> Bool # Sets into given propertyName (string) propertyValue (string). -Timeline - GetName() --> string # Returns user-defined name of the timeline. - SetName(timelineName) --> Bool # Sets timeline name is timelineName (text) is unique. - GetStartFrame() --> int # Returns frame number at the start of timeline. - GetEndFrame() --> int # Returns frame number at the end of timeline. - GetTrackCount(trackType) --> int # Returns a number of track based on specified track type ("audio", "video" or "subtitle"). - GetItemsInTrack(trackType, index) --> [items...] # Returns an array of Timeline items on the video or audio track (based on trackType) at specified index. 1 <= index <= GetTrackCount(trackType). - AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information. - GetMarkers() --> [markers...] # Returns a list of all markers and their information. 
- ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned". - ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned". - GetCurrentTimecode() --> string # Returns a string representing a timecode for current position of the timeline, while on Cut, Edit, Color and Deliver page. - GetCurrentVideoItem() --> item # Returns current video timeline item. - GetCurrentClipThumbnailImage() --> [width, height, format, data] # Returns raw thumbnail image data (This image data is encoded in base 64 format and the image format is RGB 8 bit) for the current media in the Color Page in the format of dictionary (in Python) and table (in Lua). Information return are "width", "height", "format" and "data". Example is provided in 6_get_current_media_thumbnail.py in Example folder. -TimelineItem - GetName() --> string # Returns a name of the item. - GetDuration() --> int # Returns a duration of item. - GetEnd() --> int # Returns a position of end frame. - GetFusionCompCount() --> int # Returns the number of Fusion compositions associated with the timeline item. - GetFusionCompByIndex(compIndex) --> fusionComp # Returns Fusion composition object based on given index. 1 <= compIndex <= timelineItem.GetFusionCompCount() - GetFusionCompNames() --> [names...] # Returns a list of Fusion composition names associated with the timeline item. - GetFusionCompByName(compName) --> fusionComp # Returns Fusion composition object based on given name. - GetLeftOffset() --> int # Returns a maximum extension by frame for clip from left side. - GetRightOffset() --> int # Returns a maximum extension by frame for clip from right side. - GetStart() --> int # Returns a position of first frame. - AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information. - GetMarkers() --> [markers...] # Returns a list of all markers and their information. - GetFlags() --> [colors...] # Returns a list of flag colors assigned to the item. - GetClipColor() --> string # Returns an item color as a string. - AddFusionComp() --> fusionComp # Adds a new Fusion composition associated with the timeline item. - ImportFusionComp(path) --> fusionComp # Imports Fusion composition from given file path by creating and adding a new composition for the item. - ExportFusionComp(path, compIndex) --> Bool # Exports Fusion composition based on given index into provided file name path. - DeleteFusionCompByName(compName) --> Bool # Deletes Fusion composition by provided name. - LoadFusionCompByName(compName) --> fusionComp # Loads Fusion composition by provided name and sets it as active composition. - RenameFusionCompByName(oldName, newName) --> Bool # Renames Fusion composition by provided name with new given name. - AddVersion(versionName, versionType) --> Bool # Adds a new Version associated with the timeline item. versionType: 0 - local, 1 - remote. - DeleteVersionByName(versionName, versionType) --> Bool # Deletes Version by provided name. versionType: 0 - local, 1 - remote. - LoadVersionByName(versionName, versionType) --> Bool # Loads Version by provided name and sets it as active Version. versionType: 0 - local, 1 - remote. 
- RenameVersionByName(oldName, newName, versionType)--> Bool # Renames Version by provided name with new given name. versionType: 0 - local, 1 - remote. - GetMediaPoolItem() --> MediaPoolItem # Returns a corresponding to the timeline item media pool item if it exists. - GetVersionNames(versionType) --> [strings...] # Returns a list of version names by provided versionType: 0 - local, 1 - remote. - GetStereoConvergenceValues() --> [offset, value] # Returns a table of keyframe offsets and respective convergence values - GetStereoLeftFloatingWindowParams() --> [offset, value] # For the LEFT eye -> returns a table of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values - GetStereoRightFloatingWindowParams() --> [offset, value] # For the RIGHT eye -> returns a table of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values diff --git a/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_up.txt b/openpype/hosts/resolve/RESOLVE_API_v18.0.4.txt similarity index 70% rename from openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_up.txt rename to openpype/hosts/resolve/RESOLVE_API_v18.0.4.txt index f1b8b81a71..98597a12cb 100644 --- a/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_up.txt +++ b/openpype/hosts/resolve/RESOLVE_API_v18.0.4.txt @@ -1,5 +1,5 @@ -Updated as of 20 October 2020 ------------------------------ +Updated as of 9 May 2022 +---------------------------- In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import modules for scripting access (DaVinciResolve.py) and some representative examples. @@ -89,12 +89,25 @@ Resolve Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts. GetMediaStorage() --> MediaStorage # Returns the media storage object to query and act on media locations. GetProjectManager() --> ProjectManager # Returns the project manager object for currently open database. - OpenPage(pageName) --> None # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver"). + OpenPage(pageName) --> Bool # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver"). + GetCurrentPage() --> String # Returns the page currently displayed in the main window. Returned value can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver", None). GetProductName() --> string # Returns product name. GetVersion() --> [version fields] # Returns list of product version fields in [major, minor, patch, build, suffix] format. GetVersionString() --> string # Returns product version in "major.minor.patch[suffix].build" format. + LoadLayoutPreset(presetName) --> Bool # Loads UI layout from saved preset named 'presetName'. + UpdateLayoutPreset(presetName) --> Bool # Overwrites preset named 'presetName' with current UI layout. + ExportLayoutPreset(presetName, presetFilePath) --> Bool # Exports preset named 'presetName' to path 'presetFilePath'. + DeleteLayoutPreset(presetName) --> Bool # Deletes preset named 'presetName'. + SaveLayoutPreset(presetName) --> Bool # Saves current UI layout as a preset named 'presetName'. 
+ ImportLayoutPreset(presetFilePath, presetName) --> Bool # Imports preset from path 'presetFilePath'. The optional argument 'presetName' specifies how the preset shall be named. If not specified, the preset is named based on the filename. + Quit() --> None # Quits the Resolve App. ProjectManager + ArchiveProject(projectName, + filePath, + isArchiveSrcMedia=True, + isArchiveRenderCache=True, + isArchiveProxyMedia=False) --> Bool # Archives project to provided file path with the configuration as provided by the optional arguments CreateProject(projectName) --> Project # Creates and returns a project if projectName (string) is unique, and None if it is not. DeleteProject(projectName) --> Bool # Delete project in the current folder if not currently loaded LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (string) if there is a match found, and None if there is no matching Project. @@ -109,9 +122,9 @@ ProjectManager GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent. GetCurrentFolder() --> string # Returns the current folder name. OpenFolder(folderName) --> Bool # Opens folder under given name. - ImportProject(filePath) --> Bool # Imports a project from the file path provided. Returns True if successful. + ImportProject(filePath, projectName=None) --> Bool # Imports a project from the file path provided with given project name, if any. Returns True if successful. ExportProject(projectName, filePath, withStillsAndLUTs=True) --> Bool # Exports project to provided file path, including stills and LUTs if withStillsAndLUTs is True (enabled by default). Returns True in case of success. - RestoreProject(filePath) --> Bool # Restores a project from the file path provided. Returns True if successful. + RestoreProject(filePath, projectName=None) --> Bool # Restores a project from the file path provided with given project name, if any. Returns True if successful. GetCurrentDatabase() --> {dbInfo} # Returns a dictionary (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to the current database connection GetDatabaseList() --> [{dbInfo}] # Returns a list of dictionary items (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to all the databases added to Resolve SetCurrentDatabase({dbInfo}) --> Bool # Switches current database connection to the database specified by the keys below, and closes any open project. @@ -125,8 +138,9 @@ Project GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount() GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline. SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful. + GetGallery() --> Gallery # Returns the Gallery object. GetName() --> string # Returns project name. - SetName(projectName) --> Bool # Sets project name if given projectname (string) is unique. + SetName(projectName) --> Bool # Sets project name if given projectName (string) is unique. GetPresetList() --> [presets...] # Returns a list of presets and their information. SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project. AddRenderJob() --> string # Adds a render job based on current render settings to the render queue. Returns a unique job id (string) for the new render job. 
@@ -144,27 +158,7 @@ Project LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (string) exists. SaveAsNewRenderPreset(presetName) --> Bool # Creates new render preset by given name if presetName(string) is unique. SetRenderSettings({settings}) --> Bool # Sets given settings for rendering. Settings is a dict, with support for the keys: - # "SelectAllFrames": Bool - # "MarkIn": int - # "MarkOut": int - # "TargetDir": string - # "CustomName": string - # "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix. - # "ExportVideo": Bool - # "ExportAudio": Bool - # "FormatWidth": int - # "FormatHeight": int - # "FrameRate": float (examples: 23.976, 24) - # "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope") - # "VideoQuality" possible values for current codec (if applicable): - # 0 (int) - will set quality to automatic - # [1 -> MAX] (int) - will set input bit rate - # ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level - # "AudioCodec": string (example: "aac") - # "AudioBitDepth": int - # "AudioSampleRate": int - # "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign") - # "GammaTag" : string (example: "Same as Project", "ACEScct") + # Refer to "Looking up render settings" section for information for supported settings GetRenderJobStatus(jobId) --> {status info} # Returns a dict with job status and completion percentage of the job by given jobId (string). GetSetting(settingName) --> string # Returns value of project setting (indicated by settingName, string). Check the section below for more information. SetSetting(settingName, settingValue) --> Bool # Sets the project setting (indicated by settingName, string) to the value (settingValue, string). Check the section below for more information. @@ -176,12 +170,13 @@ Project SetCurrentRenderMode(renderMode) --> Bool # Sets the render mode. Specify renderMode = 0 for Individual clips, 1 for Single clip. GetRenderResolutions(format, codec) --> [{Resolution}] # Returns list of resolutions applicable for the given render format (string) and render codec (string). Returns full list of resolutions if no argument is provided. Each element in the list is a dictionary with 2 keys "Width" and "Height". RefreshLUTList() --> Bool # Refreshes LUT List + GetUniqueId() --> string # Returns a unique ID for the project item MediaStorage GetMountedVolumeList() --> [paths...] # Returns list of folder paths corresponding to mounted volumes displayed in Resolve’s Media Storage. GetSubFolderList(folderPath) --> [paths...] # Returns list of folder paths in the given absolute folder path. GetFileList(folderPath) --> [paths...] # Returns list of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries. - RevealInStorage(path) --> None # Expands and displays given file/folder path in Resolve’s Media Storage. + RevealInStorage(path) --> Bool # Expands and displays given file/folder path in Resolve’s Media Storage. AddItemListToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is one or more file/folder paths. Returns a list of the MediaPoolItems created. AddItemListToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created. 
AddClipMattesToMediaPool(MediaPoolItem, [paths], stereoEye) --> Bool # Adds specified media files as mattes for the specified MediaPoolItem. StereoEye is an optional argument for specifying which eye to add the matte to for stereo clips ("left" or "right"). Returns True if successful. @@ -190,10 +185,11 @@ MediaStorage MediaPool GetRootFolder() --> Folder # Returns root Folder of Media Pool AddSubFolder(folder, name) --> Folder # Adds new subfolder under specified Folder object with the given name. + RefreshFolders() --> Bool # Updates the folders in collaboration mode CreateEmptyTimeline(name) --> Timeline # Adds new timeline with given name. - AppendToTimeline(clip1, clip2, ...) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful. - AppendToTimeline([clips]) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful. - AppendToTimeline([{clipInfo}, ...]) --> Bool # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int). + AppendToTimeline(clip1, clip2, ...) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems. + AppendToTimeline([clips]) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems. + AppendToTimeline([{clipInfo}, ...]) --> [TimelineItem] # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), (optional) "mediaType" (int; 1 - Video only, 2 - Audio only). Returns the list of appended timelineItems. CreateTimelineFromClips(name, clip1, clip2,...) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects. CreateTimelineFromClips(name, [clips]) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects. CreateTimelineFromClips(name, [{clipInfo}]) --> Timeline # Creates new timeline with specified name, appending the list of clipInfos specified as a dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int). @@ -202,6 +198,8 @@ MediaPool # "importSourceClips": Bool, specifies whether source clips should be imported, True by default # "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "importSourceClips" is True # "sourceClipsFolders": List of Media Pool folder objects to search for source clips if the media is not present in current folder and if "importSourceClips" is False + # "interlaceProcessing": Bool, specifies whether to enable interlace processing on the imported timeline being created. valid only for AAF import + DeleteTimelines([timeline]) --> Bool # Deletes specified timelines in the media pool. GetCurrentFolder() --> Folder # Returns currently selected Folder. SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder. DeleteClips([clips]) --> Bool # Deletes specified clips or timeline mattes in the media pool @@ -214,19 +212,26 @@ MediaPool RelinkClips([MediaPoolItem], folderPath) --> Bool # Update the folder location of specified media pool clips with the specified folder path. UnlinkClips([MediaPoolItem]) --> Bool # Unlink specified media pool clips. ImportMedia([items...]) --> [MediaPoolItems] # Imports specified file/folder paths into current Media Pool folder. Input is an array of file/folder paths. 
Returns a list of the MediaPoolItems created. + ImportMedia([{clipInfo}]) --> [MediaPoolItems] # Imports file path(s) into current Media Pool folder as specified in list of clipInfo dict. Returns a list of the MediaPoolItems created. + # Each clipInfo gets imported as one MediaPoolItem unless 'Show Individual Frames' is turned on. + # Example: ImportMedia([{"FilePath":"file_%03d.dpx", "StartIndex":1, "EndIndex":100}]) would import clip "file_[001-100].dpx". ExportMetadata(fileName, [clips]) --> Bool # Exports metadata of specified clips to 'fileName' in CSV format. # If no clips are specified, all clips from media pool will be used. + GetUniqueId() --> string # Returns a unique ID for the media pool Folder GetClipList() --> [clips...] # Returns a list of clips (items) within the folder. GetName() --> string # Returns the media folder name. GetSubFolderList() --> [folders...] # Returns a list of subfolders in the folder. + GetIsFolderStale() --> bool # Returns true if folder is stale in collaboration mode, false otherwise + GetUniqueId() --> string # Returns a unique ID for the media pool folder MediaPoolItem GetName() --> string # Returns the clip name. GetMetadata(metadataType=None) --> string|dict # Returns the metadata value for the key 'metadataType'. # If no argument is specified, a dict of all set metadata properties is returned. SetMetadata(metadataType, metadataValue) --> Bool # Sets the given metadata to metadataValue (string). Returns True if successful. + SetMetadata({metadata}) --> Bool # Sets the item metadata with specified 'metadata' dict. Returns True if successful. GetMediaId() --> string # Returns the unique ID for the MediaPoolItem. AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker. customData) @@ -248,15 +253,18 @@ MediaPoolItem GetClipProperty(propertyName=None) --> string|dict # Returns the property value for the key 'propertyName'. # If no argument is specified, a dict of all clip properties is returned. Check the section below for more information. SetClipProperty(propertyName, propertyValue) --> Bool # Sets the given property to propertyValue (string). Check the section below for more information. - LinkProxyMedia(propertyName) --> Bool # Links proxy media (absolute path) with the current clip. + LinkProxyMedia(proxyMediaFilePath) --> Bool # Links proxy media located at path specified by arg 'proxyMediaFilePath' with the current clip. 'proxyMediaFilePath' should be absolute clip path. UnlinkProxyMedia() --> Bool # Unlinks any proxy media associated with clip. ReplaceClip(filePath) --> Bool # Replaces the underlying asset and metadata of MediaPoolItem with the specified absolute clip path. + GetUniqueId() --> string # Returns a unique ID for the media pool item Timeline GetName() --> string # Returns the timeline name. SetName(timelineName) --> Bool # Sets the timeline name if timelineName (string) is unique. Returns True if successful. GetStartFrame() --> int # Returns the frame number at the start of timeline. GetEndFrame() --> int # Returns the frame number at the end of timeline. + SetStartTimecode(timecode) --> Bool # Set the start timecode of the timeline to the string 'timecode'. Returns true when the change is successful, false otherwise. + GetStartTimecode() --> string # Returns the start timecode for the timeline. 
GetTrackCount(trackType) --> int # Returns the number of tracks for the given track type ("audio", "video" or "subtitle"). GetItemListInTrack(trackType, index) --> [items...] # Returns a list of timeline items on that track (based on trackType and index). 1 <= index <= GetTrackCount(trackType). AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker. @@ -271,7 +279,8 @@ Timeline DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData. ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned". ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned". - GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color and Deliver pages. + GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color, Fairlight and Deliver pages. + SetCurrentTimecode(timecode) --> Bool # Sets current playhead position from input timecode for Cut, Edit, Color, Fairlight and Deliver pages. GetCurrentVideoItem() --> item # Returns the current video timeline item. GetCurrentClipThumbnailImage() --> {thumbnailData} # Returns a dict (keys "width", "height", "format" and "data") with data containing raw thumbnail image data (RGB 8-bit image data encoded in base64 format) for current media in the Color Page. # An example of how to retrieve and interpret thumbnails is provided in 6_get_current_media_thumbnail.py in the Examples folder. @@ -280,37 +289,30 @@ Timeline DuplicateTimeline(timelineName) --> timeline # Duplicates the timeline and returns the created timeline, with the (optional) timelineName, on success. CreateCompoundClip([timelineItems], {clipInfo}) --> timelineItem # Creates a compound clip of input timeline items with an optional clipInfo map: {"startTimecode" : "00:00:00:00", "name" : "Compound Clip 1"}. It returns the created timeline item. CreateFusionClip([timelineItems]) --> timelineItem # Creates a Fusion clip of input timeline items. It returns the created timeline item. 
+ ImportIntoTimeline(filePath, {importOptions}) --> Bool # Imports timeline items from an AAF file and optional importOptions dict into the timeline, with support for the keys: + # "autoImportSourceClipsIntoMediaPool": Bool, specifies if source clips should be imported into media pool, True by default + # "ignoreFileExtensionsWhenMatching": Bool, specifies if file extensions should be ignored when matching, False by default + # "linkToSourceCameraFiles": Bool, specifies if link to source camera files should be enabled, False by default + # "useSizingInfo": Bool, specifies if sizing information should be used, False by default + # "importMultiChannelAudioTracksAsLinkedGroups": Bool, specifies if multi-channel audio tracks should be imported as linked groups, False by default + # "insertAdditionalTracks": Bool, specifies if additional tracks should be inserted, True by default + # "insertWithOffset": string, specifies insert with offset value in timecode format - defaults to "00:00:00:00", applicable if "insertAdditionalTracks" is False + # "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "ignoreFileExtensionsWhenMatching" is True + # "sourceClipsFolders": string, list of Media Pool folder objects to search for source clips if the media is not present in current folder + Export(fileName, exportType, exportSubtype) --> Bool # Exports timeline to 'fileName' as per input exportType & exportSubtype format. - # exportType can be one of the following constants: - # resolve.EXPORT_AAF - # resolve.EXPORT_DRT - # resolve.EXPORT_EDL - # resolve.EXPORT_FCP_7_XML - # resolve.EXPORT_FCPXML_1_3 - # resolve.EXPORT_FCPXML_1_4 - # resolve.EXPORT_FCPXML_1_5 - # resolve.EXPORT_FCPXML_1_6 - # resolve.EXPORT_FCPXML_1_7 - # resolve.EXPORT_FCPXML_1_8 - # resolve.EXPORT_HDR_10_PROFILE_A - # resolve.EXPORT_HDR_10_PROFILE_B - # resolve.EXPORT_TEXT_CSV - # resolve.EXPORT_TEXT_TAB - # resolve.EXPORT_DOLBY_VISION_VER_2_9 - # resolve.EXPORT_DOLBY_VISION_VER_4_0 - # exportSubtype can be one of the following enums: - # resolve.EXPORT_NONE - # resolve.EXPORT_AAF_NEW - # resolve.EXPORT_AAF_EXISTING - # resolve.EXPORT_CDL - # resolve.EXPORT_SDL - # resolve.EXPORT_MISSING_CLIPS - # Please note that exportSubType is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For rest of the exportType, exportSubtype is ignored. - # When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING. - # When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE. - # Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used. + # Refer to section "Looking up timeline exports properties" for information on the parameters. GetSetting(settingName) --> string # Returns value of timeline setting (indicated by settingName : string). Check the section below for more information. SetSetting(settingName, settingValue) --> Bool # Sets timeline setting (indicated by settingName : string) to the value (settingValue : string). Check the section below for more information. + InsertGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a generator (indicated by generatorName : string) into the timeline. 
+ InsertFusionGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a Fusion generator (indicated by generatorName : string) into the timeline. + InsertFusionCompositionIntoTimeline() --> TimelineItem # Inserts a Fusion composition into the timeline. + InsertOFXGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts an OFX generator (indicated by generatorName : string) into the timeline. + InsertTitleIntoTimeline(titleName) --> TimelineItem # Inserts a title (indicated by titleName : string) into the timeline. + InsertFusionTitleIntoTimeline(titleName) --> TimelineItem # Inserts a Fusion title (indicated by titleName : string) into the timeline. + GrabStill() --> galleryStill # Grabs still from the current video clip. Returns a GalleryStill object. + GrabAllStills(stillFrameSource) --> [galleryStill] # Grabs stills from all the clips of the timeline at 'stillFrameSource' (1 - First frame, 2 - Middle frame). Returns the list of GalleryStill objects. + GetUniqueId() --> string # Returns a unique ID for the timeline TimelineItem GetName() --> string # Returns the item name. @@ -323,6 +325,10 @@ TimelineItem GetLeftOffset() --> int # Returns the maximum extension by frame for clip from left side. GetRightOffset() --> int # Returns the maximum extension by frame for clip from right side. GetStart() --> int # Returns the start frame position on the timeline. + SetProperty(propertyKey, propertyValue) --> Bool # Sets the value of property "propertyKey" to value "propertyValue" + # Refer to "Looking up Timeline item properties" for more information + GetProperty(propertyKey) --> int/[key:value] # returns the value of the specified key + # if no key is specified, the method returns a dictionary(python) or table(lua) for all supported keys AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker. customData) GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information. @@ -345,7 +351,8 @@ TimelineItem DeleteFusionCompByName(compName) --> Bool # Deletes the named Fusion composition. LoadFusionCompByName(compName) --> fusionComp # Loads the named Fusion composition as the active composition. RenameFusionCompByName(oldName, newName) --> Bool # Renames the Fusion composition identified by oldName. - AddVersion(versionName, versionType) --> Bool # Adds a new color version for a video clipbased on versionType (0 - local, 1 - remote). + AddVersion(versionName, versionType) --> Bool # Adds a new color version for a video clip based on versionType (0 - local, 1 - remote). + GetCurrentVersion() --> {versionName...} # Returns the current version of the video clip. The returned value will have the keys versionName and versionType(0 - local, 1 - remote). DeleteVersionByName(versionName, versionType) --> Bool # Deletes a color version by name and versionType (0 - local, 1 - remote). LoadVersionByName(versionName, versionType) --> Bool # Loads a named color version as the active version. versionType: 0 - local, 1 - remote. RenameVersionByName(oldName, newName, versionType)--> Bool # Renames the color version identified by oldName and versionType (0 - local, 1 - remote). @@ -354,12 +361,14 @@ TimelineItem GetStereoConvergenceValues() --> {keyframes...} # Returns a dict (offset -> value) of keyframe offsets and respective convergence values. 
GetStereoLeftFloatingWindowParams() --> {keyframes...} # For the LEFT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values. GetStereoRightFloatingWindowParams() --> {keyframes...} # For the RIGHT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values. + GetNumNodes() --> int # Returns the number of nodes in the current graph for the timeline item SetLUT(nodeIndex, lutPath) --> Bool # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= total number of nodes. # The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path). # The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList). + GetLUT(nodeIndex) --> String # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes. SetCDL([CDL map]) --> Bool # Keys of map are: "NodeIndex", "Slope", "Offset", "Power", "Saturation", where 1 <= NodeIndex <= total number of nodes. # Example python code - SetCDL({"NodeIndex" : "1", "Slope" : "0.5 0.4 0.2", "Offset" : "0.4 0.3 0.2", "Power" : "0.6 0.7 0.8", "Saturation" : "0.65"}) - AddTake(mediaPoolItem, startFrame=0, endFrame)=0 --> Bool # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the whole clip is added. startFrame and endFrame can be specified as extents. + AddTake(mediaPoolItem, startFrame, endFrame) --> Bool # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the full clip extents is added. startFrame (int) and endFrame (int) are optional arguments used to specify the extents. GetSelectedTakeIndex() --> int # Returns the index of the currently selected take, or 0 if the clip is not a take selector. GetTakesCount() --> int # Returns the number of takes in take selector, or 0 if the clip is not a take selector. GetTakeByIndex(idx) --> {takeInfo...} # Returns a dict (keys "startFrame", "endFrame" and "mediaPoolItem") with take info for specified index. @@ -367,7 +376,24 @@ TimelineItem SelectTakeByIndex(idx) --> Bool # Selects a take by index, 1 <= idx <= number of takes. FinalizeTake() --> Bool # Finalizes take selection. CopyGrades([tgtTimelineItems]) --> Bool # Copies the current grade to all the items in tgtTimelineItems list. Returns True on success and False if any error occurred. + UpdateSidecar() --> Bool # Updates sidecar file for BRAW clips or RMD file for R3D clips. + GetUniqueId() --> string # Returns a unique ID for the timeline item +Gallery + GetAlbumName(galleryStillAlbum) --> string # Returns the name of the GalleryStillAlbum object 'galleryStillAlbum'. + SetAlbumName(galleryStillAlbum, albumName) --> Bool # Sets the name of the GalleryStillAlbum object 'galleryStillAlbum' to 'albumName'. + GetCurrentStillAlbum() --> galleryStillAlbum # Returns current album as a GalleryStillAlbum object. + SetCurrentStillAlbum(galleryStillAlbum) --> Bool # Sets current album to GalleryStillAlbum object 'galleryStillAlbum'. + GetGalleryStillAlbums() --> [galleryStillAlbum] # Returns the gallery albums as a list of GalleryStillAlbum objects. + +GalleryStillAlbum + GetStills() --> [galleryStill] # Returns the list of GalleryStill objects in the album. 
+ GetLabel(galleryStill) --> string # Returns the label of the galleryStill. + SetLabel(galleryStill, label) --> Bool # Sets the new 'label' to GalleryStill object 'galleryStill'. + ExportStills([galleryStill], folderPath, filePrefix, format) --> Bool # Exports list of GalleryStill objects '[galleryStill]' to directory 'folderPath', with filename prefix 'filePrefix', using file format 'format' (supported formats: dpx, cin, tif, jpg, png, ppm, bmp, xpm). + DeleteStills([galleryStill]) --> Bool # Deletes specified list of GalleryStill objects '[galleryStill]'. + +GalleryStill # This class does not provide any API functions but the object type is used by functions in other classes. List and Dict Data Structures ----------------------------- @@ -375,7 +401,6 @@ Beside primitive data types, Resolve's Python API mainly uses list and dict data As Lua does not support list and dict data structures, the Lua API implements "list" as a table with indices, e.g. { [1] = listValue1, [2] = listValue2, ... }. Similarly the Lua API implements "dict" as a table with the dictionary key as first element, e.g. { [dictKey1] = dictValue1, [dictKey2] = dictValue2, ... }. - Looking up Project and Clip properties -------------------------------------- This section covers additional notes for the functions "Project:GetSetting", "Project:SetSetting", "Timeline:GetSetting", "Timeline:SetSetting", "MediaPoolItem:GetClipProperty" and @@ -412,6 +437,179 @@ Affects: • x = MediaPoolItem:GetClipProperty('Super Scale') and MediaPoolItem:SetClipProperty('Super Scale', x) +Looking up Render Settings +-------------------------- +This section covers the supported settings for the method SetRenderSettings({settings}) + +The parameter setting is a dictionary containing the following keys: + - "SelectAllFrames": Bool (when set True, the settings MarkIn and MarkOut are ignored) + - "MarkIn": int + - "MarkOut": int + - "TargetDir": string + - "CustomName": string + - "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix. + - "ExportVideo": Bool + - "ExportAudio": Bool + - "FormatWidth": int + - "FormatHeight": int + - "FrameRate": float (examples: 23.976, 24) + - "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope") + - "VideoQuality" possible values for current codec (if applicable): + - 0 (int) - will set quality to automatic + - [1 -> MAX] (int) - will set input bit rate + - ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level + - "AudioCodec": string (example: "aac") + - "AudioBitDepth": int + - "AudioSampleRate": int + - "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign") + - "GammaTag" : string (example: "Same as Project", "ACEScct") + - "ExportAlpha": Bool + - "EncodingProfile": string (example: "Main10"). Can only be set for H.264 and H.265. + - "MultiPassEncode": Bool. Can only be set for H.264. + - "AlphaMode": 0 - Premultiplied, 1 - Straight. Can only be set if "ExportAlpha" is true. + - "NetworkOptimization": Bool. Only supported by QuickTime and MP4 formats. + +Looking up timeline export properties +------------------------------------- +This section covers the parameters for the argument Export(fileName, exportType, exportSubtype). 
+ +exportType can be one of the following constants: + - resolve.EXPORT_AAF + - resolve.EXPORT_DRT + - resolve.EXPORT_EDL + - resolve.EXPORT_FCP_7_XML + - resolve.EXPORT_FCPXML_1_3 + - resolve.EXPORT_FCPXML_1_4 + - resolve.EXPORT_FCPXML_1_5 + - resolve.EXPORT_FCPXML_1_6 + - resolve.EXPORT_FCPXML_1_7 + - resolve.EXPORT_FCPXML_1_8 + - resolve.EXPORT_FCPXML_1_9 + - resolve.EXPORT_FCPXML_1_10 + - resolve.EXPORT_HDR_10_PROFILE_A + - resolve.EXPORT_HDR_10_PROFILE_B + - resolve.EXPORT_TEXT_CSV + - resolve.EXPORT_TEXT_TAB + - resolve.EXPORT_DOLBY_VISION_VER_2_9 + - resolve.EXPORT_DOLBY_VISION_VER_4_0 +exportSubtype can be one of the following enums: + - resolve.EXPORT_NONE + - resolve.EXPORT_AAF_NEW + - resolve.EXPORT_AAF_EXISTING + - resolve.EXPORT_CDL + - resolve.EXPORT_SDL + - resolve.EXPORT_MISSING_CLIPS +Please note that exportSubType is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For rest of the exportType, exportSubtype is ignored. +When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING. +When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE. +Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used. + +Looking up Timeline item properties +----------------------------------- +This section covers additional notes for the function "TimelineItem:SetProperty" and "TimelineItem:GetProperty". These functions are used to get and set properties mentioned. + +The supported keys with their accepted values are: + "Pan" : floating point values from -4.0*width to 4.0*width + "Tilt" : floating point values from -4.0*height to 4.0*height + "ZoomX" : floating point values from 0.0 to 100.0 + "ZoomY" : floating point values from 0.0 to 100.0 + "ZoomGang" : a boolean value + "RotationAngle" : floating point values from -360.0 to 360.0 + "AnchorPointX" : floating point values from -4.0*width to 4.0*width + "AnchorPointY" : floating point values from -4.0*height to 4.0*height + "Pitch" : floating point values from -1.5 to 1.5 + "Yaw" : floating point values from -1.5 to 1.5 + "FlipX" : boolean value for flipping horizontally + "FlipY" : boolean value for flipping vertically + "CropLeft" : floating point values from 0.0 to width + "CropRight" : floating point values from 0.0 to width + "CropTop" : floating point values from 0.0 to height + "CropBottom" : floating point values from 0.0 to height + "CropSoftness" : floating point values from -100.0 to 100.0 + "CropRetain" : boolean value for "Retain Image Position" checkbox + "DynamicZoomEase" : A value from the following constants + - DYNAMIC_ZOOM_EASE_LINEAR = 0 + - DYNAMIC_ZOOM_EASE_IN + - DYNAMIC_ZOOM_EASE_OUT + - DYNAMIC_ZOOM_EASE_IN_AND_OUT + "CompositeMode" : A value from the following constants + - COMPOSITE_NORMAL = 0 + - COMPOSITE_ADD + - COMPOSITE_SUBTRACT + - COMPOSITE_DIFF + - COMPOSITE_MULTIPLY + - COMPOSITE_SCREEN + - COMPOSITE_OVERLAY + - COMPOSITE_HARDLIGHT + - COMPOSITE_SOFTLIGHT + - COMPOSITE_DARKEN + - COMPOSITE_LIGHTEN + - COMPOSITE_COLOR_DODGE + - COMPOSITE_COLOR_BURN + - COMPOSITE_EXCLUSION + - COMPOSITE_HUE + - COMPOSITE_SATURATE + - COMPOSITE_COLORIZE + - COMPOSITE_LUMA_MASK + - COMPOSITE_DIVIDE + - COMPOSITE_LINEAR_DODGE + - COMPOSITE_LINEAR_BURN + - COMPOSITE_LINEAR_LIGHT + - COMPOSITE_VIVID_LIGHT + - COMPOSITE_PIN_LIGHT + - COMPOSITE_HARD_MIX + - COMPOSITE_LIGHTER_COLOR + - 
COMPOSITE_DARKER_COLOR + - COMPOSITE_FOREGROUND + - COMPOSITE_ALPHA + - COMPOSITE_INVERTED_ALPHA + - COMPOSITE_LUM + - COMPOSITE_INVERTED_LUM + "Opacity" : floating point value from 0.0 to 100.0 + "Distortion" : floating point value from -1.0 to 1.0 + "RetimeProcess" : A value from the following constants + - RETIME_USE_PROJECT = 0 + - RETIME_NEAREST + - RETIME_FRAME_BLEND + - RETIME_OPTICAL_FLOW + "MotionEstimation" : A value from the following constants + - MOTION_EST_USE_PROJECT = 0 + - MOTION_EST_STANDARD_FASTER + - MOTION_EST_STANDARD_BETTER + - MOTION_EST_ENHANCED_FASTER + - MOTION_EST_ENHANCED_BETTER + - MOTION_EST_SPEED_WRAP + "Scaling" : A value from the following constants + - SCALE_USE_PROJECT = 0 + - SCALE_CROP + - SCALE_FIT + - SCALE_FILL + - SCALE_STRETCH + "ResizeFilter" : A value from the following constants + - RESIZE_FILTER_USE_PROJECT = 0 + - RESIZE_FILTER_SHARPER + - RESIZE_FILTER_SMOOTHER + - RESIZE_FILTER_BICUBIC + - RESIZE_FILTER_BILINEAR + - RESIZE_FILTER_BESSEL + - RESIZE_FILTER_BOX + - RESIZE_FILTER_CATMULL_ROM + - RESIZE_FILTER_CUBIC + - RESIZE_FILTER_GAUSSIAN + - RESIZE_FILTER_LANCZOS + - RESIZE_FILTER_MITCHELL + - RESIZE_FILTER_NEAREST_NEIGHBOR + - RESIZE_FILTER_QUADRATIC + - RESIZE_FILTER_SINC + - RESIZE_FILTER_LINEAR +Values beyond the range will be clipped +width and height are same as the UI max limits + +The arguments can be passed as a key and value pair or they can be grouped together into a dictionary (for python) or table (for lua) and passed +as a single argument. + +Getting the values for the keys that uses constants will return the number which is in the constant + Deprecated Resolve API Functions -------------------------------- The following API functions are deprecated. @@ -450,12 +648,12 @@ TimelineItem Unsupported Resolve API Functions --------------------------------- -The following API (functions and paraameters) are no longer supported. +The following API (functions and parameters) are no longer supported. Use job IDs instead of indices. Project StartRendering(index1, index2, ...) --> Bool # Please use unique job ids (string) instead of indices. StartRendering([idxs...]) --> Bool # Please use unique job ids (string) instead of indices. DeleteRenderJobByIndex(idx) --> Bool # Please use unique job ids (string) instead of indices. GetRenderJobStatus(idx) --> {status info} # Please use unique job ids (string) instead of indices. - GetSetting and SetSetting --> {} # settingName "videoMonitorUseRec601For422SDI" is no longer supported. - # Please use "videoMonitorUseMatrixOverrideFor422SDI" and "videoMonitorMatrixOverrideFor422SDI" instead. + GetSetting and SetSetting --> {} # settingName videoMonitorUseRec601For422SDI is now replaced with videoMonitorUseMatrixOverrideFor422SDI and videoMonitorMatrixOverrideFor422SDI. + # settingName perfProxyMediaOn is now replaced with perfProxyMediaMode which takes values 0 - disabled, 1 - when available, 2 - when source not available. 
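
The v18 additions above introduce page queries, layout presets and the Gallery objects. The indented sketch below is a minimal, illustrative use of a few of them, written in the same style as the example script at the top of this README; it assumes DaVinciResolveScript is importable, that a project with a timeline is open in Resolve Studio, and that the export folder, file prefix and render-setting values are placeholders to adjust:

    #!/usr/bin/env python
    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    project = resolve.GetProjectManager().GetCurrentProject()
    timeline = project.GetCurrentTimeline()

    # OpenPage() now returns a Bool and GetCurrentPage() reports the active page.
    if resolve.GetCurrentPage() != "color":
        resolve.OpenPage("color")

    # Grab a still from the current clip and export it through the Gallery objects.
    still = timeline.GrabStill()
    album = project.GetGallery().GetCurrentStillAlbum()
    album.ExportStills([still], "/tmp/stills", "timeline_still", "png")  # placeholder folder and prefix

    # The extended render setting keys (see "Looking up Render Settings") are plain dict entries.
    project.SetRenderSettings({"TargetDir": "/tmp/renders", "CustomName": "review", "ExportAlpha": False})

In Lua the same calls apply, with the dict arguments expressed as tables as described under "List and Dict Data Structures".
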
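Similarly, the TimelineItem SetProperty/GetProperty keys listed under "Looking up Timeline item properties" can be driven per clip. The following is a small sketch under the assumption that video track 1 holds at least one clip; the values are arbitrary but stay inside the documented ranges:

    #!/usr/bin/env python
    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()

    # Adjust a few documented keys on every clip of video track 1.
    for item in timeline.GetItemListInTrack("video", 1):
        item.SetProperty("ZoomX", 1.25)      # values outside the documented range are clipped
        item.SetProperty("ZoomY", 1.25)
        item.SetProperty("Opacity", 80.0)
        item.SetProperty("FlipX", True)
        print(item.GetName(), item.GetProperty("ZoomX"))

As noted above, the key/value pairs can also be grouped into a single dictionary (Python) or table (Lua) and passed as one argument.
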
diff --git a/openpype/hosts/resolve/__init__.py b/openpype/hosts/resolve/__init__.py index 3e49ce3b9b..b4a994bbaa 100644 --- a/openpype/hosts/resolve/__init__.py +++ b/openpype/hosts/resolve/__init__.py @@ -1,129 +1,6 @@ -from .api.utils import ( - setup, - get_resolve_module +from .addon import ResolveAddon + + +__all__ = ( + "ResolveAddon", ) - -from .api.pipeline import ( - install, - uninstall, - ls, - containerise, - update_container, - publish, - launch_workfiles_app, - maintained_selection, - remove_instance, - list_instances -) - -from .api.lib import ( - maintain_current_timeline, - publish_clip_color, - get_project_manager, - get_current_project, - get_current_timeline, - create_bin, - get_media_pool_item, - create_media_pool_item, - create_timeline_item, - get_timeline_item, - get_video_track_names, - get_current_timeline_items, - get_pype_timeline_item_by_name, - get_timeline_item_pype_tag, - set_timeline_item_pype_tag, - imprint, - set_publish_attribute, - get_publish_attribute, - create_compound_clip, - swap_clips, - get_pype_clip_metadata, - set_project_manager_to_folder_name, - get_otio_clip_instance_data, - get_reformated_path -) - -from .api.menu import launch_pype_menu - -from .api.plugin import ( - ClipLoader, - TimelineItemLoader, - Creator, - PublishClip -) - -from .api.workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root -) - -from .api.testing_utils import TestGUI - - -__all__ = [ - # pipeline - "install", - "uninstall", - "ls", - "containerise", - "update_container", - "reload_pipeline", - "publish", - "launch_workfiles_app", - "maintained_selection", - "remove_instance", - "list_instances", - - # utils - "setup", - "get_resolve_module", - - # lib - "maintain_current_timeline", - "publish_clip_color", - "get_project_manager", - "get_current_project", - "get_current_timeline", - "create_bin", - "get_media_pool_item", - "create_media_pool_item", - "create_timeline_item", - "get_timeline_item", - "get_video_track_names", - "get_current_timeline_items", - "get_pype_timeline_item_by_name", - "get_timeline_item_pype_tag", - "set_timeline_item_pype_tag", - "imprint", - "set_publish_attribute", - "get_publish_attribute", - "create_compound_clip", - "swap_clips", - "get_pype_clip_metadata", - "set_project_manager_to_folder_name", - "get_otio_clip_instance_data", - "get_reformated_path", - - # menu - "launch_pype_menu", - - # plugin - "ClipLoader", - "TimelineItemLoader", - "Creator", - "PublishClip", - - # workio - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "file_extensions", - "work_root", - - "TestGUI" -] diff --git a/openpype/hosts/resolve/addon.py b/openpype/hosts/resolve/addon.py new file mode 100644 index 0000000000..02c1d7957f --- /dev/null +++ b/openpype/hosts/resolve/addon.py @@ -0,0 +1,23 @@ +import os + +from openpype.modules import OpenPypeModule, IHostAddon + +from .utils import RESOLVE_ROOT_DIR + + +class ResolveAddon(OpenPypeModule, IHostAddon): + name = "resolve" + host_name = "resolve" + + def initialize(self, module_settings): + self.enabled = True + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(RESOLVE_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".drp"] diff --git a/openpype/hosts/resolve/api/__init__.py b/openpype/hosts/resolve/api/__init__.py index 48bd938e57..00a598548e 100644 --- a/openpype/hosts/resolve/api/__init__.py +++ b/openpype/hosts/resolve/api/__init__.py @@ 
-1,11 +1,136 @@ """ resolve api """ -import os +from .utils import ( + get_resolve_module +) + +from .pipeline import ( + install, + uninstall, + ls, + containerise, + update_container, + publish, + launch_workfiles_app, + maintained_selection, + remove_instance, + list_instances +) + +from .lib import ( + maintain_current_timeline, + publish_clip_color, + get_project_manager, + get_current_project, + get_current_timeline, + create_bin, + get_media_pool_item, + create_media_pool_item, + create_timeline_item, + get_timeline_item, + get_video_track_names, + get_current_timeline_items, + get_pype_timeline_item_by_name, + get_timeline_item_pype_tag, + set_timeline_item_pype_tag, + imprint, + set_publish_attribute, + get_publish_attribute, + create_compound_clip, + swap_clips, + get_pype_clip_metadata, + set_project_manager_to_folder_name, + get_otio_clip_instance_data, + get_reformated_path +) + +from .menu import launch_pype_menu + +from .plugin import ( + ClipLoader, + TimelineItemLoader, + Creator, + PublishClip +) + +from .workio import ( + open_file, + save_file, + current_file, + has_unsaved_changes, + file_extensions, + work_root +) + +from .testing_utils import TestGUI + bmdvr = None bmdvf = None -API_DIR = os.path.dirname(os.path.abspath(__file__)) -HOST_DIR = os.path.dirname(API_DIR) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +__all__ = [ + "bmdvr", + "bmdvf", + + # pipeline + "install", + "uninstall", + "ls", + "containerise", + "update_container", + "reload_pipeline", + "publish", + "launch_workfiles_app", + "maintained_selection", + "remove_instance", + "list_instances", + + # utils + "get_resolve_module", + + # lib + "maintain_current_timeline", + "publish_clip_color", + "get_project_manager", + "get_current_project", + "get_current_timeline", + "create_bin", + "get_media_pool_item", + "create_media_pool_item", + "create_timeline_item", + "get_timeline_item", + "get_video_track_names", + "get_current_timeline_items", + "get_pype_timeline_item_by_name", + "get_timeline_item_pype_tag", + "set_timeline_item_pype_tag", + "imprint", + "set_publish_attribute", + "get_publish_attribute", + "create_compound_clip", + "swap_clips", + "get_pype_clip_metadata", + "set_project_manager_to_folder_name", + "get_otio_clip_instance_data", + "get_reformated_path", + + # menu + "launch_pype_menu", + + # plugin + "ClipLoader", + "TimelineItemLoader", + "Creator", + "PublishClip", + + # workio + "open_file", + "save_file", + "current_file", + "has_unsaved_changes", + "file_extensions", + "work_root", + + "TestGUI" +] diff --git a/openpype/hosts/resolve/api/action.py b/openpype/hosts/resolve/api/action.py index f8f338a850..ceedc2cc54 100644 --- a/openpype/hosts/resolve/api/action.py +++ b/openpype/hosts/resolve/api/action.py @@ -4,7 +4,7 @@ from __future__ import absolute_import import pyblish.api -from ...action import get_errored_instances_from_context +from openpype.pipeline.publish import get_errored_instances_from_context class SelectInvalidAction(pyblish.api.Action): diff --git a/openpype/hosts/resolve/api/lib.py b/openpype/hosts/resolve/api/lib.py index 22f83c6eed..f41eb36caf 100644 --- a/openpype/hosts/resolve/api/lib.py +++ b/openpype/hosts/resolve/api/lib.py @@ -4,13 +4,13 @@ import re import os import contextlib from opentimelineio import opentime -import openpype + +from openpype.lib import Logger +from openpype.pipeline.editorial import is_overlapping_otio_ranges from ..otio import davinci_export as otio_export -from openpype.api import Logger - -log = Logger().get_logger(__name__) 
+log = Logger.get_logger(__name__) self = sys.modules[__name__] self.project_manager = None @@ -319,14 +319,13 @@ def get_current_timeline_items( selected_track_count = timeline.GetTrackCount(track_type) # loop all tracks and get items - _clips = dict() + _clips = {} for track_index in range(1, (int(selected_track_count) + 1)): _track_name = timeline.GetTrackName(track_type, track_index) # filter out all unmathed track names - if track_name: - if _track_name not in track_name: - continue + if track_name and _track_name not in track_name: + continue timeline_items = timeline.GetItemListInTrack( track_type, track_index) @@ -348,12 +347,8 @@ def get_current_timeline_items( "index": clip_index } ti_color = ti.GetClipColor() - if filter is True: - if selecting_color in ti_color: - selected_clips.append(data) - else: + if filter and selecting_color in ti_color or not filter: selected_clips.append(data) - return selected_clips @@ -824,7 +819,7 @@ def get_otio_clip_instance_data(otio_timeline, timeline_item_data): continue if otio_clip.name not in timeline_item.GetName(): continue - if openpype.lib.is_overlapping_otio_ranges( + if is_overlapping_otio_ranges( parent_range, timeline_range, strict=True): # add pypedata marker to otio_clip metadata diff --git a/openpype/hosts/resolve/api/menu.py b/openpype/hosts/resolve/api/menu.py index 9e0dd12376..86b292105a 100644 --- a/openpype/hosts/resolve/api/menu.py +++ b/openpype/hosts/resolve/api/menu.py @@ -3,13 +3,13 @@ import sys from Qt import QtWidgets, QtCore +from openpype.tools.utils import host_tools + from .pipeline import ( publish, launch_workfiles_app ) -from openpype.tools.utils import host_tools - def load_stylesheet(): path = os.path.join(os.path.dirname(__file__), "menu_style.qss") @@ -54,15 +54,15 @@ class OpenPypeMenu(QtWidgets.QWidget): ) self.setWindowTitle("OpenPype") - workfiles_btn = QtWidgets.QPushButton("Workfiles...", self) - create_btn = QtWidgets.QPushButton("Create...", self) - publish_btn = QtWidgets.QPushButton("Publish...", self) - load_btn = QtWidgets.QPushButton("Load...", self) - inventory_btn = QtWidgets.QPushButton("Inventory...", self) - subsetm_btn = QtWidgets.QPushButton("Subset Manager...", self) - libload_btn = QtWidgets.QPushButton("Library...", self) + workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self) + create_btn = QtWidgets.QPushButton("Create ...", self) + publish_btn = QtWidgets.QPushButton("Publish ...", self) + load_btn = QtWidgets.QPushButton("Load ...", self) + inventory_btn = QtWidgets.QPushButton("Manager ...", self) + subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self) + libload_btn = QtWidgets.QPushButton("Library ...", self) experimental_btn = QtWidgets.QPushButton( - "Experimental tools...", self + "Experimental tools ...", self ) # rename_btn = QtWidgets.QPushButton("Rename", self) # set_colorspace_btn = QtWidgets.QPushButton( diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py index 4a7d1c5bea..899cb825bb 100644 --- a/openpype/hosts/resolve/api/pipeline.py +++ b/openpype/hosts/resolve/api/pipeline.py @@ -7,7 +7,7 @@ from collections import OrderedDict from pyblish import api as pyblish -from openpype.api import Logger +from openpype.lib import Logger from openpype.pipeline import ( schema, register_loader_plugin_path, @@ -16,11 +16,15 @@ from openpype.pipeline import ( deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) -from . import lib -from . 
import PLUGINS_DIR from openpype.tools.utils import host_tools -log = Logger().get_logger(__name__) +from . import lib +from .utils import get_resolve_module + +log = Logger.get_logger(__name__) + +HOST_DIR = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) +PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") @@ -39,7 +43,6 @@ def install(): See the Maya equivalent for inspiration on how to implement this. """ - from .. import get_resolve_module log.info("openpype.hosts.resolve installed") @@ -241,7 +244,7 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( instance, old_value, new_value)) - from openpype.hosts.resolve import ( + from openpype.hosts.resolve.api import ( set_publish_attribute ) diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py index 8e1436021c..0ed7beee59 100644 --- a/openpype/hosts/resolve/api/plugin.py +++ b/openpype/hosts/resolve/api/plugin.py @@ -4,13 +4,15 @@ import uuid import qargparse from Qt import QtWidgets, QtCore -import openpype.api as pype +from openpype.settings import get_current_project_settings +from openpype.pipeline.context_tools import get_current_project_asset from openpype.pipeline import ( LegacyCreator, LoaderPlugin, ) -from openpype.hosts import resolve + from . import lib +from .menu import load_stylesheet class CreatorWidget(QtWidgets.QDialog): @@ -86,7 +88,7 @@ class CreatorWidget(QtWidgets.QDialog): ok_btn.clicked.connect(self._on_ok_clicked) cancel_btn.clicked.connect(self._on_cancel_clicked) - stylesheet = resolve.api.menu.load_stylesheet() + stylesheet = load_stylesheet() self.setStyleSheet(stylesheet) def _on_ok_clicked(self): @@ -375,7 +377,7 @@ class ClipLoader: """ asset_name = self.context["representation"]["context"]["asset"] - self.data["assetData"] = pype.get_asset(asset_name)["data"] + self.data["assetData"] = get_current_project_asset(asset_name)["data"] def load(self): # create project bin for the media to be imported into @@ -438,7 +440,7 @@ class ClipLoader: source_in = int(_clip_property("Start")) source_out = int(_clip_property("End")) - resolve.swap_clips( + lib.swap_clips( timeline_item, media_pool_item, source_in, @@ -504,21 +506,21 @@ class Creator(LegacyCreator): def __init__(self, *args, **kwargs): super(Creator, self).__init__(*args, **kwargs) - from openpype.api import get_current_project_settings + resolve_p_settings = get_current_project_settings().get("resolve") - self.presets = dict() + self.presets = {} if resolve_p_settings: self.presets = resolve_p_settings["create"].get( self.__class__.__name__, {}) # adding basic current context resolve objects - self.project = resolve.get_current_project() - self.timeline = resolve.get_current_timeline() + self.project = lib.get_current_project() + self.timeline = lib.get_current_timeline() if (self.options or {}).get("useSelection"): - self.selected = resolve.get_current_timeline_items(filter=True) + self.selected = lib.get_current_timeline_items(filter=True) else: - self.selected = resolve.get_current_timeline_items(filter=False) + self.selected = lib.get_current_timeline_items(filter=False) self.widget = CreatorWidget diff --git a/openpype/hosts/resolve/api/preload_console.py b/openpype/hosts/resolve/api/preload_console.py deleted file mode 100644 index 1e3a56b4dd..0000000000 --- 
a/openpype/hosts/resolve/api/preload_console.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python -import time -from openpype.hosts.resolve.utils import get_resolve_module -from openpype.api import Logger - -log = Logger().get_logger(__name__) - -wait_delay = 2.5 -wait = 0.00 -ready = None -while True: - try: - # Create project and set parameters: - resolve = get_resolve_module() - pm = resolve.GetProjectManager() - if pm: - ready = None - else: - ready = True - except AttributeError: - pass - - if ready is None: - time.sleep(wait_delay) - log.info(f"Waiting {wait}s for Resolve to have opened Project Manager") - wait += wait_delay - else: - print(f"Preloaded variables: \n\n\tResolve module: " - f"`resolve` > {type(resolve)} \n\tProject manager: " - f"`pm` > {type(pm)}") - break diff --git a/openpype/hosts/resolve/api/utils.py b/openpype/hosts/resolve/api/utils.py index 9b3762f328..871b3af38d 100644 --- a/openpype/hosts/resolve/api/utils.py +++ b/openpype/hosts/resolve/api/utils.py @@ -4,21 +4,21 @@ Resolve's tools for setting environment """ -import sys import os -import shutil -from . import HOST_DIR -from openpype.api import Logger -log = Logger().get_logger(__name__) +import sys + +from openpype.lib import Logger + +log = Logger.get_logger(__name__) def get_resolve_module(): - from openpype.hosts import resolve + from openpype.hosts.resolve import api # dont run if already loaded - if resolve.api.bmdvr: + if api.bmdvr: log.info(("resolve module is assigned to " - f"`pype.hosts.resolve.api.bmdvr`: {resolve.api.bmdvr}")) - return resolve.api.bmdvr + f"`pype.hosts.resolve.api.bmdvr`: {api.bmdvr}")) + return api.bmdvr try: """ The PYTHONPATH needs to be set correctly for this import @@ -71,79 +71,9 @@ def get_resolve_module(): # assign global var and return bmdvr = bmd.scriptapp("Resolve") bmdvf = bmd.scriptapp("Fusion") - resolve.api.bmdvr = bmdvr - resolve.api.bmdvf = bmdvf + api.bmdvr = bmdvr + api.bmdvf = bmdvf log.info(("Assigning resolve module to " - f"`pype.hosts.resolve.api.bmdvr`: {resolve.api.bmdvr}")) + f"`pype.hosts.resolve.api.bmdvr`: {api.bmdvr}")) log.info(("Assigning resolve module to " - f"`pype.hosts.resolve.api.bmdvf`: {resolve.api.bmdvf}")) - - -def _sync_utility_scripts(env=None): - """ Synchronizing basic utlility scripts for resolve. - - To be able to run scripts from inside `Resolve/Workspace/Scripts` menu - all scripts has to be accessible from defined folder. 
- """ - if not env: - env = os.environ - - # initiate inputs - scripts = {} - us_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR") - us_dir = env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "") - us_paths = [os.path.join( - HOST_DIR, - "utility_scripts" - )] - - # collect script dirs - if us_env: - log.info(f"Utility Scripts Env: `{us_env}`") - us_paths = us_env.split( - os.pathsep) + us_paths - - # collect scripts from dirs - for path in us_paths: - scripts.update({path: os.listdir(path)}) - - log.info(f"Utility Scripts Dir: `{us_paths}`") - log.info(f"Utility Scripts: `{scripts}`") - - # make sure no script file is in folder - if next((s for s in os.listdir(us_dir)), None): - for s in os.listdir(us_dir): - path = os.path.join(us_dir, s) - log.info(f"Removing `{path}`...") - if os.path.isdir(path): - shutil.rmtree(path, onerror=None) - else: - os.remove(path) - - # copy scripts into Resolve's utility scripts dir - for d, sl in scripts.items(): - # directory and scripts list - for s in sl: - # script in script list - src = os.path.join(d, s) - dst = os.path.join(us_dir, s) - log.info(f"Copying `{src}` to `{dst}`...") - if os.path.isdir(src): - shutil.copytree( - src, dst, symlinks=False, - ignore=None, ignore_dangling_symlinks=False - ) - else: - shutil.copy2(src, dst) - - -def setup(env=None): - """ Wrapper installer started from pype.hooks.resolve.ResolvePrelaunch() - """ - if not env: - env = os.environ - - # synchronize resolve utility scripts - _sync_utility_scripts(env) - - log.info("Resolve OpenPype wrapper has been installed") + f"`pype.hosts.resolve.api.bmdvf`: {api.bmdvf}")) diff --git a/openpype/hosts/resolve/api/workio.py b/openpype/hosts/resolve/api/workio.py index f175769387..5ce73eea53 100644 --- a/openpype/hosts/resolve/api/workio.py +++ b/openpype/hosts/resolve/api/workio.py @@ -1,15 +1,15 @@ """Host API required Work Files tool""" import os -from openpype.api import Logger -from .. import ( +from openpype.lib import Logger +from .lib import ( get_project_manager, get_current_project, set_project_manager_to_folder_name ) -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) exported_projet_ext = ".drp" @@ -60,7 +60,7 @@ def open_file(filepath): # load project from input path project = pm.LoadProject(fname) log.info(f"Project {project.GetName()} opened...") - return True + except AttributeError: log.warning((f"Project with name `{fname}` does not exist! 
It will " f"be imported from {filepath} and then loaded...")) @@ -69,9 +69,8 @@ def open_file(filepath): project = pm.LoadProject(fname) log.info(f"Project imported/loaded {project.GetName()}...") return True - else: - return False - + return False + return True def current_file(): pm = get_project_manager() @@ -80,13 +79,9 @@ def current_file(): name = project.GetName() fname = name + exported_projet_ext current_file = os.path.join(current_dir, fname) - normalised = os.path.normpath(current_file) - - # Unsaved current file - if normalised == "": + if not current_file: return None - - return normalised + return os.path.normpath(current_file) def work_root(session): diff --git a/openpype/hosts/resolve/hooks/pre_resolve_setup.py b/openpype/hosts/resolve/hooks/pre_resolve_setup.py index 978e3760fd..8574b3ad01 100644 --- a/openpype/hosts/resolve/hooks/pre_resolve_setup.py +++ b/openpype/hosts/resolve/hooks/pre_resolve_setup.py @@ -1,7 +1,7 @@ import os -import importlib +import platform from openpype.lib import PreLaunchHook -from openpype.hosts.resolve.api import utils +from openpype.hosts.resolve.utils import setup class ResolvePrelaunch(PreLaunchHook): @@ -14,47 +14,91 @@ class ResolvePrelaunch(PreLaunchHook): app_groups = ["resolve"] def execute(self): + current_platform = platform.system().lower() + + PROGRAMDATA = self.launch_context.env.get("PROGRAMDATA", "") + RESOLVE_SCRIPT_API_ = { + "windows": ( + f"{PROGRAMDATA}/Blackmagic Design/" + "DaVinci Resolve/Support/Developer/Scripting" + ), + "darwin": ( + "/Library/Application Support/Blackmagic Design" + "/DaVinci Resolve/Developer/Scripting" + ), + "linux": "/opt/resolve/Developer/Scripting" + } + RESOLVE_SCRIPT_API = os.path.normpath( + RESOLVE_SCRIPT_API_[current_platform]) + self.launch_context.env["RESOLVE_SCRIPT_API"] = RESOLVE_SCRIPT_API + + RESOLVE_SCRIPT_LIB_ = { + "windows": ( + "C:/Program Files/Blackmagic Design" + "/DaVinci Resolve/fusionscript.dll" + ), + "darwin": ( + "/Applications/DaVinci Resolve/DaVinci Resolve.app" + "/Contents/Libraries/Fusion/fusionscript.so" + ), + "linux": "/opt/resolve/libs/Fusion/fusionscript.so" + } + RESOLVE_SCRIPT_LIB = os.path.normpath( + RESOLVE_SCRIPT_LIB_[current_platform]) + self.launch_context.env["RESOLVE_SCRIPT_LIB"] = RESOLVE_SCRIPT_LIB + # TODO: add OTIO installation from `openpype/requirements.py` - # making sure python 3.6 is installed at provided path - py36_dir = os.path.normpath( - self.launch_context.env.get("PYTHON36_RESOLVE", "")) - assert os.path.isdir(py36_dir), ( - "Python 3.6 is not installed at the provided folder path. Either " + # making sure python <3.9.* is installed at provided path + python3_home = os.path.normpath( + self.launch_context.env.get("RESOLVE_PYTHON3_HOME", "")) + + assert os.path.isdir(python3_home), ( + "Python 3 is not installed at the provided folder path. Either " "make sure the `environments\resolve.json` is having correctly " - "set `PYTHON36_RESOLVE` or make sure Python 3.6 is installed " - f"in given path. \nPYTHON36_RESOLVE: `{py36_dir}`" + "set `RESOLVE_PYTHON3_HOME` or make sure Python 3 is installed " + f"in given path. 
\nRESOLVE_PYTHON3_HOME: `{python3_home}`" ) - self.log.info(f"Path to Resolve Python folder: `{py36_dir}`...") + self.launch_context.env["PYTHONHOME"] = python3_home + self.log.info(f"Path to Resolve Python folder: `{python3_home}`...") + # add to the python path to path + env_path = self.launch_context.env["PATH"] + self.launch_context.env["PATH"] = os.pathsep.join([ + python3_home, + os.path.join(python3_home, "Scripts") + ] + env_path.split(os.pathsep)) + + self.log.debug(f"PATH: {self.launch_context.env['PATH']}") + + # add to the PYTHONPATH + env_pythonpath = self.launch_context.env["PYTHONPATH"] + self.launch_context.env["PYTHONPATH"] = os.pathsep.join([ + os.path.join(python3_home, "Lib", "site-packages"), + os.path.join(RESOLVE_SCRIPT_API, "Modules"), + ] + env_pythonpath.split(os.pathsep)) + + self.log.debug(f"PYTHONPATH: {self.launch_context.env['PYTHONPATH']}") + + RESOLVE_UTILITY_SCRIPTS_DIR_ = { + "windows": ( + f"{PROGRAMDATA}/Blackmagic Design" + "/DaVinci Resolve/Fusion/Scripts/Comp" + ), + "darwin": ( + "/Library/Application Support/Blackmagic Design" + "/DaVinci Resolve/Fusion/Scripts/Comp" + ), + "linux": "/opt/resolve/Fusion/Scripts/Comp" + } + RESOLVE_UTILITY_SCRIPTS_DIR = os.path.normpath( + RESOLVE_UTILITY_SCRIPTS_DIR_[current_platform] + ) # setting utility scripts dir for scripts syncing - us_dir = os.path.normpath( - self.launch_context.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "") - ) - assert os.path.isdir(us_dir), ( - "Resolve utility script dir does not exists. Either make sure " - "the `environments\resolve.json` is having correctly set " - "`RESOLVE_UTILITY_SCRIPTS_DIR` or reinstall DaVinci Resolve. \n" - f"RESOLVE_UTILITY_SCRIPTS_DIR: `{us_dir}`" - ) - self.log.debug(f"-- us_dir: `{us_dir}`") + self.launch_context.env["RESOLVE_UTILITY_SCRIPTS_DIR"] = ( + RESOLVE_UTILITY_SCRIPTS_DIR) - # correctly format path for pre python script - pre_py_sc = os.path.normpath( - self.launch_context.env.get("PRE_PYTHON_SCRIPT", "")) - self.launch_context.env["PRE_PYTHON_SCRIPT"] = pre_py_sc - self.log.debug(f"-- pre_py_sc: `{pre_py_sc}`...") - try: - __import__("openpype.hosts.resolve") - __import__("pyblish") + # remove terminal coloring tags + self.launch_context.env["OPENPYPE_LOG_NO_COLORS"] = "True" - except ImportError: - self.log.warning( - "pyblish: Could not load Resolve integration.", - exc_info=True - ) - - else: - # Resolve Setup integration - importlib.reload(utils) - self.log.debug(f"-- utils.__file__: `{utils.__file__}`") - utils.setup(self.launch_context.env) + # Resolve Setup integration + setup(self.launch_context.env) diff --git a/openpype/hosts/resolve/plugins/create/create_shot_clip.py b/openpype/hosts/resolve/plugins/create/create_shot_clip.py index 62d5557a50..4b14f2493f 100644 --- a/openpype/hosts/resolve/plugins/create/create_shot_clip.py +++ b/openpype/hosts/resolve/plugins/create/create_shot_clip.py @@ -1,9 +1,12 @@ # from pprint import pformat -from openpype.hosts import resolve -from openpype.hosts.resolve.api import lib +from openpype.hosts.resolve.api import plugin, lib +from openpype.hosts.resolve.api.lib import ( + get_video_track_names, + create_bin, +) -class CreateShotClip(resolve.Creator): +class CreateShotClip(plugin.Creator): """Publishable clip""" label = "Create Publishable Clip" @@ -11,7 +14,7 @@ class CreateShotClip(resolve.Creator): icon = "film" defaults = ["Main"] - gui_tracks = resolve.get_video_track_names() + gui_tracks = get_video_track_names() gui_name = "OpenPype publish attributes creator" gui_info = "Define sequential rename 
and fill hierarchy data." gui_inputs = { @@ -116,12 +119,13 @@ class CreateShotClip(resolve.Creator): "order": 0}, "vSyncTrack": { "value": gui_tracks, # noqa - "type": "QComboBox", - "label": "Hero track", - "target": "ui", - "toolTip": "Select driving track name which should be mastering all others", # noqa - "order": 1} + "type": "QComboBox", + "label": "Hero track", + "target": "ui", + "toolTip": "Select driving track name which should be mastering all others", # noqa + "order": 1 } + } }, "publishSettings": { "type": "section", @@ -172,28 +176,31 @@ class CreateShotClip(resolve.Creator): "target": "ui", "order": 4, "value": { - "workfileFrameStart": { - "value": 1001, - "type": "QSpinBox", - "label": "Workfiles Start Frame", - "target": "tag", - "toolTip": "Set workfile starting frame number", # noqa - "order": 0}, - "handleStart": { - "value": 0, - "type": "QSpinBox", - "label": "Handle start (head)", - "target": "tag", - "toolTip": "Handle at start of clip", # noqa - "order": 1}, - "handleEnd": { - "value": 0, - "type": "QSpinBox", - "label": "Handle end (tail)", - "target": "tag", - "toolTip": "Handle at end of clip", # noqa - "order": 2}, - } + "workfileFrameStart": { + "value": 1001, + "type": "QSpinBox", + "label": "Workfiles Start Frame", + "target": "tag", + "toolTip": "Set workfile starting frame number", # noqa + "order": 0 + }, + "handleStart": { + "value": 0, + "type": "QSpinBox", + "label": "Handle start (head)", + "target": "tag", + "toolTip": "Handle at start of clip", # noqa + "order": 1 + }, + "handleEnd": { + "value": 0, + "type": "QSpinBox", + "label": "Handle end (tail)", + "target": "tag", + "toolTip": "Handle at end of clip", # noqa + "order": 2 + } + } } } @@ -229,8 +236,10 @@ class CreateShotClip(resolve.Creator): v_sync_track = widget.result["vSyncTrack"]["value"] # sort selected trackItems by - sorted_selected_track_items = list() - unsorted_selected_track_items = list() + sorted_selected_track_items = [] + unsorted_selected_track_items = [] + print("_____ selected ______") + print(self.selected) for track_item_data in self.selected: if track_item_data["track"]["name"] in v_sync_track: sorted_selected_track_items.append(track_item_data) @@ -244,7 +253,7 @@ class CreateShotClip(resolve.Creator): sq_markers = self.timeline.GetMarkers() # create media bin for compound clips (trackItems) - mp_folder = resolve.create_bin(self.timeline.GetName()) + mp_folder = create_bin(self.timeline.GetName()) kwargs = { "ui_inputs": widget.result, @@ -253,11 +262,11 @@ class CreateShotClip(resolve.Creator): "sq_frame_start": sq_frame_start, "sq_markers": sq_markers } - + print(kwargs) for i, track_item_data in enumerate(sorted_selected_track_items): self.rename_index = i - + self.log.info(track_item_data) # convert track item to timeline media pool item - track_item = resolve.PublishClip( + track_item = plugin.PublishClip( self, track_item_data, **kwargs).convert() track_item.SetClipColor(lib.publish_clip_color) diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py index cf88b14e81..a0c78c182f 100644 --- a/openpype/hosts/resolve/plugins/load/load_clip.py +++ b/openpype/hosts/resolve/plugins/load/load_clip.py @@ -1,17 +1,22 @@ from copy import deepcopy -from importlib import reload -from openpype.hosts import resolve +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) +# from openpype.hosts import resolve from openpype.pipeline import ( get_representation_path, legacy_io, ) from 
openpype.hosts.resolve.api import lib, plugin -reload(plugin) -reload(lib) +from openpype.hosts.resolve.api.pipeline import ( + containerise, + update_container, +) -class LoadClip(resolve.TimelineItemLoader): +class LoadClip(plugin.TimelineItemLoader): """Load a subset to timeline as clip Place clip to timeline on its asset origin timings collected @@ -19,7 +24,7 @@ class LoadClip(resolve.TimelineItemLoader): """ families = ["render2d", "source", "plate", "render", "review"] - representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264", ".mov"] + representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264", "mov"] label = "Load as clip" order = -10 @@ -42,7 +47,7 @@ class LoadClip(resolve.TimelineItemLoader): }) # load clip to timeline and get main variables - timeline_item = resolve.ClipLoader( + timeline_item = plugin.ClipLoader( self, context, **options).load() namespace = namespace or timeline_item.GetName() version = context['version'] @@ -76,7 +81,7 @@ class LoadClip(resolve.TimelineItemLoader): self.log.info("Loader done: `{}`".format(name)) - return resolve.containerise( + return containerise( timeline_item, name, namespace, context, self.__class__.__name__, @@ -94,12 +99,10 @@ class LoadClip(resolve.TimelineItemLoader): context.update({"representation": representation}) name = container['name'] namespace = container['namespace'] - timeline_item_data = resolve.get_pype_timeline_item_by_name(namespace) + timeline_item_data = lib.get_pype_timeline_item_by_name(namespace) timeline_item = timeline_item_data["clip"]["item"] - version = legacy_io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version = get_version_by_id(project_name, representation["parent"]) version_data = version.get("data", {}) version_name = version.get("name", None) colorspace = version_data.get("colorspace", None) @@ -107,7 +110,7 @@ class LoadClip(resolve.TimelineItemLoader): self.fname = get_representation_path(representation) context["version"] = {"data": version_data} - loader = resolve.ClipLoader(self, context) + loader = plugin.ClipLoader(self, context) timeline_item = loader.update(timeline_item) # add additional metadata from the version to imprint Avalon knob @@ -134,23 +137,26 @@ class LoadClip(resolve.TimelineItemLoader): # update color of clip regarding the version order self.set_item_color(timeline_item, version) - return resolve.update_container(timeline_item, data_imprint) + return update_container(timeline_item, data_imprint) @classmethod def set_item_color(cls, timeline_item, version): - # define version name version_name = version.get("name", None) # get all versions in list - versions = legacy_io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) + project_name = legacy_io.active_project() + last_version_doc = get_last_version_by_subset_id( + project_name, + version["parent"], + fields=["name"] + ) + if last_version_doc: + last_version = last_version_doc["name"] + else: + last_version = None # set clip colour - if version_name == max_version: + if version_name == last_version: timeline_item.SetClipColor(cls.clip_color_last) else: timeline_item.SetClipColor(cls.clip_color) diff --git a/openpype/hosts/resolve/plugins/publish/extract_workfile.py b/openpype/hosts/resolve/plugins/publish/extract_workfile.py index e3d60465a2..535f879b58 100644 --- a/openpype/hosts/resolve/plugins/publish/extract_workfile.py +++ 
b/openpype/hosts/resolve/plugins/publish/extract_workfile.py @@ -1,10 +1,11 @@ import os import pyblish.api -import openpype.api -from openpype.hosts import resolve + +from openpype.pipeline import publish +from openpype.hosts.resolve.api.lib import get_project_manager -class ExtractWorkfile(openpype.api.Extractor): +class ExtractWorkfile(publish.Extractor): """ Extractor export DRP workfile file representation """ @@ -29,7 +30,7 @@ class ExtractWorkfile(openpype.api.Extractor): os.path.join(staging_dir, drp_file_name)) # write out the drp workfile - resolve.get_project_manager().ExportProject( + get_project_manager().ExportProject( project.GetName(), drp_file_path) # create drp workfile representation diff --git a/openpype/hosts/resolve/plugins/publish/precollect_instances.py b/openpype/hosts/resolve/plugins/publish/precollect_instances.py index 8f1a13a4e5..8ec169ad65 100644 --- a/openpype/hosts/resolve/plugins/publish/precollect_instances.py +++ b/openpype/hosts/resolve/plugins/publish/precollect_instances.py @@ -1,9 +1,15 @@ -import pyblish -from openpype.hosts import resolve - -# # developer reload modules from pprint import pformat +import pyblish + +from openpype.hosts.resolve.api.lib import ( + get_current_timeline_items, + get_timeline_item_pype_tag, + publish_clip_color, + get_publish_attribute, + get_otio_clip_instance_data, +) + class PrecollectInstances(pyblish.api.ContextPlugin): """Collect all Track items selection.""" @@ -14,8 +20,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin): def process(self, context): otio_timeline = context.data["otioTimeline"] - selected_timeline_items = resolve.get_current_timeline_items( - filter=True, selecting_color=resolve.publish_clip_color) + selected_timeline_items = get_current_timeline_items( + filter=True, selecting_color=publish_clip_color) self.log.info( "Processing enabled track items: {}".format( @@ -27,7 +33,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin): timeline_item = timeline_item_data["clip"]["item"] # get pype tag data - tag_data = resolve.get_timeline_item_pype_tag(timeline_item) + tag_data = get_timeline_item_pype_tag(timeline_item) self.log.debug(f"__ tag_data: {pformat(tag_data)}") if not tag_data: @@ -67,14 +73,15 @@ class PrecollectInstances(pyblish.api.ContextPlugin): "asset": asset, "item": timeline_item, "families": families, - "publish": resolve.get_publish_attribute(timeline_item), + "publish": get_publish_attribute(timeline_item), "fps": context.data["fps"], "handleStart": handle_start, - "handleEnd": handle_end + "handleEnd": handle_end, + "newAssetPublishing": True }) # otio clip data - otio_data = resolve.get_otio_clip_instance_data( + otio_data = get_otio_clip_instance_data( otio_timeline, timeline_item_data) or {} data.update(otio_data) @@ -133,7 +140,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin): "asset": asset, "family": family, "families": [], - "publish": resolve.get_publish_attribute(timeline_item) + "publish": get_publish_attribute(timeline_item) }) context.create_instance(**data) diff --git a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py index a58f288770..0f94216556 100644 --- a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py +++ b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py @@ -1,11 +1,9 @@ import pyblish.api from pprint import pformat -from importlib import reload -from openpype.hosts import resolve +from openpype.hosts.resolve import api as rapi from 
openpype.pipeline import legacy_io from openpype.hosts.resolve.otio import davinci_export -reload(davinci_export) class PrecollectWorkfile(pyblish.api.ContextPlugin): @@ -18,9 +16,9 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): asset = legacy_io.Session["AVALON_ASSET"] subset = "workfile" - project = resolve.get_current_project() + project = rapi.get_current_project() fps = project.GetSetting("timelineFrameRate") - video_tracks = resolve.get_video_track_names() + video_tracks = rapi.get_video_track_names() # adding otio timeline to context otio_timeline = davinci_export.create_otio_timeline(project) @@ -30,7 +28,8 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): "asset": asset, "subset": "{}{}".format(asset, subset.capitalize()), "item": project, - "family": "workfile" + "family": "workfile", + "families": [] } # create instance with workfile diff --git a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py b/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py index 3a16b9c966..8f3917bece 100644 --- a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py +++ b/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py @@ -6,10 +6,11 @@ from openpype.pipeline import install_host def main(env): - import openpype.hosts.resolve as bmdvr + from openpype.hosts.resolve.utils import setup + import openpype.hosts.resolve.api as bmdvr # Registers openpype's Global pyblish plugins install_host(bmdvr) - bmdvr.setup(env) + setup(env) if __name__ == "__main__": diff --git a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py index 89ade9238b..1087a7b7a0 100644 --- a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py +++ b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py @@ -2,13 +2,13 @@ import os import sys from openpype.pipeline import install_host -from openpype.api import Logger +from openpype.lib import Logger -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) def main(env): - import openpype.hosts.resolve as bmdvr + import openpype.hosts.resolve.api as bmdvr # activate resolve from openpype install_host(bmdvr) diff --git a/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py b/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py index 8433bd9172..92f2e43a72 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py +++ b/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py @@ -6,8 +6,8 @@ import opentimelineio as otio from openpype.pipeline import install_host -from openpype.hosts.resolve import TestGUI -import openpype.hosts.resolve as bmdvr +import openpype.hosts.resolve.api as bmdvr +from openpype.hosts.resolve.api.testing_utils import TestGUI from openpype.hosts.resolve.otio import davinci_export as otio_export diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py b/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py index 477955d527..91a361ec08 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py @@ -2,11 +2,16 @@ import os import sys -from openpype.pipeline import install_host -from openpype.hosts.resolve import TestGUI -import openpype.hosts.resolve as bmdvr import clique +from openpype.pipeline import install_host +from 
openpype.hosts.resolve.api.testing_utils import TestGUI +import openpype.hosts.resolve.api as bmdvr +from openpype.hosts.resolve.api.lib import ( + create_media_pool_item, + create_timeline_item, +) + class ThisTestGUI(TestGUI): extensions = [".exr", ".jpg", ".mov", ".png", ".mp4", ".ari", ".arx"] @@ -55,10 +60,10 @@ class ThisTestGUI(TestGUI): # skip if unwanted extension if ext not in self.extensions: return - media_pool_item = bmdvr.create_media_pool_item(fpath) + media_pool_item = create_media_pool_item(fpath) print(media_pool_item) - track_item = bmdvr.create_timeline_item(media_pool_item) + track_item = create_timeline_item(media_pool_item) print(track_item) diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py index 872d620162..2e83188bde 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py @@ -1,13 +1,17 @@ #! python3 from openpype.pipeline import install_host -import openpype.hosts.resolve as bmdvr +from openpype.hosts.resolve import api as bmdvr +from openpype.hosts.resolve.api.lib import ( + create_media_pool_item, + create_timeline_item, +) def file_processing(fpath): - media_pool_item = bmdvr.create_media_pool_item(fpath) + media_pool_item = create_media_pool_item(fpath) print(media_pool_item) - track_item = bmdvr.create_timeline_item(media_pool_item) + track_item = create_timeline_item(media_pool_item) print(track_item) diff --git a/openpype/hosts/resolve/utils.py b/openpype/hosts/resolve/utils.py new file mode 100644 index 0000000000..5881f153ae --- /dev/null +++ b/openpype/hosts/resolve/utils.py @@ -0,0 +1,55 @@ +import os +import shutil +from openpype.lib import Logger + +RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +def setup(env): + log = Logger.get_logger("ResolveSetup") + scripts = {} + us_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR") + us_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"] + + us_paths = [os.path.join( + RESOLVE_ROOT_DIR, + "utility_scripts" + )] + + # collect script dirs + if us_env: + log.info("Utility Scripts Env: `{}`".format(us_env)) + us_paths = us_env.split( + os.pathsep) + us_paths + + # collect scripts from dirs + for path in us_paths: + scripts.update({path: os.listdir(path)}) + + log.info("Utility Scripts Dir: `{}`".format(us_paths)) + log.info("Utility Scripts: `{}`".format(scripts)) + + # make sure no script file is in folder + for s in os.listdir(us_dir): + path = os.path.join(us_dir, s) + log.info("Removing `{}`...".format(path)) + if os.path.isdir(path): + shutil.rmtree(path, onerror=None) + else: + os.remove(path) + + # copy scripts into Resolve's utility scripts dir + for d, sl in scripts.items(): + # directory and scripts list + for s in sl: + # script in script list + src = os.path.join(d, s) + dst = os.path.join(us_dir, s) + log.info("Copying `{}` to `{}`...".format(src, dst)) + if os.path.isdir(src): + shutil.copytree( + src, dst, symlinks=False, + ignore=None, ignore_dangling_symlinks=False + ) + else: + shutil.copy2(src, dst) diff --git a/openpype/hosts/standalonepublisher/__init__.py b/openpype/hosts/standalonepublisher/__init__.py index e69de29bb2..f47fa6b573 100644 --- a/openpype/hosts/standalonepublisher/__init__.py +++ b/openpype/hosts/standalonepublisher/__init__.py @@ -0,0 +1,6 @@ +from .addon import StandAlonePublishAddon + + +__all__ = ( + "StandAlonePublishAddon", +) diff 
--git a/openpype/hosts/standalonepublisher/addon.py b/openpype/hosts/standalonepublisher/addon.py new file mode 100644 index 0000000000..65a4226664 --- /dev/null +++ b/openpype/hosts/standalonepublisher/addon.py @@ -0,0 +1,56 @@ +import os + +import click + +from openpype.lib import get_openpype_execute_args +from openpype.lib.execute import run_detached_process +from openpype.modules import OpenPypeModule, ITrayAction, IHostAddon + +STANDALONEPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class StandAlonePublishAddon(OpenPypeModule, ITrayAction, IHostAddon): + label = "Publish" + name = "standalonepublisher" + host_name = "standalonepublisher" + + def initialize(self, modules_settings): + self.enabled = modules_settings["standalonepublish_tool"]["enabled"] + self.publish_paths = [ + os.path.join(STANDALONEPUBLISH_ROOT_DIR, "plugins", "publish") + ] + + def tray_init(self): + return + + def on_action_trigger(self): + self.run_standalone_publisher() + + def connect_with_modules(self, enabled_modules): + """Collect publish paths from other modules.""" + + publish_paths = self.manager.collect_plugin_paths()["publish"] + self.publish_paths.extend(publish_paths) + + def run_standalone_publisher(self): + args = get_openpype_execute_args("module", self.name, "launch") + run_detached_process(args) + + def cli(self, click_group): + click_group.add_command(cli_main) + + +@click.group( + StandAlonePublishAddon.name, + help="StandalonePublisher related commands.") +def cli_main(): + pass + + +@cli_main.command() +def launch(): + """Launch StandalonePublisher tool UI.""" + + from openpype.tools import standalonepublish + + standalonepublish.main() diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py index 3e7fb19c00..7925b0ecf3 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py @@ -2,8 +2,8 @@ import copy import json import pyblish.api -from openpype.lib import get_subset_name_with_asset_doc -from openpype.pipeline import legacy_io +from openpype.client import get_asset_by_name +from openpype.pipeline.create import get_subset_name class CollectBulkMovInstances(pyblish.api.InstancePlugin): @@ -24,12 +24,9 @@ class CollectBulkMovInstances(pyblish.api.InstancePlugin): def process(self, instance): context = instance.context + project_name = context.data["projectEntity"]["name"] asset_name = instance.data["asset"] - - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) if not asset_doc: raise AssertionError(( "Couldn't find Asset document with name \"{}\"" @@ -47,12 +44,14 @@ class CollectBulkMovInstances(pyblish.api.InstancePlugin): task_name = available_task_names[_task_name_low] break - subset_name = get_subset_name_with_asset_doc( + subset_name = get_subset_name( self.new_instance_family, self.subset_name_variant, task_name, asset_doc, - legacy_io.Session["AVALON_PROJECT"] + project_name, + host_name=context.data["hostName"], + project_settings=context.data["project_settings"] ) instance_name = f"{asset_name}_{subset_name}" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py index 0a1d29ccdc..8633d4bf9d 100644 --- 
a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py @@ -19,6 +19,7 @@ import os import opentimelineio as otio import pyblish.api from openpype import lib as plib +from openpype.pipeline.context_tools import get_current_project_asset class OTIO_View(pyblish.api.Action): @@ -116,7 +117,7 @@ class CollectEditorial(pyblish.api.InstancePlugin): if extension == ".edl": # EDL has no frame rate embedded so needs explicit # frame rate else 24 is asssumed. - kwargs["rate"] = plib.get_asset()["data"]["fps"] + kwargs["rate"] = get_current_project_asset()["data"]["fps"] instance.data["otio_timeline"] = otio.adapters.read_from_file( file_path, **kwargs) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py index d0d36bb717..75c260bad7 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py @@ -1,8 +1,12 @@ import os +from copy import deepcopy + import opentimelineio as otio import pyblish.api + from openpype import lib as plib -from copy import deepcopy +from openpype.pipeline.context_tools import get_current_project_asset + class CollectInstances(pyblish.api.InstancePlugin): """Collect instances from editorial's OTIO sequence""" @@ -48,7 +52,7 @@ class CollectInstances(pyblish.api.InstancePlugin): # get timeline otio data timeline = instance.data["otio_timeline"] - fps = plib.get_asset()["data"]["fps"] + fps = get_current_project_asset()["data"]["fps"] tracks = timeline.each_child( descended_from_type=otio.schema.Track @@ -166,7 +170,8 @@ class CollectInstances(pyblish.api.InstancePlugin): "frameStart": frame_start, "frameEnd": frame_end, "frameStartH": frame_start - handle_start, - "frameEndH": frame_end + handle_end + "frameEndH": frame_end + handle_end, + "newAssetPublishing": True } for data_key in instance_data_filter: diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py index 77163651c4..9109bf6726 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py @@ -1,9 +1,10 @@ import os +from pprint import pformat import re from copy import deepcopy import pyblish.api -from openpype.pipeline import legacy_io +from openpype.client import get_asset_by_id class CollectHierarchyInstance(pyblish.api.ContextPlugin): @@ -21,6 +22,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): families = ["shot"] # presets + shot_rename = True shot_rename_template = None shot_rename_search_patterns = None shot_add_hierarchy = None @@ -46,7 +48,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): parent_name = instance.context.data["assetEntity"]["name"] clip = instance.data["item"] clip_name = os.path.splitext(clip.name)[0].lower() - if self.shot_rename_search_patterns: + if self.shot_rename_search_patterns and self.shot_rename: search_text += parent_name + clip_name instance.data["anatomyData"].update({"clip_name": clip_name}) for type, pattern in self.shot_rename_search_patterns.items(): @@ -56,33 +58,38 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): continue instance.data["anatomyData"][type] = match[-1] - # format to new shot 
name - instance.data["asset"] = self.shot_rename_template.format( - **instance.data["anatomyData"]) + # format to new shot name + instance.data["asset"] = self.shot_rename_template.format( + **instance.data["anatomyData"]) def create_hierarchy(self, instance): - parents = list() - hierarchy = list() - visual_hierarchy = [instance.context.data["assetEntity"]] + asset_doc = instance.context.data["assetEntity"] + project_doc = instance.context.data["projectEntity"] + project_name = project_doc["name"] + visual_hierarchy = [asset_doc] + current_doc = asset_doc while True: - visual_parent = legacy_io.find_one( - {"_id": visual_hierarchy[-1]["data"]["visualParent"]} - ) - if visual_parent: - visual_hierarchy.append(visual_parent) - else: - visual_hierarchy.append( - instance.context.data["projectEntity"]) + visual_parent_id = current_doc["data"]["visualParent"] + visual_parent = None + if visual_parent_id: + visual_parent = get_asset_by_id(project_name, visual_parent_id) + + if not visual_parent: + visual_hierarchy.append(project_doc) break + visual_hierarchy.append(visual_parent) + current_doc = visual_parent # add current selection context hierarchy from standalonepublisher + parents = list() for entity in reversed(visual_hierarchy): parents.append({ "entity_type": entity["data"]["entityType"], "entity_name": entity["name"] }) - if self.shot_add_hierarchy: + hierarchy = list() + if self.shot_add_hierarchy.get("enabled"): parent_template_patern = re.compile(r"\{([a-z]*?)\}") # fill the parents parts from presets shot_add_hierarchy = self.shot_add_hierarchy.copy() @@ -126,12 +133,11 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): instance.data["parents"] = parents # print - self.log.debug(f"Hierarchy: {hierarchy}") - self.log.debug(f"parents: {parents}") + self.log.warning(f"Hierarchy: {hierarchy}") + self.log.info(f"parents: {parents}") + tasks_to_add = dict() if self.shot_add_tasks: - tasks_to_add = dict() - project_doc = legacy_io.find_one({"type": "project"}) project_tasks = project_doc["config"]["tasks"] for task_name, task_data in self.shot_add_tasks.items(): _task_data = deepcopy(task_data) @@ -150,9 +156,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): task_name, list(project_tasks.keys()))) - instance.data["tasks"] = tasks_to_add - else: - instance.data["tasks"] = dict() + instance.data["tasks"] = tasks_to_add # updating hierarchy data instance.data["anatomyData"].update({ @@ -161,6 +165,9 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): }) def process(self, context): + self.log.info("self.shot_add_hierarchy: {}".format( + pformat(self.shot_add_hierarchy) + )) for instance in context: if instance.data["family"] in self.families: self.processing_instance(instance) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index 9d94bfdc91..82d7247b2b 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -4,7 +4,7 @@ import collections import pyblish.api from pprint import pformat -from openpype.pipeline import legacy_io +from openpype.client import get_assets class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): @@ -119,8 +119,9 @@ class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): def _asset_docs_by_parent_id(self, instance): # Query all assets for project and store them by parent's id to 
list + project_name = instance.context.data["projectEntity"]["name"] asset_docs_by_parent_id = collections.defaultdict(list) - for asset_doc in legacy_io.find({"type": "asset"}): + for asset_doc in get_assets(project_name): parent_id = asset_doc["data"]["visualParent"] asset_docs_by_parent_id[parent_id].append(asset_doc) return asset_docs_by_parent_id diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py index 941a76b05b..9f02d65d00 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py @@ -1,10 +1,11 @@ import os import tempfile import pyblish.api -import openpype.api from openpype.lib import ( get_ffmpeg_tool_path, get_ffprobe_streams, + path_to_subprocess_arg, + run_subprocess, ) @@ -37,82 +38,69 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin): if not thumbnail_repre: return + thumbnail_repre.pop("thumbnail") files = thumbnail_repre.get("files") if not files: return if isinstance(files, list): - files_len = len(files) - file = str(files[0]) + first_filename = str(files[0]) else: - files_len = 1 - file = files + first_filename = files staging_dir = None - is_jpeg = False - if file.endswith(".jpeg") or file.endswith(".jpg"): - is_jpeg = True - if is_jpeg and files_len == 1: - # skip if already is single jpeg file - return + # Convert to jpeg if not yet + full_input_path = os.path.join( + thumbnail_repre["stagingDir"], first_filename + ) + self.log.info("input {}".format(full_input_path)) + with tempfile.NamedTemporaryFile(suffix=".jpg") as tmp: + full_thumbnail_path = tmp.name - elif is_jpeg: - # use first frame as thumbnail if is sequence of jpegs - full_thumbnail_path = os.path.join( - thumbnail_repre["stagingDir"], file - ) - self.log.info( - "For thumbnail is used file: {}".format(full_thumbnail_path) - ) + self.log.info("output {}".format(full_thumbnail_path)) - else: - # Convert to jpeg if not yet - full_input_path = os.path.join(thumbnail_repre["stagingDir"], file) - self.log.info("input {}".format(full_input_path)) + instance.context.data["cleanupFullPaths"].append(full_thumbnail_path) - full_thumbnail_path = tempfile.mkstemp(suffix=".jpg")[1] - self.log.info("output {}".format(full_thumbnail_path)) + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") - ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") + ffmpeg_args = self.ffmpeg_args or {} - ffmpeg_args = self.ffmpeg_args or {} + jpeg_items = [ + path_to_subprocess_arg(ffmpeg_path), + # override file if already exists + "-y" + ] - jpeg_items = [ - "\"{}\"".format(ffmpeg_path), - # override file if already exists - "-y" - ] - - # add input filters from peresets - jpeg_items.extend(ffmpeg_args.get("input") or []) - # input file - jpeg_items.append("-i \"{}\"".format(full_input_path)) + # add input filters from peresets + jpeg_items.extend(ffmpeg_args.get("input") or []) + # input file + jpeg_items.extend([ + "-i", path_to_subprocess_arg(full_input_path), # extract only single file - jpeg_items.append("-frames:v 1") + "-frames:v", "1", # Add black background for transparent images - jpeg_items.append(( - "-filter_complex" - " \"color=black,format=rgb24[c]" + "-filter_complex", ( + "\"color=black,format=rgb24[c]" ";[c][0]scale2ref[c][i]" ";[c][i]overlay=format=auto:shortest=1,setsar=1\"" - )) + ), + ]) - jpeg_items.extend(ffmpeg_args.get("output") or []) + jpeg_items.extend(ffmpeg_args.get("output") or []) - # output file - 
jpeg_items.append("\"{}\"".format(full_thumbnail_path)) + # output file + jpeg_items.append(path_to_subprocess_arg(full_thumbnail_path)) - subprocess_jpeg = " ".join(jpeg_items) + subprocess_jpeg = " ".join(jpeg_items) - # run subprocess - self.log.debug("Executing: {}".format(subprocess_jpeg)) - openpype.api.run_subprocess( - subprocess_jpeg, shell=True, logger=self.log - ) + # run subprocess + self.log.debug("Executing: {}".format(subprocess_jpeg)) + run_subprocess( + subprocess_jpeg, shell=True, logger=self.log + ) # remove thumbnail key from origin repre - thumbnail_repre.pop("thumbnail") streams = get_ffprobe_streams(full_thumbnail_path) width = height = None for stream in streams: @@ -121,8 +109,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin): height = stream["height"] break - filename = os.path.basename(full_thumbnail_path) - staging_dir = staging_dir or os.path.dirname(full_thumbnail_path) + staging_dir, filename = os.path.split(full_thumbnail_path) # create new thumbnail representation representation = { @@ -130,15 +117,12 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin): 'ext': 'jpg', 'files': filename, "stagingDir": staging_dir, - "tags": ["thumbnail"], + "tags": ["thumbnail", "delete"], + "thumbnail": True } if width and height: representation["width"] = width representation["height"] = height - # # add Delete tag when temp file was rendered - if not is_jpeg: - representation["tags"].append("delete") - self.log.info(f"New representation {representation}") instance.data["representations"].append(representation) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py index afb828474d..3d2b6d04ad 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py @@ -1,6 +1,8 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateEditorialResources(pyblish.api.InstancePlugin): @@ -13,7 +15,7 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin): # make sure it is enabled only if at least both families are available match = pyblish.api.Subset - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder def process(self, instance): self.log.debug( diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py index 005157af62..074c62ea0e 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py @@ -2,9 +2,11 @@ import re import pyblish.api -import openpype.api -from openpype import lib -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.context_tools import get_current_project_asset +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateFrameRange(pyblish.api.InstancePlugin): @@ -13,7 +15,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): label = "Validate Frame Range" hosts = ["standalonepublisher"] families = ["render"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder optional = True # published data might be 
sequence (.mov, .mp4) in that counting files @@ -27,7 +29,8 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): for pattern in self.skip_timelines_check): self.log.info("Skipping for {} task".format(instance.data["task"])) - asset_data = lib.get_asset(instance.data["asset"])["data"] + # TODO repace query with using 'instance.data["assetEntity"]' + asset_data = get_current_project_asset(instance.data["asset"])["data"] frame_start = asset_data["frameStart"] frame_end = asset_data["frameEnd"] handle_start = asset_data["handleStart"] diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py index fe655f6b74..df04ae3b66 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py @@ -1,14 +1,17 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) + class ValidateShotDuplicates(pyblish.api.ContextPlugin): """Validating no duplicate names are in context.""" label = "Validate Shot Duplicates" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder def process(self, context): shot_names = [] diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py index ef8da9f280..c123bef4f8 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py @@ -1,16 +1,19 @@ # -*- coding: utf-8 -*- """Validator for correct file naming.""" -import pyblish.api -import openpype.api import re -from openpype.pipeline import PublishXmlValidationError +import pyblish.api + +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateSimpleUnrealTextureNaming(pyblish.api.InstancePlugin): label = "Validate Unreal Texture Names" hosts = ["standalonepublisher"] families = ["simpleUnrealTexture"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder regex = "^T_{asset}.*" def process(self, instance): diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py index 316f58988f..1782f53de2 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py @@ -2,8 +2,10 @@ import os import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateSources(pyblish.api.InstancePlugin): @@ -13,7 +15,7 @@ class ValidateSources(pyblish.api.InstancePlugin): got deleted between starting of SP and now. 
""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Check source files" optional = True # only for unforeseeable cases diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py index 4c761c7a4c..19ea1a4778 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py @@ -1,9 +1,7 @@ import pyblish.api -from openpype.pipeline import ( - PublishXmlValidationError, - legacy_io, -) +from openpype.client import get_assets +from openpype.pipeline import PublishXmlValidationError class ValidateTaskExistence(pyblish.api.ContextPlugin): @@ -20,15 +18,11 @@ class ValidateTaskExistence(pyblish.api.ContextPlugin): for instance in context: asset_names.add(instance.data["asset"]) - asset_docs = legacy_io.find( - { - "type": "asset", - "name": {"$in": list(asset_names)} - }, - { - "name": 1, - "data.tasks": 1 - } + project_name = context.data["projectEntity"]["name"] + asset_docs = get_assets( + project_name, + asset_names=asset_names, + fields=["name", "data.tasks"] ) tasks_by_asset_names = {} for asset_doc in asset_docs: diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py index d66fb257bb..44f69e48f7 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py @@ -1,7 +1,9 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateTextureBatch(pyblish.api.InstancePlugin): @@ -9,7 +11,7 @@ class ValidateTextureBatch(pyblish.api.InstancePlugin): label = "Validate Texture Presence" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["texture_batch_workfile"] optional = False diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py index 0e67464f59..f489d37f59 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py @@ -1,7 +1,9 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin): @@ -12,7 +14,7 @@ class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin): """ label = "Validate Texture Has Workfile" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["textures"] optional = True diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py index 751ad917ca..22f4a0eafc 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py @@ -1,14 +1,16 @@ import pyblish.api 
-import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateTextureBatchNaming(pyblish.api.InstancePlugin): """Validates that all instances had properly formatted name.""" label = "Validate Texture Batch Naming" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["texture_batch_workfile", "textures"] optional = False diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py index 84d9def895..dab160d537 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py @@ -1,7 +1,9 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateTextureBatchVersions(pyblish.api.InstancePlugin): @@ -14,7 +16,7 @@ class ValidateTextureBatchVersions(pyblish.api.InstancePlugin): """ label = "Validate Texture Batch Versions" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["textures"] optional = False diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py index fa492a80d8..56ea82f6b6 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py @@ -1,7 +1,9 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin): @@ -12,7 +14,7 @@ class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin): label = "Validate Texture Workfile Has Resources" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["texture_batch_workfile"] optional = True diff --git a/openpype/hosts/testhost/README.md b/openpype/hosts/testhost/README.md deleted file mode 100644 index f69e02a3b3..0000000000 --- a/openpype/hosts/testhost/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# What is `testhost` -Host `testhost` was created to fake running host for testing of publisher. - -Does not have any proper launch mechanism at the moment. There is python script `./run_publish.py` which will show publisher window. The script requires to set few variables to run. Execution will register host `testhost`, register global publish plugins and register creator and publish plugins from `./plugins`. - -## Data -Created instances and context data are stored into json files inside `./api` folder. Can be easily modified to save them to a different place. - -## Plugins -Test host has few plugins to be able test publishing. - -### Creators -They are just example plugins using functions from `api` to create/remove/update data. One of them is auto creator which means that is triggered on each reset of create context. Others are manual creators both creating the same family. 
- -### Publishers -Collectors are example plugin to use `get_attribute_defs` to define attributes for specific families or for context. Validators are to test `PublishValidationError`. diff --git a/openpype/hosts/testhost/api/__init__.py b/openpype/hosts/testhost/api/__init__.py deleted file mode 100644 index a929a891aa..0000000000 --- a/openpype/hosts/testhost/api/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -import logging -import pyblish.api - -from openpype.pipeline import register_creator_plugin_path - -from .pipeline import ( - ls, - list_instances, - update_instances, - remove_instances, - get_context_data, - update_context_data, - get_context_title -) - - -HOST_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") - -log = logging.getLogger(__name__) - - -def install(): - log.info("OpenPype - Installing TestHost integration") - pyblish.api.register_host("testhost") - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_creator_plugin_path(CREATE_PATH) - - -__all__ = ( - "ls", - "list_instances", - "update_instances", - "remove_instances", - "get_context_data", - "update_context_data", - "get_context_title", - - "install" -) diff --git a/openpype/hosts/testhost/api/context.json b/openpype/hosts/testhost/api/context.json deleted file mode 100644 index 0967ef424b..0000000000 --- a/openpype/hosts/testhost/api/context.json +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/openpype/hosts/testhost/api/instances.json b/openpype/hosts/testhost/api/instances.json deleted file mode 100644 index d955012514..0000000000 --- a/openpype/hosts/testhost/api/instances.json +++ /dev/null @@ -1,108 +0,0 @@ -[ - { - "id": "pyblish.avalon.instance", - "active": true, - "family": "test", - "subset": "testMyVariant", - "version": 1, - "asset": "sq01_sh0010", - "task": "Compositing", - "variant": "myVariant", - "instance_id": "a485f148-9121-46a5-8157-aa64df0fb449", - "creator_attributes": { - "number_key": 10, - "ha": 10 - }, - "publish_attributes": { - "CollectFtrackApi": { - "add_ftrack_family": false - } - }, - "creator_identifier": "test_one" - }, - { - "id": "pyblish.avalon.instance", - "active": true, - "family": "test", - "subset": "testMyVariant2", - "version": 1, - "asset": "sq01_sh0010", - "task": "Compositing", - "variant": "myVariant2", - "creator_attributes": {}, - "instance_id": "a485f148-9121-46a5-8157-aa64df0fb444", - "publish_attributes": { - "CollectFtrackApi": { - "add_ftrack_family": true - } - }, - "creator_identifier": "test_one" - }, - { - "id": "pyblish.avalon.instance", - "active": true, - "family": "test", - "subset": "testMain", - "version": 1, - "asset": "sq01_sh0010", - "task": "Compositing", - "variant": "Main", - "creator_attributes": {}, - "instance_id": "3607bc95-75f6-4648-a58d-e699f413d09f", - "publish_attributes": { - "CollectFtrackApi": { - "add_ftrack_family": true - } - }, - "creator_identifier": "test_two" - }, - { - "id": "pyblish.avalon.instance", - "active": true, - "family": "test", - "subset": "testMain2", - "version": 1, - "asset": "sq01_sh0020", - "task": "Compositing", - "variant": "Main2", - "instance_id": "4ccf56f6-9982-4837-967c-a49695dbe8eb", - "creator_attributes": {}, - "publish_attributes": { - "CollectFtrackApi": { - "add_ftrack_family": true - } - }, - "creator_identifier": "test_two" - }, - { - "id": "pyblish.avalon.instance", - "family": "test_three", - "subset": 
"test_threeMain2", - "active": true, - "version": 1, - "asset": "sq01_sh0020", - "task": "Compositing", - "variant": "Main2", - "instance_id": "4ccf56f6-9982-4837-967c-a49695dbe8ec", - "creator_attributes": {}, - "publish_attributes": { - "CollectFtrackApi": { - "add_ftrack_family": true - } - } - }, - { - "id": "pyblish.avalon.instance", - "family": "workfile", - "subset": "workfileMain", - "active": true, - "creator_identifier": "workfile", - "version": 1, - "asset": "Alpaca_01", - "task": "modeling", - "variant": "Main", - "instance_id": "7c9ddfc7-9f9c-4c1c-b233-38c966735fb6", - "creator_attributes": {}, - "publish_attributes": {} - } -] \ No newline at end of file diff --git a/openpype/hosts/testhost/api/pipeline.py b/openpype/hosts/testhost/api/pipeline.py deleted file mode 100644 index 285fe8f8d6..0000000000 --- a/openpype/hosts/testhost/api/pipeline.py +++ /dev/null @@ -1,155 +0,0 @@ -import os -import json -from openpype.pipeline import legacy_io - - -class HostContext: - instances_json_path = None - context_json_path = None - - @classmethod - def get_context_title(cls): - project_name = os.environ.get("AVALON_PROJECT") - if not project_name: - return "TestHost" - - asset_name = os.environ.get("AVALON_ASSET") - if not asset_name: - return project_name - - asset_doc = legacy_io.find_one( - {"type": "asset", "name": asset_name}, - {"data.parents": 1} - ) - parents = asset_doc.get("data", {}).get("parents") or [] - - hierarchy = [project_name] - hierarchy.extend(parents) - hierarchy.append("{}".format(asset_name)) - task_name = os.environ.get("AVALON_TASK") - if task_name: - hierarchy.append(task_name) - - return "/".join(hierarchy) - - @classmethod - def get_current_dir_filepath(cls, filename): - return os.path.join( - os.path.dirname(os.path.abspath(__file__)), - filename - ) - - @classmethod - def get_instances_json_path(cls): - if cls.instances_json_path is None: - cls.instances_json_path = cls.get_current_dir_filepath( - "instances.json" - ) - return cls.instances_json_path - - @classmethod - def get_context_json_path(cls): - if cls.context_json_path is None: - cls.context_json_path = cls.get_current_dir_filepath( - "context.json" - ) - return cls.context_json_path - - @classmethod - def add_instance(cls, instance): - instances = cls.get_instances() - instances.append(instance) - cls.save_instances(instances) - - @classmethod - def save_instances(cls, instances): - json_path = cls.get_instances_json_path() - with open(json_path, "w") as json_stream: - json.dump(instances, json_stream, indent=4) - - @classmethod - def get_instances(cls): - json_path = cls.get_instances_json_path() - if not os.path.exists(json_path): - instances = [] - with open(json_path, "w") as json_stream: - json.dump(json_stream, instances) - else: - with open(json_path, "r") as json_stream: - instances = json.load(json_stream) - return instances - - @classmethod - def get_context_data(cls): - json_path = cls.get_context_json_path() - if not os.path.exists(json_path): - data = {} - with open(json_path, "w") as json_stream: - json.dump(data, json_stream) - else: - with open(json_path, "r") as json_stream: - data = json.load(json_stream) - return data - - @classmethod - def save_context_data(cls, data): - json_path = cls.get_context_json_path() - with open(json_path, "w") as json_stream: - json.dump(data, json_stream, indent=4) - - -def ls(): - return [] - - -def list_instances(): - return HostContext.get_instances() - - -def update_instances(update_list): - updated_instances = {} - for instance, _changes in 
update_list: - updated_instances[instance.id] = instance.data_to_store() - - instances = HostContext.get_instances() - for instance_data in instances: - instance_id = instance_data["instance_id"] - if instance_id in updated_instances: - new_instance_data = updated_instances[instance_id] - old_keys = set(instance_data.keys()) - new_keys = set(new_instance_data.keys()) - instance_data.update(new_instance_data) - for key in (old_keys - new_keys): - instance_data.pop(key) - - HostContext.save_instances(instances) - - -def remove_instances(instances): - if not isinstance(instances, (tuple, list)): - instances = [instances] - - current_instances = HostContext.get_instances() - for instance in instances: - instance_id = instance.data["instance_id"] - found_idx = None - for idx, _instance in enumerate(current_instances): - if instance_id == _instance["instance_id"]: - found_idx = idx - break - - if found_idx is not None: - current_instances.pop(found_idx) - HostContext.save_instances(current_instances) - - -def get_context_data(): - return HostContext.get_context_data() - - -def update_context_data(data, changes): - HostContext.save_context_data(data) - - -def get_context_title(): - return HostContext.get_context_title() diff --git a/openpype/hosts/testhost/plugins/create/auto_creator.py b/openpype/hosts/testhost/plugins/create/auto_creator.py deleted file mode 100644 index 06b95375b1..0000000000 --- a/openpype/hosts/testhost/plugins/create/auto_creator.py +++ /dev/null @@ -1,80 +0,0 @@ -from openpype.lib import NumberDef -from openpype.hosts.testhost.api import pipeline -from openpype.pipeline import ( - legacy_io, - AutoCreator, - CreatedInstance, -) - - -class MyAutoCreator(AutoCreator): - identifier = "workfile" - family = "workfile" - - def get_instance_attr_defs(self): - output = [ - NumberDef("number_key", label="Number") - ] - return output - - def collect_instances(self): - for instance_data in pipeline.list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - subset_name = instance_data["subset"] - instance = CreatedInstance( - self.family, subset_name, instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - pipeline.update_instances(update_list) - - def create(self): - existing_instance = None - for instance in self.create_context.instances: - if instance.family == self.family: - existing_instance = instance - break - - variant = "Main" - project_name = legacy_io.Session["AVALON_PROJECT"] - asset_name = legacy_io.Session["AVALON_ASSET"] - task_name = legacy_io.Session["AVALON_TASK"] - host_name = legacy_io.Session["AVALON_APP"] - - if existing_instance is None: - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) - subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name - ) - data = { - "asset": asset_name, - "task": task_name, - "variant": variant - } - data.update(self.get_dynamic_data( - variant, task_name, asset_doc, project_name, host_name - )) - - new_instance = CreatedInstance( - self.family, subset_name, data, self - ) - self._add_instance_to_context(new_instance) - - elif ( - existing_instance["asset"] != asset_name - or existing_instance["task"] != task_name - ): - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) - subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name - ) - existing_instance["asset"] = asset_name - 
existing_instance["task"] = task_name diff --git a/openpype/hosts/testhost/plugins/create/test_creator_1.py b/openpype/hosts/testhost/plugins/create/test_creator_1.py deleted file mode 100644 index 7664276fa2..0000000000 --- a/openpype/hosts/testhost/plugins/create/test_creator_1.py +++ /dev/null @@ -1,94 +0,0 @@ -import json -from openpype import resources -from openpype.hosts.testhost.api import pipeline -from openpype.lib import ( - UISeparatorDef, - UILabelDef, - BoolDef, - NumberDef, - FileDef, -) -from openpype.pipeline import ( - Creator, - CreatedInstance, -) - - -class TestCreatorOne(Creator): - identifier = "test_one" - label = "test" - family = "test" - description = "Testing creator of testhost" - - create_allow_context_change = False - - def get_icon(self): - return resources.get_openpype_splash_filepath() - - def collect_instances(self): - for instance_data in pipeline.list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - pipeline.update_instances(update_list) - - def remove_instances(self, instances): - pipeline.remove_instances(instances) - for instance in instances: - self._remove_instance_from_context(instance) - - def create(self, subset_name, data, pre_create_data): - print("Data that can be used in create:\n{}".format( - json.dumps(pre_create_data, indent=4) - )) - new_instance = CreatedInstance(self.family, subset_name, data, self) - pipeline.HostContext.add_instance(new_instance.data_to_store()) - self.log.info(new_instance.data) - self._add_instance_to_context(new_instance) - - def get_default_variants(self): - return [ - "myVariant", - "variantTwo", - "different_variant" - ] - - def get_instance_attr_defs(self): - output = [ - NumberDef("number_key", label="Number"), - ] - return output - - def get_pre_create_attr_defs(self): - output = [ - BoolDef("use_selection", label="Use selection"), - UISeparatorDef(), - UILabelDef("Testing label"), - FileDef("filepath", folders=True, label="Filepath"), - FileDef( - "filepath_2", multipath=True, folders=True, label="Filepath 2" - ) - ] - return output - - def get_detail_description(self): - return """# Relictus funes est Nyseides currusque nunc oblita - -## Causa sed - -Lorem markdownum posito consumptis, *plebe Amorque*, abstitimus rogatus fictaque -gladium Circe, nos? Bos aeternum quae. Utque me, si aliquem cladis, et vestigia -arbor, sic mea ferre lacrimae agantur prospiciens hactenus. Amanti dentes pete, -vos quid laudemque rastrorumque terras in gratantibus **radix** erat cedemus? - -Pudor tu ponderibus verbaque illa; ire ergo iam Venus patris certe longae -cruentum lecta, et quaeque. Sit doce nox. Anteit ad tempora magni plenaque et -videres mersit sibique auctor in tendunt mittit cunctos ventisque gravitate -volucris quemquam Aeneaden. Pectore Mensis somnus; pectora -[ferunt](http://www.mox.org/oculosbracchia)? Fertilitatis bella dulce et suum? 
- """ diff --git a/openpype/hosts/testhost/plugins/create/test_creator_2.py b/openpype/hosts/testhost/plugins/create/test_creator_2.py deleted file mode 100644 index f54adee8a2..0000000000 --- a/openpype/hosts/testhost/plugins/create/test_creator_2.py +++ /dev/null @@ -1,74 +0,0 @@ -from openpype.lib import NumberDef, TextDef -from openpype.hosts.testhost.api import pipeline -from openpype.pipeline import ( - Creator, - CreatedInstance, -) - - -class TestCreatorTwo(Creator): - identifier = "test_two" - label = "test" - family = "test" - description = "A second testing creator" - - def get_icon(self): - return "cube" - - def create(self, subset_name, data, pre_create_data): - new_instance = CreatedInstance(self.family, subset_name, data, self) - pipeline.HostContext.add_instance(new_instance.data_to_store()) - self.log.info(new_instance.data) - self._add_instance_to_context(new_instance) - - def collect_instances(self): - for instance_data in pipeline.list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - pipeline.update_instances(update_list) - - def remove_instances(self, instances): - pipeline.remove_instances(instances) - for instance in instances: - self._remove_instance_from_context(instance) - - def get_instance_attr_defs(self): - output = [ - NumberDef("number_key"), - TextDef("text_key") - ] - return output - - def get_detail_description(self): - return """# Lorem ipsum, dolor sit amet. [![Awesome](https://cdn.rawgit.com/sindresorhus/awesome/d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)](https://github.com/sindresorhus/awesome) - -> A curated list of awesome lorem ipsum generators. - -Inspired by the [awesome](https://github.com/sindresorhus/awesome) list thing. - - -## Table of Contents - -- [Legend](#legend) -- [Practical](#briefcase-practical) -- [Whimsical](#roller_coaster-whimsical) - - [Animals](#rabbit-animals) - - [Eras](#tophat-eras) - - [Famous Individuals](#sunglasses-famous-individuals) - - [Music](#microphone-music) - - [Food and Drink](#pizza-food-and-drink) - - [Geographic and Dialects](#earth_africa-geographic-and-dialects) - - [Literature](#books-literature) - - [Miscellaneous](#cyclone-miscellaneous) - - [Sports and Fitness](#bicyclist-sports-and-fitness) - - [TV and Film](#movie_camera-tv-and-film) -- [Tools, Apps, and Extensions](#wrench-tools-apps-and-extensions) -- [Contribute](#contribute) -- [TODO](#todo) -""" diff --git a/openpype/hosts/testhost/plugins/publish/collect_context.py b/openpype/hosts/testhost/plugins/publish/collect_context.py deleted file mode 100644 index 0ab98fb84b..0000000000 --- a/openpype/hosts/testhost/plugins/publish/collect_context.py +++ /dev/null @@ -1,34 +0,0 @@ -import pyblish.api - -from openpype.pipeline import ( - OpenPypePyblishPluginMixin, - attribute_definitions -) - - -class CollectContextDataTestHost( - pyblish.api.ContextPlugin, OpenPypePyblishPluginMixin -): - """ - Collecting temp json data sent from a host context - and path for returning json data back to hostself. 
- """ - - label = "Collect Source - Test Host" - order = pyblish.api.CollectorOrder - 0.4 - hosts = ["testhost"] - - @classmethod - def get_attribute_defs(cls): - return [ - attribute_definitions.BoolDef( - "test_bool", - True, - label="Bool input" - ) - ] - - def process(self, context): - # get json paths from os and load them - for instance in context: - instance.data["source"] = "testhost" diff --git a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py b/openpype/hosts/testhost/plugins/publish/collect_instance_1.py deleted file mode 100644 index c7241a15a8..0000000000 --- a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py +++ /dev/null @@ -1,52 +0,0 @@ -import json -import pyblish.api - -from openpype.lib import attribute_definitions -from openpype.pipeline import OpenPypePyblishPluginMixin - - -class CollectInstanceOneTestHost( - pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin -): - """ - Collecting temp json data sent from a host context - and path for returning json data back to hostself. - """ - - label = "Collect Instance 1 - Test Host" - order = pyblish.api.CollectorOrder - 0.3 - hosts = ["testhost"] - - @classmethod - def get_attribute_defs(cls): - return [ - attribute_definitions.NumberDef( - "version", - default=1, - minimum=1, - maximum=999, - decimals=0, - label="Version" - ) - ] - - def process(self, instance): - self._debug_log(instance) - - publish_attributes = instance.data.get("publish_attributes") - if not publish_attributes: - return - - values = publish_attributes.get(self.__class__.__name__) - if not values: - return - - instance.data["version"] = values["version"] - - def _debug_log(self, instance): - def _default_json(value): - return str(value) - - self.log.info( - json.dumps(instance.data, indent=4, default=_default_json) - ) diff --git a/openpype/hosts/testhost/plugins/publish/validate_context_with_error.py b/openpype/hosts/testhost/plugins/publish/validate_context_with_error.py deleted file mode 100644 index 46e996a569..0000000000 --- a/openpype/hosts/testhost/plugins/publish/validate_context_with_error.py +++ /dev/null @@ -1,57 +0,0 @@ -import pyblish.api -from openpype.pipeline import PublishValidationError - - -class ValidateInstanceAssetRepair(pyblish.api.Action): - """Repair the instance asset.""" - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - pass - - -description = """ -## Publish plugins - -### Validate Scene Settings - -#### Skip Resolution Check for Tasks - -Set regex pattern(s) to look for in a Task name to skip resolution check against values from DB. - -#### Skip Timeline Check for Tasks - -Set regex pattern(s) to look for in a Task name to skip `frameStart`, `frameEnd` check against values from DB. - -### AfterEffects Submit to Deadline - -* `Use Published scene` - Set to True (green) when Deadline should take published scene as a source instead of uploaded local one. -* `Priority` - priority of job on farm -* `Primary Pool` - here is list of pool fetched from server you can select from. -* `Secondary Pool` -* `Frames Per Task` - number of sequence division between individual tasks (chunks) -making one job on farm. -""" - - -class ValidateContextWithError(pyblish.api.ContextPlugin): - """Validate the instance asset is the current selected context asset. - - As it might happen that multiple worfiles are opened, switching - between them would mess with selected context. - In that case outputs might be output under wrong asset! 
- - Repair action will use Context asset value (from Workfiles or Launcher) - Closing and reopening with Workfiles will refresh Context value. - """ - - label = "Validate Context With Error" - hosts = ["testhost"] - actions = [ValidateInstanceAssetRepair] - order = pyblish.api.ValidatorOrder - - def process(self, context): - raise PublishValidationError("Crashing", "Context error", description) diff --git a/openpype/hosts/testhost/plugins/publish/validate_with_error.py b/openpype/hosts/testhost/plugins/publish/validate_with_error.py deleted file mode 100644 index 5a2888a8b0..0000000000 --- a/openpype/hosts/testhost/plugins/publish/validate_with_error.py +++ /dev/null @@ -1,57 +0,0 @@ -import pyblish.api -from openpype.pipeline import PublishValidationError - - -class ValidateInstanceAssetRepair(pyblish.api.Action): - """Repair the instance asset.""" - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - pass - - -description = """ -## Publish plugins - -### Validate Scene Settings - -#### Skip Resolution Check for Tasks - -Set regex pattern(s) to look for in a Task name to skip resolution check against values from DB. - -#### Skip Timeline Check for Tasks - -Set regex pattern(s) to look for in a Task name to skip `frameStart`, `frameEnd` check against values from DB. - -### AfterEffects Submit to Deadline - -* `Use Published scene` - Set to True (green) when Deadline should take published scene as a source instead of uploaded local one. -* `Priority` - priority of job on farm -* `Primary Pool` - here is list of pool fetched from server you can select from. -* `Secondary Pool` -* `Frames Per Task` - number of sequence division between individual tasks (chunks) -making one job on farm. -""" - - -class ValidateWithError(pyblish.api.InstancePlugin): - """Validate the instance asset is the current selected context asset. - - As it might happen that multiple worfiles are opened, switching - between them would mess with selected context. - In that case outputs might be output under wrong asset! - - Repair action will use Context asset value (from Workfiles or Launcher) - Closing and reopening with Workfiles will refresh Context value. 
- """ - - label = "Validate With Error" - hosts = ["testhost"] - actions = [ValidateInstanceAssetRepair] - order = pyblish.api.ValidatorOrder - - def process(self, instance): - raise PublishValidationError("Crashing", "Instance error", description) diff --git a/openpype/hosts/testhost/run_publish.py b/openpype/hosts/testhost/run_publish.py deleted file mode 100644 index c7ad63aafd..0000000000 --- a/openpype/hosts/testhost/run_publish.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import sys - -mongo_url = "" -project_name = "" -asset_name = "" -task_name = "" -ftrack_url = "" -ftrack_username = "" -ftrack_api_key = "" - - -def multi_dirname(path, times=1): - for _ in range(times): - path = os.path.dirname(path) - return path - - -host_name = "testhost" -current_file = os.path.abspath(__file__) -openpype_dir = multi_dirname(current_file, 4) - -os.environ["OPENPYPE_MONGO"] = mongo_url -os.environ["OPENPYPE_ROOT"] = openpype_dir -os.environ["AVALON_PROJECT"] = project_name -os.environ["AVALON_ASSET"] = asset_name -os.environ["AVALON_TASK"] = task_name -os.environ["AVALON_APP"] = host_name -os.environ["OPENPYPE_DATABASE_NAME"] = "openpype" -os.environ["AVALON_TIMEOUT"] = "1000" -os.environ["AVALON_DB"] = "avalon" -os.environ["FTRACK_SERVER"] = ftrack_url -os.environ["FTRACK_API_USER"] = ftrack_username -os.environ["FTRACK_API_KEY"] = ftrack_api_key -for path in [ - openpype_dir, - r"{}\repos\avalon-core".format(openpype_dir), - r"{}\.venv\Lib\site-packages".format(openpype_dir) -]: - sys.path.append(path) - -from Qt import QtWidgets, QtCore - -from openpype.tools.publisher.window import PublisherWindow - - -def main(): - """Main function for testing purposes.""" - import pyblish.api - from openpype.pipeline import install_host - from openpype.modules import ModulesManager - from openpype.hosts.testhost import api as testhost - - manager = ModulesManager() - for plugin_path in manager.collect_plugin_paths()["publish"]: - pyblish.api.register_plugin_path(plugin_path) - - install_host(testhost) - - QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling) - app = QtWidgets.QApplication([]) - window = PublisherWindow() - window.show() - app.exec_() - - -if __name__ == "__main__": - main() diff --git a/openpype/hosts/traypublisher/__init__.py b/openpype/hosts/traypublisher/__init__.py new file mode 100644 index 0000000000..77ba908ddd --- /dev/null +++ b/openpype/hosts/traypublisher/__init__.py @@ -0,0 +1,6 @@ +from .addon import TrayPublishAddon + + +__all__ = ( + "TrayPublishAddon", +) diff --git a/openpype/modules/traypublish_action.py b/openpype/hosts/traypublisher/addon.py similarity index 56% rename from openpype/modules/traypublish_action.py rename to openpype/hosts/traypublisher/addon.py index 39163b8eb8..c157799898 100644 --- a/openpype/modules/traypublish_action.py +++ b/openpype/hosts/traypublisher/addon.py @@ -1,25 +1,23 @@ import os + +import click + from openpype.lib import get_openpype_execute_args from openpype.lib.execute import run_detached_process -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import OpenPypeModule, ITrayAction, IHostAddon + +TRAYPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) -class TrayPublishAction(OpenPypeModule, ITrayAction): +class TrayPublishAddon(OpenPypeModule, IHostAddon, ITrayAction): label = "New Publish (beta)" - name = "traypublish_tool" + name = "traypublisher" + host_name = "traypublisher" def initialize(self, modules_settings): - import openpype 
self.enabled = True self.publish_paths = [ - os.path.join( - openpype.PACKAGE_DIR, - "hosts", - "traypublisher", - "plugins", - "publish" - ) + os.path.join(TRAYPUBLISH_ROOT_DIR, "plugins", "publish") ] self._experimental_tools = None @@ -29,7 +27,7 @@ class TrayPublishAction(OpenPypeModule, ITrayAction): self._experimental_tools = ExperimentalTools() def tray_menu(self, *args, **kwargs): - super(TrayPublishAction, self).tray_menu(*args, **kwargs) + super(TrayPublishAddon, self).tray_menu(*args, **kwargs) traypublisher = self._experimental_tools.get("traypublisher") visible = False if traypublisher and traypublisher.enabled: @@ -45,5 +43,24 @@ class TrayPublishAction(OpenPypeModule, ITrayAction): self.publish_paths.extend(publish_paths) def run_traypublisher(self): - args = get_openpype_execute_args("traypublisher") + args = get_openpype_execute_args( + "module", self.name, "launch" + ) run_detached_process(args) + + def cli(self, click_group): + click_group.add_command(cli_main) + + +@click.group(TrayPublishAddon.name, help="TrayPublisher related commands.") +def cli_main(): + pass + + +@cli_main.command() +def launch(): + """Launch TrayPublish tool UI.""" + + from openpype.tools import traypublisher + + traypublisher.main() diff --git a/openpype/hosts/traypublisher/api/__init__.py b/openpype/hosts/traypublisher/api/__init__.py index c461c0c526..4e7284b09a 100644 --- a/openpype/hosts/traypublisher/api/__init__.py +++ b/openpype/hosts/traypublisher/api/__init__.py @@ -1,20 +1,8 @@ from .pipeline import ( - install, - ls, - - set_project_name, - get_context_title, - get_context_data, - update_context_data, + TrayPublisherHost, ) __all__ = ( - "install", - "ls", - - "set_project_name", - "get_context_title", - "get_context_data", - "update_context_data", + "TrayPublisherHost", ) diff --git a/openpype/hosts/traypublisher/api/editorial.py b/openpype/hosts/traypublisher/api/editorial.py new file mode 100644 index 0000000000..7c392ef508 --- /dev/null +++ b/openpype/hosts/traypublisher/api/editorial.py @@ -0,0 +1,331 @@ +import re +from copy import deepcopy + +from openpype.client import get_asset_by_id +from openpype.pipeline.create import CreatorError + + +class ShotMetadataSolver: + """ Solving hierarchical metadata + + Used during editorial publishing. Works with imput + clip name and settings defining python formatable + template. Settings also define searching patterns + and its token keys used for formating in templates. + """ + + NO_DECOR_PATERN = re.compile(r"\{([a-z]*?)\}") + + # presets + clip_name_tokenizer = None + shot_rename = True + shot_hierarchy = None + shot_add_tasks = None + + def __init__( + self, + clip_name_tokenizer, + shot_rename, + shot_hierarchy, + shot_add_tasks, + logger + ): + self.clip_name_tokenizer = clip_name_tokenizer + self.shot_rename = shot_rename + self.shot_hierarchy = shot_hierarchy + self.shot_add_tasks = shot_add_tasks + self.log = logger + + def _rename_template(self, data): + """Shot renaming function + + Args: + data (dict): formating data + + Raises: + CreatorError: If missing keys + + Returns: + str: formated new name + """ + shot_rename_template = self.shot_rename[ + "shot_rename_template"] + try: + # format to new shot name + return shot_rename_template.format(**data) + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct:: \n\n" + f"From template string {shot_rename_template} > " + f"`{_E}` has no equivalent in \n" + f"{list(data.keys())} input formating keys!" 
+ )) + + def _generate_tokens(self, clip_name, source_data): + """Token generator + + Settings define token pairs of key and regex expression. + + Args: + clip_name (str): name of clip in editorial + source_data (dict): data for formatting + + Raises: + CreatorError: if missing key + + Returns: + dict: updated source_data + """ + output_data = deepcopy(source_data["anatomy_data"]) + output_data["clip_name"] = clip_name + + if not self.clip_name_tokenizer: + return output_data + + parent_name = source_data["selected_asset_doc"]["name"] + + search_text = parent_name + clip_name + + for token_key, pattern in self.clip_name_tokenizer.items(): + p = re.compile(pattern) + match = p.findall(search_text) + if not match: + raise CreatorError(( + "Make sure regex expression works with your data: \n\n" + f"'{token_key}' with regex '{pattern}' in your settings\n" + "can't find any match in your clip name " + f"'{search_text}'!\n\nLook to: " + "'project_settings/traypublisher/editorial_creators" + "/editorial_simple/clip_name_tokenizer'\n" + "at your project settings..." + )) + + # QUESTION: how to refactor `match[-1]` in some better way? + output_data[token_key] = match[-1] + + return output_data + + def _create_parents_from_settings(self, parents, data): + """Formatting parent components. + + Args: + parents (list): list of dict parent components + data (dict): formatting data + + Raises: + CreatorError: missing formatting key + CreatorError: missing token key + KeyError: missing parent token + + Returns: + list: list of dict of parent components + """ + # fill the parents parts from presets + shot_hierarchy = deepcopy(self.shot_hierarchy) + hierarchy_parents = shot_hierarchy["parents"] + + # fill parent keys data template from anatomy data + try: + _parent_tokens_formating_data = { + parent_token["name"]: parent_token["value"].format(**data) + for parent_token in hierarchy_parents + } + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct : \n" + f"`{_E}` has no equivalent in \n{list(data.keys())}" + )) + + _parent_tokens_type = { + parent_token["name"]: parent_token["type"] + for parent_token in hierarchy_parents + } + for _index, _parent in enumerate( + shot_hierarchy["parents_path"].split("/") + ): + # format parent token with value which is formatted + try: + parent_name = _parent.format( + **_parent_tokens_formating_data) + except KeyError as _E: + raise CreatorError(( + "Make sure all keys in settings are correct : \n\n" + f"`{_E}` from template string " + f"{shot_hierarchy['parents_path']}, " + f" has no equivalent in \n" + f"{list(_parent_tokens_formating_data.keys())} parents" + )) + + parent_token_name = ( + self.NO_DECOR_PATERN.findall(_parent).pop()) + + if not parent_token_name: + raise KeyError( + f"Parent token is not found in: `{_parent}`") + + # find parent type + parent_token_type = _parent_tokens_type[parent_token_name] + + # in case selected context is set to the same asset + if ( + _index == 0 + and parents[-1]["entity_name"] == parent_name + ): + self.log.debug(f" skipping : {parent_name}") + continue + + # in case first parent is project then start parents from start + if ( + _index == 0 + and parent_token_type == "Project" + ): + self.log.debug("rebuilding parents from scratch") + project_parent = parents[0] + parents = [project_parent] + continue + + parents.append({ + "entity_type": parent_token_type, + "entity_name": parent_name + }) + + self.log.debug(f"__ parents: {parents}") + + return parents + + def _create_hierarchy_path(self, 
parents): + """Building hierarchy path from parents + + Args: + parents (list): list of dict parent components + + Returns: + str: hierarchy path + """ + return "/".join( + [ + p["entity_name"] for p in parents + if p["entity_type"] != "Project" + ] + ) if parents else "" + + def _get_parents_from_selected_asset( + self, + asset_doc, + project_doc + ): + """Returning parents from context on selected asset. + + Context defined in Traypublisher project tree. + + Args: + asset_doc (db obj): selected asset doc + project_doc (db obj): actual project doc + + Returns: + list: list of dict parent components + """ + project_name = project_doc["name"] + visual_hierarchy = [asset_doc] + current_doc = asset_doc + + # looping through all available visual parents + # if they are not available anymore then it breaks + while True: + visual_parent_id = current_doc["data"]["visualParent"] + visual_parent = None + if visual_parent_id: + visual_parent = get_asset_by_id(project_name, visual_parent_id) + + if not visual_parent: + visual_hierarchy.append(project_doc) + break + visual_hierarchy.append(visual_parent) + current_doc = visual_parent + + # add current selection context hierarchy + return [ + { + "entity_type": entity["data"]["entityType"], + "entity_name": entity["name"] + } + for entity in reversed(visual_hierarchy) + ] + + def _generate_tasks_from_settings(self, project_doc): + """Convert settings inputs to task data. + + Args: + project_doc (db obj): actual project doc + + Raises: + KeyError: Missing task type in project doc + + Returns: + dict: tasks data + """ + tasks_to_add = {} + + project_tasks = project_doc["config"]["tasks"] + for task_name, task_data in self.shot_add_tasks.items(): + _task_data = deepcopy(task_data) + + # check if task type in project task types + if _task_data["type"] in project_tasks.keys(): + tasks_to_add[task_name] = _task_data + else: + raise KeyError( + "Task type `{}` for `{}` does not" + " exist in `{}`".format( + _task_data["type"], + task_name, + list(project_tasks.keys()) + ) + ) + + return tasks_to_add + + def generate_data(self, clip_name, source_data): + """Metadata generator. + + Converts input data to hierarchy metadata. 
+ + Args: + clip_name (str): clip name + source_data (dict): formating data + + Returns: + (str, dict): shot name and hierarchy data + """ + self.log.info(f"_ source_data: {source_data}") + + tasks = {} + asset_doc = source_data["selected_asset_doc"] + project_doc = source_data["project_doc"] + + # match clip to shot name at start + shot_name = clip_name + + # parse all tokens and generate formating data + formating_data = self._generate_tokens(shot_name, source_data) + + # generate parents from selected asset + parents = self._get_parents_from_selected_asset(asset_doc, project_doc) + + if self.shot_rename["enabled"]: + shot_name = self._rename_template(formating_data) + self.log.info(f"Renamed shot name: {shot_name}") + + if self.shot_hierarchy["enabled"]: + parents = self._create_parents_from_settings( + parents, formating_data) + + if self.shot_add_tasks: + tasks = self._generate_tasks_from_settings( + project_doc) + + return shot_name, { + "hierarchy": self._create_hierarchy_path(parents), + "parents": parents, + "tasks": tasks + } diff --git a/openpype/hosts/traypublisher/api/pipeline.py b/openpype/hosts/traypublisher/api/pipeline.py index 954a0bae47..0a8ddaa343 100644 --- a/openpype/hosts/traypublisher/api/pipeline.py +++ b/openpype/hosts/traypublisher/api/pipeline.py @@ -9,6 +9,8 @@ from openpype.pipeline import ( register_creator_plugin_path, legacy_io, ) +from openpype.host import HostBase, IPublishHost + ROOT_DIR = os.path.dirname(os.path.dirname( os.path.abspath(__file__) @@ -17,6 +19,35 @@ PUBLISH_PATH = os.path.join(ROOT_DIR, "plugins", "publish") CREATE_PATH = os.path.join(ROOT_DIR, "plugins", "create") +class TrayPublisherHost(HostBase, IPublishHost): + name = "traypublisher" + + def install(self): + os.environ["AVALON_APP"] = self.name + legacy_io.Session["AVALON_APP"] = self.name + + pyblish.api.register_host("traypublisher") + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_creator_plugin_path(CREATE_PATH) + + def get_context_title(self): + return HostContext.get_project_name() + + def get_context_data(self): + return HostContext.get_context_data() + + def update_context_data(self, data, changes): + HostContext.save_context_data(data, changes) + + def set_project_name(self, project_name): + # TODO Deregister project specific plugins and register new project + # plugins + os.environ["AVALON_PROJECT"] = project_name + legacy_io.Session["AVALON_PROJECT"] = project_name + legacy_io.install() + HostContext.set_project_name(project_name) + + class HostContext: _context_json_path = None @@ -150,32 +181,3 @@ def get_context_data(): def update_context_data(data, changes): HostContext.save_context_data(data) - - -def get_context_title(): - return HostContext.get_project_name() - - -def ls(): - """Probably will never return loaded containers.""" - return [] - - -def install(): - """This is called before a project is known. - - Project is defined with 'set_project_name'. 
- """ - os.environ["AVALON_APP"] = "traypublisher" - - pyblish.api.register_host("traypublisher") - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_creator_plugin_path(CREATE_PATH) - - -def set_project_name(project_name): - # TODO Deregister project specific plugins and register new project plugins - os.environ["AVALON_PROJECT"] = project_name - legacy_io.Session["AVALON_PROJECT"] = project_name - legacy_io.install() - HostContext.set_project_name(project_name) diff --git a/openpype/hosts/traypublisher/api/plugin.py b/openpype/hosts/traypublisher/api/plugin.py index 603f34ee29..75930f0f31 100644 --- a/openpype/hosts/traypublisher/api/plugin.py +++ b/openpype/hosts/traypublisher/api/plugin.py @@ -1,9 +1,12 @@ -from openpype.pipeline import ( +from openpype.lib.attribute_definitions import FileDef +from openpype.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS +from openpype.pipeline.create import ( Creator, - CreatedInstance + HiddenCreator, + CreatedInstance, + cache_and_get_instances, + PRE_CREATE_THUMBNAIL_KEY, ) -from openpype.lib import FileDef - from .pipeline import ( list_instances, update_instances, @@ -11,18 +14,20 @@ from .pipeline import ( HostContext, ) +REVIEW_EXTENSIONS = set(IMAGE_EXTENSIONS) | set(VIDEO_EXTENSIONS) +SHARED_DATA_KEY = "openpype.traypublisher.instances" -class TrayPublishCreator(Creator): - create_allow_context_change = True + +class HiddenTrayPublishCreator(HiddenCreator): + host_name = "traypublisher" def collect_instances(self): - for instance_data in list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) + instances_by_identifier = cache_and_get_instances( + self, SHARED_DATA_KEY, list_instances + ) + for instance_data in instances_by_identifier[self.identifier]: + instance = CreatedInstance.from_existing(instance_data, self) + self._add_instance_to_context(instance) def update_instances(self, update_list): update_instances(update_list) @@ -32,47 +37,105 @@ class TrayPublishCreator(Creator): for instance in instances: self._remove_instance_from_context(instance) - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attrobites - return self.get_instance_attr_defs() + def _store_new_instance(self, new_instance): + """Tray publisher specific method to store instance. + Instance is stored into "workfile" of traypublisher and also add it + to CreateContext. -class SettingsCreator(TrayPublishCreator): - create_allow_context_change = True + Args: + new_instance (CreatedInstance): Instance that should be stored. 
+ """ - extensions = [] - - def collect_instances(self): - for instance_data in list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def create(self, subset_name, data, pre_create_data): - # Pass precreate data to creator attributes - data["creator_attributes"] = pre_create_data - data["settings_creator"] = True - # Create new instance - new_instance = CreatedInstance(self.family, subset_name, data, self) # Host implementation of storing metadata about instance HostContext.add_instance(new_instance.data_to_store()) # Add instance to current context self._add_instance_to_context(new_instance) + +class TrayPublishCreator(Creator): + create_allow_context_change = True + host_name = "traypublisher" + + def collect_instances(self): + instances_by_identifier = cache_and_get_instances( + self, SHARED_DATA_KEY, list_instances + ) + for instance_data in instances_by_identifier[self.identifier]: + instance = CreatedInstance.from_existing(instance_data, self) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + update_instances(update_list) + + def remove_instances(self, instances): + remove_instances(instances) + for instance in instances: + self._remove_instance_from_context(instance) + + def _store_new_instance(self, new_instance): + """Tray publisher specific method to store instance. + + Instance is stored into "workfile" of traypublisher and also add it + to CreateContext. + + Args: + new_instance (CreatedInstance): Instance that should be stored. + """ + + # Host implementation of storing metadata about instance + HostContext.add_instance(new_instance.data_to_store()) + new_instance.mark_as_stored() + + # Add instance to current context + self._add_instance_to_context(new_instance) + + +class SettingsCreator(TrayPublishCreator): + create_allow_context_change = True + create_allow_thumbnail = True + + extensions = [] + + def create(self, subset_name, data, pre_create_data): + # Pass precreate data to creator attributes + thumbnail_path = pre_create_data.pop(PRE_CREATE_THUMBNAIL_KEY, None) + + data["creator_attributes"] = pre_create_data + data["settings_creator"] = True + # Create new instance + new_instance = CreatedInstance(self.family, subset_name, data, self) + + self._store_new_instance(new_instance) + + if thumbnail_path: + self.set_instance_thumbnail_path(new_instance.id, thumbnail_path) + def get_instance_attr_defs(self): return [ FileDef( - "filepath", + "representation_files", folders=False, extensions=self.extensions, allow_sequences=self.allow_sequences, - label="Filepath", + single_item=not self.allow_multiple_items, + label="Representations", + ), + FileDef( + "reviewable", + folders=False, + extensions=REVIEW_EXTENSIONS, + allow_sequences=True, + single_item=True, + label="Reviewable representations", + extensions_label="Single reviewable item" ) ] + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attrobites + return self.get_instance_attr_defs() + @classmethod def from_settings(cls, item_data): identifier = item_data["identifier"] @@ -91,6 +154,7 @@ class SettingsCreator(TrayPublishCreator): "detailed_description": item_data["detailed_description"], "extensions": item_data["extensions"], "allow_sequences": item_data["allow_sequences"], + "allow_multiple_items": item_data["allow_multiple_items"], "default_variants": item_data["default_variants"] } 
) diff --git a/openpype/hosts/traypublisher/plugins/create/create_editorial.py b/openpype/hosts/traypublisher/plugins/create/create_editorial.py new file mode 100644 index 0000000000..28a115629e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/create/create_editorial.py @@ -0,0 +1,869 @@ +import os +from copy import deepcopy +from pprint import pformat +import opentimelineio as otio +from openpype.client import ( + get_asset_by_name, + get_project +) +from openpype.hosts.traypublisher.api.plugin import ( + TrayPublishCreator, + HiddenTrayPublishCreator +) +from openpype.hosts.traypublisher.api.editorial import ( + ShotMetadataSolver +) + +from openpype.pipeline import CreatedInstance + +from openpype.lib import ( + get_ffprobe_data, + convert_ffprobe_fps_value, + + FileDef, + TextDef, + NumberDef, + EnumDef, + BoolDef, + UISeparatorDef, + UILabelDef +) + + +CLIP_ATTR_DEFS = [ + EnumDef( + "fps", + items={ + "from_selection": "From selection", + 23.997: "23.976", + 24: "24", + 25: "25", + 29.97: "29.97", + 30: "30" + }, + label="FPS" + ), + NumberDef( + "workfile_start_frame", + default=1001, + label="Workfile start frame" + ), + NumberDef( + "handle_start", + default=0, + label="Handle start" + ), + NumberDef( + "handle_end", + default=0, + label="Handle end" + ) +] + + +class EditorialClipInstanceCreatorBase(HiddenTrayPublishCreator): + """ Wrapper class for clip family creators + + Args: + HiddenTrayPublishCreator (BaseCreator): hidden supporting class + """ + host_name = "traypublisher" + + def create(self, instance_data, source_data=None): + self.log.info(f"instance_data: {instance_data}") + subset_name = instance_data["subset"] + + # Create new instance + new_instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + self.log.info(f"instance_data: {pformat(new_instance.data)}") + + self._store_new_instance(new_instance) + + return new_instance + + def get_instance_attr_defs(self): + return [ + BoolDef( + "add_review_family", + default=True, + label="Review" + ) + ] + + +class EditorialShotInstanceCreator(EditorialClipInstanceCreatorBase): + """ Shot family class + + The shot metadata instance carrier. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_shot" + family = "shot" + label = "Editorial Shot" + + def get_instance_attr_defs(self): + attr_defs = [ + TextDef( + "asset_name", + label="Asset name", + ) + ] + attr_defs.extend(CLIP_ATTR_DEFS) + return attr_defs + + +class EditorialPlateInstanceCreator(EditorialClipInstanceCreatorBase): + """ Plate family class + + Plate representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_plate" + family = "plate" + label = "Editorial Plate" + + +class EditorialAudioInstanceCreator(EditorialClipInstanceCreatorBase): + """ Audio family class + + Audio representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_audio" + family = "audio" + label = "Editorial Audio" + + +class EditorialReviewInstanceCreator(EditorialClipInstanceCreatorBase): + """ Review family class + + Review representation instance. + + Args: + EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class + """ + identifier = "editorial_review" + family = "review" + label = "Editorial Review" + + +class EditorialSimpleCreator(TrayPublishCreator): + """ Editorial creator class + + Simple workflow creator. 
This creator only dissects the input + video file into clip chunks and then converts each to + the format defined in Settings for each subset preset. + + Args: + TrayPublishCreator (Creator): Tray publisher plugin class + """ + + label = "Editorial Simple" + family = "editorial" + identifier = "editorial_simple" + default_variants = [ + "main" + ] + description = "Editorial files to generate shots." + detailed_description = """ +Supports publishing new shots to project +or updating already created shots. Publishing will create an OTIO file. +""" + icon = "fa.file" + + def __init__( + self, project_settings, *args, **kwargs + ): + super(EditorialSimpleCreator, self).__init__( + project_settings, *args, **kwargs + ) + editorial_creators = deepcopy( + project_settings["traypublisher"]["editorial_creators"] + ) + # get this creator's settings by identifier + self._creator_settings = editorial_creators.get(self.identifier) + + clip_name_tokenizer = self._creator_settings["clip_name_tokenizer"] + shot_rename = self._creator_settings["shot_rename"] + shot_hierarchy = self._creator_settings["shot_hierarchy"] + shot_add_tasks = self._creator_settings["shot_add_tasks"] + + self._shot_metadata_solver = ShotMetadataSolver( + clip_name_tokenizer, + shot_rename, + shot_hierarchy, + shot_add_tasks, + self.log + ) + + # try to set main attributes from settings + if self._creator_settings.get("default_variants"): + self.default_variants = self._creator_settings["default_variants"] + + def create(self, subset_name, instance_data, pre_create_data): + allowed_family_presets = self._get_allowed_family_presets( + pre_create_data) + + clip_instance_properties = { + k: v for k, v in pre_create_data.items() + if k != "sequence_filepath_data" + if k not in [ + i["family"] for i in self._creator_settings["family_presets"] + ] + } + # Create otio editorial instance + asset_name = instance_data["asset"] + asset_doc = get_asset_by_name(self.project_name, asset_name) + + self.log.info(pre_create_data["fps"]) + + if pre_create_data["fps"] == "from_selection": + # get asset doc data attributes + fps = asset_doc["data"]["fps"] + else: + fps = float(pre_create_data["fps"]) + + instance_data.update({ + "fps": fps + }) + + # get path of sequence + sequence_path_data = pre_create_data["sequence_filepath_data"] + media_path_data = pre_create_data["media_filepaths_data"] + + sequence_path = self._get_path_from_file_data(sequence_path_data) + media_path = self._get_path_from_file_data(media_path_data) + + # get otio timeline + otio_timeline = self._create_otio_timeline( + sequence_path, fps) + + # Create all clip instances + clip_instance_properties.update({ + "fps": fps, + "parent_asset_name": asset_name, + "variant": instance_data["variant"] + }) + + # create clip instances + self._get_clip_instances( + otio_timeline, + media_path, + clip_instance_properties, + family_presets=allowed_family_presets + + ) + + # create otio editorial instance + self._create_otio_instance( + subset_name, instance_data, + sequence_path, media_path, + otio_timeline + ) + + def _create_otio_instance( + self, + subset_name, + data, + sequence_path, + media_path, + otio_timeline + ): + """Otio instance creating function + + Args: + subset_name (str): name of subset + data (dict): instance data + sequence_path (str): path to sequence file + media_path (str): path to media file + otio_timeline (otio.Timeline): otio timeline object + """ + # Pass precreate data to creator attributes + data.update({ + "sequenceFilePath": sequence_path, + "editorialSourcePath": 
media_path, + "otioTimeline": otio.adapters.write_to_string(otio_timeline) + }) + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._store_new_instance(new_instance) + + def _create_otio_timeline(self, sequence_path, fps): + """Creating otio timeline from sequence path + + Args: + sequence_path (str): path to sequence file + fps (float): frames per second + + Returns: + otio.Timeline: otio timeline object + """ + # get editorial sequence file into otio timeline object + extension = os.path.splitext(sequence_path)[1] + + kwargs = {} + if extension == ".edl": + # EDL has no frame rate embedded so needs explicit + # frame rate else 24 is assumed. + kwargs["rate"] = fps + kwargs["ignore_timecode_mismatch"] = True + + self.log.info(f"kwargs: {kwargs}") + return otio.adapters.read_from_file(sequence_path, **kwargs) + + def _get_path_from_file_data(self, file_path_data): + """Converting creator path data to single path string + + Args: + file_path_data (FileDefItem): creator path data inputs + + Raises: + FileExistsError: in case nothing had been set + + Returns: + str: path string + """ + # TODO: just temporarily solving only one media file + if isinstance(file_path_data, list): + file_path_data = file_path_data.pop() + + if len(file_path_data["filenames"]) == 0: + raise FileExistsError( + f"File path was not added: {file_path_data}") + + return os.path.join( + file_path_data["directory"], file_path_data["filenames"][0]) + + def _get_clip_instances( + self, + otio_timeline, + media_path, + instance_data, + family_presets + ): + """Helping function for creating clip instances + + Args: + otio_timeline (otio.Timeline): otio timeline object + media_path (str): media file path string + instance_data (dict): clip instance data + family_presets (list): list of dict settings subset presets + """ + self.asset_name_check = [] + + tracks = otio_timeline.each_child( + descended_from_type=otio.schema.Track + ) + + # media data for audio stream and reference solving + media_data = self._get_media_source_metadata(media_path) + + for track in tracks: + self.log.debug(f"track.name: {track.name}") + try: + track_start_frame = ( + abs(track.source_range.start_time.value) + ) + self.log.debug(f"track_start_frame: {track_start_frame}") + track_start_frame -= self.timeline_frame_start + except AttributeError: + track_start_frame = 0 + + self.log.debug(f"track_start_frame: {track_start_frame}") + + for clip in track.each_child(): + if not self._validate_clip_for_processing(clip): + continue + + # get available frames info to clip data + self._create_otio_reference(clip, media_path, media_data) + + # convert timeline range to source range + self._restore_otio_source_range(clip) + + base_instance_data = self._get_base_instance_data( + clip, + instance_data, + track_start_frame + ) + + parenting_data = { + "instance_label": None, + "instance_id": None + } + self.log.info(( + "Creating subsets from presets: \n" + f"{pformat(family_presets)}" + )) + + for _fpreset in family_presets: + # exclude audio family if no audio stream + if ( + _fpreset["family"] == "audio" + and not media_data.get("audio") + ): + continue + + instance = self._make_subset_instance( + clip, + _fpreset, + deepcopy(base_instance_data), + parenting_data + ) + self.log.debug(f"{pformat(dict(instance.data))}") + + def _restore_otio_source_range(self, otio_clip): + """Infusing source range. + + Otio clip is missing proper source clip range so + here we add it from the parent timeline frame range. 
+ + Args: + otio_clip (otio.Clip): otio clip object + """ + otio_clip.source_range = otio_clip.range_in_parent() + + def _create_otio_reference( + self, + otio_clip, + media_path, + media_data + ): + """Creating otio reference at otio clip. + + Args: + otio_clip (otio.Clip): otio clip object + media_path (str): media file path string + media_data (dict): media metadata + """ + start_frame = media_data["start_frame"] + frame_duration = media_data["duration"] + fps = media_data["fps"] + + available_range = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime( + start_frame, fps), + duration=otio.opentime.RationalTime( + frame_duration, fps) + ) + # in case old OTIO or video file create `ExternalReference` + media_reference = otio.schema.ExternalReference( + target_url=media_path, + available_range=available_range + ) + + otio_clip.media_reference = media_reference + + def _get_media_source_metadata(self, path): + """Get all available metadata from file + + Args: + path (str): media file path string + + Raises: + AssertionError: ffprobe couldn't read metadata + + Returns: + dict: media file metadata + """ + return_data = {} + + try: + media_data = get_ffprobe_data( + path, self.log + ) + self.log.debug(f"__ media_data: {pformat(media_data)}") + + # get video stream data + video_stream = media_data["streams"][0] + return_data = { + "video": True, + "start_frame": 0, + "duration": int(video_stream["nb_frames"]), + "fps": float( + convert_ffprobe_fps_value( + video_stream["r_frame_rate"] + ) + ) + } + + # get audio streams data + audio_stream = [ + stream for stream in media_data["streams"] + if stream["codec_type"] == "audio" + ] + + if audio_stream: + return_data["audio"] = True + + except Exception as exc: + raise AssertionError(( + "FFprobe couldn't read information about input file: " + f"\"{path}\". 
Error message: {exc}" + )) + + return return_data + + def _make_subset_instance( + self, + otio_clip, + preset, + instance_data, + parenting_data + ): + """Making subset instance from input preset + + Args: + otio_clip (otio.Clip): otio clip object + preset (dict): sigle family preset + instance_data (dict): instance data + parenting_data (dict): shot instance parent data + + Returns: + CreatedInstance: creator instance object + """ + family = preset["family"] + label = self._make_subset_naming( + preset, + instance_data + ) + instance_data["label"] = label + + # add file extension filter only if it is not shot family + if family == "shot": + instance_data["otioClip"] = ( + otio.adapters.write_to_string(otio_clip)) + c_instance = self.create_context.creators[ + "editorial_shot"].create( + instance_data) + parenting_data.update({ + "instance_label": label, + "instance_id": c_instance.data["instance_id"] + }) + else: + # add review family if defined + instance_data.update({ + "outputFileType": preset["output_file_type"], + "parent_instance_id": parenting_data["instance_id"], + "creator_attributes": { + "parent_instance": parenting_data["instance_label"], + "add_review_family": preset.get("review") + } + }) + + creator_identifier = f"editorial_{family}" + editorial_clip_creator = self.create_context.creators[ + creator_identifier] + c_instance = editorial_clip_creator.create( + instance_data) + + return c_instance + + def _make_subset_naming( + self, + preset, + instance_data + ): + """ Subset name maker + + Args: + preset (dict): single preset item + instance_data (dict): instance data + + Returns: + str: label string + """ + shot_name = instance_data["shotName"] + variant_name = instance_data["variant"] + family = preset["family"] + + # get variant name from preset or from inharitance + _variant_name = preset.get("variant") or variant_name + + self.log.debug(f"__ family: {family}") + self.log.debug(f"__ preset: {preset}") + + # subset name + subset_name = "{}{}".format( + family, _variant_name.capitalize() + ) + label = "{}_{}".format( + shot_name, + subset_name + ) + + instance_data.update({ + "family": family, + "label": label, + "variant": _variant_name, + "subset": subset_name, + }) + + return label + + def _get_base_instance_data( + self, + otio_clip, + instance_data, + track_start_frame, + ): + """ Factoring basic set of instance data. 
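# A quick sketch of the naming rule implemented by `_make_subset_naming`
# above; the family, variant and shot values are examples only.
family = "plate"
variant = "main"
shot_name = "sh010"

subset_name = "{}{}".format(family, variant.capitalize())  # -> "plateMain"
label = "{}_{}".format(shot_name, subset_name)             # -> "sh010_plateMain"
print(label)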
+ + Args: + otio_clip (otio.Clip): otio clip object + instance_data (dict): precreate instance data + track_start_frame (int): track start frame + + Returns: + dict: instance data + """ + # get clip instance properties + parent_asset_name = instance_data["parent_asset_name"] + handle_start = instance_data["handle_start"] + handle_end = instance_data["handle_end"] + timeline_offset = instance_data["timeline_offset"] + workfile_start_frame = instance_data["workfile_start_frame"] + fps = instance_data["fps"] + variant_name = instance_data["variant"] + + # basic unique asset name + clip_name = os.path.splitext(otio_clip.name)[0].lower() + project_doc = get_project(self.project_name) + + shot_name, shot_metadata = self._shot_metadata_solver.generate_data( + clip_name, + { + "anatomy_data": { + "project": { + "name": self.project_name, + "code": project_doc["data"]["code"] + }, + "parent": parent_asset_name, + "app": self.host_name + }, + "selected_asset_doc": get_asset_by_name( + self.project_name, parent_asset_name), + "project_doc": project_doc + } + ) + + self._validate_name_uniqueness(shot_name) + + timing_data = self._get_timing_data( + otio_clip, + timeline_offset, + track_start_frame, + workfile_start_frame + ) + + # create creator attributes + creator_attributes = { + "asset_name": shot_name, + "Parent hierarchy path": shot_metadata["hierarchy"], + "workfile_start_frame": workfile_start_frame, + "fps": fps, + "handle_start": int(handle_start), + "handle_end": int(handle_end) + } + creator_attributes.update(timing_data) + + # create shared new instance data + base_instance_data = { + "shotName": shot_name, + "variant": variant_name, + + # HACK: just for temporal bug workaround + # TODO: should loockup shot name for update + "asset": parent_asset_name, + "task": "", + + "newAssetPublishing": True, + + # parent time properties + "trackStartFrame": track_start_frame, + "timelineOffset": timeline_offset, + # creator_attributes + "creator_attributes": creator_attributes + } + # add hierarchy shot metadata + base_instance_data.update(shot_metadata) + + return base_instance_data + + def _get_timing_data( + self, + otio_clip, + timeline_offset, + track_start_frame, + workfile_start_frame + ): + """Returning available timing data + + Args: + otio_clip (otio.Clip): otio clip object + timeline_offset (int): offset value + track_start_frame (int): starting frame input + workfile_start_frame (int): start frame for shot's workfiles + + Returns: + dict: timing metadata + """ + # frame ranges data + clip_in = otio_clip.range_in_parent().start_time.value + clip_in += track_start_frame + clip_out = otio_clip.range_in_parent().end_time_inclusive().value + clip_out += track_start_frame + self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}") + + # add offset in case there is any + self.log.debug(f"__ timeline_offset: {timeline_offset}") + if timeline_offset: + clip_in += timeline_offset + clip_out += timeline_offset + + clip_duration = otio_clip.duration().value + self.log.info(f"clip duration: {clip_duration}") + + source_in = otio_clip.trimmed_range().start_time.value + source_out = source_in + clip_duration + + # define starting frame for future shot + frame_start = ( + clip_in if workfile_start_frame is None + else workfile_start_frame + ) + frame_end = frame_start + (clip_duration - 1) + + return { + "frameStart": int(frame_start), + "frameEnd": int(frame_end), + "clipIn": int(clip_in), + "clipOut": int(clip_out), + "clipDuration": int(otio_clip.duration().value), + "sourceIn": int(source_in), + 
"sourceOut": int(source_out) + } + + def _get_allowed_family_presets(self, pre_create_data): + """ Filter out allowed family presets. + + Args: + pre_create_data (dict): precreate attributes inputs + + Returns: + list: lit of dict with preset items + """ + self.log.debug(f"__ pre_create_data: {pre_create_data}") + return [ + {"family": "shot"}, + *[ + preset for preset in self._creator_settings["family_presets"] + if pre_create_data[preset["family"]] + ] + ] + + def _validate_clip_for_processing(self, otio_clip): + """Validate otio clip attribues + + Args: + otio_clip (otio.Clip): otio clip object + + Returns: + bool: True if all passing conditions + """ + if otio_clip.name is None: + return False + + if isinstance(otio_clip, otio.schema.Gap): + return False + + # skip all generators like black empty + if isinstance( + otio_clip.media_reference, + otio.schema.GeneratorReference): + return False + + # Transitions are ignored, because Clips have the full frame + # range. + if isinstance(otio_clip, otio.schema.Transition): + return False + + return True + + def _validate_name_uniqueness(self, name): + """ Validating name uniqueness. + + In context of other clip names in sequence file. + + Args: + name (str): shot name string + """ + if name not in self.asset_name_check: + self.asset_name_check.append(name) + else: + self.log.warning( + f"Duplicate shot name: {name}! " + "Please check names in the input sequence files." + ) + + def get_pre_create_attr_defs(self): + """ Creating pre-create attributes at creator plugin. + + Returns: + list: list of attribute object instances + """ + # Use same attributes as for instance attrobites + attr_defs = [ + FileDef( + "sequence_filepath_data", + folders=False, + extensions=[ + ".edl", + ".xml", + ".aaf", + ".fcpxml" + ], + allow_sequences=False, + single_item=True, + label="Sequence file", + ), + FileDef( + "media_filepaths_data", + folders=False, + extensions=[ + ".mov", + ".mp4", + ".wav" + ], + allow_sequences=False, + single_item=False, + label="Media files", + ), + # TODO: perhpas better would be timecode and fps input + NumberDef( + "timeline_offset", + default=0, + label="Timeline offset" + ), + UISeparatorDef(), + UILabelDef("Clip instance attributes"), + UISeparatorDef() + ] + # add variants swithers + attr_defs.extend( + BoolDef(_var["family"], label=_var["family"]) + for _var in self._creator_settings["family_presets"] + ) + attr_defs.append(UISeparatorDef()) + + attr_defs.extend(CLIP_ATTR_DEFS) + return attr_defs diff --git a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py index baca274ea6..df6253b0c2 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py +++ b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py @@ -1,6 +1,8 @@ import os +from openpype.lib import Logger +from openpype.settings import get_project_settings -from openpype.api import get_project_settings +log = Logger.get_logger(__name__) def initialize(): @@ -13,6 +15,7 @@ def initialize(): global_variables = globals() for item in simple_creators: + dynamic_plugin = SettingsCreator.from_settings(item) global_variables[dynamic_plugin.__name__] = dynamic_plugin diff --git a/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py new file mode 100644 index 0000000000..cf25a37918 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py @@ -0,0 
+1,219 @@ +import copy +import os +import re + +from openpype.client import get_assets, get_asset_by_name +from openpype.lib import ( + FileDef, + BoolDef, +) +from openpype.pipeline import ( + CreatedInstance, + CreatorError +) +from openpype.pipeline.create import ( + get_subset_name, + TaskNotSetError, +) + +from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator + + +class BatchMovieCreator(TrayPublishCreator): + """Creates instances from movie file(s). + + Intended for .mov files, but should work for any video file. + Doesn't handle image sequences though. + """ + identifier = "render_movie_batch" + label = "Batch Movies" + family = "render" + description = "Publish batch of video files" + + create_allow_context_change = False + version_regex = re.compile(r"^(.+)_v([0-9]+)$") + + def __init__(self, project_settings, *args, **kwargs): + super(BatchMovieCreator, self).__init__(project_settings, + *args, **kwargs) + creator_settings = ( + project_settings["traypublisher"]["BatchMovieCreator"] + ) + self.default_variants = creator_settings["default_variants"] + self.default_tasks = creator_settings["default_tasks"] + self.extensions = creator_settings["extensions"] + + def get_icon(self): + return "fa.file" + + def create(self, subset_name, data, pre_create_data): + file_paths = pre_create_data.get("filepath") + if not file_paths: + return + + for file_info in file_paths: + instance_data = copy.deepcopy(data) + file_name = file_info["filenames"][0] + filepath = os.path.join(file_info["directory"], file_name) + instance_data["creator_attributes"] = {"filepath": filepath} + + asset_doc, version = self.get_asset_doc_from_file_name( + file_name, self.project_name) + + subset_name, task_name = self._get_subset_and_task( + asset_doc, data["variant"], self.project_name) + + instance_data["task"] = task_name + instance_data["asset"] = asset_doc["name"] + + # Create new instance + new_instance = CreatedInstance(self.family, subset_name, + instance_data, self) + self._store_new_instance(new_instance) + + def get_asset_doc_from_file_name(self, source_filename, project_name): + """Try to parse out asset name from file name provided. + + Artists might provide various file name formats. + Currently handled: + - chair.mov + - chair_v001.mov + - my_chair_to_upload.mov + """ + version = None + asset_name = os.path.splitext(source_filename)[0] + # Always first check if source filename is in assets + matching_asset_doc = self._get_asset_by_name_case_not_sensitive( + project_name, asset_name) + + if matching_asset_doc is None: + matching_asset_doc, version = ( + self._parse_with_version(project_name, asset_name)) + + if matching_asset_doc is None: + matching_asset_doc = self._parse_containing(project_name, + asset_name) + + if matching_asset_doc is None: + raise CreatorError( + "Cannot guess asset name from {}".format(source_filename)) + + return matching_asset_doc, version + + def _parse_with_version(self, project_name, asset_name): + """Try to parse asset name from a file name containing version too + + Eg. 'chair_v001.mov' >> 'chair', 1 + """ + self.log.debug(( + "Asset doc by \"{}\" was not found, trying version regex." 
+ ).format(asset_name)) + + matching_asset_doc = version_number = None + + regex_result = self.version_regex.findall(asset_name) + if regex_result: + _asset_name, _version_number = regex_result[0] + matching_asset_doc = self._get_asset_by_name_case_not_sensitive( + project_name, _asset_name) + if matching_asset_doc: + version_number = int(_version_number) + + return matching_asset_doc, version_number + + def _parse_containing(self, project_name, asset_name): + """Look if file name contains any existing asset name""" + for asset_doc in get_assets(project_name, fields=["name"]): + if asset_doc["name"].lower() in asset_name.lower(): + return get_asset_by_name(project_name, asset_doc["name"]) + + def _get_subset_and_task(self, asset_doc, variant, project_name): + """Create subset name according to standard template process""" + task_name = self._get_task_name(asset_doc) + + try: + subset_name = get_subset_name( + self.family, + variant, + task_name, + asset_doc, + project_name + ) + except TaskNotSetError: + # Create instance with fake task + # - instance will be marked as invalid so it can't be published + # but user have ability to change it + # NOTE: This expect that there is not task 'Undefined' on asset + task_name = "Undefined" + subset_name = get_subset_name( + self.family, + variant, + task_name, + asset_doc, + project_name + ) + + return subset_name, task_name + + def _get_task_name(self, asset_doc): + """Get applicable task from 'asset_doc' """ + available_task_names = {} + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + for task_name in asset_tasks.keys(): + available_task_names[task_name.lower()] = task_name + + task_name = None + for _task_name in self.default_tasks: + _task_name_low = _task_name.lower() + if _task_name_low in available_task_names: + task_name = available_task_names[_task_name_low] + break + + return task_name + + def get_instance_attr_defs(self): + return [ + BoolDef( + "add_review_family", + default=True, + label="Review" + ) + ] + + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attributes + return [ + FileDef( + "filepath", + folders=False, + single_item=False, + extensions=self.extensions, + allow_sequences=False, + label="Filepath" + ), + BoolDef( + "add_review_family", + default=True, + label="Review" + ) + ] + + def get_detail_description(self): + return """# Publish batch of .mov to multiple assets. + + File names must then contain only asset name, or asset name + version. + (eg. 'chair.mov', 'chair_v001.mov', not really safe `my_chair_v001.mov` + """ + + def _get_asset_by_name_case_not_sensitive(self, project_name, asset_name): + """Handle more cases in file names""" + asset_name = re.compile(asset_name, re.IGNORECASE) + + assets = list(get_assets(project_name, asset_names=[asset_name])) + if assets: + if len(assets) > 1: + self.log.warning("Too many records found for {}".format( + asset_name)) + return + + return assets.pop() diff --git a/openpype/hosts/traypublisher/plugins/create/create_online.py b/openpype/hosts/traypublisher/plugins/create/create_online.py new file mode 100644 index 0000000000..19f956a50e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/create/create_online.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +"""Creator of online files. + +Online file retain their original name and use it as subset name. To +avoid conflicts, this creator checks if subset with this name already +exists under selected asset. 
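# The `version_regex` used by `BatchMovieCreator` above splits names such as
# "chair_v001" into an asset part and a version part; the file name stems
# below are examples only.
import re

version_regex = re.compile(r"^(.+)_v([0-9]+)$")

for stem in ("chair", "chair_v001", "my_chair_to_upload"):
    result = version_regex.findall(stem)
    if result:
        asset_name, version = result[0]
        print(stem, "->", asset_name, int(version))  # chair_v001 -> chair 1
    else:
        print(stem, "-> no version suffix")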
+""" +from pathlib import Path + +from openpype.client import get_subset_by_name, get_asset_by_name +from openpype.lib.attribute_definitions import FileDef +from openpype.pipeline import ( + CreatedInstance, + CreatorError +) +from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator + + +class OnlineCreator(TrayPublishCreator): + """Creates instance from file and retains its original name.""" + + identifier = "io.openpype.creators.traypublisher.online" + label = "Online" + family = "online" + description = "Publish file retaining its original file name" + extensions = [".mov", ".mp4", ".mxf", ".m4v", ".mpg"] + + def get_detail_description(self): + return """# Create file retaining its original file name. + + This will publish files using template helping to retain original + file name and that file name is used as subset name. + + Bz default it tries to guard against multiple publishes of the same + file.""" + + def get_icon(self): + return "fa.file" + + def create(self, subset_name, instance_data, pre_create_data): + repr_file = pre_create_data.get("representation_file") + if not repr_file: + raise CreatorError("No files specified") + + files = repr_file.get("filenames") + if not files: + # this should never happen + raise CreatorError("Missing files from representation") + + origin_basename = Path(files[0]).stem + + asset = get_asset_by_name( + self.project_name, instance_data["asset"], fields=["_id"]) + if get_subset_by_name( + self.project_name, origin_basename, asset["_id"], + fields=["_id"]): + raise CreatorError(f"subset with {origin_basename} already " + "exists in selected asset") + + instance_data["originalBasename"] = origin_basename + subset_name = origin_basename + + instance_data["creator_attributes"] = { + "path": (Path(repr_file["directory"]) / files[0]).as_posix() + } + + # Create new instance + new_instance = CreatedInstance(self.family, subset_name, + instance_data, self) + self._store_new_instance(new_instance) + + def get_pre_create_attr_defs(self): + return [ + FileDef( + "representation_file", + folders=False, + extensions=self.extensions, + allow_sequences=False, + single_item=True, + label="Representation", + ) + ] + + def get_subset_name( + self, + variant, + task_name, + asset_doc, + project_name, + host_name=None, + instance=None + ): + if instance is None: + return "{originalBasename}" + + return instance.data["subset"] diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py new file mode 100644 index 0000000000..bdf7c05f3d --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py @@ -0,0 +1,36 @@ +from pprint import pformat +import pyblish.api + + +class CollectClipInstance(pyblish.api.InstancePlugin): + """Collect clip instances and resolve its parent""" + + label = "Collect Clip Instances" + order = pyblish.api.CollectorOrder - 0.081 + + hosts = ["traypublisher"] + families = ["plate", "review", "audio"] + + def process(self, instance): + creator_identifier = instance.data["creator_identifier"] + if creator_identifier not in [ + "editorial_plate", + "editorial_audio", + "editorial_review" + ]: + return + + instance.data["families"].append("clip") + + parent_instance_id = instance.data["parent_instance_id"] + edit_shared_data = instance.context.data["editorialSharedData"] + instance.data.update( + edit_shared_data[parent_instance_id] + ) + + if "editorialSourcePath" in instance.context.data.keys(): + 
instance.data["editorialSourcePath"] = ( + instance.context.data["editorialSourcePath"]) + instance.data["families"].append("trimming") + + self.log.debug(pformat(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py new file mode 100644 index 0000000000..e181d0abe5 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py @@ -0,0 +1,48 @@ +import os +from pprint import pformat +import pyblish.api +import opentimelineio as otio + + +class CollectEditorialInstance(pyblish.api.InstancePlugin): + """Collect data for instances created by settings creators.""" + + label = "Collect Editorial Instances" + order = pyblish.api.CollectorOrder - 0.1 + + hosts = ["traypublisher"] + families = ["editorial"] + + def process(self, instance): + + if "families" not in instance.data: + instance.data["families"] = [] + + if "representations" not in instance.data: + instance.data["representations"] = [] + + fpath = instance.data["sequenceFilePath"] + otio_timeline_string = instance.data.pop("otioTimeline") + otio_timeline = otio.adapters.read_from_string( + otio_timeline_string) + + instance.context.data["otioTimeline"] = otio_timeline + instance.context.data["editorialSourcePath"] = ( + instance.data["editorialSourcePath"]) + + self.log.info(fpath) + + instance.data["stagingDir"] = os.path.dirname(fpath) + + _, ext = os.path.splitext(fpath) + + instance.data["representations"].append({ + "ext": ext[1:], + "name": ext[1:], + "stagingDir": instance.data["stagingDir"], + "files": os.path.basename(fpath) + }) + + self.log.debug("Created Editorial Instance {}".format( + pformat(instance.data) + )) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py new file mode 100644 index 0000000000..4af4fb94e9 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py @@ -0,0 +1,30 @@ +import pyblish.api + + +class CollectEditorialReviewable(pyblish.api.InstancePlugin): + """ Collect review input from user. + + Adds the input to instance data. + """ + + label = "Collect Editorial Reviewable" + order = pyblish.api.CollectorOrder + + families = ["plate", "review", "audio"] + hosts = ["traypublisher"] + + def process(self, instance): + creator_identifier = instance.data["creator_identifier"] + if creator_identifier not in [ + "editorial_plate", + "editorial_audio", + "editorial_review" + ]: + return + + creator_attributes = instance.data["creator_attributes"] + + if creator_attributes["add_review_family"]: + instance.data["families"].append("review") + + self.log.debug("instance.data {}".format(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py b/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py new file mode 100644 index 0000000000..5f8b2878b7 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py @@ -0,0 +1,48 @@ +import os + +import pyblish.api +from openpype.pipeline import OpenPypePyblishPluginMixin + + +class CollectMovieBatch( + pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin +): + """Collect file url for batch movies and create representation. + + Adds review on instance and to repre.tags based on value of toggle button + on creator. 
+ """ + + label = "Collect Movie Batch Files" + order = pyblish.api.CollectorOrder + + hosts = ["traypublisher"] + + def process(self, instance): + if instance.data.get("creator_identifier") != "render_movie_batch": + return + + creator_attributes = instance.data["creator_attributes"] + + file_url = creator_attributes["filepath"] + file_name = os.path.basename(file_url) + _, ext = os.path.splitext(file_name) + + repre = { + "name": ext[1:], + "ext": ext[1:], + "files": file_name, + "stagingDir": os.path.dirname(file_url), + "tags": [] + } + instance.data["representations"].append(repre) + + if creator_attributes["add_review_family"]: + repre["tags"].append("review") + instance.data["families"].append("review") + if not instance.data.get("thumbnailSource"): + instance.data["thumbnailSource"] = file_url + + instance.data["source"] = file_url + + self.log.debug("instance.data {}".format(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_online_file.py b/openpype/hosts/traypublisher/plugins/publish/collect_online_file.py new file mode 100644 index 0000000000..a3f86afa13 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_online_file.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +import pyblish.api +from pathlib import Path + + +class CollectOnlineFile(pyblish.api.InstancePlugin): + """Collect online file and retain its file name.""" + label = "Collect Online File" + order = pyblish.api.CollectorOrder + families = ["online"] + hosts = ["traypublisher"] + + def process(self, instance): + file = Path(instance.data["creator_attributes"]["path"]) + + instance.data["representations"].append( + { + "name": file.suffix.lstrip("."), + "ext": file.suffix.lstrip("."), + "files": file.name, + "stagingDir": file.parent.as_posix() + } + ) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py b/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py deleted file mode 100644 index 965e251527..0000000000 --- a/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py +++ /dev/null @@ -1,31 +0,0 @@ -import pyblish.api -from openpype.lib import BoolDef -from openpype.pipeline import OpenPypePyblishPluginMixin - - -class CollectReviewFamily( - pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin -): - """Add review family.""" - - label = "Collect Review Family" - order = pyblish.api.CollectorOrder - 0.49 - - hosts = ["traypublisher"] - families = [ - "image", - "render", - "plate", - "review" - ] - - def process(self, instance): - values = self.get_attr_values_from_data(instance.data) - if values.get("add_review_family"): - instance.data["families"].append("review") - - @classmethod - def get_attribute_defs(cls): - return [ - BoolDef("add_review_family", label="Review", default=True) - ] diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py new file mode 100644 index 0000000000..716f73022e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py @@ -0,0 +1,213 @@ +from pprint import pformat +import pyblish.api +import opentimelineio as otio + + +class CollectShotInstance(pyblish.api.InstancePlugin): + """ Collect shot instances + + Resolving its user inputs from creator attributes + to instance data. 
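# Sketch of how the batch-movie collector above promotes an instance for
# review when the creator toggle is enabled (the data values are examples).
instance_data = {"families": ["render"], "representations": []}
repre = {"name": "mov", "ext": "mov", "files": "chair_v001.mov", "tags": []}
instance_data["representations"].append(repre)

add_review = True  # value of the "add_review_family" toggle
if add_review:
    repre["tags"].append("review")
    instance_data["families"].append("review")
print(instance_data)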
+ """ + + label = "Collect Shot Instances" + order = pyblish.api.CollectorOrder - 0.09 + + hosts = ["traypublisher"] + families = ["shot"] + + SHARED_KEYS = [ + "asset", + "fps", + "handleStart", + "handleEnd", + "frameStart", + "frameEnd", + "clipIn", + "clipOut", + "clipDuration", + "sourceIn", + "sourceOut", + "otioClip", + "workfileFrameStart" + ] + + def process(self, instance): + self.log.debug(pformat(instance.data)) + + creator_identifier = instance.data["creator_identifier"] + if "editorial" not in creator_identifier: + return + + # get otio clip object + otio_clip = self._get_otio_clip(instance) + instance.data["otioClip"] = otio_clip + + # first solve the inputs from creator attr + data = self._solve_inputs_to_data(instance) + instance.data.update(data) + + # distribute all shared keys to clips instances + self._distribute_shared_data(instance) + self._solve_hierarchy_context(instance) + + self.log.debug(pformat(instance.data)) + + def _get_otio_clip(self, instance): + """ Converts otio string data. + + Convert them to proper otio object + and finds its equivalent at otio timeline. + This process is a hack to support also + resolving parent range. + + Args: + instance (obj): publishing instance + + Returns: + otio.Clip: otio clip object + """ + context = instance.context + # convert otio clip from string to object + otio_clip_string = instance.data.pop("otioClip") + otio_clip = otio.adapters.read_from_string( + otio_clip_string) + + otio_timeline = context.data["otioTimeline"] + + clips = [ + clip for clip in otio_timeline.each_child( + descended_from_type=otio.schema.Clip) + if clip.name == otio_clip.name + ] + + otio_clip = clips.pop() + self.log.debug(f"__ otioclip.parent: {otio_clip.parent}") + + return otio_clip + + def _distribute_shared_data(self, instance): + """ Distribute all defined keys. + + All data are shared between all related + instances in context. + + Args: + instance (obj): publishing instance + """ + context = instance.context + + instance_id = instance.data["instance_id"] + + if not context.data.get("editorialSharedData"): + context.data["editorialSharedData"] = {} + + context.data["editorialSharedData"][instance_id] = { + _k: _v for _k, _v in instance.data.items() + if _k in self.SHARED_KEYS + } + + def _solve_inputs_to_data(self, instance): + """ Resolve all user inputs into instance data. + + Args: + instance (obj): publishing instance + + Returns: + dict: instance data updating data + """ + _cr_attrs = instance.data["creator_attributes"] + workfile_start_frame = _cr_attrs["workfile_start_frame"] + frame_start = _cr_attrs["frameStart"] + frame_end = _cr_attrs["frameEnd"] + frame_dur = frame_end - frame_start + + return { + "asset": _cr_attrs["asset_name"], + "fps": float(_cr_attrs["fps"]), + "handleStart": _cr_attrs["handle_start"], + "handleEnd": _cr_attrs["handle_end"], + "frameStart": workfile_start_frame, + "frameEnd": workfile_start_frame + frame_dur, + "clipIn": _cr_attrs["clipIn"], + "clipOut": _cr_attrs["clipOut"], + "clipDuration": _cr_attrs["clipDuration"], + "sourceIn": _cr_attrs["sourceIn"], + "sourceOut": _cr_attrs["sourceOut"], + "workfileFrameStart": workfile_start_frame + } + + def _solve_hierarchy_context(self, instance): + """ Adding hierarchy data to context shared data. 
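# `_get_otio_clip` above re-resolves the serialized clip against the shared
# timeline so the parent range stays available; a condensed sketch of that
# lookup, assuming an already loaded timeline.
import opentimelineio as otio

def find_clip_by_name(otio_timeline, clip_name):
    """Return the first timeline clip matching the given name, if any."""
    for clip in otio_timeline.each_child(
            descended_from_type=otio.schema.Clip):
        if clip.name == clip_name:
            return clip
    return None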
+ + Args: + instance (obj): publishing instance + """ + context = instance.context + + final_context = ( + context.data["hierarchyContext"] + if context.data.get("hierarchyContext") + else {} + ) + + name = instance.data["asset"] + + # get handles + handle_start = int(instance.data["handleStart"]) + handle_end = int(instance.data["handleEnd"]) + + in_info = { + "entity_type": "Shot", + "custom_attributes": { + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": instance.data["frameStart"], + "frameEnd": instance.data["frameEnd"], + "clipIn": instance.data["clipIn"], + "clipOut": instance.data["clipOut"], + "fps": instance.data["fps"] + }, + "tasks": instance.data["tasks"] + } + + parents = instance.data.get('parents', []) + self.log.debug(f"parents: {pformat(parents)}") + + actual = {name: in_info} + + for parent in reversed(parents): + parent_name = parent["entity_name"] + next_dict = { + parent_name: { + "entity_type": parent["entity_type"], + "childs": actual + } + } + actual = next_dict + + final_context = self._update_dict(final_context, actual) + + # adding hierarchy context to instance + context.data["hierarchyContext"] = final_context + self.log.debug(pformat(final_context)) + + def _update_dict(self, ex_dict, new_dict): + """ Recursion function + + Updating nested data with another nested data. + + Args: + ex_dict (dict): nested data + new_dict (dict): nested data + + Returns: + dict: updated nested data + """ + for key in ex_dict: + if key in new_dict and isinstance(ex_dict[key], dict): + new_dict[key] = self._update_dict(ex_dict[key], new_dict[key]) + elif not ex_dict.get(key) or not new_dict.get(key): + new_dict[key] = ex_dict[key] + + return new_dict diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py index b2be43c701..183195a515 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py +++ b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py @@ -1,9 +1,32 @@ import os +import tempfile +from pathlib import Path + +import clique import pyblish.api class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): - """Collect data for instances created by settings creators.""" + """Collect data for instances created by settings creators. + + Plugin create representations for simple instances based + on 'representation_files' attribute stored on instance data. + + There is also possibility to have reviewable representation which can be + stored under 'reviewable' attribute stored on instance data. If there was + already created representation with the same files as 'revieable' containes + + Representations can be marked for review and in that case is also added + 'review' family to instance families. For review can be marked only one + representation so **first** representation that has extension available + in '_review_extensions' is used for review. + + For instance 'source' is used path from last representation created + from 'representation_files'. + + Set staging directory on instance. That is probably never used because + each created representation has it's own staging dir. 
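# Sketch of the folding loop in `_solve_hierarchy_context` above: the shot
# entry is wrapped by its parents from the innermost level outwards (entity
# names here are examples only).
shot_name = "sh010"
in_info = {"entity_type": "Shot", "custom_attributes": {}, "tasks": {}}
parents = [
    {"entity_name": "demo_project", "entity_type": "Project"},
    {"entity_name": "sq01", "entity_type": "Sequence"},
]

actual = {shot_name: in_info}
for parent in reversed(parents):
    actual = {
        parent["entity_name"]: {
            "entity_type": parent["entity_type"],
            "childs": actual,
        }
    }
# -> {"demo_project": {"entity_type": "Project",
#                      "childs": {"sq01": {"entity_type": "Sequence",
#                                          "childs": {"sh010": in_info}}}}}
print(actual)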
+ """ label = "Collect Settings Simple Instances" order = pyblish.api.CollectorOrder - 0.49 @@ -14,37 +37,207 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): if not instance.data.get("settings_creator"): return - if "families" not in instance.data: - instance.data["families"] = [] + instance_label = instance.data["name"] + # Create instance's staging dir in temp + tmp_folder = tempfile.mkdtemp(prefix="traypublisher_") + instance.data["stagingDir"] = tmp_folder + instance.context.data["cleanupFullPaths"].append(tmp_folder) - if "representations" not in instance.data: - instance.data["representations"] = [] - repres = instance.data["representations"] + self.log.debug(( + "Created temp staging directory for instance {}. {}" + ).format(instance_label, tmp_folder)) + + # Store filepaths for validation of their existence + source_filepaths = [] + # Make sure there are no representations with same name + repre_names_counter = {} + # Store created names for logging + repre_names = [] + # Store set of filepaths per each representation + representation_files_mapping = [] + source = self._create_main_representations( + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ) + + self._create_review_representation( + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ) + source_filepaths = list(set(source_filepaths)) + instance.data["source"] = source + instance.data["sourceFilepaths"] = source_filepaths + + # NOTE: Missing filepaths should not cause crashes (at least not here) + # - if filepaths are required they should crash on validation + if source_filepaths: + # NOTE: Original basename is not handling sequences + # - we should maybe not fill the key when sequence is used? + origin_basename = Path(source_filepaths[0]).stem + instance.data["originalBasename"] = origin_basename + + self.log.debug( + ( + "Created Simple Settings instance \"{}\"" + " with {} representations: {}" + ).format( + instance_label, + len(instance.data["representations"]), + ", ".join(repre_names) + ) + ) + + def _create_main_representations( + self, + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ): + creator_attributes = instance.data["creator_attributes"] + filepath_items = creator_attributes["representation_files"] + if not isinstance(filepath_items, list): + filepath_items = [filepath_items] + + source = None + for filepath_item in filepath_items: + # Skip if filepath item does not have filenames + if not filepath_item["filenames"]: + continue + + filepaths = { + os.path.join(filepath_item["directory"], filename) + for filename in filepath_item["filenames"] + } + source_filepaths.extend(filepaths) + + source = self._calculate_source(filepaths) + representation = self._create_representation_data( + filepath_item, repre_names_counter, repre_names + ) + instance.data["representations"].append(representation) + representation_files_mapping.append( + (filepaths, representation, source) + ) + return source + + def _create_review_representation( + self, + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ): + # Skip review representation creation if there are no representations + # created for "main" part + # - review representation must not be created in that case so + # validation can care about it + if not representation_files_mapping: + self.log.warning(( + "There are missing source representations." 
+ " Creation of review representation was skipped." + )) + return creator_attributes = instance.data["creator_attributes"] - filepath_item = creator_attributes["filepath"] - self.log.info(filepath_item) - filepaths = [ - os.path.join(filepath_item["directory"], filename) - for filename in filepath_item["filenames"] - ] + review_file_item = creator_attributes["reviewable"] + filenames = review_file_item.get("filenames") + if not filenames: + self.log.debug(( + "Filepath for review is not defined." + " Skipping review representation creation." + )) + return - instance.data["sourceFilepaths"] = filepaths - instance.data["stagingDir"] = filepath_item["directory"] + item_dir = review_file_item["directory"] + first_filepath = os.path.join(item_dir, filenames[0]) + + filepaths = { + os.path.join(item_dir, filename) + for filename in filenames + } + source_filepaths.extend(filepaths) + # First try to find out representation with same filepaths + # so it's not needed to create new representation just for review + review_representation = None + # Review path (only for logging) + review_path = None + for item in representation_files_mapping: + _filepaths, representation, repre_path = item + if _filepaths == filepaths: + review_representation = representation + review_path = repre_path + break + + if review_representation is None: + self.log.debug("Creating new review representation") + review_path = self._calculate_source(filepaths) + review_representation = self._create_representation_data( + review_file_item, repre_names_counter, repre_names + ) + instance.data["representations"].append(review_representation) + + if "review" not in instance.data["families"]: + instance.data["families"].append("review") + + if not instance.data.get("thumbnailSource"): + instance.data["thumbnailSource"] = first_filepath + + review_representation["tags"].append("review") + self.log.debug("Representation {} was marked for review. {}".format( + review_representation["name"], review_path + )) + + def _create_representation_data( + self, filepath_item, repre_names_counter, repre_names + ): + """Create new representation data based on file item. + + Args: + filepath_item (Dict[str, Any]): Item with information about + representation paths. + repre_names_counter (Dict[str, int]): Store count of representation + names. + repre_names (List[str]): All used representation names. For + logging purposes. + + Returns: + Dict: Prepared base representation data. 
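# The de-duplication described above adds a numeric suffix when several
# representations share one extension; a standalone sketch of that counter.
repre_names_counter = {}

def unique_repre_name(ext):
    """Return a representation name that is unique for the given extension."""
    name = ext
    if name not in repre_names_counter:
        repre_names_counter[name] = 2
    else:
        counter = repre_names_counter[name]
        repre_names_counter[name] += 1
        name = "{}_{}".format(name, counter)
    return name

print(unique_repre_name("exr"))  # exr
print(unique_repre_name("exr"))  # exr_2
print(unique_repre_name("exr"))  # exr_3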
+ """ filenames = filepath_item["filenames"] _, ext = os.path.splitext(filenames[0]) - ext = ext[1:] if len(filenames) == 1: filenames = filenames[0] - repres.append({ - "ext": ext, - "name": ext, + repre_name = repre_ext = ext[1:] + if repre_name not in repre_names_counter: + repre_names_counter[repre_name] = 2 + else: + counter = repre_names_counter[repre_name] + repre_names_counter[repre_name] += 1 + repre_name = "{}_{}".format(repre_name, counter) + repre_names.append(repre_name) + return { + "ext": repre_ext, + "name": repre_name, "stagingDir": filepath_item["directory"], - "files": filenames - }) + "files": filenames, + "tags": [] + } - self.log.debug("Created Simple Settings instance {}".format( - instance.data - )) + def _calculate_source(self, filepaths): + cols, rems = clique.assemble(filepaths) + if cols: + source = cols[0].format("{head}{padding}{tail}") + elif rems: + source = rems[0] + return source diff --git a/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml b/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml new file mode 100644 index 0000000000..933df1c7c5 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml @@ -0,0 +1,15 @@ + + + +Invalid frame range + +## Invalid frame range + +Expected duration or '{duration}' frames set in database, workfile contains only '{found}' frames. + +### How to repair? + +Modify configuration in the database or tweak frame range in the workfile. + + + \ No newline at end of file diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py index c7302b1005..749199fbd3 100644 --- a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py +++ b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py @@ -3,8 +3,17 @@ import pyblish.api from openpype.pipeline import PublishValidationError -class ValidateWorkfilePath(pyblish.api.InstancePlugin): - """Validate existence of workfile instance existence.""" +class ValidateFilePath(pyblish.api.InstancePlugin): + """Validate existence of source filepaths on instance. + + Plugins looks into key 'sourceFilepaths' and validate if paths there + actually exist on disk. + + Also validate if the key is filled but is empty. In that case also + crashes so do not fill the key if unfilled value should not cause error. + + This is primarily created for Simple Creator instances. + """ label = "Validate Workfile" order = pyblish.api.ValidatorOrder - 0.49 @@ -14,12 +23,28 @@ class ValidateWorkfilePath(pyblish.api.InstancePlugin): def process(self, instance): if "sourceFilepaths" not in instance.data: self.log.info(( - "Can't validate source filepaths existence." + "Skipped validation of source filepaths existence." " Instance does not have collected 'sourceFilepaths'" )) return - filepaths = instance.data.get("sourceFilepaths") + family = instance.data["family"] + label = instance.data["name"] + filepaths = instance.data["sourceFilepaths"] + if not filepaths: + raise PublishValidationError( + ( + "Source filepaths of '{}' instance \"{}\" are not filled" + ).format(family, label), + "File not filled", + ( + "## Files were not filled" + "\nThis mean that you didn't enter any files into required" + " file input." 
+ "\n- Please refresh publishing and check instance" + " {}" + ).format(label) + ) not_found_files = [ filepath @@ -34,11 +59,7 @@ class ValidateWorkfilePath(pyblish.api.InstancePlugin): raise PublishValidationError( ( "Filepath of '{}' instance \"{}\" does not exist:\n{}" - ).format( - instance.data["family"], - instance.data["name"], - joined_paths - ), + ).format(family, label, joined_paths), "File not found", ( "## Files were not found\nFiles\n{}" diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py new file mode 100644 index 0000000000..b962ea464a --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py @@ -0,0 +1,75 @@ +import re + +import pyblish.api + +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) + + +class ValidateFrameRange(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validating frame range of rendered files against state in DB.""" + + label = "Validate Frame Range" + hosts = ["traypublisher"] + families = ["render"] + order = ValidateContentsOrder + + optional = True + # published data might be sequence (.mov, .mp4) in that counting files + # doesnt make sense + check_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga", + "gif", "svg"] + skip_timelines_check = [] # skip for specific task names (regex) + + def process(self, instance): + # Skip the instance if is not active by data on the instance + if not self.is_active(instance.data): + return + + if (self.skip_timelines_check and + any(re.search(pattern, instance.data["task"]) + for pattern in self.skip_timelines_check)): + self.log.info("Skipping for {} task".format(instance.data["task"])) + + asset_doc = instance.data["assetEntity"] + asset_data = asset_doc["data"] + frame_start = asset_data["frameStart"] + frame_end = asset_data["frameEnd"] + handle_start = asset_data["handleStart"] + handle_end = asset_data["handleEnd"] + duration = (frame_end - frame_start + 1) + handle_start + handle_end + + repres = instance.data.get("representations") + if not repres: + self.log.info("No representations, skipping.") + return + + first_repre = repres[0] + ext = first_repre['ext'].replace(".", '') + + if not ext or ext.lower() not in self.check_extensions: + self.log.warning("Cannot check for extension {}".format(ext)) + return + + files = first_repre["files"] + if isinstance(files, str): + files = [files] + frames = len(files) + + msg = ( + "Frame duration from DB:'{}' doesn't match number of files:'{}'" + " Please change frame range for Asset or limit no. of files" + ). format(int(duration), frames) + + formatting_data = {"duration": duration, + "found": frames} + if frames != duration: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + self.log.debug("Valid ranges expected '{}' - found '{}'". 
+ format(int(duration), frames)) diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py b/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py new file mode 100644 index 0000000000..12b2e72ced --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +import pyblish.api + +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishValidationError, + OptionalPyblishPluginMixin, +) +from openpype.client import get_subset_by_name + + +class ValidateOnlineFile(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validate that subset doesn't exist yet.""" + label = "Validate Existing Online Files" + hosts = ["traypublisher"] + families = ["online"] + order = ValidateContentsOrder + + optional = True + + def process(self, instance): + project_name = instance.context.data["projectName"] + asset_id = instance.data["assetEntity"]["_id"] + subset = get_subset_by_name( + project_name, instance.data["subset"], asset_id) + + if subset: + raise PublishValidationError( + "Subset to be published already exists.", + title=self.label + ) diff --git a/openpype/hosts/tvpaint/__init__.py b/openpype/hosts/tvpaint/__init__.py index 09b7c52cd1..b98680f204 100644 --- a/openpype/hosts/tvpaint/__init__.py +++ b/openpype/hosts/tvpaint/__init__.py @@ -1,20 +1,12 @@ -import os +from .addon import ( + get_launch_script_path, + TVPaintAddon, + TVPAINT_ROOT_DIR, +) -def add_implementation_envs(env, _app): - """Modify environments to contain all required for implementation.""" - defaults = { - "OPENPYPE_LOG_NO_COLORS": "True" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - -def get_launch_script_path(): - current_dir = os.path.dirname(os.path.abspath(__file__)) - return os.path.join( - current_dir, - "api", - "launch_script.py" - ) +__all__ = ( + "get_launch_script_path", + "TVPaintAddon", + "TVPAINT_ROOT_DIR", +) diff --git a/openpype/hosts/tvpaint/addon.py b/openpype/hosts/tvpaint/addon.py new file mode 100644 index 0000000000..b695bf8ecc --- /dev/null +++ b/openpype/hosts/tvpaint/addon.py @@ -0,0 +1,40 @@ +import os +from openpype.modules import OpenPypeModule, IHostAddon + +TVPAINT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +def get_launch_script_path(): + return os.path.join( + TVPAINT_ROOT_DIR, + "api", + "launch_script.py" + ) + + +class TVPaintAddon(OpenPypeModule, IHostAddon): + name = "tvpaint" + host_name = "tvpaint" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + """Modify environments to contain all required for implementation.""" + + defaults = { + "OPENPYPE_LOG_NO_COLORS": "True" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(TVPAINT_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".tvpp"] diff --git a/openpype/hosts/tvpaint/api/__init__.py b/openpype/hosts/tvpaint/api/__init__.py index c461b33f4b..7b53aad9a4 100644 --- a/openpype/hosts/tvpaint/api/__init__.py +++ b/openpype/hosts/tvpaint/api/__init__.py @@ -1,49 +1,11 @@ from .communication_server import CommunicationWrapper -from . import lib -from . import launch_script -from . import workio -from . import pipeline -from . 
import plugin from .pipeline import ( - install, - uninstall, - maintained_selection, - remove_instance, - list_instances, - ls -) - -from .workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root, + TVPaintHost, ) __all__ = ( "CommunicationWrapper", - "lib", - "launch_script", - "workio", - "pipeline", - "plugin", - - "install", - "uninstall", - "maintained_selection", - "remove_instance", - "list_instances", - "ls", - - # Workfiles API - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "file_extensions", - "work_root" + "TVPaintHost", ) diff --git a/openpype/hosts/tvpaint/api/communication_server.py b/openpype/hosts/tvpaint/api/communication_server.py index 65cb9aa2f3..6ac3e6324c 100644 --- a/openpype/hosts/tvpaint/api/communication_server.py +++ b/openpype/hosts/tvpaint/api/communication_server.py @@ -707,6 +707,9 @@ class BaseCommunicator: if exit_code is not None: self.exit_code = exit_code + if self.exit_code is None: + self.exit_code = 0 + def stop(self): """Stop communication and currently running python process.""" log.info("Stopping communication") diff --git a/openpype/hosts/tvpaint/api/launch_script.py b/openpype/hosts/tvpaint/api/launch_script.py index 0b25027fc6..c474a10529 100644 --- a/openpype/hosts/tvpaint/api/launch_script.py +++ b/openpype/hosts/tvpaint/api/launch_script.py @@ -10,10 +10,10 @@ from Qt import QtWidgets, QtCore, QtGui from openpype import style from openpype.pipeline import install_host -from openpype.hosts.tvpaint.api.communication_server import ( - CommunicationWrapper +from openpype.hosts.tvpaint.api import ( + TVPaintHost, + CommunicationWrapper, ) -from openpype.hosts.tvpaint import api as tvpaint_host log = logging.getLogger(__name__) @@ -30,6 +30,7 @@ def main(launch_args): # - QApplicaiton is also main thread/event loop of the server qt_app = QtWidgets.QApplication([]) + tvpaint_host = TVPaintHost() # Execute pipeline installation install_host(tvpaint_host) diff --git a/openpype/hosts/tvpaint/api/lib.py b/openpype/hosts/tvpaint/api/lib.py index 0c63dbe5be..5e64773b8e 100644 --- a/openpype/hosts/tvpaint/api/lib.py +++ b/openpype/hosts/tvpaint/api/lib.py @@ -2,7 +2,7 @@ import os import logging import tempfile -from . 
import CommunicationWrapper +from .communication_server import CommunicationWrapper log = logging.getLogger(__name__) @@ -165,12 +165,12 @@ def parse_group_data(data): if not group_raw: continue - parts = group_raw.split(" ") + parts = group_raw.split("|") # Check for length and concatenate 2 last items until length match # - this happens if name contain spaces while len(parts) > 6: last_item = parts.pop(-1) - parts[-1] = " ".join([parts[-1], last_item]) + parts[-1] = "|".join([parts[-1], last_item]) clip_id, group_id, red, green, blue, name = parts group = { @@ -201,11 +201,16 @@ def get_groups_data(communicator=None): george_script_lines = ( # Variable containing full path to output file "output_path = \"{}\"".format(output_filepath), - "loop = 1", - "FOR idx = 1 TO 12", + "empty = 0", + # Loop over 100 groups + "FOR idx = 1 TO 100", + # Receive information about groups "tv_layercolor \"getcolor\" 0 idx", - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' result", - "END" + "PARSE result clip_id group_index c_red c_green c_blue group_name", + # Create and add line to output file + "line = clip_id'|'group_index'|'c_red'|'c_green'|'c_blue'|'group_name", + "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line", + "END", ) george_script = "\n".join(george_script_lines) execute_george_through_file(george_script, communicator) diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index f473f51457..249326791b 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -1,6 +1,5 @@ import os import json -import contextlib import tempfile import logging @@ -8,15 +7,15 @@ import requests import pyblish.api -from openpype.hosts import tvpaint -from openpype.api import get_current_project_settings +from openpype.client import get_project, get_asset_by_name +from openpype.host import HostBase, IWorkfileHost, ILoadHost +from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR +from openpype.settings import get_current_project_settings from openpype.lib import register_event_callback from openpype.pipeline import ( legacy_io, register_loader_plugin_path, register_creator_plugin_path, - deregister_loader_plugin_path, - deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) @@ -27,11 +26,6 @@ from .lib import ( log = logging.getLogger(__name__) -HOST_DIR = os.path.dirname(os.path.abspath(tvpaint.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") METADATA_SECTION = "avalon" SECTION_NAME_CONTEXT = "context" @@ -64,43 +58,152 @@ instances=2 """ -def install(): - """Install TVPaint-specific functionality.""" +class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): + name = "tvpaint" - log.info("OpenPype - Installing TVPaint integration") - legacy_io.install() + def install(self): + """Install TVPaint-specific functionality.""" - # Create workdir folder if does not exist yet - workdir = legacy_io.Session["AVALON_WORKDIR"] - if not os.path.exists(workdir): - os.makedirs(workdir) + log.info("OpenPype - Installing TVPaint integration") + legacy_io.install() - pyblish.api.register_host("tvpaint") - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) + # Create workdir folder if does not exist yet + workdir = legacy_io.Session["AVALON_WORKDIR"] + if not os.path.exists(workdir): + 
os.makedirs(workdir) - registered_callbacks = ( - pyblish.api.registered_callbacks().get("instanceToggled") or [] - ) - if on_instance_toggle not in registered_callbacks: - pyblish.api.register_callback("instanceToggled", on_instance_toggle) + plugins_dir = os.path.join(TVPAINT_ROOT_DIR, "plugins") + publish_dir = os.path.join(plugins_dir, "publish") + load_dir = os.path.join(plugins_dir, "load") + create_dir = os.path.join(plugins_dir, "create") - register_event_callback("application.launched", initial_launch) - register_event_callback("application.exit", application_exit) + pyblish.api.register_host("tvpaint") + pyblish.api.register_plugin_path(publish_dir) + register_loader_plugin_path(load_dir) + register_creator_plugin_path(create_dir) + registered_callbacks = ( + pyblish.api.registered_callbacks().get("instanceToggled") or [] + ) + if self.on_instance_toggle not in registered_callbacks: + pyblish.api.register_callback( + "instanceToggled", self.on_instance_toggle + ) -def uninstall(): - """Uninstall TVPaint-specific functionality. + register_event_callback("application.launched", self.initial_launch) + register_event_callback("application.exit", self.application_exit) - This function is called automatically on calling `uninstall_host()`. - """ + def open_workfile(self, filepath): + george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( + filepath.replace("\\", "/") + ) + return execute_george_through_file(george_script) - log.info("OpenPype - Uninstalling TVPaint integration") - pyblish.api.deregister_host("tvpaint") - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - deregister_loader_plugin_path(LOAD_PATH) - deregister_creator_plugin_path(CREATE_PATH) + def save_workfile(self, filepath=None): + if not filepath: + filepath = self.get_current_workfile() + context = { + "project": legacy_io.Session["AVALON_PROJECT"], + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] + } + save_current_workfile_context(context) + + # Execute george script to save workfile. + george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/")) + return execute_george(george_script) + + def work_root(self, session): + return session["AVALON_WORKDIR"] + + def get_current_workfile(self): + return execute_george("tv_GetProjectName") + + def workfile_has_unsaved_changes(self): + return None + + def get_workfile_extensions(self): + return [".tvpp"] + + def get_containers(self): + return get_containers() + + def initial_launch(self): + # Setup project settings if its the template that's launched. + # TODO also check for template creation when it's possible to define + # templates + last_workfile = os.environ.get("AVALON_LAST_WORKFILE") + if not last_workfile or os.path.exists(last_workfile): + return + + log.info("Setting up project...") + set_context_settings() + + def remove_instance(self, instance): + """Remove instance from current workfile metadata. + + Implementation for Subset manager tool. + """ + + current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES) + instance_id = instance.get("uuid") + found_idx = None + if instance_id: + for idx, _inst in enumerate(current_instances): + if _inst["uuid"] == instance_id: + found_idx = idx + break + + if found_idx is None: + return + current_instances.pop(found_idx) + write_instances(current_instances) + + def application_exit(self): + """Logic related to TimerManager. + + Todo: + This should be handled out of TVPaint integration logic. 
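# The workfile methods above send plain george commands to TVPaint; this is
# the load command built by `open_workfile`, shown for a sample path only.
filepath = "C:\\projects\\demo\\work\\sh010_anim_v001.tvpp"  # example path
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
    filepath.replace("\\", "/")
)
print(george_script)
# tv_LoadProject '"'"C:/projects/demo/work/sh010_anim_v001.tvpp"'"'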
+ """ + + data = get_current_project_settings() + stop_timer = data["tvpaint"]["stop_timer_on_application_exit"] + + if not stop_timer: + return + + # Stop application timer. + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") + rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) + requests.post(rest_api_url) + + def on_instance_toggle(self, instance, old_value, new_value): + """Update instance data in workfile on publish toggle.""" + # Review may not have real instance in wokrfile metadata + if not instance.data.get("uuid"): + return + + instance_id = instance.data["uuid"] + found_idx = None + current_instances = list_instances() + for idx, workfile_instance in enumerate(current_instances): + if workfile_instance["uuid"] == instance_id: + found_idx = idx + break + + if found_idx is None: + return + + if "active" in current_instances[found_idx]: + current_instances[found_idx]["active"] = new_value + self.write_instances(current_instances) + + def list_instances(self): + """List all created instances from current workfile.""" + return list_instances() + + def write_instances(self, data): + return write_instances(data) def containerise( @@ -130,7 +233,7 @@ def containerise( "representation": str(context["representation"]["_id"]) } if current_containers is None: - current_containers = ls() + current_containers = get_containers() # Add container to containers list current_containers.append(container_data) @@ -141,15 +244,6 @@ def containerise( return container_data -@contextlib.contextmanager -def maintained_selection(): - # TODO implement logic - try: - yield - finally: - pass - - def split_metadata_string(text, chunk_length=None): """Split string by length. @@ -347,23 +441,6 @@ def save_current_workfile_context(context): return write_workfile_metadata(SECTION_NAME_CONTEXT, context) -def remove_instance(instance): - """Remove instance from current workfile metadata.""" - current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES) - instance_id = instance.get("uuid") - found_idx = None - if instance_id: - for idx, _inst in enumerate(current_instances): - if _inst["uuid"] == instance_id: - found_idx = idx - break - - if found_idx is None: - return - current_instances.pop(found_idx) - write_instances(current_instances) - - def list_instances(): """List all created instances from current workfile.""" return get_workfile_metadata(SECTION_NAME_INSTANCES) @@ -373,83 +450,31 @@ def write_instances(data): return write_workfile_metadata(SECTION_NAME_INSTANCES, data) -# Backwards compatibility -def _write_instances(*args, **kwargs): - return write_instances(*args, **kwargs) - - -def ls(): +def get_containers(): output = get_workfile_metadata(SECTION_NAME_CONTAINERS) if output: for item in output: if "objectName" not in item and "members" in item: members = item["members"] if isinstance(members, list): - members = "|".join(members) + members = "|".join([str(member) for member in members]) item["objectName"] = members return output -def on_instance_toggle(instance, old_value, new_value): - """Update instance data in workfile on publish toggle.""" - # Review may not have real instance in wokrfile metadata - if not instance.data.get("uuid"): - return - - instance_id = instance.data["uuid"] - found_idx = None - current_instances = list_instances() - for idx, workfile_instance in enumerate(current_instances): - if workfile_instance["uuid"] == instance_id: - found_idx = idx - break - - if found_idx is None: - return - - if "active" in current_instances[found_idx]: - 
current_instances[found_idx]["active"] = new_value - write_instances(current_instances) - - -def initial_launch(): - # Setup project settings if its the template that's launched. - # TODO also check for template creation when it's possible to define - # templates - last_workfile = os.environ.get("AVALON_LAST_WORKFILE") - if not last_workfile or os.path.exists(last_workfile): - return - - log.info("Setting up project...") - set_context_settings() - - -def application_exit(): - data = get_current_project_settings() - stop_timer = data["tvpaint"]["stop_timer_on_application_exit"] - - if not stop_timer: - return - - # Stop application timer. - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) - requests.post(rest_api_url) - - def set_context_settings(asset_doc=None): """Set workfile settings by asset document data. Change fps, resolution and frame start/end. """ - if asset_doc is None: - # Use current session asset if not passed - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": legacy_io.Session["AVALON_ASSET"] - }) - project_doc = legacy_io.find_one({"type": "project"}) + project_name = legacy_io.active_project() + if asset_doc is None: + asset_name = legacy_io.Session["AVALON_ASSET"] + # Use current session asset if not passed + asset_doc = get_asset_by_name(project_name, asset_name) + + project_doc = get_project(project_name) framerate = asset_doc["data"].get("fps") if framerate is None: diff --git a/openpype/hosts/tvpaint/api/plugin.py b/openpype/hosts/tvpaint/api/plugin.py index 15ad8905e0..da456e7067 100644 --- a/openpype/hosts/tvpaint/api/plugin.py +++ b/openpype/hosts/tvpaint/api/plugin.py @@ -4,11 +4,11 @@ import uuid from openpype.pipeline import ( LegacyCreator, LoaderPlugin, + registered_host, ) -from openpype.hosts.tvpaint.api import ( - pipeline, - lib -) + +from .lib import get_layers_data +from .pipeline import get_current_workfile_context class Creator(LegacyCreator): @@ -22,7 +22,7 @@ class Creator(LegacyCreator): dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs) # Change asset and name by current workfile context - workfile_context = pipeline.get_current_workfile_context() + workfile_context = get_current_workfile_context() asset_name = workfile_context.get("asset") task_name = workfile_context.get("task") if "asset" not in dynamic_data and asset_name: @@ -67,10 +67,12 @@ class Creator(LegacyCreator): self.log.debug( "Storing instance data to workfile. {}".format(str(data)) ) - return pipeline.write_instances(data) + host = registered_host() + return host.write_instances(data) def process(self): - data = pipeline.list_instances() + host = registered_host() + data = host.list_instances() data.append(self.data) self.write_instances(data) @@ -108,7 +110,7 @@ class Loader(LoaderPlugin): counter_regex = re.compile(r"_(\d{3})$") higher_counter = 0 - for layer in lib.get_layers_data(): + for layer in get_layers_data(): layer_name = layer["name"] if not layer_name.startswith(layer_name_base): continue diff --git a/openpype/hosts/tvpaint/api/workio.py b/openpype/hosts/tvpaint/api/workio.py deleted file mode 100644 index 1a5ad00ca8..0000000000 --- a/openpype/hosts/tvpaint/api/workio.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Host API required for Work Files. 
-# TODO @iLLiCiT implement functions: - has_unsaved_changes -""" - -from openpype.pipeline import ( - HOST_WORKFILE_EXTENSIONS, - legacy_io, -) -from .lib import ( - execute_george, - execute_george_through_file -) -from .pipeline import save_current_workfile_context - - -def open_file(filepath): - """Open the scene file in Blender.""" - george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( - filepath.replace("\\", "/") - ) - return execute_george_through_file(george_script) - - -def save_file(filepath): - """Save the open scene file.""" - # Store context to workfile before save - context = { - "project": legacy_io.Session["AVALON_PROJECT"], - "asset": legacy_io.Session["AVALON_ASSET"], - "task": legacy_io.Session["AVALON_TASK"] - } - save_current_workfile_context(context) - - # Execute george script to save workfile. - george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/")) - return execute_george(george_script) - - -def current_file(): - """Return the path of the open scene file.""" - george_script = "tv_GetProjectName" - return execute_george(george_script) - - -def has_unsaved_changes(): - """Does the open scene file have unsaved changes?""" - return False - - -def file_extensions(): - """Return the supported file extensions for Blender scene files.""" - return HOST_WORKFILE_EXTENSIONS["tvpaint"] - - -def work_root(session): - """Return the default root to browse for work files.""" - return session["AVALON_WORKDIR"] diff --git a/openpype/hosts/tvpaint/lib.py b/openpype/hosts/tvpaint/lib.py index c67ab1e4fb..95653b6ecb 100644 --- a/openpype/hosts/tvpaint/lib.py +++ b/openpype/hosts/tvpaint/lib.py @@ -646,9 +646,6 @@ def rename_filepaths_by_frame_start( filepaths_by_frame, range_start, range_end, new_frame_start ): """Change frames in filenames of finished images to new frame start.""" - # Skip if source first frame is same as destination first frame - if range_start == new_frame_start: - return # Calculate frame end new_frame_end = range_end + (new_frame_start - range_start) @@ -669,14 +666,17 @@ def rename_filepaths_by_frame_start( source_range = range(range_start, range_end + 1) output_range = range(new_frame_start, new_frame_end + 1) + # Skip if source first frame is same as destination first frame new_dst_filepaths = {} for src_frame, dst_frame in zip(source_range, output_range): - src_filepath = filepaths_by_frame[src_frame] - src_dirpath = os.path.dirname(src_filepath) + src_filepath = os.path.normpath(filepaths_by_frame[src_frame]) + dirpath, src_filename = os.path.split(src_filepath) dst_filename = filename_template.format(frame=dst_frame) - dst_filepath = os.path.join(src_dirpath, dst_filename) + dst_filepath = os.path.join(dirpath, dst_filename) - os.rename(src_filepath, dst_filepath) + if src_filename != dst_filename: + os.rename(src_filepath, dst_filepath) new_dst_filepaths[dst_frame] = dst_filepath + return new_dst_filepaths diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py index 3b5bd47189..a085830e96 100644 --- a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py +++ b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py @@ -1,11 +1,15 @@ -from openpype.pipeline import CreatorError from openpype.lib import prepare_template_data +from openpype.pipeline import CreatorError from openpype.hosts.tvpaint.api import ( plugin, - pipeline, - lib, CommunicationWrapper ) +from openpype.hosts.tvpaint.api.lib import ( + get_layers_data, + get_groups_data, + 
execute_george_through_file, +) +from openpype.hosts.tvpaint.api.pipeline import list_instances class CreateRenderlayer(plugin.Creator): @@ -63,7 +67,7 @@ class CreateRenderlayer(plugin.Creator): # Validate that communication is initialized if CommunicationWrapper.communicator: # Get currently selected layers - layers_data = lib.get_layers_data() + layers_data = get_layers_data() selected_layers = [ layer @@ -81,8 +85,8 @@ class CreateRenderlayer(plugin.Creator): def process(self): self.log.debug("Query data from workfile.") - instances = pipeline.list_instances() - layers_data = lib.get_layers_data() + instances = list_instances() + layers_data = get_layers_data() self.log.debug("Checking for selection groups.") # Collect group ids from selection @@ -109,7 +113,7 @@ class CreateRenderlayer(plugin.Creator): self.log.debug(f"Selected group id is \"{group_id}\".") self.data["group_id"] = group_id - group_data = lib.get_groups_data() + group_data = get_groups_data() group_name = None for group in group_data: if group["group_id"] == group_id: @@ -176,7 +180,7 @@ class CreateRenderlayer(plugin.Creator): return self.log.debug("Querying groups data from workfile.") - groups_data = lib.get_groups_data() + groups_data = get_groups_data() self.log.debug("Changing name of the group.") selected_group = None @@ -195,7 +199,7 @@ class CreateRenderlayer(plugin.Creator): b=selected_group["blue"], name=new_group_name ) - lib.execute_george_through_file(rename_script) + execute_george_through_file(rename_script) self.log.info( f"Name of group with index {group_id}" diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py index 26fa8ac51a..a44cb29f20 100644 --- a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py +++ b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py @@ -2,10 +2,10 @@ from openpype.pipeline import CreatorError from openpype.lib import prepare_template_data from openpype.hosts.tvpaint.api import ( plugin, - pipeline, - lib, CommunicationWrapper ) +from openpype.hosts.tvpaint.api.lib import get_layers_data +from openpype.hosts.tvpaint.api.pipeline import list_instances class CreateRenderPass(plugin.Creator): @@ -54,7 +54,7 @@ class CreateRenderPass(plugin.Creator): # Validate that communication is initialized if CommunicationWrapper.communicator: # Get currently selected layers - layers_data = lib.layers_data() + layers_data = get_layers_data() selected_layers = [ layer @@ -72,8 +72,8 @@ class CreateRenderPass(plugin.Creator): def process(self): self.log.debug("Query data from workfile.") - instances = pipeline.list_instances() - layers_data = lib.layers_data() + instances = list_instances() + layers_data = get_layers_data() self.log.debug("Checking selection.") # Get all selected layers and their group ids diff --git a/openpype/hosts/tvpaint/plugins/load/load_image.py b/openpype/hosts/tvpaint/plugins/load/load_image.py index f861d0119e..5283d04355 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_image.py +++ b/openpype/hosts/tvpaint/plugins/load/load_image.py @@ -1,5 +1,6 @@ -import qargparse -from openpype.hosts.tvpaint.api import lib, plugin +from openpype.lib.attribute_definitions import BoolDef +from openpype.hosts.tvpaint.api import plugin +from openpype.hosts.tvpaint.api.lib import execute_george_through_file class ImportImage(plugin.Loader): @@ -26,26 +27,28 @@ class ImportImage(plugin.Loader): "preload": True } - options = [ - qargparse.Boolean( - "stretch", - label="Stretch to 
project size", - default=True, - help="Stretch loaded image/s to project resolution?" - ), - qargparse.Boolean( - "timestretch", - label="Stretch to timeline length", - default=True, - help="Clip loaded image/s to timeline length?" - ), - qargparse.Boolean( - "preload", - label="Preload loaded image/s", - default=True, - help="Preload image/s?" - ) - ] + @classmethod + def get_options(cls, contexts): + return [ + BoolDef( + "stretch", + label="Stretch to project size", + default=cls.defaults["stretch"], + tooltip="Stretch loaded image/s to project resolution?" + ), + BoolDef( + "timestretch", + label="Stretch to timeline length", + default=cls.defaults["timestretch"], + tooltip="Clip loaded image/s to timeline length?" + ), + BoolDef( + "preload", + label="Preload loaded image/s", + default=cls.defaults["preload"], + tooltip="Preload image/s?" + ) + ] def load(self, context, name, namespace, options): stretch = options.get("stretch", self.defaults["stretch"]) @@ -79,4 +82,4 @@ class ImportImage(plugin.Loader): layer_name, load_options_str ) - return lib.execute_george_through_file(george_script) + return execute_george_through_file(george_script) diff --git a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py index af1a4a9b6b..7f7a68cc41 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py +++ b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py @@ -1,7 +1,20 @@ import collections -import qargparse -from openpype.pipeline import get_representation_context -from openpype.hosts.tvpaint.api import lib, pipeline, plugin + +from openpype.lib.attribute_definitions import BoolDef +from openpype.pipeline import ( + get_representation_context, + register_host, +) +from openpype.hosts.tvpaint.api import plugin +from openpype.hosts.tvpaint.api.lib import ( + get_layers_data, + execute_george_through_file, +) +from openpype.hosts.tvpaint.api.pipeline import ( + write_workfile_metadata, + SECTION_NAME_CONTAINERS, + containerise, +) class LoadImage(plugin.Loader): @@ -28,26 +41,28 @@ class LoadImage(plugin.Loader): "preload": True } - options = [ - qargparse.Boolean( - "stretch", - label="Stretch to project size", - default=True, - help="Stretch loaded image/s to project resolution?" - ), - qargparse.Boolean( - "timestretch", - label="Stretch to timeline length", - default=True, - help="Clip loaded image/s to timeline length?" - ), - qargparse.Boolean( - "preload", - label="Preload loaded image/s", - default=True, - help="Preload image/s?" - ) - ] + @classmethod + def get_options(cls, contexts): + return [ + BoolDef( + "stretch", + label="Stretch to project size", + default=cls.defaults["stretch"], + tooltip="Stretch loaded image/s to project resolution?" + ), + BoolDef( + "timestretch", + label="Stretch to timeline length", + default=cls.defaults["timestretch"], + tooltip="Clip loaded image/s to timeline length?" + ), + BoolDef( + "preload", + label="Preload loaded image/s", + default=cls.defaults["preload"], + tooltip="Preload image/s?" 
+ ) + ] def load(self, context, name, namespace, options): stretch = options.get("stretch", self.defaults["stretch"]) @@ -79,10 +94,10 @@ class LoadImage(plugin.Loader): load_options_str ) - lib.execute_george_through_file(george_script) + execute_george_through_file(george_script) loaded_layer = None - layers = lib.layers_data() + layers = get_layers_data() for layer in layers: if layer["name"] == layer_name: loaded_layer = layer @@ -95,7 +110,7 @@ class LoadImage(plugin.Loader): layer_names = [loaded_layer["name"]] namespace = namespace or layer_name - return pipeline.containerise( + return containerise( name=name, namespace=namespace, members=layer_names, @@ -109,7 +124,7 @@ class LoadImage(plugin.Loader): return if layers is None: - layers = lib.layers_data() + layers = get_layers_data() available_ids = set(layer["layer_id"] for layer in layers) @@ -152,14 +167,15 @@ class LoadImage(plugin.Loader): line = "tv_layerkill {}".format(layer_id) george_script_lines.append(line) george_script = "\n".join(george_script_lines) - lib.execute_george_through_file(george_script) + execute_george_through_file(george_script) def _remove_container(self, container, members=None): if not container: return representation = container["representation"] members = self.get_members_from_container(container) - current_containers = pipeline.ls() + host = register_host() + current_containers = host.get_containers() pop_idx = None for idx, cur_con in enumerate(current_containers): cur_members = self.get_members_from_container(cur_con) @@ -179,8 +195,8 @@ class LoadImage(plugin.Loader): return current_containers.pop(pop_idx) - pipeline.write_workfile_metadata( - pipeline.SECTION_NAME_CONTAINERS, current_containers + write_workfile_metadata( + SECTION_NAME_CONTAINERS, current_containers ) def remove(self, container): @@ -214,7 +230,7 @@ class LoadImage(plugin.Loader): break old_layers = [] - layers = lib.layers_data() + layers = get_layers_data() previous_layer_ids = set(layer["layer_id"] for layer in layers) if old_layers_are_ids: for layer in layers: @@ -263,7 +279,7 @@ class LoadImage(plugin.Loader): new_container = self.load(context, name, namespace, {}) new_layer_names = self.get_members_from_container(new_container) - layers = lib.layers_data() + layers = get_layers_data() new_layers = [] for layer in layers: @@ -304,4 +320,4 @@ class LoadImage(plugin.Loader): # Execute george scripts if there are any if george_script_lines: george_script = "\n".join(george_script_lines) - lib.execute_george_through_file(george_script) + execute_george_through_file(george_script) diff --git a/openpype/hosts/tvpaint/plugins/load/load_sound.py b/openpype/hosts/tvpaint/plugins/load/load_sound.py index 3f42370f5c..f312db262a 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_sound.py +++ b/openpype/hosts/tvpaint/plugins/load/load_sound.py @@ -1,6 +1,9 @@ import os import tempfile -from openpype.hosts.tvpaint.api import lib, plugin +from openpype.hosts.tvpaint.api import plugin +from openpype.hosts.tvpaint.api.lib import ( + execute_george_through_file, +) class ImportSound(plugin.Loader): @@ -64,7 +67,7 @@ class ImportSound(plugin.Loader): ) self.log.info("*** George script:\n{}\n***".format(george_script)) # Execute geoge script - lib.execute_george_through_file(george_script) + execute_george_through_file(george_script) # Read output file lines = [] diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py index 0eab083c22..fc7588f56e 100644 --- 
a/openpype/hosts/tvpaint/plugins/load/load_workfile.py +++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py @@ -1,17 +1,23 @@ import os -from openpype.lib import ( - StringTemplate, - get_workfile_template_key_from_context, - get_workdir_data, - get_last_workfile_with_version, -) +from openpype.lib import StringTemplate from openpype.pipeline import ( registered_host, legacy_io, + Anatomy, +) +from openpype.pipeline.workfile import ( + get_workfile_template_key_from_context, + get_last_workfile_with_version, +) +from openpype.pipeline.template_data import get_template_data_with_names +from openpype.hosts.tvpaint.api import plugin +from openpype.hosts.tvpaint.api.lib import ( + execute_george_through_file, +) +from openpype.hosts.tvpaint.api.pipeline import ( + get_current_workfile_context, ) -from openpype.api import Anatomy -from openpype.hosts.tvpaint.api import lib, pipeline, plugin class LoadWorkfile(plugin.Loader): @@ -26,9 +32,9 @@ class LoadWorkfile(plugin.Loader): # Load context of current workfile as first thing # - which context and extension has host = registered_host() - current_file = host.current_file() + current_file = host.get_current_workfile() - context = pipeline.get_current_workfile_context() + context = get_current_workfile_context() filepath = self.fname.replace("\\", "/") @@ -40,47 +46,42 @@ class LoadWorkfile(plugin.Loader): george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( filepath ) - lib.execute_george_through_file(george_script) + execute_george_through_file(george_script) # Save workfile. host_name = "tvpaint" + project_name = context.get("project") asset_name = context.get("asset") task_name = context.get("task") # Far cases when there is workfile without context if not asset_name: + project_name = legacy_io.active_project() asset_name = legacy_io.Session["AVALON_ASSET"] task_name = legacy_io.Session["AVALON_TASK"] - project_doc = legacy_io.find_one({ - "type": "project" - }) - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) - project_name = project_doc["name"] - template_key = get_workfile_template_key_from_context( asset_name, task_name, host_name, - project_name=project_name, - dbcon=legacy_io + project_name=project_name ) anatomy = Anatomy(project_name) - data = get_workdir_data(project_doc, asset_doc, task_name, host_name) + data = get_template_data_with_names( + project_name, asset_name, task_name, host_name + ) data["root"] = anatomy.roots file_template = anatomy.templates[template_key]["file"] # Define saving file extension + extensions = host.get_workfile_extensions() if current_file: # Match the extension of current file _, extension = os.path.splitext(current_file) else: # Fall back to the first extension supported for this host. 
- extension = host.file_extensions()[0] + extension = extensions[0] data["ext"] = extension @@ -89,7 +90,7 @@ class LoadWorkfile(plugin.Loader): folder_template, data ) version = get_last_workfile_with_version( - work_root, file_template, data, host.file_extensions() + work_root, file_template, data, extensions )[1] if version is None: @@ -103,4 +104,4 @@ class LoadWorkfile(plugin.Loader): file_template, data ) path = os.path.join(work_root, filename) - host.save_file(path) + host.save_workfile(path) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py index f291c363b8..d5b79758ad 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py @@ -6,7 +6,7 @@ class CollectOutputFrameRange(pyblish.api.ContextPlugin): When instances are collected context does not contain `frameStart` and `frameEnd` keys yet. They are collected in global plugin - `CollectAvalonEntities`. + `CollectContextEntities`. """ label = "Collect output frame range" order = pyblish.api.CollectorOrder diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py index 782907b65d..ae1326a5bd 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py @@ -2,8 +2,9 @@ import json import copy import pyblish.api -from openpype.lib import get_subset_name_with_asset_doc +from openpype.client import get_asset_by_name from openpype.pipeline import legacy_io +from openpype.pipeline.create import get_subset_name class CollectInstances(pyblish.api.ContextPlugin): @@ -92,29 +93,28 @@ class CollectInstances(pyblish.api.ContextPlugin): if family == "review": # Change subset name of review instance + # Project name from workfile context + project_name = context.data["workfile_context"]["project"] + # Collect asset doc to get asset id # - not sure if it's good idea to require asset id in # get_subset_name? 
asset_name = context.data["workfile_context"]["asset"] - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) - # Project name from workfile context - project_name = context.data["workfile_context"]["project"] # Host name from environment variable host_name = context.data["hostName"] # Use empty variant value variant = "" task_name = legacy_io.Session["AVALON_TASK"] - new_subset_name = get_subset_name_with_asset_doc( + new_subset_name = get_subset_name( family, variant, task_name, asset_doc, project_name, - host_name + host_name, + project_settings=context.data["project_settings"] ) instance_data["subset"] = new_subset_name diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py index 2b8dbdc5b4..92a2815ba0 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py @@ -2,8 +2,8 @@ import json import copy import pyblish.api -from openpype.lib import get_subset_name_with_asset_doc -from openpype.pipeline import legacy_io +from openpype.client import get_asset_by_name +from openpype.pipeline.create import get_subset_name class CollectRenderScene(pyblish.api.ContextPlugin): @@ -56,14 +56,11 @@ class CollectRenderScene(pyblish.api.ContextPlugin): # - not sure if it's good idea to require asset id in # get_subset_name? workfile_context = context.data["workfile_context"] - asset_name = workfile_context["asset"] - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) - # Project name from workfile context project_name = context.data["workfile_context"]["project"] + asset_name = workfile_context["asset"] + asset_doc = get_asset_by_name(project_name, asset_name) + # Host name from environment variable host_name = context.data["hostName"] # Variant is using render pass name @@ -78,14 +75,15 @@ class CollectRenderScene(pyblish.api.ContextPlugin): dynamic_data["render_pass"] = dynamic_data["renderpass"] task_name = workfile_context["task"] - subset_name = get_subset_name_with_asset_doc( + subset_name = get_subset_name( "render", variant, task_name, asset_doc, project_name, host_name, - dynamic_data=dynamic_data + dynamic_data=dynamic_data, + project_settings=context.data["project_settings"] ) instance_data = { diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py index 70d92f82e9..8c7c8c3899 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py @@ -2,8 +2,9 @@ import os import json import pyblish.api -from openpype.lib import get_subset_name_with_asset_doc +from openpype.client import get_asset_by_name from openpype.pipeline import legacy_io +from openpype.pipeline.create import get_subset_name class CollectWorkfile(pyblish.api.ContextPlugin): @@ -22,31 +23,30 @@ class CollectWorkfile(pyblish.api.ContextPlugin): basename, ext = os.path.splitext(filename) instance = context.create_instance(name=basename) + # Project name from workfile context + project_name = context.data["workfile_context"]["project"] + # Get subset name of workfile instance # Collect asset doc to get asset id # - not sure if it's good idea to require asset id in # get_subset_name? 
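# --- Illustrative example (not part of the patch) -------------------------
# The collectors in this patch (collect_instances, collect_scene_render,
# collect_workfile) switch from `get_subset_name_with_asset_doc` to
# `openpype.pipeline.create.get_subset_name`, passing the asset document
# queried with `openpype.client.get_asset_by_name`. A minimal sketch with
# illustrative project/asset/task values:
from openpype.client import get_asset_by_name
from openpype.pipeline.create import get_subset_name

project_name = "exampleProject"          # illustrative only
asset_doc = get_asset_by_name(project_name, "sh010")
subset_name = get_subset_name(
    "workfile",                          # family
    "",                                  # variant
    "animation",                         # task name
    asset_doc,
    project_name,
    "tvpaint",                           # host name
    # The collectors additionally pass the already collected settings as
    # project_settings=context.data["project_settings"].
)
# ---------------------------------------------------------------------------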
family = "workfile" asset_name = context.data["workfile_context"]["asset"] - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) - # Project name from workfile context - project_name = context.data["workfile_context"]["project"] # Host name from environment variable host_name = os.environ["AVALON_APP"] # Use empty variant value variant = "" task_name = legacy_io.Session["AVALON_TASK"] - subset_name = get_subset_name_with_asset_doc( + subset_name = get_subset_name( family, variant, task_name, asset_doc, project_name, - host_name + host_name, + project_settings=context.data["project_settings"] ) # Create Workfile instance diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py index c59ef82f85..8fe71a4a46 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py @@ -5,7 +5,22 @@ import tempfile import pyblish.api from openpype.pipeline import legacy_io -from openpype.hosts.tvpaint.api import pipeline, lib +from openpype.hosts.tvpaint.api.lib import ( + execute_george, + execute_george_through_file, + get_layers_data, + get_groups_data, +) +from openpype.hosts.tvpaint.api.pipeline import ( + SECTION_NAME_CONTEXT, + SECTION_NAME_INSTANCES, + SECTION_NAME_CONTAINERS, + + get_workfile_metadata_string, + write_workfile_metadata, + get_current_workfile_context, + list_instances, +) class ResetTVPaintWorkfileMetadata(pyblish.api.Action): @@ -15,12 +30,12 @@ class ResetTVPaintWorkfileMetadata(pyblish.api.Action): def process(self, context, plugin): metadata_keys = { - pipeline.SECTION_NAME_CONTEXT: {}, - pipeline.SECTION_NAME_INSTANCES: [], - pipeline.SECTION_NAME_CONTAINERS: [] + SECTION_NAME_CONTEXT: {}, + SECTION_NAME_INSTANCES: [], + SECTION_NAME_CONTAINERS: [] } for metadata_key, default in metadata_keys.items(): - json_string = pipeline.get_workfile_metadata_string(metadata_key) + json_string = get_workfile_metadata_string(metadata_key) if not json_string: continue @@ -35,7 +50,7 @@ class ResetTVPaintWorkfileMetadata(pyblish.api.Action): ).format(metadata_key, default, json_string), exc_info=True ) - pipeline.write_workfile_metadata(metadata_key, default) + write_workfile_metadata(metadata_key, default) class CollectWorkfileData(pyblish.api.ContextPlugin): @@ -45,8 +60,8 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): actions = [ResetTVPaintWorkfileMetadata] def process(self, context): - current_project_id = lib.execute_george("tv_projectcurrentid") - lib.execute_george("tv_projectselect {}".format(current_project_id)) + current_project_id = execute_george("tv_projectcurrentid") + execute_george("tv_projectselect {}".format(current_project_id)) # Collect and store current context to have reference current_context = { @@ -60,7 +75,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect context from workfile metadata self.log.info("Collecting workfile context") - workfile_context = pipeline.get_current_workfile_context() + workfile_context = get_current_workfile_context() # Store workfile context to pyblish context context.data["workfile_context"] = workfile_context if workfile_context: @@ -96,7 +111,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect instances self.log.info("Collecting instance data from workfile") - instance_data = pipeline.list_instances() + instance_data = list_instances() 
context.data["workfileInstances"] = instance_data self.log.debug( "Instance data:\"{}".format(json.dumps(instance_data, indent=4)) @@ -104,7 +119,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect information about layers self.log.info("Collecting layers data from workfile") - layers_data = lib.layers_data() + layers_data = get_layers_data() layers_by_name = {} for layer in layers_data: layer_name = layer["name"] @@ -120,14 +135,14 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect information about groups self.log.info("Collecting groups data from workfile") - group_data = lib.groups_data() + group_data = get_groups_data() context.data["groupsData"] = group_data self.log.debug( "Group data:\"{}".format(json.dumps(group_data, indent=4)) ) self.log.info("Collecting scene data from workfile") - workfile_info_parts = lib.execute_george("tv_projectinfo").split(" ") + workfile_info_parts = execute_george("tv_projectinfo").split(" ") # Project frame start - not used workfile_info_parts.pop(-1) @@ -139,10 +154,10 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): workfile_path = " ".join(workfile_info_parts).replace("\"", "") # Marks return as "{frame - 1} {state} ", example "0 set". - result = lib.execute_george("tv_markin") + result = execute_george("tv_markin") mark_in_frame, mark_in_state, _ = result.split(" ") - result = lib.execute_george("tv_markout") + result = execute_george("tv_markout") mark_out_frame, mark_out_state, _ = result.split(" ") scene_data = { @@ -156,7 +171,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): "sceneMarkInState": mark_in_state == "set", "sceneMarkOut": int(mark_out_frame), "sceneMarkOutState": mark_out_state == "set", - "sceneStartFrame": int(lib.execute_george("tv_startframe")), + "sceneStartFrame": int(execute_george("tv_startframe")), "sceneBgColor": self._get_bg_color() } self.log.debug( @@ -188,7 +203,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): ] george_script = "\n".join(george_script_lines) - lib.execute_george_through_file(george_script) + execute_george_through_file(george_script) with open(output_filepath, "r") as stream: data = stream.read() diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index d4fd1dff4b..1ebaf1da64 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -5,7 +5,13 @@ import tempfile from PIL import Image import pyblish.api -from openpype.hosts.tvpaint.api import lib + +from openpype.hosts.tvpaint.api.lib import ( + execute_george, + execute_george_through_file, + get_layers_pre_post_behavior, + get_layers_exposure_frames, +) from openpype.hosts.tvpaint.lib import ( calculate_layers_extraction_data, get_frame_filename_template, @@ -61,7 +67,7 @@ class ExtractSequence(pyblish.api.Extractor): # different way when Start Frame is not `0` # NOTE It will be set back after rendering scene_start_frame = instance.context.data["sceneStartFrame"] - lib.execute_george("tv_startframe 0") + execute_george("tv_startframe 0") # Frame start/end may be stored as float frame_start = int(instance.data["frameStart"]) @@ -73,14 +79,8 @@ class ExtractSequence(pyblish.api.Extractor): scene_bg_color = instance.context.data["sceneBgColor"] - # --- Fallbacks ---------------------------------------------------- - # This is required if validations of ranges are ignored. 
- # - all of this code won't change processing if range to render - # match to range of expected output - # Prepare output frames output_frame_start = frame_start - handle_start - output_frame_end = frame_end + handle_end # Change output frame start to 0 if handles cause it's negative number if output_frame_start < 0: @@ -90,32 +90,8 @@ class ExtractSequence(pyblish.api.Extractor): ).format(frame_start, handle_start)) output_frame_start = 0 - # Check Marks range and output range - output_range = output_frame_end - output_frame_start - marks_range = mark_out - mark_in - - # Lower Mark Out if mark range is bigger than output - # - do not rendered not used frames - if output_range < marks_range: - new_mark_out = mark_out - (marks_range - output_range) - self.log.warning(( - "Lowering render range to {} frames. Changed Mark Out {} -> {}" - ).format(marks_range + 1, mark_out, new_mark_out)) - # Assign new mark out to variable - mark_out = new_mark_out - - # Lower output frame end so representation has right `frameEnd` value - elif output_range > marks_range: - new_output_frame_end = ( - output_frame_end - (output_range - marks_range) - ) - self.log.warning(( - "Lowering representation range to {} frames." - " Changed frame end {} -> {}" - ).format(output_range + 1, mark_out, new_output_frame_end)) - output_frame_end = new_output_frame_end - - # ------------------------------------------------------------------- + # Calculate frame end + output_frame_end = output_frame_start + (mark_out - mark_in) # Save to staging dir output_dir = instance.data.get("stagingDir") @@ -143,7 +119,7 @@ class ExtractSequence(pyblish.api.Extractor): output_filepaths_by_frame_idx, thumbnail_fullpath = result # Change scene frame Start back to previous value - lib.execute_george("tv_startframe {}".format(scene_start_frame)) + execute_george("tv_startframe {}".format(scene_start_frame)) # Sequence of one frame if not output_filepaths_by_frame_idx: @@ -271,7 +247,7 @@ class ExtractSequence(pyblish.api.Extractor): george_script_lines.append(" ".join(orig_color_command)) - lib.execute_george_through_file("\n".join(george_script_lines)) + execute_george_through_file("\n".join(george_script_lines)) first_frame_filepath = None output_filepaths_by_frame_idx = {} @@ -334,8 +310,8 @@ class ExtractSequence(pyblish.api.Extractor): return [], None self.log.debug("Collecting pre/post behavior of individual layers.") - behavior_by_layer_id = lib.get_layers_pre_post_behavior(layer_ids) - exposure_frames_by_layer_id = lib.get_layers_exposure_frames( + behavior_by_layer_id = get_layers_pre_post_behavior(layer_ids) + exposure_frames_by_layer_id = get_layers_exposure_frames( layer_ids, layers ) extraction_data_by_layer_id = calculate_layers_extraction_data( @@ -440,7 +416,7 @@ class ExtractSequence(pyblish.api.Extractor): ",".join(frames_to_render), layer_id, layer["name"] )) # Let TVPaint render layer's image - lib.execute_george_through_file("\n".join(george_script_lines)) + execute_george_through_file("\n".join(george_script_lines)) # Fill frames between `frame_start_index` and `frame_end_index` self.log.debug("Filling frames not rendered frames.") diff --git a/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py b/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py index 24d6558168..a85caf2557 100644 --- a/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py +++ b/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py @@ -1,7 +1,7 @@ import pyblish.api -from openpype.api 
import version_up -from openpype.hosts.tvpaint.api import workio +from openpype.lib import version_up +from openpype.pipeline import registered_host class IncrementWorkfileVersion(pyblish.api.ContextPlugin): @@ -17,6 +17,7 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin): assert all(result["success"] for result in context.data["results"]), ( "Publishing not successful so version is not increased.") + host = registered_host() path = context.data["currentFile"] - workio.save_file(version_up(path)) + host.save_workfile(version_up(path)) self.log.info('Incrementing workfile version') diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py index 70816f9f18..7e35726030 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py @@ -1,6 +1,9 @@ import pyblish.api from openpype.pipeline import PublishXmlValidationError -from openpype.hosts.tvpaint.api import pipeline +from openpype.hosts.tvpaint.api.pipeline import ( + list_instances, + write_instances, +) class FixAssetNames(pyblish.api.Action): @@ -15,7 +18,7 @@ class FixAssetNames(pyblish.api.Action): def process(self, context, plugin): context_asset_name = context.data["asset"] - old_instance_items = pipeline.list_instances() + old_instance_items = list_instances() new_instance_items = [] for instance_item in old_instance_items: instance_asset_name = instance_item.get("asset") @@ -25,7 +28,7 @@ class FixAssetNames(pyblish.api.Action): ): instance_item["asset"] = context_asset_name new_instance_items.append(instance_item) - pipeline._write_instances(new_instance_items) + write_instances(new_instance_items) class ValidateAssetNames(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py index d1f299e006..0030b0fd1c 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py @@ -2,7 +2,7 @@ import json import pyblish.api from openpype.pipeline import PublishXmlValidationError -from openpype.hosts.tvpaint.api import lib +from openpype.hosts.tvpaint.api.lib import execute_george class ValidateMarksRepair(pyblish.api.Action): @@ -15,10 +15,10 @@ class ValidateMarksRepair(pyblish.api.Action): def process(self, context, plugin): expected_data = ValidateMarks.get_expected_data(context) - lib.execute_george( + execute_george( "tv_markin {} set".format(expected_data["markIn"]) ) - lib.execute_george( + execute_george( "tv_markout {} set".format(expected_data["markOut"]) ) @@ -39,7 +39,7 @@ class ValidateMarks(pyblish.api.ContextPlugin): def get_expected_data(context): scene_mark_in = context.data["sceneMarkIn"] - # Data collected in `CollectAvalonEntities` + # Data collected in `CollectContextEntities` frame_end = context.data["frameEnd"] frame_start = context.data["frameStart"] handle_start = context.data["handleStart"] diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py index ddc738c6ed..066e54c670 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py @@ -1,6 +1,6 @@ import pyblish.api from openpype.pipeline import PublishXmlValidationError -from openpype.hosts.tvpaint.api import lib +from openpype.hosts.tvpaint.api.lib import 
execute_george class RepairStartFrame(pyblish.api.Action): @@ -11,7 +11,7 @@ class RepairStartFrame(pyblish.api.Action): on = "failed" def process(self, context, plugin): - lib.execute_george("tv_startframe 0") + execute_george("tv_startframe 0") class ValidateStartFrame(pyblish.api.ContextPlugin): @@ -24,7 +24,7 @@ class ValidateStartFrame(pyblish.api.ContextPlugin): optional = True def process(self, context): - start_frame = lib.execute_george("tv_startframe") + start_frame = execute_george("tv_startframe") if start_frame == 0: return diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py index eac345f395..d66ae50c60 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py @@ -1,6 +1,5 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError -from openpype.hosts.tvpaint.api import save_file +from openpype.pipeline import PublishXmlValidationError, registered_host class ValidateWorkfileMetadataRepair(pyblish.api.Action): @@ -13,8 +12,9 @@ class ValidateWorkfileMetadataRepair(pyblish.api.Action): def process(self, context, _plugin): """Save current workfile which should trigger storing of metadata.""" current_file = context.data["currentFile"] + host = registered_host() # Save file should trigger - save_file(current_file) + host.save_workfile(current_file) class ValidateWorkfileMetadata(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/tvpaint/worker/init_file.tvpp b/openpype/hosts/tvpaint/worker/init_file.tvpp index 572d278fdb..22170b45bc 100644 Binary files a/openpype/hosts/tvpaint/worker/init_file.tvpp and b/openpype/hosts/tvpaint/worker/init_file.tvpp differ diff --git a/openpype/hosts/tvpaint/worker/worker_job.py b/openpype/hosts/tvpaint/worker/worker_job.py index 1c785ab2ee..95c0a678bc 100644 --- a/openpype/hosts/tvpaint/worker/worker_job.py +++ b/openpype/hosts/tvpaint/worker/worker_job.py @@ -9,7 +9,7 @@ from abc import ABCMeta, abstractmethod, abstractproperty import six -from openpype.api import PypeLogger +from openpype.lib import Logger from openpype.modules import ModulesManager @@ -328,7 +328,7 @@ class TVPaintCommands: def log(self): """Access to logger object.""" if self._log is None: - self._log = PypeLogger.get_logger(self.__class__.__name__) + self._log = Logger.get_logger(self.__class__.__name__) return self._log @property diff --git a/openpype/hosts/unreal/__init__.py b/openpype/hosts/unreal/__init__.py index 533f315df3..42dd8f0ac4 100644 --- a/openpype/hosts/unreal/__init__.py +++ b/openpype/hosts/unreal/__init__.py @@ -1,20 +1,6 @@ -import os -import openpype.hosts +from .addon import UnrealAddon -def add_implementation_envs(env, _app): - """Modify environments to contain all required for implementation.""" - # Set OPENPYPE_UNREAL_PLUGIN required for Unreal implementation - unreal_plugin_path = os.path.join( - os.path.dirname(os.path.abspath(openpype.hosts.__file__)), - "unreal", "integration" - ) - env["OPENPYPE_UNREAL_PLUGIN"] = unreal_plugin_path - - # Set default environments if are not set via settings - defaults = { - "OPENPYPE_LOG_NO_COLORS": "True" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value +__all__ = ( + "UnrealAddon", +) diff --git a/openpype/hosts/unreal/addon.py b/openpype/hosts/unreal/addon.py new file mode 100644 index 0000000000..e2c8484651 --- /dev/null +++ b/openpype/hosts/unreal/addon.py 
@@ -0,0 +1,41 @@ +import os +from openpype.modules import OpenPypeModule, IHostAddon + +UNREAL_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class UnrealAddon(OpenPypeModule, IHostAddon): + name = "unreal" + host_name = "unreal" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, app): + """Modify environments to contain all required for implementation.""" + # Set OPENPYPE_UNREAL_PLUGIN required for Unreal implementation + + ue_plugin = "UE_5.0" if app.name[:1] == "5" else "UE_4.7" + unreal_plugin_path = os.path.join( + UNREAL_ROOT_DIR, "integration", ue_plugin + ) + if not env.get("OPENPYPE_UNREAL_PLUGIN"): + env["OPENPYPE_UNREAL_PLUGIN"] = unreal_plugin_path + + # Set default environments if are not set via settings + defaults = { + "OPENPYPE_LOG_NO_COLORS": "True" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(UNREAL_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".uproject"] diff --git a/openpype/hosts/unreal/api/__init__.py b/openpype/hosts/unreal/api/__init__.py index ede71aa218..3f96d8ac6f 100644 --- a/openpype/hosts/unreal/api/__init__.py +++ b/openpype/hosts/unreal/api/__init__.py @@ -1,10 +1,8 @@ # -*- coding: utf-8 -*- """Unreal Editor OpenPype host API.""" -from .plugin import ( - Loader, - Creator -) +from .plugin import Loader + from .pipeline import ( install, uninstall, @@ -19,12 +17,12 @@ from .pipeline import ( show_tools_dialog, show_tools_popup, instantiate, + UnrealHost, ) __all__ = [ "install", "uninstall", - "Creator", "Loader", "ls", "publish", @@ -36,5 +34,6 @@ __all__ = [ "show_experimental_tools", "show_tools_dialog", "show_tools_popup", - "instantiate" + "instantiate", + "UnrealHost", ] diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py index bbca7916d3..d396b64072 100644 --- a/openpype/hosts/unreal/api/pipeline.py +++ b/openpype/hosts/unreal/api/pipeline.py @@ -14,6 +14,7 @@ from openpype.pipeline import ( ) from openpype.tools.utils import host_tools import openpype.hosts.unreal +from openpype.host import HostBase, ILoadHost import unreal # noqa @@ -29,6 +30,32 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") +class UnrealHost(HostBase, ILoadHost): + """Unreal host implementation. + + For some time this class will re-use functions from module based + implementation for backwards compatibility of older unreal projects. 
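# --- Illustrative example (not part of the patch) -------------------------
# `UnrealAddon.add_implementation_envs` above selects the integration plugin
# from the major version encoded in the application name. A small
# illustration with hypothetical app names and an initially empty env:
#   app.name = "5.0"  -> OPENPYPE_UNREAL_PLUGIN = .../unreal/integration/UE_5.0
#   app.name = "4.27" -> OPENPYPE_UNREAL_PLUGIN = .../unreal/integration/UE_4.7
# An OPENPYPE_UNREAL_PLUGIN value already present in `env` is left untouched,
# OPENPYPE_LOG_NO_COLORS defaults to "True" when unset, and
# `get_workfile_extensions` reports ".uproject" as the workfile extension.
# ---------------------------------------------------------------------------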
+ """ + + name = "unreal" + + def install(self): + install() + + def get_containers(self): + return ls() + + def show_tools_popup(self): + """Show tools popup with actions leading to show other tools.""" + + show_tools_popup() + + def show_tools_dialog(self): + """Show tools dialog with actions leading to show other tools.""" + + show_tools_dialog() + + def install(): """Install Unreal configuration for OpenPype.""" print("-=" * 40) diff --git a/openpype/hosts/unreal/api/plugin.py b/openpype/hosts/unreal/api/plugin.py index d8d2f2420d..6fc00cb71c 100644 --- a/openpype/hosts/unreal/api/plugin.py +++ b/openpype/hosts/unreal/api/plugin.py @@ -1,16 +1,7 @@ # -*- coding: utf-8 -*- from abc import ABC -from openpype.pipeline import ( - LegacyCreator, - LoaderPlugin, -) - - -class Creator(LegacyCreator): - """This serves as skeleton for future OpenPype specific functionality""" - defaults = ['Main'] - maintain_selection = False +from openpype.pipeline import LoaderPlugin class Loader(LoaderPlugin, ABC): diff --git a/openpype/hosts/unreal/api/rendering.py b/openpype/hosts/unreal/api/rendering.py index 376e1b75ce..29e4747f6e 100644 --- a/openpype/hosts/unreal/api/rendering.py +++ b/openpype/hosts/unreal/api/rendering.py @@ -1,5 +1,8 @@ +import os + import unreal +from openpype.pipeline import Anatomy from openpype.hosts.unreal.api import pipeline @@ -46,6 +49,15 @@ def start_rendering(): if data["family"] == "render": inst_data.append(data) + try: + project = os.environ.get("AVALON_PROJECT") + anatomy = Anatomy(project) + root = anatomy.roots['renders'] + except Exception: + raise Exception("Could not find render root in anatomy settings.") + + render_dir = f"{root}/{project}" + # subsystem = unreal.get_editor_subsystem( # unreal.MoviePipelineQueueSubsystem) # queue = subsystem.get_queue() @@ -105,7 +117,7 @@ def start_rendering(): settings.custom_end_frame = r.get("frame_range")[1] settings.use_custom_playback_range = True settings.file_name_format = "{sequence_name}.{frame_number}" - settings.output_directory.path += r.get('output') + settings.output_directory.path = f"{render_dir}/{r.get('output')}" renderPass = job.get_configuration().find_or_add_setting_by_class( unreal.MoviePipelineDeferredPassBase) diff --git a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py index f07e96551c..4ae72593e9 100644 --- a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py +++ b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py @@ -1,15 +1,15 @@ # -*- coding: utf-8 -*- """Hook to launch Unreal and prepare projects.""" import os +import copy from pathlib import Path from openpype.lib import ( PreLaunchHook, ApplicationLaunchFailed, ApplicationNotFound, - get_workdir_data, - get_workfile_template_key ) +from openpype.pipeline.workfile import get_workfile_template_key import openpype.hosts.unreal.lib as unreal_lib @@ -25,7 +25,7 @@ class UnrealPrelaunchHook(PreLaunchHook): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.signature = "( {} )".format(self.__class__.__name__) + self.signature = f"( {self.__class__.__name__} )" def _get_work_filename(self): # Use last workfile if was found @@ -35,18 +35,13 @@ class UnrealPrelaunchHook(PreLaunchHook): return last_workfile.name # Prepare data for fill data and for getting workfile template key - task_name = self.data["task_name"] anatomy = self.data["anatomy"] - asset_doc = self.data["asset_doc"] project_doc = self.data["project_doc"] - asset_tasks = 
asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") + # Use already prepared workdir data + workdir_data = copy.deepcopy(self.data["workdir_data"]) + task_type = workdir_data.get("task", {}).get("type") - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, self.host_name - ) # QUESTION raise exception if version is part of filename template? workdir_data["version"] = 1 workdir_data["ext"] = "uproject" @@ -71,7 +66,7 @@ class UnrealPrelaunchHook(PreLaunchHook): if int(engine_version.split(".")[0]) < 4 and \ int(engine_version.split(".")[1]) < 26: raise ApplicationLaunchFailed(( - f"{self.signature} Old unsupported version of UE4 " + f"{self.signature} Old unsupported version of UE " f"detected - {engine_version}")) except ValueError: # there can be string in minor version and in that case @@ -99,18 +94,19 @@ class UnrealPrelaunchHook(PreLaunchHook): f"character ({unreal_project_name}). Appending 'P'" )) unreal_project_name = f"P{unreal_project_name}" + unreal_project_filename = f'{unreal_project_name}.uproject' project_path = Path(os.path.join(workdir, unreal_project_name)) self.log.info(( - f"{self.signature} requested UE4 version: " + f"{self.signature} requested UE version: " f"[ {engine_version} ]" )) detected = unreal_lib.get_engine_versions(self.launch_context.env) detected_str = ', '.join(detected.keys()) or 'none' self.log.info(( - f"{self.signature} detected UE4 versions: " + f"{self.signature} detected UE versions: " f"[ {detected_str} ]" )) if not detected: @@ -123,10 +119,10 @@ class UnrealPrelaunchHook(PreLaunchHook): f"detected [ {engine_version} ]" )) - ue4_path = unreal_lib.get_editor_executable_path( - Path(detected[engine_version])) + ue_path = unreal_lib.get_editor_executable_path( + Path(detected[engine_version]), engine_version) - self.launch_context.launch_args = [ue4_path.as_posix()] + self.launch_context.launch_args = [ue_path.as_posix()] project_path.mkdir(parents=True, exist_ok=True) project_file = project_path / unreal_project_filename @@ -138,6 +134,11 @@ class UnrealPrelaunchHook(PreLaunchHook): )) # Set "OPENPYPE_UNREAL_PLUGIN" to current process environment for # execution of `create_unreal_project` + if self.launch_context.env.get("OPENPYPE_UNREAL_PLUGIN"): + self.log.info(( + f"{self.signature} using OpenPype plugin from " + f"{self.launch_context.env.get('OPENPYPE_UNREAL_PLUGIN')}" + )) env_key = "OPENPYPE_UNREAL_PLUGIN" if self.launch_context.env.get(env_key): os.environ[env_key] = self.launch_context.env[env_key] diff --git a/openpype/hosts/unreal/integration/.gitignore b/openpype/hosts/unreal/integration/UE_4.7/.gitignore similarity index 100% rename from openpype/hosts/unreal/integration/.gitignore rename to openpype/hosts/unreal/integration/UE_4.7/.gitignore diff --git a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py b/openpype/hosts/unreal/integration/UE_4.7/Content/Python/init_unreal.py similarity index 90% rename from openpype/hosts/unreal/integration/Content/Python/init_unreal.py rename to openpype/hosts/unreal/integration/UE_4.7/Content/Python/init_unreal.py index 4bb03b07ed..b85f970699 100644 --- a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py +++ b/openpype/hosts/unreal/integration/UE_4.7/Content/Python/init_unreal.py @@ -3,7 +3,9 @@ import unreal openpype_detected = True try: from openpype.pipeline import install_host - from openpype.hosts.unreal import api as openpype_host + from openpype.hosts.unreal.api import 
UnrealHost + + openpype_host = UnrealHost() except ImportError as exc: openpype_host = None openpype_detected = False diff --git a/openpype/hosts/unreal/integration/OpenPype.uplugin b/openpype/hosts/unreal/integration/UE_4.7/OpenPype.uplugin similarity index 100% rename from openpype/hosts/unreal/integration/OpenPype.uplugin rename to openpype/hosts/unreal/integration/UE_4.7/OpenPype.uplugin diff --git a/openpype/hosts/unreal/integration/README.md b/openpype/hosts/unreal/integration/UE_4.7/README.md similarity index 91% rename from openpype/hosts/unreal/integration/README.md rename to openpype/hosts/unreal/integration/UE_4.7/README.md index a32d89aab8..a08c1ada39 100644 --- a/openpype/hosts/unreal/integration/README.md +++ b/openpype/hosts/unreal/integration/UE_4.7/README.md @@ -1,4 +1,4 @@ -# OpenPype Unreal Integration plugin +# OpenPype Unreal Integration plugin - UE 4.x This is plugin for Unreal Editor, creating menu for [OpenPype](https://github.com/getavalon) tools to run. diff --git a/openpype/hosts/unreal/integration/Resources/openpype128.png b/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype128.png similarity index 100% rename from openpype/hosts/unreal/integration/Resources/openpype128.png rename to openpype/hosts/unreal/integration/UE_4.7/Resources/openpype128.png diff --git a/openpype/hosts/unreal/integration/Resources/openpype40.png b/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype40.png similarity index 100% rename from openpype/hosts/unreal/integration/Resources/openpype40.png rename to openpype/hosts/unreal/integration/UE_4.7/Resources/openpype40.png diff --git a/openpype/hosts/unreal/integration/Resources/openpype512.png b/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype512.png similarity index 100% rename from openpype/hosts/unreal/integration/Resources/openpype512.png rename to openpype/hosts/unreal/integration/UE_4.7/Resources/openpype512.png diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/OpenPype.Build.cs b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/OpenPype.Build.cs similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/OpenPype.Build.cs rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/OpenPype.Build.cs diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainer.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainer.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainer.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainer.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainerFactory.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainerFactory.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainerFactory.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainerFactory.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPype.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPype.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPype.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPype.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeLib.cpp 
b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeLib.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeLib.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeLib.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstance.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstance.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstance.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstance.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePythonBridge.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePythonBridge.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePythonBridge.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePythonBridge.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeStyle.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeStyle.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeStyle.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeStyle.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainer.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainer.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainer.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainer.h diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainerFactory.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainerFactory.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainerFactory.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainerFactory.h diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPype.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPype.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPype.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPype.h diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeLib.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeLib.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeLib.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeLib.h diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstance.h 
b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstance.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstance.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstance.h diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePythonBridge.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePythonBridge.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePythonBridge.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePythonBridge.h diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeStyle.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeStyle.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeStyle.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeStyle.h diff --git a/openpype/hosts/unreal/integration/UE_5.0/.gitignore b/openpype/hosts/unreal/integration/UE_5.0/.gitignore new file mode 100644 index 0000000000..b32a6f55e5 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/.gitignore @@ -0,0 +1,35 @@ +# Prerequisites +*.d + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app + +/Binaries +/Intermediate diff --git a/openpype/hosts/unreal/integration/UE_5.0/Content/Python/init_unreal.py b/openpype/hosts/unreal/integration/UE_5.0/Content/Python/init_unreal.py new file mode 100644 index 0000000000..b85f970699 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Content/Python/init_unreal.py @@ -0,0 +1,30 @@ +import unreal + +openpype_detected = True +try: + from openpype.pipeline import install_host + from openpype.hosts.unreal.api import UnrealHost + + openpype_host = UnrealHost() +except ImportError as exc: + openpype_host = None + openpype_detected = False + unreal.log_error("OpenPype: cannot load OpenPype [ {} ]".format(exc)) + +if openpype_detected: + install_host(openpype_host) + + +@unreal.uclass() +class OpenPypeIntegration(unreal.OpenPypePythonBridge): + @unreal.ufunction(override=True) + def RunInPython_Popup(self): + unreal.log_warning("OpenPype: showing tools popup") + if openpype_detected: + openpype_host.show_tools_popup() + + @unreal.ufunction(override=True) + def RunInPython_Dialog(self): + unreal.log_warning("OpenPype: showing tools dialog") + if openpype_detected: + openpype_host.show_tools_dialog() diff --git a/openpype/hosts/unreal/integration/UE_5.0/OpenPype.uplugin b/openpype/hosts/unreal/integration/UE_5.0/OpenPype.uplugin new file mode 100644 index 0000000000..4c7a74403c --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/OpenPype.uplugin @@ -0,0 +1,24 @@ +{ + "FileVersion": 3, + "Version": 1, + 
"VersionName": "1.0", + "FriendlyName": "OpenPype", + "Description": "OpenPype Integration", + "Category": "OpenPype.Integration", + "CreatedBy": "Ondrej Samohel", + "CreatedByURL": "https://openpype.io", + "DocsURL": "https://openpype.io/docs/artist_hosts_unreal", + "MarketplaceURL": "", + "SupportURL": "https://pype.club/", + "CanContainContent": true, + "IsBetaVersion": true, + "IsExperimentalVersion": false, + "Installed": false, + "Modules": [ + { + "Name": "OpenPype", + "Type": "Editor", + "LoadingPhase": "Default" + } + ] +} \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/README.md b/openpype/hosts/unreal/integration/UE_5.0/README.md new file mode 100644 index 0000000000..cf0aa622c2 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/README.md @@ -0,0 +1,11 @@ +# OpenPype Unreal Integration plugin - UE 5.x + +This is plugin for Unreal Editor, creating menu for [OpenPype](https://github.com/getavalon) tools to run. + +## How does this work + +Plugin is creating basic menu items in **Window/OpenPype** section of Unreal Editor main menu and a button +on the main toolbar with associated menu. Clicking on those menu items is calling callbacks that are +declared in C++ but needs to be implemented during Unreal Editor +startup in `Plugins/OpenPype/Content/Python/init_unreal.py` - this should be executed by Unreal Editor +automatically. diff --git a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype128.png b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype128.png new file mode 100644 index 0000000000..abe8a807ef Binary files /dev/null and b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype128.png differ diff --git a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype40.png b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype40.png new file mode 100644 index 0000000000..f983e7a1f2 Binary files /dev/null and b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype40.png differ diff --git a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype512.png b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype512.png new file mode 100644 index 0000000000..97c4d4326b Binary files /dev/null and b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype512.png differ diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/OpenPype.Build.cs b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/OpenPype.Build.cs new file mode 100644 index 0000000000..fcfd268234 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/OpenPype.Build.cs @@ -0,0 +1,59 @@ +// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved. + +using UnrealBuildTool; + +public class OpenPype : ModuleRules +{ + public OpenPype(ReadOnlyTargetRules Target) : base(Target) + { + PCHUsage = ModuleRules.PCHUsageMode.UseExplicitOrSharedPCHs; + + PublicIncludePaths.AddRange( + new string[] { + // ... add public include paths required here ... + } + ); + + + PrivateIncludePaths.AddRange( + new string[] { + // ... add other private include paths required here ... + } + ); + + + PublicDependencyModuleNames.AddRange( + new string[] + { + "Core", + // ... add other public dependencies that you statically link with here ... + } + ); + + + PrivateDependencyModuleNames.AddRange( + new string[] + { + "Projects", + "InputCore", + "EditorFramework", + "UnrealEd", + "ToolMenus", + "LevelEditor", + "CoreUObject", + "Engine", + "Slate", + "SlateCore", + // ... 
add private dependencies that you statically link with here ... + } + ); + + + DynamicallyLoadedModuleNames.AddRange( + new string[] + { + // ... add any modules that your module loads dynamically here ... + } + ); + } +} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainer.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainer.cpp new file mode 100644 index 0000000000..c766f87a8e --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainer.cpp @@ -0,0 +1,115 @@ +// Fill out your copyright notice in the Description page of Project Settings. + +#include "AssetContainer.h" +#include "AssetRegistryModule.h" +#include "Misc/PackageName.h" +#include "Engine.h" +#include "Containers/UnrealString.h" + +UAssetContainer::UAssetContainer(const FObjectInitializer& ObjectInitializer) +: UAssetUserData(ObjectInitializer) +{ + FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked("AssetRegistry"); + FString path = UAssetContainer::GetPathName(); + UE_LOG(LogTemp, Warning, TEXT("UAssetContainer %s"), *path); + FARFilter Filter; + Filter.PackagePaths.Add(FName(*path)); + + AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UAssetContainer::OnAssetAdded); + AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UAssetContainer::OnAssetRemoved); + AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UAssetContainer::OnAssetRenamed); +} + +void UAssetContainer::OnAssetAdded(const FAssetData& AssetData) +{ + TArray split; + + // get directory of current container + FString selfFullPath = UAssetContainer::GetPathName(); + FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); + + // get asset path and class + FString assetPath = AssetData.GetFullName(); + FString assetFName = AssetData.AssetClass.ToString(); + + // split path + assetPath.ParseIntoArray(split, TEXT(" "), true); + + FString assetDir = FPackageName::GetLongPackagePath(*split[1]); + + // take interest only in paths starting with path of current container + if (assetDir.StartsWith(*selfDir)) + { + // exclude self + if (assetFName != "AssetContainer") + { + assets.Add(assetPath); + assetsData.Add(AssetData); + UE_LOG(LogTemp, Log, TEXT("%s: asset added to %s"), *selfFullPath, *selfDir); + } + } +} + +void UAssetContainer::OnAssetRemoved(const FAssetData& AssetData) +{ + TArray split; + + // get directory of current container + FString selfFullPath = UAssetContainer::GetPathName(); + FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); + + // get asset path and class + FString assetPath = AssetData.GetFullName(); + FString assetFName = AssetData.AssetClass.ToString(); + + // split path + assetPath.ParseIntoArray(split, TEXT(" "), true); + + FString assetDir = FPackageName::GetLongPackagePath(*split[1]); + + // take interest only in paths starting with path of current container + FString path = UAssetContainer::GetPathName(); + FString lpp = FPackageName::GetLongPackagePath(*path); + + if (assetDir.StartsWith(*selfDir)) + { + // exclude self + if (assetFName != "AssetContainer") + { + // UE_LOG(LogTemp, Warning, TEXT("%s: asset removed"), *lpp); + assets.Remove(assetPath); + assetsData.Remove(AssetData); + } + } +} + +void UAssetContainer::OnAssetRenamed(const FAssetData& AssetData, const FString& str) +{ + TArray split; + + // get directory of current container + FString selfFullPath = UAssetContainer::GetPathName(); + FString selfDir = 
FPackageName::GetLongPackagePath(*selfFullPath); + + // get asset path and class + FString assetPath = AssetData.GetFullName(); + FString assetFName = AssetData.AssetClass.ToString(); + + // split path + assetPath.ParseIntoArray(split, TEXT(" "), true); + + FString assetDir = FPackageName::GetLongPackagePath(*split[1]); + if (assetDir.StartsWith(*selfDir)) + { + // exclude self + if (assetFName != "AssetContainer") + { + + assets.Remove(str); + assets.Add(assetPath); + assetsData.Remove(AssetData); + // UE_LOG(LogTemp, Warning, TEXT("%s: asset renamed %s"), *lpp, *str); + } + } +} + diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainerFactory.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainerFactory.cpp new file mode 100644 index 0000000000..b943150bdd --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainerFactory.cpp @@ -0,0 +1,20 @@ +#include "AssetContainerFactory.h" +#include "AssetContainer.h" + +UAssetContainerFactory::UAssetContainerFactory(const FObjectInitializer& ObjectInitializer) + : UFactory(ObjectInitializer) +{ + SupportedClass = UAssetContainer::StaticClass(); + bCreateNew = false; + bEditorImport = true; +} + +UObject* UAssetContainerFactory::FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) +{ + UAssetContainer* AssetContainer = NewObject(InParent, Class, Name, Flags); + return AssetContainer; +} + +bool UAssetContainerFactory::ShouldShowInNewMenu() const { + return false; +} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPype.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPype.cpp new file mode 100644 index 0000000000..b3bd9a81b3 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPype.cpp @@ -0,0 +1,85 @@ +#include "OpenPype.h" +#include "OpenPypeStyle.h" +#include "OpenPypeCommands.h" +#include "OpenPypePythonBridge.h" +#include "LevelEditor.h" +#include "Misc/MessageDialog.h" +#include "ToolMenus.h" + + +static const FName OpenPypeTabName("OpenPype"); + +#define LOCTEXT_NAMESPACE "FOpenPypeModule" + +// This function is triggered when the plugin is staring up +void FOpenPypeModule::StartupModule() +{ + FOpenPypeStyle::Initialize(); + FOpenPypeStyle::ReloadTextures(); + FOpenPypeCommands::Register(); + + PluginCommands = MakeShareable(new FUICommandList); + + PluginCommands->MapAction( + FOpenPypeCommands::Get().OpenPypeTools, + FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuPopup), + FCanExecuteAction()); + PluginCommands->MapAction( + FOpenPypeCommands::Get().OpenPypeToolsDialog, + FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuDialog), + FCanExecuteAction()); + + UToolMenus::RegisterStartupCallback(FSimpleMulticastDelegate::FDelegate::CreateRaw(this, &FOpenPypeModule::RegisterMenus)); +} + +void FOpenPypeModule::ShutdownModule() +{ + UToolMenus::UnRegisterStartupCallback(this); + + UToolMenus::UnregisterOwner(this); + + FOpenPypeStyle::Shutdown(); + + FOpenPypeCommands::Unregister(); +} + +void FOpenPypeModule::RegisterMenus() +{ + // Owner will be used for cleanup in call to UToolMenus::UnregisterOwner + FToolMenuOwnerScoped OwnerScoped(this); + + { + UToolMenu* Menu = UToolMenus::Get()->ExtendMenu("LevelEditor.MainMenu.Tools"); + { + // FToolMenuSection& Section = Menu->FindOrAddSection("OpenPype"); + FToolMenuSection& Section = Menu->AddSection( + "OpenPype", + 
TAttribute(FText::FromString("OpenPype")), + FToolMenuInsert("Programming", EToolMenuInsertType::Before) + ); + Section.AddMenuEntryWithCommandList(FOpenPypeCommands::Get().OpenPypeTools, PluginCommands); + Section.AddMenuEntryWithCommandList(FOpenPypeCommands::Get().OpenPypeToolsDialog, PluginCommands); + } + UToolMenu* ToolbarMenu = UToolMenus::Get()->ExtendMenu("LevelEditor.LevelEditorToolBar.PlayToolBar"); + { + FToolMenuSection& Section = ToolbarMenu->FindOrAddSection("PluginTools"); + { + FToolMenuEntry& Entry = Section.AddEntry(FToolMenuEntry::InitToolBarButton(FOpenPypeCommands::Get().OpenPypeTools)); + Entry.SetCommandList(PluginCommands); + } + } + } +} + + +void FOpenPypeModule::MenuPopup() { + UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get(); + bridge->RunInPython_Popup(); +} + +void FOpenPypeModule::MenuDialog() { + UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get(); + bridge->RunInPython_Dialog(); +} + +IMPLEMENT_MODULE(FOpenPypeModule, OpenPype) diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeCommands.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeCommands.cpp new file mode 100644 index 0000000000..6187bd7c7e --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeCommands.cpp @@ -0,0 +1,13 @@ +// Copyright Epic Games, Inc. All Rights Reserved. + +#include "OpenPypeCommands.h" + +#define LOCTEXT_NAMESPACE "FOpenPypeModule" + +void FOpenPypeCommands::RegisterCommands() +{ + UI_COMMAND(OpenPypeTools, "OpenPype Tools", "Pipeline tools", EUserInterfaceActionType::Button, FInputChord()); + UI_COMMAND(OpenPypeToolsDialog, "OpenPype Tools Dialog", "Pipeline tools dialog", EUserInterfaceActionType::Button, FInputChord()); +} + +#undef LOCTEXT_NAMESPACE diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeLib.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeLib.cpp new file mode 100644 index 0000000000..5facab7b8b --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeLib.cpp @@ -0,0 +1,48 @@ +#include "OpenPypeLib.h" +#include "Misc/Paths.h" +#include "Misc/ConfigCacheIni.h" +#include "UObject/UnrealType.h" + +/** + * Sets color on folder icon on given path + * @param InPath - path to folder + * @param InFolderColor - color of the folder + * @warning This color will appear only after Editor restart. Is there a better way? 
+ */ + +void UOpenPypeLib::CSetFolderColor(FString FolderPath, FLinearColor FolderColor, bool bForceAdd) +{ + auto SaveColorInternal = [](FString InPath, FLinearColor InFolderColor) + { + // Saves the color of the folder to the config + if (FPaths::FileExists(GEditorPerProjectIni)) + { + GConfig->SetString(TEXT("PathColor"), *InPath, *InFolderColor.ToString(), GEditorPerProjectIni); + } + + }; + + SaveColorInternal(FolderPath, FolderColor); + +} +/** + * Returns all poperties on given object + * @param cls - class + * @return TArray of properties + */ +TArray UOpenPypeLib::GetAllProperties(UClass* cls) +{ + TArray Ret; + if (cls != nullptr) + { + for (TFieldIterator It(cls); It; ++It) + { + FProperty* Property = *It; + if (Property->HasAnyPropertyFlags(EPropertyFlags::CPF_Edit)) + { + Ret.Add(Property->GetName()); + } + } + } + return Ret; +} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstance.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstance.cpp new file mode 100644 index 0000000000..4f1e846c0b --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstance.cpp @@ -0,0 +1,108 @@ +#pragma once + +#include "OpenPypePublishInstance.h" +#include "AssetRegistryModule.h" + + +UOpenPypePublishInstance::UOpenPypePublishInstance(const FObjectInitializer& ObjectInitializer) + : UObject(ObjectInitializer) +{ + FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked("AssetRegistry"); + FString path = UOpenPypePublishInstance::GetPathName(); + FARFilter Filter; + Filter.PackagePaths.Add(FName(*path)); + + AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetAdded); + AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UOpenPypePublishInstance::OnAssetRemoved); + AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UOpenPypePublishInstance::OnAssetRenamed); +} + +void UOpenPypePublishInstance::OnAssetAdded(const FAssetData& AssetData) +{ + TArray split; + + // get directory of current container + FString selfFullPath = UOpenPypePublishInstance::GetPathName(); + FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); + + // get asset path and class + FString assetPath = AssetData.GetFullName(); + FString assetFName = AssetData.AssetClass.ToString(); + + // split path + assetPath.ParseIntoArray(split, TEXT(" "), true); + + FString assetDir = FPackageName::GetLongPackagePath(*split[1]); + + // take interest only in paths starting with path of current container + if (assetDir.StartsWith(*selfDir)) + { + // exclude self + if (assetFName != "OpenPypePublishInstance") + { + assets.Add(assetPath); + UE_LOG(LogTemp, Log, TEXT("%s: asset added to %s"), *selfFullPath, *selfDir); + } + } +} + +void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& AssetData) +{ + TArray split; + + // get directory of current container + FString selfFullPath = UOpenPypePublishInstance::GetPathName(); + FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); + + // get asset path and class + FString assetPath = AssetData.GetFullName(); + FString assetFName = AssetData.AssetClass.ToString(); + + // split path + assetPath.ParseIntoArray(split, TEXT(" "), true); + + FString assetDir = FPackageName::GetLongPackagePath(*split[1]); + + // take interest only in paths starting with path of current container + FString path = UOpenPypePublishInstance::GetPathName(); + FString lpp = 
FPackageName::GetLongPackagePath(*path); + + if (assetDir.StartsWith(*selfDir)) + { + // exclude self + if (assetFName != "OpenPypePublishInstance") + { + // UE_LOG(LogTemp, Warning, TEXT("%s: asset removed"), *lpp); + assets.Remove(assetPath); + } + } +} + +void UOpenPypePublishInstance::OnAssetRenamed(const FAssetData& AssetData, const FString& str) +{ + TArray split; + + // get directory of current container + FString selfFullPath = UOpenPypePublishInstance::GetPathName(); + FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); + + // get asset path and class + FString assetPath = AssetData.GetFullName(); + FString assetFName = AssetData.AssetClass.ToString(); + + // split path + assetPath.ParseIntoArray(split, TEXT(" "), true); + + FString assetDir = FPackageName::GetLongPackagePath(*split[1]); + if (assetDir.StartsWith(*selfDir)) + { + // exclude self + if (assetFName != "AssetContainer") + { + + assets.Remove(str); + assets.Add(assetPath); + // UE_LOG(LogTemp, Warning, TEXT("%s: asset renamed %s"), *lpp, *str); + } + } +} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp new file mode 100644 index 0000000000..e61964c689 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp @@ -0,0 +1,20 @@ +#include "OpenPypePublishInstanceFactory.h" +#include "OpenPypePublishInstance.h" + +UOpenPypePublishInstanceFactory::UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer) + : UFactory(ObjectInitializer) +{ + SupportedClass = UOpenPypePublishInstance::StaticClass(); + bCreateNew = false; + bEditorImport = true; +} + +UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) +{ + UOpenPypePublishInstance* OpenPypePublishInstance = NewObject(InParent, Class, Name, Flags); + return OpenPypePublishInstance; +} + +bool UOpenPypePublishInstanceFactory::ShouldShowInNewMenu() const { + return false; +} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePythonBridge.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePythonBridge.cpp new file mode 100644 index 0000000000..8113231503 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePythonBridge.cpp @@ -0,0 +1,13 @@ +#include "OpenPypePythonBridge.h" + +UOpenPypePythonBridge* UOpenPypePythonBridge::Get() +{ + TArray OpenPypePythonBridgeClasses; + GetDerivedClasses(UOpenPypePythonBridge::StaticClass(), OpenPypePythonBridgeClasses); + int32 NumClasses = OpenPypePythonBridgeClasses.Num(); + if (NumClasses > 0) + { + return Cast(OpenPypePythonBridgeClasses[NumClasses - 1]->GetDefaultObject()); + } + return nullptr; +}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeStyle.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeStyle.cpp new file mode 100644 index 0000000000..49e805da4d --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeStyle.cpp @@ -0,0 +1,61 @@ +#include "OpenPypeStyle.h" +#include "OpenPype.h" +#include "Framework/Application/SlateApplication.h" +#include "Styling/SlateStyleRegistry.h" +#include "Slate/SlateGameResources.h" +#include 
"Interfaces/IPluginManager.h" +#include "Styling/SlateStyleMacros.h" + +#define RootToContentDir Style->RootToContentDir + +TSharedPtr FOpenPypeStyle::OpenPypeStyleInstance = nullptr; + +void FOpenPypeStyle::Initialize() +{ + if (!OpenPypeStyleInstance.IsValid()) + { + OpenPypeStyleInstance = Create(); + FSlateStyleRegistry::RegisterSlateStyle(*OpenPypeStyleInstance); + } +} + +void FOpenPypeStyle::Shutdown() +{ + FSlateStyleRegistry::UnRegisterSlateStyle(*OpenPypeStyleInstance); + ensure(OpenPypeStyleInstance.IsUnique()); + OpenPypeStyleInstance.Reset(); +} + +FName FOpenPypeStyle::GetStyleSetName() +{ + static FName StyleSetName(TEXT("OpenPypeStyle")); + return StyleSetName; +} + +const FVector2D Icon16x16(16.0f, 16.0f); +const FVector2D Icon20x20(20.0f, 20.0f); +const FVector2D Icon40x40(40.0f, 40.0f); + +TSharedRef< FSlateStyleSet > FOpenPypeStyle::Create() +{ + TSharedRef< FSlateStyleSet > Style = MakeShareable(new FSlateStyleSet("OpenPypeStyle")); + Style->SetContentRoot(IPluginManager::Get().FindPlugin("OpenPype")->GetBaseDir() / TEXT("Resources")); + + Style->Set("OpenPype.OpenPypeTools", new IMAGE_BRUSH(TEXT("openpype40"), Icon40x40)); + Style->Set("OpenPype.OpenPypeToolsDialog", new IMAGE_BRUSH(TEXT("openpype40"), Icon40x40)); + + return Style; +} + +void FOpenPypeStyle::ReloadTextures() +{ + if (FSlateApplication::IsInitialized()) + { + FSlateApplication::Get().GetRenderer()->ReloadTextureResources(); + } +} + +const ISlateStyle& FOpenPypeStyle::Get() +{ + return *OpenPypeStyleInstance; +} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainer.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainer.h new file mode 100644 index 0000000000..3c2a360c78 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainer.h @@ -0,0 +1,39 @@ +// Fill out your copyright notice in the Description page of Project Settings. + +#pragma once + +#include "CoreMinimal.h" +#include "UObject/NoExportTypes.h" +#include "Engine/AssetUserData.h" +#include "AssetData.h" +#include "AssetContainer.generated.h" + +/** + * + */ +UCLASS(Blueprintable) +class OPENPYPE_API UAssetContainer : public UAssetUserData +{ + GENERATED_BODY() + +public: + + UAssetContainer(const FObjectInitializer& ObjectInitalizer); + // ~UAssetContainer(); + + UPROPERTY(EditAnywhere, BlueprintReadOnly) + TArray assets; + + // There seems to be no reflection option to expose array of FAssetData + /* + UPROPERTY(Transient, BlueprintReadOnly, Category = "Python", meta=(DisplayName="Assets Data")) + TArray assetsData; + */ +private: + TArray assetsData; + void OnAssetAdded(const FAssetData& AssetData); + void OnAssetRemoved(const FAssetData& AssetData); + void OnAssetRenamed(const FAssetData& AssetData, const FString& str); +}; + + diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainerFactory.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainerFactory.h new file mode 100644 index 0000000000..331ce6bb50 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainerFactory.h @@ -0,0 +1,21 @@ +// Fill out your copyright notice in the Description page of Project Settings. 
+ +#pragma once + +#include "CoreMinimal.h" +#include "Factories/Factory.h" +#include "AssetContainerFactory.generated.h" + +/** + * + */ +UCLASS() +class OPENPYPE_API UAssetContainerFactory : public UFactory +{ + GENERATED_BODY() + +public: + UAssetContainerFactory(const FObjectInitializer& ObjectInitializer); + virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override; + virtual bool ShouldShowInNewMenu() const override; +}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPype.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPype.h new file mode 100644 index 0000000000..3ee5eaa65f --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPype.h @@ -0,0 +1,23 @@ +// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved. + +#pragma once + +#include "CoreMinimal.h" +#include "Modules/ModuleManager.h" + + +class FOpenPypeModule : public IModuleInterface +{ +public: + virtual void StartupModule() override; + virtual void ShutdownModule() override; + +private: + void RegisterMenus(); + + void MenuPopup(); + void MenuDialog(); + +private: + TSharedPtr PluginCommands; +}; diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeCommands.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeCommands.h new file mode 100644 index 0000000000..62ffb8de33 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeCommands.h @@ -0,0 +1,24 @@ +// Copyright Epic Games, Inc. All Rights Reserved. + +#pragma once + +#include "CoreMinimal.h" +#include "Framework/Commands/Commands.h" +#include "OpenPypeStyle.h" + +class FOpenPypeCommands : public TCommands +{ +public: + + FOpenPypeCommands() + : TCommands(TEXT("OpenPype"), NSLOCTEXT("Contexts", "OpenPype", "OpenPype Tools"), NAME_None, FOpenPypeStyle::GetStyleSetName()) + { + } + + // TCommands<> interface + virtual void RegisterCommands() override; + +public: + TSharedPtr< FUICommandInfo > OpenPypeTools; + TSharedPtr< FUICommandInfo > OpenPypeToolsDialog; +}; diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeLib.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeLib.h new file mode 100644 index 0000000000..59e9c8bd76 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeLib.h @@ -0,0 +1,19 @@ +#pragma once + +#include "Engine.h" +#include "OpenPypeLib.generated.h" + + +UCLASS(Blueprintable) +class OPENPYPE_API UOpenPypeLib : public UObject +{ + + GENERATED_BODY() + +public: + UFUNCTION(BlueprintCallable, Category = Python) + static void CSetFolderColor(FString FolderPath, FLinearColor FolderColor, bool bForceAdd); + + UFUNCTION(BlueprintCallable, Category = Python) + static TArray GetAllProperties(UClass* cls); +}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstance.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstance.h new file mode 100644 index 0000000000..0a27a078d7 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstance.h @@ -0,0 +1,21 @@ +#pragma once + +#include "Engine.h" +#include "OpenPypePublishInstance.generated.h" + + +UCLASS(Blueprintable) +class OPENPYPE_API UOpenPypePublishInstance : public 
UObject +{ + GENERATED_BODY() + +public: + UOpenPypePublishInstance(const FObjectInitializer& ObjectInitalizer); + + UPROPERTY(EditAnywhere, BlueprintReadOnly) + TArray assets; +private: + void OnAssetAdded(const FAssetData& AssetData); + void OnAssetRemoved(const FAssetData& AssetData); + void OnAssetRenamed(const FAssetData& AssetData, const FString& str); +}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h new file mode 100644 index 0000000000..a2b3abe13e --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h @@ -0,0 +1,19 @@ +#pragma once + +#include "CoreMinimal.h" +#include "Factories/Factory.h" +#include "OpenPypePublishInstanceFactory.generated.h" + +/** + * + */ +UCLASS() +class OPENPYPE_API UOpenPypePublishInstanceFactory : public UFactory +{ + GENERATED_BODY() + +public: + UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer); + virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override; + virtual bool ShouldShowInNewMenu() const override; +}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePythonBridge.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePythonBridge.h new file mode 100644 index 0000000000..692aab2e5e --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePythonBridge.h @@ -0,0 +1,20 @@ +#pragma once +#include "Engine.h" +#include "OpenPypePythonBridge.generated.h" + +UCLASS(Blueprintable) +class UOpenPypePythonBridge : public UObject +{ + GENERATED_BODY() + +public: + UFUNCTION(BlueprintCallable, Category = Python) + static UOpenPypePythonBridge* Get(); + + UFUNCTION(BlueprintImplementableEvent, Category = Python) + void RunInPython_Popup() const; + + UFUNCTION(BlueprintImplementableEvent, Category = Python) + void RunInPython_Dialog() const; + +}; diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeStyle.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeStyle.h new file mode 100644 index 0000000000..ae704251e1 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeStyle.h @@ -0,0 +1,18 @@ +#pragma once +#include "CoreMinimal.h" +#include "Styling/SlateStyle.h" + +class FOpenPypeStyle +{ +public: + static void Initialize(); + static void Shutdown(); + static void ReloadTextures(); + static const ISlateStyle& Get(); + static FName GetStyleSetName(); + + +private: + static TSharedRef< class FSlateStyleSet > Create(); + static TSharedPtr< class FSlateStyleSet > OpenPypeStyleInstance; +}; \ No newline at end of file diff --git a/openpype/hosts/unreal/lib.py b/openpype/hosts/unreal/lib.py index 805e883c64..d02c6de357 100644 --- a/openpype/hosts/unreal/lib.py +++ b/openpype/hosts/unreal/lib.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Unreal launching and project tools.""" -import sys + import os import platform import json @@ -9,7 +9,7 @@ import subprocess import re from pathlib import Path from collections import OrderedDict -from openpype.api import get_project_settings +from openpype.settings import get_project_settings def get_engine_versions(env=None): @@ -70,19 +70,22 @@ def 
get_engine_versions(env=None): return OrderedDict() -def get_editor_executable_path(engine_path: Path) -> Path: - """Get UE4 Editor executable path.""" - ue4_path = engine_path / "Engine/Binaries" +def get_editor_executable_path(engine_path: Path, engine_version: str) -> Path: + """Get UE Editor executable path.""" + ue_path = engine_path / "Engine/Binaries" if platform.system().lower() == "windows": - ue4_path /= "Win64/UE4Editor.exe" + if engine_version.split(".")[0] == "4": + ue_path /= "Win64/UE4Editor.exe" + elif engine_version.split(".")[0] == "5": + ue_path /= "Win64/UnrealEditor.exe" elif platform.system().lower() == "linux": - ue4_path /= "Linux/UE4Editor" + ue_path /= "Linux/UE4Editor" elif platform.system().lower() == "darwin": - ue4_path /= "Mac/UE4Editor" + ue_path /= "Mac/UE4Editor" - return ue4_path + return ue_path def _win_get_engine_versions(): @@ -208,22 +211,26 @@ def create_unreal_project(project_name: str, # created in different UE4 version. When user convert such project # to his UE4 version, Engine ID is replaced in uproject file. If some # other user tries to open it, it will present him with similar error. - ue4_modules = Path() + ue_modules = Path() if platform.system().lower() == "windows": - ue4_modules = Path(os.path.join(engine_path, "Engine", "Binaries", - "Win64", "UE4Editor.modules")) + ue_modules_path = engine_path / "Engine/Binaries/Win64" + if ue_version.split(".")[0] == "4": + ue_modules_path /= "UE4Editor.modules" + elif ue_version.split(".")[0] == "5": + ue_modules_path /= "UnrealEditor.modules" + ue_modules = Path(ue_modules_path) if platform.system().lower() == "linux": - ue4_modules = Path(os.path.join(engine_path, "Engine", "Binaries", + ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", "Linux", "UE4Editor.modules")) if platform.system().lower() == "darwin": - ue4_modules = Path(os.path.join(engine_path, "Engine", "Binaries", + ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", "Mac", "UE4Editor.modules")) - if ue4_modules.exists(): + if ue_modules.exists(): print("--- Loading Engine ID from modules file ...") - with open(ue4_modules, "r") as mp: + with open(ue_modules, "r") as mp: loaded_modules = json.load(mp) if loaded_modules.get("BuildId"): @@ -280,7 +287,7 @@ def create_unreal_project(project_name: str, python_path = None if platform.system().lower() == "windows": python_path = engine_path / ("Engine/Binaries/ThirdParty/" - "Python3/Win64/pythonw.exe") + "Python3/Win64/python.exe") if platform.system().lower() == "linux": python_path = engine_path / ("Engine/Binaries/ThirdParty/" @@ -294,14 +301,15 @@ def create_unreal_project(project_name: str, raise NotImplementedError("Unsupported platform") if not python_path.exists(): raise RuntimeError(f"Unreal Python not found at {python_path}") - subprocess.run( + subprocess.check_call( [python_path.as_posix(), "-m", "pip", "install", "pyside2"]) if dev_mode or preset["dev_mode"]: - _prepare_cpp_project(project_file, engine_path) + _prepare_cpp_project(project_file, engine_path, ue_version) -def _prepare_cpp_project(project_file: Path, engine_path: Path) -> None: +def _prepare_cpp_project( + project_file: Path, engine_path: Path, ue_version: str) -> None: """Prepare CPP Unreal Project. 
This function will add source files needed for project to be @@ -420,8 +428,12 @@ class {1}_API A{0}GameModeBase : public AGameModeBase with open(sources_dir / f"{project_name}GameModeBase.h", mode="w") as f: f.write(game_mode_h) - u_build_tool = Path( - engine_path / "Engine/Binaries/DotNET/UnrealBuildTool.exe") + u_build_tool_path = engine_path / "Engine/Binaries/DotNET" + if ue_version.split(".")[0] == "4": + u_build_tool_path /= "UnrealBuildTool.exe" + elif ue_version.split(".")[0] == "5": + u_build_tool_path /= "UnrealBuildTool/UnrealBuildTool.exe" + u_build_tool = Path(u_build_tool_path) u_header_tool = None arch = "Win64" diff --git a/openpype/hosts/unreal/plugins/create/create_camera.py b/openpype/hosts/unreal/plugins/create/create_camera.py index 2842900834..bf1489d688 100644 --- a/openpype/hosts/unreal/plugins/create/create_camera.py +++ b/openpype/hosts/unreal/plugins/create/create_camera.py @@ -2,11 +2,11 @@ import unreal from unreal import EditorAssetLibrary as eal from unreal import EditorLevelLibrary as ell -from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api.pipeline import instantiate +from openpype.pipeline import LegacyCreator -class CreateCamera(plugin.Creator): +class CreateCamera(LegacyCreator): """Layout output for character rigs""" name = "layoutMain" diff --git a/openpype/hosts/unreal/plugins/create/create_layout.py b/openpype/hosts/unreal/plugins/create/create_layout.py index 5fef08ce2a..c1067b00d9 100644 --- a/openpype/hosts/unreal/plugins/create/create_layout.py +++ b/openpype/hosts/unreal/plugins/create/create_layout.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- from unreal import EditorLevelLibrary -from openpype.hosts.unreal.api import plugin +from openpype.pipeline import LegacyCreator from openpype.hosts.unreal.api.pipeline import instantiate -class CreateLayout(plugin.Creator): +class CreateLayout(LegacyCreator): """Layout output for character rigs.""" name = "layoutMain" diff --git a/openpype/hosts/unreal/plugins/create/create_look.py b/openpype/hosts/unreal/plugins/create/create_look.py index 12f6b70ae6..4abf3f6095 100644 --- a/openpype/hosts/unreal/plugins/create/create_look.py +++ b/openpype/hosts/unreal/plugins/create/create_look.py @@ -2,9 +2,10 @@ """Create look in Unreal.""" import unreal # noqa from openpype.hosts.unreal.api import pipeline, plugin +from openpype.pipeline import LegacyCreator -class CreateLook(plugin.Creator): +class CreateLook(LegacyCreator): """Shader connections defining shape look.""" name = "unrealLook" diff --git a/openpype/hosts/unreal/plugins/create/create_render.py b/openpype/hosts/unreal/plugins/create/create_render.py index 3b6c7a9f1e..a85d17421b 100644 --- a/openpype/hosts/unreal/plugins/create/create_render.py +++ b/openpype/hosts/unreal/plugins/create/create_render.py @@ -1,11 +1,10 @@ import unreal -from openpype.pipeline import legacy_io from openpype.hosts.unreal.api import pipeline -from openpype.hosts.unreal.api.plugin import Creator +from openpype.pipeline import LegacyCreator -class CreateRender(Creator): +class CreateRender(LegacyCreator): """Create instance for sequence for rendering""" name = "unrealRender" @@ -22,17 +21,24 @@ class CreateRender(Creator): ar = unreal.AssetRegistryHelpers.get_asset_registry() + # The asset name is the the third element of the path which contains + # the map. + # The index of the split path is 3 because the first element is an + # empty string, as the path begins with "/Content". 
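To make the indexing described in the comment above concrete: splitting a Content Browser path on `/` yields an empty first element, so the shot/asset folder under `/Game/OpenPype` lands at index 3. A minimal, Unreal-free sketch with a hypothetical path:

```python
# Hypothetical path; in the plugin it comes from
# unreal.EditorUtilityLibrary.get_selected_assets()[0].get_path_name()
path = "/Game/OpenPype/sh0100/renderMain.renderMain"

parts = path.split("/")
# parts == ['', 'Game', 'OpenPype', 'sh0100', 'renderMain.renderMain']
asset_name = parts[3]
print(asset_name)  # sh0100
```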
+ a = unreal.EditorUtilityLibrary.get_selected_assets()[0] + asset_name = a.get_path_name().split("/")[3] + # Get the master sequence and the master level. # There should be only one sequence and one level in the directory. filter = unreal.ARFilter( class_names=["LevelSequence"], - package_paths=[f"/Game/OpenPype/{self.data['asset']}"], + package_paths=[f"/Game/OpenPype/{asset_name}"], recursive_paths=False) sequences = ar.get_assets(filter) ms = sequences[0].get_editor_property('object_path') filter = unreal.ARFilter( class_names=["World"], - package_paths=[f"/Game/OpenPype/{self.data['asset']}"], + package_paths=[f"/Game/OpenPype/{asset_name}"], recursive_paths=False) levels = ar.get_assets(filter) ml = levels[0].get_editor_property('object_path') diff --git a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py index 601c2fae06..45d517d27d 100644 --- a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- """Create Static Meshes as FBX geometry.""" import unreal # noqa -from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api.pipeline import ( instantiate, ) +from openpype.pipeline import LegacyCreator -class CreateStaticMeshFBX(plugin.Creator): +class CreateStaticMeshFBX(LegacyCreator): """Static FBX geometry.""" name = "unrealStaticMeshMain" diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py index b2c3889f68..9fe5f3ab4b 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py @@ -20,6 +20,34 @@ class SkeletalMeshAlembicLoader(plugin.Loader): icon = "cube" color = "orange" + def get_task(self, filename, asset_dir, asset_name, replace): + task = unreal.AssetImportTask() + options = unreal.AbcImportSettings() + sm_settings = unreal.AbcStaticMeshSettings() + conversion_settings = unreal.AbcConversionSettings( + preset=unreal.AbcConversionPreset.CUSTOM, + flip_u=False, flip_v=False, + rotation=[0.0, 0.0, 0.0], + scale=[1.0, 1.0, 1.0]) + + task.set_editor_property('filename', filename) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', replace) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. It works with Unreal 4.26 + options.set_editor_property( + 'import_type', unreal.AlembicImportType.SKELETAL) + + options.static_mesh_settings = sm_settings + options.conversion_settings = conversion_settings + task.options = options + + return task + def load(self, context, name, namespace, data): """Load and containerise representation into Content Browser. 
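The Alembic loaders in the following hunks switch to version-suffixed target folders built with a zero-padded f-string; a tiny sketch of that naming (values here are illustrative, the loaders take them from the load context):

```python
# Illustrative values only; the loaders read root/asset/name/version
# from the loaded representation's context.
root, asset, name, version = "/Game/OpenPype", "hero", "modelMain", 7

asset_dir = f"{root}/{asset}/{name}_v{version:03d}"
print(asset_dir)  # /Game/OpenPype/hero/modelMain_v007
```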
@@ -50,36 +78,24 @@ class SkeletalMeshAlembicLoader(plugin.Loader): asset_name = "{}_{}".format(asset, name) else: asset_name = "{}".format(name) + version = context.get('version').get('name') tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}_v{version:03d}", suffix="") container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir): + unreal.EditorAssetLibrary.make_directory(asset_dir) - task = unreal.AssetImportTask() + task = self.get_task(self.fname, asset_dir, asset_name, False) - task.set_editor_property('filename', self.fname) - task.set_editor_property('destination_path', asset_dir) - task.set_editor_property('destination_name', asset_name) - task.set_editor_property('replace_existing', False) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - # set import options here - # Unreal 4.24 ignores the settings. It works with Unreal 4.26 - options = unreal.AbcImportSettings() - options.set_editor_property( - 'import_type', unreal.AlembicImportType.SKELETAL) - - task.options = options - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - - # Create Asset Container - unreal_pipeline.create_container( - container=container_name, path=asset_dir) + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) data = { "schema": "openpype:container-2.0", @@ -110,23 +126,8 @@ class SkeletalMeshAlembicLoader(plugin.Loader): source_path = get_representation_path(representation) destination_path = container["namespace"] - task = unreal.AssetImportTask() + task = self.get_task(source_path, destination_path, name, True) - task.set_editor_property('filename', source_path) - task.set_editor_property('destination_path', destination_path) - # strip suffix - task.set_editor_property('destination_name', name) - task.set_editor_property('replace_existing', True) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) - - # set import options here - # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 - options = unreal.AbcImportSettings() - options.set_editor_property( - 'import_type', unreal.AlembicImportType.SKELETAL) - - task.options = options # do import fbx and replace existing data unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) container_path = "{}/{}".format(container["namespace"], diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py index 5a73c72c64..a5b9cbd1fc 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py @@ -20,11 +20,11 @@ class StaticMeshAlembicLoader(plugin.Loader): icon = "cube" color = "orange" - def get_task(self, filename, asset_dir, asset_name, replace): + @staticmethod + def get_task(filename, asset_dir, asset_name, replace, default_conversion): task = unreal.AssetImportTask() options = unreal.AbcImportSettings() sm_settings = unreal.AbcStaticMeshSettings() - conversion_settings = unreal.AbcConversionSettings() task.set_editor_property('filename', filename) task.set_editor_property('destination_path', asset_dir) @@ -40,20 +40,20 @@ class StaticMeshAlembicLoader(plugin.Loader): sm_settings.set_editor_property('merge_meshes', True) - conversion_settings.set_editor_property('flip_u', False) - conversion_settings.set_editor_property('flip_v', True) - conversion_settings.set_editor_property( - 'scale', unreal.Vector(x=100.0, y=100.0, z=100.0)) - conversion_settings.set_editor_property( - 'rotation', unreal.Vector(x=-90.0, y=0.0, z=180.0)) + if not default_conversion: + conversion_settings = unreal.AbcConversionSettings( + preset=unreal.AbcConversionPreset.CUSTOM, + flip_u=False, flip_v=False, + rotation=[0.0, 0.0, 0.0], + scale=[1.0, 1.0, 1.0]) + options.conversion_settings = conversion_settings options.static_mesh_settings = sm_settings - options.conversion_settings = conversion_settings task.options = options return task - def load(self, context, name, namespace, data): + def load(self, context, name, namespace, options): """Load and containerise representation into Content Browser. This is two step process. 
First, import FBX to temporary path and @@ -83,22 +83,29 @@ class StaticMeshAlembicLoader(plugin.Loader): asset_name = "{}_{}".format(asset, name) else: asset_name = "{}".format(name) + version = context.get('version').get('name') + + default_conversion = False + if options.get("default_conversion"): + default_conversion = options.get("default_conversion") tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}_v{version:03d}", suffix="") container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir): + unreal.EditorAssetLibrary.make_directory(asset_dir) - task = self.get_task(self.fname, asset_dir, asset_name, False) + task = self.get_task( + self.fname, asset_dir, asset_name, False, default_conversion) - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - # Create Asset Container - unreal_pipeline.create_container( - container=container_name, path=asset_dir) + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) data = { "schema": "openpype:container-2.0", diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py index c7581e0cdd..1fe0bef462 100644 --- a/openpype/hosts/unreal/plugins/load/load_animation.py +++ b/openpype/hosts/unreal/plugins/load/load_animation.py @@ -8,6 +8,7 @@ from unreal import EditorAssetLibrary from unreal import MovieSceneSkeletalAnimationTrack from unreal import MovieSceneSkeletalAnimationSection +from openpype.pipeline.context_tools import get_current_project_asset from openpype.pipeline import ( get_representation_path, AVALON_CONTAINER_ID @@ -52,6 +53,8 @@ class AnimationFBXLoader(plugin.Loader): if not actor: return None + asset_doc = get_current_project_asset(fields=["data.fps"]) + task.set_editor_property('filename', self.fname) task.set_editor_property('destination_path', asset_dir) task.set_editor_property('destination_name', asset_name) @@ -77,13 +80,15 @@ class AnimationFBXLoader(plugin.Loader): task.options.anim_sequence_import_data.set_editor_property( 'import_meshes_in_bone_hierarchy', False) task.options.anim_sequence_import_data.set_editor_property( - 'use_default_sample_rate', True) + 'use_default_sample_rate', False) + task.options.anim_sequence_import_data.set_editor_property( + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) task.options.anim_sequence_import_data.set_editor_property( 'import_custom_attribute', True) task.options.anim_sequence_import_data.set_editor_property( 'import_bone_tracks', True) task.options.anim_sequence_import_data.set_editor_property( - 'remove_redundant_keys', True) + 'remove_redundant_keys', False) task.options.anim_sequence_import_data.set_editor_property( 'convert_scene', True) @@ -134,26 +139,40 @@ class AnimationFBXLoader(plugin.Loader): Returns: list(str): list of container content """ - # Create directory for asset and avalon container hierarchy = context.get('asset').get('data').get('parents') root = "/Game/OpenPype" asset = context.get('asset').get('name') suffix = "_CON" - if asset: - asset_name = "{}_{}".format(asset, name) - else: - asset_name = "{}".format(name) - + asset_name = f"{asset}_{name}" if asset else f"{name}" tools = 
unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( f"{root}/Animations/{asset}/{name}", suffix="") + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{hierarchy[0]}"], + recursive_paths=False) + levels = ar.get_assets(_filter) + master_level = levels[0].get_editor_property('object_path') + hierarchy_dir = root for h in hierarchy: hierarchy_dir = f"{hierarchy_dir}/{h}" hierarchy_dir = f"{hierarchy_dir}/{asset}" + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{hierarchy_dir}/"], + recursive_paths=True) + levels = ar.get_assets(_filter) + level = levels[0].get_editor_property('object_path') + + unreal.EditorLevelLibrary.save_all_dirty_levels() + unreal.EditorLevelLibrary.load_level(level) + container_name += suffix EditorAssetLibrary.make_directory(asset_dir) @@ -165,7 +184,7 @@ class AnimationFBXLoader(plugin.Loader): instance_name = data.get("instance_name") - animation = self._process(asset_dir, container_name, instance_name) + animation = self._process(asset_dir, asset_name, instance_name) asset_content = EditorAssetLibrary.list_assets( hierarchy_dir, recursive=True, include_folder=False) @@ -215,8 +234,7 @@ class AnimationFBXLoader(plugin.Loader): "parent": context["representation"]["parent"], "family": context["representation"]["context"]["family"] } - unreal_pipeline.imprint( - "{}/{}".format(asset_dir, container_name), data) + unreal_pipeline.imprint(f"{asset_dir}/{container_name}", data) imported_content = EditorAssetLibrary.list_assets( asset_dir, recursive=True, include_folder=False) @@ -224,9 +242,13 @@ class AnimationFBXLoader(plugin.Loader): for a in imported_content: EditorAssetLibrary.save_asset(a) + unreal.EditorLevelLibrary.save_current_level() + unreal.EditorLevelLibrary.load_level(master_level) + def update(self, container, representation): name = container["asset_name"] source_path = get_representation_path(representation) + asset_doc = get_current_project_asset(fields=["data.fps"]) destination_path = container["namespace"] task = unreal.AssetImportTask() @@ -258,13 +280,15 @@ class AnimationFBXLoader(plugin.Loader): task.options.anim_sequence_import_data.set_editor_property( 'import_meshes_in_bone_hierarchy', False) task.options.anim_sequence_import_data.set_editor_property( - 'use_default_sample_rate', True) + 'use_default_sample_rate', False) + task.options.anim_sequence_import_data.set_editor_property( + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) task.options.anim_sequence_import_data.set_editor_property( 'import_custom_attribute', True) task.options.anim_sequence_import_data.set_editor_property( 'import_bone_tracks', True) task.options.anim_sequence_import_data.set_editor_property( - 'remove_redundant_keys', True) + 'remove_redundant_keys', False) task.options.anim_sequence_import_data.set_editor_property( 'convert_scene', True) @@ -275,8 +299,7 @@ class AnimationFBXLoader(plugin.Loader): # do import fbx and replace existing data unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) - container_path = "{}/{}".format(container["namespace"], - container["objectName"]) + container_path = f'{container["namespace"]}/{container["objectName"]}' # update metadata unreal_pipeline.imprint( container_path, diff --git a/openpype/hosts/unreal/plugins/load/load_camera.py b/openpype/hosts/unreal/plugins/load/load_camera.py index ea896f6c44..ca6b0ce736 100644 --- 
a/openpype/hosts/unreal/plugins/load/load_camera.py +++ b/openpype/hosts/unreal/plugins/load/load_camera.py @@ -1,11 +1,12 @@ # -*- coding: utf-8 -*- """Load camera from FBX.""" -import os +from pathlib import Path import unreal from unreal import EditorAssetLibrary from unreal import EditorLevelLibrary - +from unreal import EditorLevelUtils +from openpype.client import get_assets, get_asset_by_name from openpype.pipeline import ( AVALON_CONTAINER_ID, legacy_io, @@ -23,14 +24,6 @@ class CameraLoader(plugin.Loader): icon = "cube" color = "orange" - def _get_data(self, asset_name): - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) - - return asset_doc.get("data") - def _set_sequence_hierarchy( self, seq_i, seq_j, min_frame_j, max_frame_j ): @@ -57,6 +50,33 @@ class CameraLoader(plugin.Loader): min_frame_j, max_frame_j + 1) + def _import_camera( + self, world, sequence, bindings, import_fbx_settings, import_filename + ): + ue_version = unreal.SystemLibrary.get_engine_version().split('.') + ue_major = int(ue_version[0]) + ue_minor = int(ue_version[1]) + + if ue_major == 4 and ue_minor <= 26: + unreal.SequencerTools.import_fbx( + world, + sequence, + bindings, + import_fbx_settings, + import_filename + ) + elif (ue_major == 4 and ue_minor >= 27) or ue_major == 5: + unreal.SequencerTools.import_level_sequence_fbx( + world, + sequence, + bindings, + import_fbx_settings, + import_filename + ) + else: + raise NotImplementedError( + f"Unreal version {ue_major} not supported") + def load(self, context, name, namespace, data): """ Load and containerise representation into Content Browser. @@ -84,10 +104,10 @@ class CameraLoader(plugin.Loader): hierarchy = context.get('asset').get('data').get('parents') root = "/Game/OpenPype" hierarchy_dir = root - hierarchy_list = [] + hierarchy_dir_list = [] for h in hierarchy: hierarchy_dir = f"{hierarchy_dir}/{h}" - hierarchy_list.append(hierarchy_dir) + hierarchy_dir_list.append(hierarchy_dir) asset = context.get('asset').get('name') suffix = "_CON" if asset: @@ -121,27 +141,53 @@ class CameraLoader(plugin.Loader): asset_dir, container_name = tools.create_unique_asset_name( f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="") + asset_path = Path(asset_dir) + asset_path_parent = str(asset_path.parent.as_posix()) + container_name += suffix - current_level = EditorLevelLibrary.get_editor_world().get_full_name() + EditorAssetLibrary.make_directory(asset_dir) + + # Create map for the shot, and create hierarchy of map. If the maps + # already exist, we will use them. 
+ h_dir = hierarchy_dir_list[0] + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + if not EditorAssetLibrary.does_asset_exist(master_level): + EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map") + + level = f"{asset_path_parent}/{asset}_map.{asset}_map" + if not EditorAssetLibrary.does_asset_exist(level): + EditorLevelLibrary.new_level(f"{asset_path_parent}/{asset}_map") + + EditorLevelLibrary.load_level(master_level) + EditorLevelUtils.add_level_to_world( + EditorLevelLibrary.get_editor_world(), + level, + unreal.LevelStreamingDynamic + ) EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(level) - ar = unreal.AssetRegistryHelpers.get_asset_registry() - filter = unreal.ARFilter( - class_names=["World"], - package_paths=[f"{hierarchy_dir}/{asset}/"], - recursive_paths=True) - maps = ar.get_assets(filter) - - # There should be only one map in the list - EditorLevelLibrary.load_level(maps[0].get_full_name()) - + project_name = legacy_io.active_project() + # TODO refactor + # - Creation of the hierarchy should be a function in the unreal integration + # - it's used in multiple loaders but must not be the loader's logic + # - hard to say what the purpose of the loop is + # - variable names do not match their meaning + # - why is the scene stored in sequences? + # - asset documents vs. elements + # - clean up variable names in the whole function + # - e.g. 'asset', 'asset_name', 'asset_data', 'asset_doc' + # - really inefficient queries of asset documents + # - an existing asset in the scene is considered to have "correct values" + # - variable 'elements' is modified during its own loop # Get all the sequences in the hierarchy. It will create them, if # they don't exist. sequences = [] frame_ranges = [] i = 0 - for h in hierarchy_list: + for h in hierarchy_dir_list: root_content = EditorAssetLibrary.list_assets( h, recursive=False, include_folder=False) @@ -160,26 +206,30 @@ class CameraLoader(plugin.Loader): factory=unreal.LevelSequenceFactoryNew() ) - asset_data = legacy_io.find_one({ - "type": "asset", - "name": h.split('/')[-1] - }) - - id = asset_data.get('_id') + asset_data = get_asset_by_name( + project_name, + h.split('/')[-1], + fields=["_id", "data.fps"] + ) start_frames = [] end_frames = [] - elements = list( - legacy_io.find({"type": "asset", "data.visualParent": id})) + elements = list(get_assets( + project_name, + parent_ids=[asset_data["_id"]], + fields=["_id", "data.clipIn", "data.clipOut"] + )) + for e in elements: start_frames.append(e.get('data').get('clipIn')) end_frames.append(e.get('data').get('clipOut')) - elements.extend(legacy_io.find({ - "type": "asset", - "data.visualParent": e.get('_id') - })) + elements.extend(get_assets( + project_name, + parent_ids=[e["_id"]], + fields=["_id", "data.clipIn", "data.clipOut"] + )) min_frame = min(start_frames) max_frame = max(end_frames) @@ -215,7 +265,7 @@ class CameraLoader(plugin.Loader): sequences[i], sequences[i + 1], frame_ranges[i + 1][0], frame_ranges[i + 1][1]) - data = self._get_data(asset) + data = get_asset_by_name(project_name, asset)["data"] cam_seq.set_display_rate( unreal.FrameRate(data.get("fps"), 1.0)) cam_seq.set_playback_start(0) @@ -228,7 +278,7 @@ class CameraLoader(plugin.Loader): settings.set_editor_property('reduce_keys', False) if cam_seq: - unreal.SequencerTools.import_fbx( + self._import_camera( EditorLevelLibrary.get_editor_world(), cam_seq, cam_seq.get_bindings(), @@ -256,7 +306,7 @@ class CameraLoader(plugin.Loader): "{}/{}".format(asset_dir, container_name), data) 
EditorLevelLibrary.save_all_dirty_levels() - EditorLevelLibrary.load_level(current_level) + EditorLevelLibrary.load_level(master_level) asset_content = EditorAssetLibrary.list_assets( asset_dir, recursive=True, include_folder=True @@ -268,68 +318,242 @@ class CameraLoader(plugin.Loader): return asset_content def update(self, container, representation): - path = container["namespace"] - ar = unreal.AssetRegistryHelpers.get_asset_registry() - tools = unreal.AssetToolsHelpers().get_asset_tools() - asset_content = EditorAssetLibrary.list_assets( - path, recursive=False, include_folder=False - ) - asset_name = "" - for a in asset_content: - asset = ar.get_asset_by_object_path(a) - if a.endswith("_CON"): - loaded_asset = EditorAssetLibrary.load_asset(a) - EditorAssetLibrary.set_metadata_tag( - loaded_asset, "representation", str(representation["_id"]) - ) - EditorAssetLibrary.set_metadata_tag( - loaded_asset, "parent", str(representation["parent"]) - ) - asset_name = EditorAssetLibrary.get_metadata_tag( - loaded_asset, "asset_name" - ) - elif asset.asset_class == "LevelSequence": - EditorAssetLibrary.delete_asset(a) + root = "/Game/OpenPype" - sequence = tools.create_asset( - asset_name=asset_name, - package_path=path, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() + asset_dir = container.get('namespace') + + context = representation.get("context") + + hierarchy = context.get('hierarchy').split("/") + h_dir = f"{root}/{hierarchy[0]}" + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + + EditorLevelLibrary.save_current_level() + + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[asset_dir], + recursive_paths=False) + sequences = ar.get_assets(filter) + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[str(Path(asset_dir).parent.as_posix())], + recursive_paths=True) + maps = ar.get_assets(filter) + + # There should be only one map in the list + EditorLevelLibrary.load_level(maps[0].get_full_name()) + + level_sequence = sequences[0].get_asset() + + display_rate = level_sequence.get_display_rate() + playback_start = level_sequence.get_playback_start() + playback_end = level_sequence.get_playback_end() + + sequence_name = f"{container.get('asset')}_camera" + + # Get the actors in the level sequence. + objs = unreal.SequencerTools.get_bound_objects( + unreal.EditorLevelLibrary.get_editor_world(), + level_sequence, + level_sequence.get_bindings(), + unreal.SequencerScriptingRange( + has_start_value=True, + has_end_value=True, + inclusive_start=level_sequence.get_playback_start(), + exclusive_end=level_sequence.get_playback_end() + ) ) - io_asset = legacy_io.Session["AVALON_ASSET"] - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": io_asset - }) + # Delete actors from the map + for o in objs: + if o.bound_objects[0].get_class().get_name() == "CineCameraActor": + actor_path = o.bound_objects[0].get_path_name().split(":")[-1] + actor = EditorLevelLibrary.get_actor_reference(actor_path) + EditorLevelLibrary.destroy_actor(actor) - data = asset_doc.get("data") + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to find + # the level sequence. 
+ root = "/Game/OpenPype" + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(filter) + master_sequence = sequences[0].get_asset() - if data: - sequence.set_display_rate(unreal.FrameRate(data.get("fps"), 1.0)) - sequence.set_playback_start(data.get("frameStart")) - sequence.set_playback_end(data.get("frameEnd")) + sequences = [master_sequence] + + parent = None + sub_scene = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + break + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if ss.get_sequence().get_name() == sequence_name: + parent = s + sub_scene = ss + # subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. + i = 0 + for ss in sections: + ss.set_row_index(i) + i += 1 + + if parent: + break + + assert parent, "Could not find the parent sequence" + + EditorAssetLibrary.delete_asset(level_sequence.get_path_name()) settings = unreal.MovieSceneUserImportFBXSettings() settings.set_editor_property('reduce_keys', False) - unreal.SequencerTools.import_fbx( + tools = unreal.AssetToolsHelpers().get_asset_tools() + new_sequence = tools.create_asset( + asset_name=sequence_name, + package_path=asset_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + new_sequence.set_display_rate(display_rate) + new_sequence.set_playback_start(playback_start) + new_sequence.set_playback_end(playback_end) + + sub_scene.set_sequence(new_sequence) + + self._import_camera( EditorLevelLibrary.get_editor_world(), - sequence, - sequence.get_bindings(), + new_sequence, + new_sequence.get_bindings(), settings, str(representation["data"]["path"]) ) + data = { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + } + unreal_pipeline.imprint( + "{}/{}".format(asset_dir, container.get('container_name')), data) + + EditorLevelLibrary.save_current_level() + + asset_content = EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=False) + + for a in asset_content: + EditorAssetLibrary.save_asset(a) + + EditorLevelLibrary.load_level(master_level) + def remove(self, container): - path = container["namespace"] - parent_path = os.path.dirname(path) + path = Path(container.get("namespace")) + parent_path = str(path.parent.as_posix()) - EditorAssetLibrary.delete_directory(path) + ar = unreal.AssetRegistryHelpers.get_asset_registry() + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{str(path.as_posix())}"], + recursive_paths=False) + sequences = ar.get_assets(filter) + if not sequences: + raise Exception("Could not find sequence.") + + world = ar.get_asset_by_object_path( + EditorLevelLibrary.get_editor_world().get_path_name()) + + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{parent_path}"], + recursive_paths=True) + maps = ar.get_assets(filter) + + # There should be only one map in the list + if not maps: + raise Exception("Could not find map.") + + map = maps[0] + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(map.get_full_name()) + + # Remove the camera from the level. 
+ actors = EditorLevelLibrary.get_all_level_actors() + + for a in actors: + if a.__class__ == unreal.CineCameraActor: + EditorLevelLibrary.destroy_actor(a) + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(world.get_full_name()) + + # There should be only one sequence in the path. + sequence_name = sequences[0].asset_name + + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to find + # the level sequence. + root = "/Game/OpenPype" + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(filter) + master_sequence = sequences[0].get_asset() + + sequences = [master_sequence] + + parent = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + break + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if ss.get_sequence().get_name() == sequence_name: + parent = s + subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. + i = 0 + for ss in sections: + ss.set_row_index(i) + i += 1 + + if parent: + break + + assert parent, "Could not find the parent sequence" + + EditorAssetLibrary.delete_directory(str(path.as_posix())) + + # Check if there isn't any more assets in the parent folder, and + # delete it if not. asset_content = EditorAssetLibrary.list_assets( parent_path, recursive=False, include_folder=True ) diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/openpype/hosts/unreal/plugins/load/load_layout.py index e09255d88c..c1d66ddf2a 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout.py +++ b/openpype/hosts/unreal/plugins/load/load_layout.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- """Loader for layouts.""" -import os import json from pathlib import Path @@ -10,8 +9,12 @@ from unreal import EditorLevelLibrary from unreal import EditorLevelUtils from unreal import AssetToolsHelpers from unreal import FBXImportType -from unreal import MathLibrary as umath +from unreal import MovieSceneLevelVisibilityTrack +from unreal import MovieSceneSubTrack +from bson.objectid import ObjectId + +from openpype.client import get_asset_by_name, get_assets from openpype.pipeline import ( discover_loader_plugins, loaders_from_representation, @@ -20,6 +23,8 @@ from openpype.pipeline import ( AVALON_CONTAINER_ID, legacy_io, ) +from openpype.pipeline.context_tools import get_current_project_asset +from openpype.settings import get_current_project_settings from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -87,16 +92,9 @@ class LayoutLoader(plugin.Loader): return None - def _get_data(self, asset_name): - asset_doc = legacy_io.find_one({ - "type": "asset", - "name": asset_name - }) - - return asset_doc.get("data") - + @staticmethod def _set_sequence_hierarchy( - self, seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths + seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths ): # Get existing sequencer tracks or create them if they don't exist tracks = seq_i.get_master_tracks() @@ -165,8 +163,29 @@ class LayoutLoader(plugin.Loader): hid_section.set_row_index(index) hid_section.set_level_names(maps) + def 
_transform_from_basis(self, transform, basis): + """Transform a transform from a basis to a new basis.""" + # Get the basis matrix + basis_matrix = unreal.Matrix( + basis[0], + basis[1], + basis[2], + basis[3] + ) + transform_matrix = unreal.Matrix( + transform[0], + transform[1], + transform[2], + transform[3] + ) + + new_transform = ( + basis_matrix.get_inverse() * transform_matrix * basis_matrix) + + return new_transform.transform() + def _process_family( - self, assets, class_name, transform, sequence, inst_name=None + self, assets, class_name, transform, basis, sequence, inst_name=None ): ar = unreal.AssetRegistryHelpers.get_asset_registry() @@ -176,30 +195,12 @@ class LayoutLoader(plugin.Loader): for asset in assets: obj = ar.get_asset_by_object_path(asset).get_asset() if obj.get_class().get_name() == class_name: + t = self._transform_from_basis(transform, basis) actor = EditorLevelLibrary.spawn_actor_from_object( - obj, - transform.get('translation') + obj, t.translation ) - if inst_name: - try: - # Rename method leads to crash - # actor.rename(name=inst_name) - - # The label works, although it make it slightly more - # complicated to check for the names, as we need to - # loop through all the actors in the level - actor.set_actor_label(inst_name) - except Exception as e: - print(e) - actor.set_actor_rotation(unreal.Rotator( - umath.radians_to_degrees( - transform.get('rotation').get('x')), - -umath.radians_to_degrees( - transform.get('rotation').get('y')), - umath.radians_to_degrees( - transform.get('rotation').get('z')), - ), False) - actor.set_actor_scale3d(transform.get('scale')) + actor.set_actor_rotation(t.rotation.rotator(), False) + actor.set_actor_scale3d(t.scale3d) if class_name == 'SkeletalMesh': skm_comp = actor.get_editor_property( @@ -208,9 +209,17 @@ class LayoutLoader(plugin.Loader): actors.append(actor) - binding = sequence.add_possessable(actor) + if sequence: + binding = None + for p in sequence.get_possessables(): + if p.get_name() == actor.get_name(): + binding = p + break - bindings.append(binding) + if not binding: + binding = sequence.add_possessable(actor) + + bindings.append(binding) return actors, bindings @@ -223,6 +232,7 @@ class LayoutLoader(plugin.Loader): anim_path = f"{asset_dir}/animations/{anim_file_name}" + asset_doc = get_current_project_asset() # Import animation task = unreal.AssetImportTask() task.options = unreal.FbxImportUI() @@ -255,13 +265,15 @@ class LayoutLoader(plugin.Loader): task.options.anim_sequence_import_data.set_editor_property( 'import_meshes_in_bone_hierarchy', False) task.options.anim_sequence_import_data.set_editor_property( - 'use_default_sample_rate', True) + 'use_default_sample_rate', False) + task.options.anim_sequence_import_data.set_editor_property( + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) task.options.anim_sequence_import_data.set_editor_property( 'import_custom_attribute', True) task.options.anim_sequence_import_data.set_editor_property( 'import_bone_tracks', True) task.options.anim_sequence_import_data.set_editor_property( - 'remove_redundant_keys', True) + 'remove_redundant_keys', False) task.options.anim_sequence_import_data.set_editor_property( 'convert_scene', True) @@ -296,20 +308,109 @@ class LayoutLoader(plugin.Loader): actor.skeletal_mesh_component.animation_data.set_editor_property( 'anim_to_play', animation) - # Add animation to the sequencer - bindings = bindings_dict.get(instance_name) + if sequence: + # Add animation to the sequencer + bindings = bindings_dict.get(instance_name) + + 
ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for binding in bindings: + tracks = binding.get_tracks() + track = None + track = tracks[0] if tracks else binding.add_track( + unreal.MovieSceneSkeletalAnimationTrack) + + sections = track.get_sections() + section = None + if not sections: + section = track.add_section() + else: + section = sections[0] + + sec_params = section.get_editor_property('params') + curr_anim = sec_params.get_editor_property('animation') + + if curr_anim: + # Checks if the animation path has a container. + # If it does, it means that the animation is + # already in the sequencer. + anim_path = str(Path( + curr_anim.get_path_name()).parent + ).replace('\\', '/') + + _filter = unreal.ARFilter( + class_names=["AssetContainer"], + package_paths=[anim_path], + recursive_paths=False) + containers = ar.get_assets(_filter) + + if len(containers) > 0: + return - for binding in bindings: - binding.add_track(unreal.MovieSceneSkeletalAnimationTrack) - for track in binding.get_tracks(): - section = track.add_section() section.set_range( sequence.get_playback_start(), sequence.get_playback_end()) sec_params = section.get_editor_property('params') sec_params.set_editor_property('animation', animation) - def _process(self, lib_path, asset_dir, sequence, loaded=None): + @staticmethod + def _generate_sequence(h, h_dir): + tools = unreal.AssetToolsHelpers().get_asset_tools() + + sequence = tools.create_asset( + asset_name=h, + package_path=h_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + project_name = legacy_io.active_project() + asset_data = get_asset_by_name( + project_name, + h_dir.split('/')[-1], + fields=["_id", "data.fps"] + ) + + start_frames = [] + end_frames = [] + + elements = list(get_assets( + project_name, + parent_ids=[asset_data["_id"]], + fields=["_id", "data.clipIn", "data.clipOut"] + )) + for e in elements: + start_frames.append(e.get('data').get('clipIn')) + end_frames.append(e.get('data').get('clipOut')) + + elements.extend(get_assets( + project_name, + parent_ids=[e["_id"]], + fields=["_id", "data.clipIn", "data.clipOut"] + )) + + min_frame = min(start_frames) + max_frame = max(end_frames) + + sequence.set_display_rate( + unreal.FrameRate(asset_data.get('data').get("fps"), 1.0)) + sequence.set_playback_start(min_frame) + sequence.set_playback_end(max_frame) + + tracks = sequence.get_master_tracks() + track = None + for t in tracks: + if (t.get_class() == + unreal.MovieSceneCameraCutTrack.static_class()): + track = t + break + if not track: + track = sequence.add_master_track( + unreal.MovieSceneCameraCutTrack) + + return sequence, (min_frame, max_frame) + + def _process(self, lib_path, asset_dir, sequence, repr_loaded=None): ar = unreal.AssetRegistryHelpers.get_asset_registry() with open(lib_path, "r") as fp: @@ -317,8 +418,8 @@ class LayoutLoader(plugin.Loader): all_loaders = discover_loader_plugins() - if not loaded: - loaded = [] + if not repr_loaded: + repr_loaded = [] path = Path(lib_path) @@ -326,83 +427,124 @@ class LayoutLoader(plugin.Loader): actors_dict = {} bindings_dict = {} + loaded_assets = [] + for element in data: - reference = None - if element.get('reference_fbx'): - reference = element.get('reference_fbx') + representation = None + repr_format = None + if element.get('representation'): + # representation = element.get('representation') + + self.log.info(element.get("version")) + + valid_formats = ['fbx', 'abc'] + + repr_data = legacy_io.find_one({ + "type": "representation", + "parent": 
ObjectId(element.get("version")), + "name": {"$in": valid_formats} + }) + repr_format = repr_data.get('name') + + if not repr_data: + self.log.error( + f"No valid representation found for version " + f"{element.get('version')}") + continue + + representation = str(repr_data.get('_id')) + print(representation) + # This is to keep compatibility with old versions of the + # json format. + elif element.get('reference_fbx'): + representation = element.get('reference_fbx') + repr_format = 'fbx' elif element.get('reference_abc'): - reference = element.get('reference_abc') + representation = element.get('reference_abc') + repr_format = 'abc' # If reference is None, this element is skipped, as it cannot be # imported in Unreal - if not reference: + if not representation: continue instance_name = element.get('instance_name') skeleton = None - if reference not in loaded: - loaded.append(reference) + if representation not in repr_loaded: + repr_loaded.append(representation) family = element.get('family') loaders = loaders_from_representation( - all_loaders, reference) + all_loaders, representation) loader = None - if reference == element.get('reference_fbx'): + if repr_format == 'fbx': loader = self._get_fbx_loader(loaders, family) - elif reference == element.get('reference_abc'): + elif repr_format == 'abc': loader = self._get_abc_loader(loaders, family) if not loader: + self.log.error( + f"No valid loader found for {representation}") continue options = { - "asset_dir": asset_dir + # "asset_dir": asset_dir } assets = load_container( loader, - reference, + representation, namespace=instance_name, options=options ) + container = None + + for asset in assets: + obj = ar.get_asset_by_object_path(asset).get_asset() + if obj.get_class().get_name() == 'AssetContainer': + container = obj + if obj.get_class().get_name() == 'Skeleton': + skeleton = obj + + loaded_assets.append(container.get_path_name()) + instances = [ item for item in data - if (item.get('reference_fbx') == reference or - item.get('reference_abc') == reference)] + if ((item.get('version') and + item.get('version') == element.get('version')) or + item.get('reference_fbx') == representation or + item.get('reference_abc') == representation)] for instance in instances: - transform = instance.get('transform') + # transform = instance.get('transform') + transform = instance.get('transform_matrix') + basis = instance.get('basis') inst = instance.get('instance_name') actors = [] if family == 'model': actors, _ = self._process_family( - assets, 'StaticMesh', transform, sequence, inst) + assets, 'StaticMesh', transform, basis, + sequence, inst + ) elif family == 'rig': actors, bindings = self._process_family( - assets, 'SkeletalMesh', transform, sequence, inst) + assets, 'SkeletalMesh', transform, basis, + sequence, inst + ) actors_dict[inst] = actors bindings_dict[inst] = bindings - if family == 'rig': - # Finds skeleton among the imported assets - for asset in assets: - obj = ar.get_asset_by_object_path(asset).get_asset() - if obj.get_class().get_name() == 'Skeleton': - skeleton = obj - if skeleton: - break - if skeleton: - skeleton_dict[reference] = skeleton + skeleton_dict[representation] = skeleton else: - skeleton = skeleton_dict.get(reference) + skeleton = skeleton_dict.get(representation) animation_file = element.get('animation') @@ -411,6 +553,8 @@ class LayoutLoader(plugin.Loader): asset_dir, path, instance_name, skeleton, actors_dict, animation_file, bindings_dict, sequence) + return loaded_assets + @staticmethod def _remove_family(assets, 
components, class_name, prop_name): ar = unreal.AssetRegistryHelpers.get_asset_registry() @@ -474,20 +618,20 @@ class LayoutLoader(plugin.Loader): Returns: list(str): list of container content """ + data = get_current_project_settings() + create_sequences = data["unreal"]["level_sequences_for_layouts"] + # Create directory for asset and avalon container hierarchy = context.get('asset').get('data').get('parents') root = self.ASSET_ROOT hierarchy_dir = root - hierarchy_list = [] + hierarchy_dir_list = [] for h in hierarchy: hierarchy_dir = f"{hierarchy_dir}/{h}" - hierarchy_list.append(hierarchy_dir) + hierarchy_dir_list.append(hierarchy_dir) asset = context.get('asset').get('name') suffix = "_CON" - if asset: - asset_name = "{}_{}".format(asset, name) - else: - asset_name = "{}".format(name) + asset_name = f"{asset}_{name}" if asset else name tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( @@ -497,145 +641,90 @@ class LayoutLoader(plugin.Loader): EditorAssetLibrary.make_directory(asset_dir) - # Create map for the shot, and create hierarchy of map. If the maps - # already exist, we will use them. - maps = [] - for h in hierarchy_list: - a = h.split('/')[-1] - map = f"{h}/{a}_map.{a}_map" - new = False - - if not EditorAssetLibrary.does_asset_exist(map): - EditorLevelLibrary.new_level(f"{h}/{a}_map") - new = True - - maps.append({"map": map, "new": new}) - - EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map") - maps.append( - {"map": f"{asset_dir}/{asset}_map.{asset}_map", "new": True}) - - for i in range(0, len(maps) - 1): - for j in range(i + 1, len(maps)): - if maps[j].get('new'): - EditorLevelLibrary.load_level(maps[i].get('map')) - EditorLevelUtils.add_level_to_world( - EditorLevelLibrary.get_editor_world(), - maps[j].get('map'), - unreal.LevelStreamingDynamic - ) - EditorLevelLibrary.save_all_dirty_levels() - - EditorLevelLibrary.load_level(maps[-1].get('map')) - - # Get all the sequences in the hierarchy. It will create them, if - # they don't exist. + master_level = None + shot = None sequences = [] - frame_ranges = [] - i = 0 - for h in hierarchy_list: - root_content = EditorAssetLibrary.list_assets( - h, recursive=False, include_folder=False) - existing_sequences = [ - EditorAssetLibrary.find_asset_data(asset) - for asset in root_content - if EditorAssetLibrary.find_asset_data( - asset).get_class().get_name() == 'LevelSequence' - ] + level = f"{asset_dir}/{asset}_map.{asset}_map" + EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map") - if not existing_sequences: - sequence = tools.create_asset( - asset_name=hierarchy[i], - package_path=h, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() + if create_sequences: + # Create map for the shot, and create hierarchy of map. If the + # maps already exist, we will use them. + if hierarchy: + h_dir = hierarchy_dir_list[0] + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + if not EditorAssetLibrary.does_asset_exist(master_level): + EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map") + + if master_level: + EditorLevelLibrary.load_level(master_level) + EditorLevelUtils.add_level_to_world( + EditorLevelLibrary.get_editor_world(), + level, + unreal.LevelStreamingDynamic ) + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(level) - asset_data = legacy_io.find_one({ - "type": "asset", - "name": h.split('/')[-1] - }) + # Get all the sequences in the hierarchy. 
It will create them, if + # they don't exist. + frame_ranges = [] + for (h_dir, h) in zip(hierarchy_dir_list, hierarchy): + root_content = EditorAssetLibrary.list_assets( + h_dir, recursive=False, include_folder=False) - id = asset_data.get('_id') + existing_sequences = [ + EditorAssetLibrary.find_asset_data(asset) + for asset in root_content + if EditorAssetLibrary.find_asset_data( + asset).get_class().get_name() == 'LevelSequence' + ] - start_frames = [] - end_frames = [] + if not existing_sequences: + sequence, frame_range = self._generate_sequence(h, h_dir) - elements = list( - legacy_io.find({"type": "asset", "data.visualParent": id})) - for e in elements: - start_frames.append(e.get('data').get('clipIn')) - end_frames.append(e.get('data').get('clipOut')) + sequences.append(sequence) + frame_ranges.append(frame_range) + else: + for e in existing_sequences: + sequences.append(e.get_asset()) + frame_ranges.append(( + e.get_asset().get_playback_start(), + e.get_asset().get_playback_end())) - elements.extend(legacy_io.find({ - "type": "asset", - "data.visualParent": e.get('_id') - })) + shot = tools.create_asset( + asset_name=asset, + package_path=asset_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) - min_frame = min(start_frames) - max_frame = max(end_frames) + # sequences and frame_ranges have the same length + for i in range(0, len(sequences) - 1): + self._set_sequence_hierarchy( + sequences[i], sequences[i + 1], + frame_ranges[i][1], + frame_ranges[i + 1][0], frame_ranges[i + 1][1], + [level]) - sequence.set_display_rate( - unreal.FrameRate(asset_data.get('data').get("fps"), 1.0)) - sequence.set_playback_start(min_frame) - sequence.set_playback_end(max_frame) + project_name = legacy_io.active_project() + data = get_asset_by_name(project_name, asset)["data"] + shot.set_display_rate( + unreal.FrameRate(data.get("fps"), 1.0)) + shot.set_playback_start(0) + shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1) + if sequences: + self._set_sequence_hierarchy( + sequences[-1], shot, + frame_ranges[-1][1], + data.get('clipIn'), data.get('clipOut'), + [level]) - sequences.append(sequence) - frame_ranges.append((min_frame, max_frame)) + EditorLevelLibrary.load_level(level) - tracks = sequence.get_master_tracks() - track = None - for t in tracks: - if (t.get_class() == - unreal.MovieSceneCameraCutTrack.static_class()): - track = t - break - if not track: - track = sequence.add_master_track( - unreal.MovieSceneCameraCutTrack) - else: - for e in existing_sequences: - sequences.append(e.get_asset()) - frame_ranges.append(( - e.get_asset().get_playback_start(), - e.get_asset().get_playback_end())) - - i += 1 - - shot = tools.create_asset( - asset_name=asset, - package_path=asset_dir, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() - ) - - # sequences and frame_ranges have the same length - for i in range(0, len(sequences) - 1): - maps_to_add = [] - for j in range(i + 1, len(maps)): - maps_to_add.append(maps[j].get('map')) - - self._set_sequence_hierarchy( - sequences[i], sequences[i + 1], - frame_ranges[i][1], - frame_ranges[i + 1][0], frame_ranges[i + 1][1], - maps_to_add) - - data = self._get_data(asset) - shot.set_display_rate( - unreal.FrameRate(data.get("fps"), 1.0)) - shot.set_playback_start(0) - shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1) - self._set_sequence_hierarchy( - sequences[-1], shot, - frame_ranges[-1][1], - data.get('clipIn'), data.get('clipOut'), - [maps[-1].get('map')]) - 
- EditorLevelLibrary.load_level(maps[-1].get('map')) - - self._process(self.fname, asset_dir, shot) + loaded_assets = self._process(self.fname, asset_dir, shot) for s in sequences: EditorAssetLibrary.save_asset(s.get_full_name()) @@ -656,7 +745,8 @@ class LayoutLoader(plugin.Loader): "loader": str(self.__class__.__name__), "representation": context["representation"]["_id"], "parent": context["representation"]["parent"], - "family": context["representation"]["context"]["family"] + "family": context["representation"]["context"]["family"], + "loaded_assets": loaded_assets } unreal_pipeline.imprint( "{}/{}".format(asset_dir, container_name), data) @@ -667,148 +757,224 @@ class LayoutLoader(plugin.Loader): for a in asset_content: EditorAssetLibrary.save_asset(a) - EditorLevelLibrary.load_level(maps[0].get('map')) + if master_level: + EditorLevelLibrary.load_level(master_level) return asset_content def update(self, container, representation): + data = get_current_project_settings() + create_sequences = data["unreal"]["level_sequences_for_layouts"] + ar = unreal.AssetRegistryHelpers.get_asset_registry() + root = "/Game/OpenPype" + + asset_dir = container.get('namespace') + context = representation.get("context") + + sequence = None + master_level = None + + if create_sequences: + hierarchy = context.get('hierarchy').split("/") + h_dir = f"{root}/{hierarchy[0]}" + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[asset_dir], + recursive_paths=False) + sequences = ar.get_assets(filter) + sequence = sequences[0].get_asset() + + prev_level = None + + if not master_level: + curr_level = unreal.LevelEditorSubsystem().get_current_level() + curr_level_path = curr_level.get_outer().get_path_name() + # If the level path does not start with "/Game/", the current + # level is a temporary, unsaved level. + if curr_level_path.startswith("/Game/"): + prev_level = curr_level_path + + # Get layout level + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[asset_dir], + recursive_paths=False) + levels = ar.get_assets(filter) + + layout_level = levels[0].get_editor_property('object_path') + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(layout_level) + + # Delete all the actors in the level + actors = unreal.EditorLevelLibrary.get_all_level_actors() + for actor in actors: + unreal.EditorLevelLibrary.destroy_actor(actor) + + if create_sequences: + EditorLevelLibrary.save_current_level() + + EditorAssetLibrary.delete_directory(f"{asset_dir}/animations/") + source_path = get_representation_path(representation) - destination_path = container["namespace"] - lib_path = Path(get_representation_path(representation)) - self._remove_actors(destination_path) + loaded_assets = self._process(source_path, asset_dir, sequence) - # Delete old animations - anim_path = f"{destination_path}/animations/" - EditorAssetLibrary.delete_directory(anim_path) - - with open(source_path, "r") as fp: - data = json.load(fp) - - references = [e.get('reference_fbx') for e in data] - asset_containers = self._get_asset_containers(destination_path) - loaded = [] - - # Delete all the assets imported with the previous version of the - # layout, if they're not in the new layout. 
- for asset_container in asset_containers: - if asset_container.get_editor_property( - 'asset_name') == container["objectName"]: - continue - ref = EditorAssetLibrary.get_metadata_tag( - asset_container.get_asset(), 'representation') - ppath = asset_container.get_editor_property('package_path') - - if ref not in references: - # If the asset is not in the new layout, delete it. - # Also check if the parent directory is empty, and delete that - # as well, if it is. - EditorAssetLibrary.delete_directory(ppath) - - parent = os.path.dirname(str(ppath)) - parent_content = EditorAssetLibrary.list_assets( - parent, recursive=False, include_folder=True - ) - - if len(parent_content) == 0: - EditorAssetLibrary.delete_directory(parent) - else: - # If the asset is in the new layout, search the instances in - # the JSON file, and create actors for them. - - actors_dict = {} - skeleton_dict = {} - - for element in data: - reference = element.get('reference_fbx') - instance_name = element.get('instance_name') - - skeleton = None - - if reference == ref and ref not in loaded: - loaded.append(ref) - - family = element.get('family') - - assets = EditorAssetLibrary.list_assets( - ppath, recursive=True, include_folder=False) - - instances = [ - item for item in data - if item.get('reference_fbx') == reference] - - for instance in instances: - transform = instance.get('transform') - inst = instance.get('instance_name') - - actors = [] - - if family == 'model': - actors = self._process_family( - assets, 'StaticMesh', transform, inst) - elif family == 'rig': - actors = self._process_family( - assets, 'SkeletalMesh', transform, inst) - actors_dict[inst] = actors - - if family == 'rig': - # Finds skeleton among the imported assets - for asset in assets: - obj = ar.get_asset_by_object_path( - asset).get_asset() - if obj.get_class().get_name() == 'Skeleton': - skeleton = obj - if skeleton: - break - - if skeleton: - skeleton_dict[reference] = skeleton - else: - skeleton = skeleton_dict.get(reference) - - animation_file = element.get('animation') - - if animation_file and skeleton: - self._import_animation( - destination_path, lib_path, - instance_name, skeleton, - actors_dict, animation_file) - - self._process(source_path, destination_path, loaded) - - container_path = "{}/{}".format(container["namespace"], - container["objectName"]) - # update metadata + data = { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]), + "loaded_assets": loaded_assets + } unreal_pipeline.imprint( - container_path, - { - "representation": str(representation["_id"]), - "parent": str(representation["parent"]) - }) + "{}/{}".format(asset_dir, container.get('container_name')), data) + + EditorLevelLibrary.save_current_level() asset_content = EditorAssetLibrary.list_assets( - destination_path, recursive=True, include_folder=False) + asset_dir, recursive=True, include_folder=False) for a in asset_content: EditorAssetLibrary.save_asset(a) + if master_level: + EditorLevelLibrary.load_level(master_level) + elif prev_level: + EditorLevelLibrary.load_level(prev_level) + def remove(self, container): """ - First, destroy all actors of the assets to be removed. Then, deletes - the asset's directory. + Delete the layout. First, check if the assets loaded with the layout + are used by other layouts. If not, delete the assets. 
""" - path = container["namespace"] - parent_path = os.path.dirname(path) + data = get_current_project_settings() + create_sequences = data["unreal"]["level_sequences_for_layouts"] - self._remove_actors(path) + root = "/Game/OpenPype" + path = Path(container.get("namespace")) - EditorAssetLibrary.delete_directory(path) + containers = unreal_pipeline.ls() + layout_containers = [ + c for c in containers + if (c.get('asset_name') != container.get('asset_name') and + c.get('family') == "layout")] + # Check if the assets have been loaded by other layouts, and deletes + # them if they haven't. + for asset in eval(container.get('loaded_assets')): + layouts = [ + lc for lc in layout_containers + if asset in lc.get('loaded_assets')] + + if not layouts: + EditorAssetLibrary.delete_directory(str(Path(asset).parent)) + + # Delete the parent folder if there aren't any more + # layouts in it. + asset_content = EditorAssetLibrary.list_assets( + str(Path(asset).parent.parent), recursive=False, + include_folder=True + ) + + if len(asset_content) == 0: + EditorAssetLibrary.delete_directory( + str(Path(asset).parent.parent)) + + master_sequence = None + master_level = None + sequences = [] + + if create_sequences: + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to + # find the level sequence. + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + ar = unreal.AssetRegistryHelpers.get_asset_registry() + _filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(_filter) + master_sequence = sequences[0].get_asset() + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + levels = ar.get_assets(_filter) + master_level = levels[0].get_editor_property('object_path') + + sequences = [master_sequence] + + parent = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + visibility_track = None + for t in tracks: + if t.get_class() == MovieSceneSubTrack.static_class(): + subscene_track = t + if (t.get_class() == + MovieSceneLevelVisibilityTrack.static_class()): + visibility_track = t + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if (ss.get_sequence().get_name() == + container.get('asset')): + parent = s + subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. + i = 0 + for ss in sections: + ss.set_row_index(i) + i += 1 + + if visibility_track: + sections = visibility_track.get_sections() + for ss in sections: + if (unreal.Name(f"{container.get('asset')}_map") + in ss.get_level_names()): + visibility_track.remove_section(ss) + # Update visibility sections indexes. + i = -1 + prev_name = [] + for ss in sections: + if prev_name != ss.get_level_names(): + i += 1 + ss.set_row_index(i) + prev_name = ss.get_level_names() + if parent: + break + + assert parent, "Could not find the parent sequence" + + # Create a temporary level to delete the layout level. + EditorLevelLibrary.save_all_dirty_levels() + EditorAssetLibrary.make_directory(f"{root}/tmp") + tmp_level = f"{root}/tmp/temp_map" + if not EditorAssetLibrary.does_asset_exist(f"{tmp_level}.temp_map"): + EditorLevelLibrary.new_level(tmp_level) + else: + EditorLevelLibrary.load_level(tmp_level) + + # Delete the layout directory. 
+ EditorAssetLibrary.delete_directory(str(path)) + + if create_sequences: + EditorLevelLibrary.load_level(master_level) + EditorAssetLibrary.delete_directory(f"{root}/tmp") + + # Delete the parent folder if there aren't any more layouts in it. asset_content = EditorAssetLibrary.list_assets( - parent_path, recursive=False, include_folder=True + str(path.parent), recursive=False, include_folder=True ) if len(asset_content) == 0: - EditorAssetLibrary.delete_directory(parent_path) + EditorAssetLibrary.delete_directory(str(path.parent)) diff --git a/openpype/hosts/unreal/plugins/load/load_layout_existing.py b/openpype/hosts/unreal/plugins/load/load_layout_existing.py new file mode 100644 index 0000000000..3ce99f8ef6 --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_layout_existing.py @@ -0,0 +1,418 @@ +import json +from pathlib import Path + +import unreal +from unreal import EditorLevelLibrary + +from bson.objectid import ObjectId + +from openpype import pipeline +from openpype.pipeline import ( + discover_loader_plugins, + loaders_from_representation, + load_container, + get_representation_path, + AVALON_CONTAINER_ID, + legacy_io, +) +from openpype.api import get_current_project_settings +from openpype.hosts.unreal.api import plugin +from openpype.hosts.unreal.api import pipeline as upipeline + + +class ExistingLayoutLoader(plugin.Loader): + """ + Load Layout for an existing scene, and match the existing assets. + """ + + families = ["layout"] + representations = ["json"] + + label = "Load Layout on Existing Scene" + icon = "code-fork" + color = "orange" + ASSET_ROOT = "/Game/OpenPype" + + @staticmethod + def _create_container( + asset_name, asset_dir, asset, representation, parent, family + ): + container_name = f"{asset_name}_CON" + + container = None + if not unreal.EditorAssetLibrary.does_asset_exist( + f"{asset_dir}/{container_name}" + ): + container = upipeline.create_container(container_name, asset_dir) + else: + ar = unreal.AssetRegistryHelpers.get_asset_registry() + obj = ar.get_asset_by_object_path( + f"{asset_dir}/{container_name}.{container_name}") + container = obj.get_asset() + + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + # "loader": str(self.__class__.__name__), + "representation": representation, + "parent": parent, + "family": family + } + + upipeline.imprint( + "{}/{}".format(asset_dir, container_name), data) + + return container.get_path_name() + + @staticmethod + def _get_current_level(): + ue_version = unreal.SystemLibrary.get_engine_version().split('.') + ue_major = ue_version[0] + + if ue_major == '4': + return EditorLevelLibrary.get_editor_world() + elif ue_major == '5': + return unreal.LevelEditorSubsystem().get_current_level() + + raise NotImplementedError( + f"Unreal version {ue_major} not supported") + + def _get_transform(self, ext, import_data, lasset): + conversion = unreal.Matrix.IDENTITY.transform() + fbx_tuning = unreal.Matrix.IDENTITY.transform() + + basis = unreal.Matrix( + lasset.get('basis')[0], + lasset.get('basis')[1], + lasset.get('basis')[2], + lasset.get('basis')[3] + ).transform() + transform = unreal.Matrix( + lasset.get('transform_matrix')[0], + lasset.get('transform_matrix')[1], + lasset.get('transform_matrix')[2], + lasset.get('transform_matrix')[3] + ).transform() + + # Check for the conversion settings. 
We cannot access + # the alembic conversion settings, so we assume that + # the maya ones have been applied. + if ext == '.fbx': + loc = import_data.import_translation + rot = import_data.import_rotation.to_vector() + scale = import_data.import_uniform_scale + conversion = unreal.Transform( + location=[loc.x, loc.y, loc.z], + rotation=[rot.x, rot.y, rot.z], + scale=[-scale, scale, scale] + ) + fbx_tuning = unreal.Transform( + rotation=[180.0, 0.0, 90.0], + scale=[1.0, 1.0, 1.0] + ) + elif ext == '.abc': + # This is the standard conversion settings for + # alembic files from Maya. + conversion = unreal.Transform( + location=[0.0, 0.0, 0.0], + rotation=[0.0, 0.0, 0.0], + scale=[1.0, -1.0, 1.0] + ) + + new_transform = (basis.inverse() * transform * basis) + return fbx_tuning * conversion.inverse() * new_transform + + def _spawn_actor(self, obj, lasset): + actor = EditorLevelLibrary.spawn_actor_from_object( + obj, unreal.Vector(0.0, 0.0, 0.0) + ) + + actor.set_actor_label(lasset.get('instance_name')) + smc = actor.get_editor_property('static_mesh_component') + mesh = smc.get_editor_property('static_mesh') + import_data = mesh.get_editor_property('asset_import_data') + filename = import_data.get_first_filename() + path = Path(filename) + + transform = self._get_transform( + path.suffix, import_data, lasset) + + actor.set_actor_transform(transform, False, True) + + @staticmethod + def _get_fbx_loader(loaders, family): + name = "" + if family == 'rig': + name = "SkeletalMeshFBXLoader" + elif family == 'model' or family == 'staticMesh': + name = "StaticMeshFBXLoader" + elif family == 'camera': + name = "CameraLoader" + + if name == "": + return None + + for loader in loaders: + if loader.__name__ == name: + return loader + + return None + + @staticmethod + def _get_abc_loader(loaders, family): + name = "" + if family == 'rig': + name = "SkeletalMeshAlembicLoader" + elif family == 'model': + name = "StaticMeshAlembicLoader" + + if name == "": + return None + + for loader in loaders: + if loader.__name__ == name: + return loader + + return None + + def _load_asset(self, representation, version, instance_name, family): + valid_formats = ['fbx', 'abc'] + + repr_data = legacy_io.find_one({ + "type": "representation", + "parent": ObjectId(version), + "name": {"$in": valid_formats} + }) + repr_format = repr_data.get('name') + + all_loaders = discover_loader_plugins() + loaders = loaders_from_representation( + all_loaders, representation) + + loader = None + + if repr_format == 'fbx': + loader = self._get_fbx_loader(loaders, family) + elif repr_format == 'abc': + loader = self._get_abc_loader(loaders, family) + + if not loader: + self.log.error(f"No valid loader found for {representation}") + return [] + + # This option is necessary to avoid importing the assets with a + # different conversion compared to the other assets. For ABC files, + # it is in fact impossible to access the conversion settings. So, + # we must assume that the Maya conversion settings have been applied. 
+ options = { + "default_conversion": True + } + + assets = load_container( + loader, + representation, + namespace=instance_name, + options=options + ) + + return assets + + def _process(self, lib_path): + data = get_current_project_settings() + delete_unmatched = data["unreal"]["delete_unmatched_assets"] + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + actors = EditorLevelLibrary.get_all_level_actors() + + with open(lib_path, "r") as fp: + data = json.load(fp) + + layout_data = [] + + # Get all the representations in the JSON from the database. + for element in data: + if element.get('representation'): + layout_data.append(( + pipeline.legacy_io.find_one({ + "_id": ObjectId(element.get('representation')) + }), + element + )) + + containers = [] + actors_matched = [] + + for (repr_data, lasset) in layout_data: + if not repr_data: + raise AssertionError("Representation not found") + if not (repr_data.get('data') or + repr_data.get('data').get('path')): + raise AssertionError("Representation does not have path") + if not repr_data.get('context'): + raise AssertionError("Representation does not have context") + + # For every actor in the scene, check if it has a representation in + # those we got from the JSON. If so, create a container for it. + # Otherwise, remove it from the scene. + found = False + + for actor in actors: + if not actor.get_class().get_name() == 'StaticMeshActor': + continue + if actor in actors_matched: + continue + + # Get the original path of the file from which the asset has + # been imported. + smc = actor.get_editor_property('static_mesh_component') + mesh = smc.get_editor_property('static_mesh') + import_data = mesh.get_editor_property('asset_import_data') + filename = import_data.get_first_filename() + path = Path(filename) + + if (not path.name or + path.name not in repr_data.get('data').get('path')): + continue + + actor.set_actor_label(lasset.get('instance_name')) + + mesh_path = Path(mesh.get_path_name()).parent.as_posix() + + # Create the container for the asset. + asset = repr_data.get('context').get('asset') + subset = repr_data.get('context').get('subset') + container = self._create_container( + f"{asset}_{subset}", mesh_path, asset, + repr_data.get('_id'), repr_data.get('parent'), + repr_data.get('context').get('family') + ) + containers.append(container) + + # Set the transform for the actor. + transform = self._get_transform( + path.suffix, import_data, lasset) + actor.set_actor_transform(transform, False, True) + + actors_matched.append(actor) + found = True + break + + # If an actor has not been found for this representation, + # we check if it has been loaded already by checking all the + # loaded containers. If so, we add it to the scene. Otherwise, + # we load it. + if found: + continue + + all_containers = upipeline.ls() + + loaded = False + + for container in all_containers: + repr = container.get('representation') + + if not repr == str(repr_data.get('_id')): + continue + + asset_dir = container.get('namespace') + + filter = unreal.ARFilter( + class_names=["StaticMesh"], + package_paths=[asset_dir], + recursive_paths=False) + assets = ar.get_assets(filter) + + for asset in assets: + obj = asset.get_asset() + self._spawn_actor(obj, lasset) + + loaded = True + break + + # If the asset has not been loaded yet, we load it. 
+ if loaded: + continue + + assets = self._load_asset( + lasset.get('representation'), + lasset.get('version'), + lasset.get('instance_name'), + lasset.get('family') + ) + + for asset in assets: + obj = ar.get_asset_by_object_path(asset).get_asset() + if not obj.get_class().get_name() == 'StaticMesh': + continue + self._spawn_actor(obj, lasset) + + break + + # Check if an actor was not matched to a representation. + # If so, remove it from the scene. + for actor in actors: + if not actor.get_class().get_name() == 'StaticMeshActor': + continue + if actor not in actors_matched: + self.log.warning(f"Actor {actor.get_name()} not matched.") + if delete_unmatched: + EditorLevelLibrary.destroy_actor(actor) + + return containers + + def load(self, context, name, namespace, options): + print("Loading Layout and Match Assets") + + asset = context.get('asset').get('name') + asset_name = f"{asset}_{name}" if asset else name + container_name = f"{asset}_{name}_CON" + + curr_level = self._get_current_level() + + if not curr_level: + raise AssertionError("Current level not saved") + + containers = self._process(self.fname) + + curr_level_path = Path( + curr_level.get_outer().get_path_name()).parent.as_posix() + + if not unreal.EditorAssetLibrary.does_asset_exist( + f"{curr_level_path}/{container_name}" + ): + upipeline.create_container( + container=container_name, path=curr_level_path) + + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "asset": asset, + "namespace": curr_level_path, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"], + "loaded_assets": containers + } + upipeline.imprint(f"{curr_level_path}/{container_name}", data) + + def update(self, container, representation): + asset_dir = container.get('namespace') + + source_path = get_representation_path(representation) + containers = self._process(source_path) + + data = { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]), + "loaded_assets": containers + } + upipeline.imprint( + "{}/{}".format(asset_dir, container.get('container_name')), data) diff --git a/openpype/hosts/unreal/plugins/load/load_rig.py b/openpype/hosts/unreal/plugins/load/load_rig.py index ff844a5e94..227c5c9292 100644 --- a/openpype/hosts/unreal/plugins/load/load_rig.py +++ b/openpype/hosts/unreal/plugins/load/load_rig.py @@ -14,7 +14,7 @@ import unreal # noqa class SkeletalMeshFBXLoader(plugin.Loader): """Load Unreal SkeletalMesh from FBX.""" - families = ["rig"] + families = ["rig", "skeletalMesh"] label = "Import FBX Skeletal Mesh" representations = ["fbx"] icon = "cube" @@ -52,54 +52,55 @@ class SkeletalMeshFBXLoader(plugin.Loader): asset_name = "{}_{}".format(asset, name) else: asset_name = "{}".format(name) + version = context.get('version').get('name') tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}_v{version:03d}", suffix="") container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir): + unreal.EditorAssetLibrary.make_directory(asset_dir) - task = unreal.AssetImportTask() + task = unreal.AssetImportTask() - task.set_editor_property('filename', self.fname) - 
task.set_editor_property('destination_path', asset_dir) - task.set_editor_property('destination_name', asset_name) - task.set_editor_property('replace_existing', False) - task.set_editor_property('automated', True) - task.set_editor_property('save', False) + task.set_editor_property('filename', self.fname) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', False) + task.set_editor_property('automated', True) + task.set_editor_property('save', False) - # set import options here - options = unreal.FbxImportUI() - options.set_editor_property('import_as_skeletal', True) - options.set_editor_property('import_animations', False) - options.set_editor_property('import_mesh', True) - options.set_editor_property('import_materials', True) - options.set_editor_property('import_textures', True) - options.set_editor_property('skeleton', None) - options.set_editor_property('create_physics_asset', False) + # set import options here + options = unreal.FbxImportUI() + options.set_editor_property('import_as_skeletal', True) + options.set_editor_property('import_animations', False) + options.set_editor_property('import_mesh', True) + options.set_editor_property('import_materials', False) + options.set_editor_property('import_textures', False) + options.set_editor_property('skeleton', None) + options.set_editor_property('create_physics_asset', False) - options.set_editor_property('mesh_type_to_import', - unreal.FBXImportType.FBXIT_SKELETAL_MESH) + options.set_editor_property( + 'mesh_type_to_import', + unreal.FBXImportType.FBXIT_SKELETAL_MESH) - options.skeletal_mesh_import_data.set_editor_property( - 'import_content_type', - unreal.FBXImportContentType.FBXICT_ALL - ) - # set to import normals, otherwise Unreal will compute them - # and it will take a long time, depending on the size of the mesh - options.skeletal_mesh_import_data.set_editor_property( - 'normal_import_method', - unreal.FBXNormalImportMethod.FBXNIM_IMPORT_NORMALS - ) + options.skeletal_mesh_import_data.set_editor_property( + 'import_content_type', + unreal.FBXImportContentType.FBXICT_ALL) + # set to import normals, otherwise Unreal will compute them + # and it will take a long time, depending on the size of the mesh + options.skeletal_mesh_import_data.set_editor_property( + 'normal_import_method', + unreal.FBXNormalImportMethod.FBXNIM_IMPORT_NORMALS) - task.options = options - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + task.options = options + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - # Create Asset Container - unreal_pipeline.create_container( - container=container_name, path=asset_dir) + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) data = { "schema": "openpype:container-2.0", diff --git a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py index 282d249947..351c686095 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py @@ -14,7 +14,7 @@ import unreal # noqa class StaticMeshFBXLoader(plugin.Loader): """Load Unreal StaticMesh from FBX.""" - families = ["model", "unrealStaticMesh"] + families = ["model", "staticMesh"] label = "Import FBX Static Mesh" representations = ["fbx"] icon = "cube" diff --git 
a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py index 9d60b65d08..cb28f4bf60 100644 --- a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py +++ b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py @@ -1,8 +1,11 @@ +import os from pathlib import Path + import unreal -import pyblish.api +from openpype.pipeline import Anatomy from openpype.hosts.unreal.api import pipeline +import pyblish.api class CollectRenderInstances(pyblish.api.InstancePlugin): @@ -77,9 +80,15 @@ class CollectRenderInstances(pyblish.api.InstancePlugin): self.log.debug(f"new instance data: {new_data}") - project_dir = unreal.Paths.project_dir() - render_dir = (f"{project_dir}/Saved/MovieRenders/" - f"{s.get('output')}") + try: + project = os.environ.get("AVALON_PROJECT") + anatomy = Anatomy(project) + root = anatomy.roots['renders'] + except Exception: + raise Exception( + "Could not find render root in anatomy settings.") + + render_dir = f"{root}/{project}/{s.get('output')}" render_path = Path(render_dir) frames = [] diff --git a/openpype/hosts/unreal/plugins/publish/extract_camera.py b/openpype/hosts/unreal/plugins/publish/extract_camera.py index ce53824563..4e37cc6a86 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_camera.py +++ b/openpype/hosts/unreal/plugins/publish/extract_camera.py @@ -6,10 +6,10 @@ import unreal from unreal import EditorAssetLibrary as eal from unreal import EditorLevelLibrary as ell -import openpype.api +from openpype.pipeline import publish -class ExtractCamera(openpype.api.Extractor): +class ExtractCamera(publish.Extractor): """Extract a camera.""" label = "Extract Camera" diff --git a/openpype/hosts/unreal/plugins/publish/extract_layout.py b/openpype/hosts/unreal/plugins/publish/extract_layout.py index 87e6693a97..cac7991f00 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_layout.py +++ b/openpype/hosts/unreal/plugins/publish/extract_layout.py @@ -3,17 +3,15 @@ import os import json import math -from bson.objectid import ObjectId - import unreal from unreal import EditorLevelLibrary as ell from unreal import EditorAssetLibrary as eal -import openpype.api -from openpype.pipeline import legacy_io +from openpype.client import get_representation_by_name +from openpype.pipeline import legacy_io, publish -class ExtractLayout(openpype.api.Extractor): +class ExtractLayout(publish.Extractor): """Extract a layout.""" label = "Extract Layout" @@ -34,6 +32,7 @@ class ExtractLayout(openpype.api.Extractor): "Wrong level loaded" json_data = [] + project_name = legacy_io.active_project() for member in instance[:]: actor = ell.get_actor_reference(member) @@ -57,17 +56,13 @@ class ExtractLayout(openpype.api.Extractor): self.log.error("AssetContainer not found.") return - parent = eal.get_metadata_tag(asset_container, "parent") + parent_id = eal.get_metadata_tag(asset_container, "parent") family = eal.get_metadata_tag(asset_container, "family") - self.log.info("Parent: {}".format(parent)) - blend = legacy_io.find_one( - { - "type": "representation", - "parent": ObjectId(parent), - "name": "blend" - }, - projection={"_id": True}) + self.log.info("Parent: {}".format(parent_id)) + blend = get_representation_by_name( + project_name, "blend", parent_id, fields=["_id"] + ) blend_id = blend["_id"] json_element = {} diff --git a/openpype/hosts/unreal/plugins/publish/extract_look.py b/openpype/hosts/unreal/plugins/publish/extract_look.py index ea39949417..f999ad8651 100644 --- 
a/openpype/hosts/unreal/plugins/publish/extract_look.py +++ b/openpype/hosts/unreal/plugins/publish/extract_look.py @@ -5,10 +5,10 @@ import os import unreal from unreal import MaterialEditingLibrary as mat_lib -import openpype.api +from openpype.pipeline import publish -class ExtractLook(openpype.api.Extractor): +class ExtractLook(publish.Extractor): """Extract look.""" label = "Extract Look" diff --git a/openpype/hosts/unreal/plugins/publish/extract_render.py b/openpype/hosts/unreal/plugins/publish/extract_render.py index 37fe7e916f..8ff38fbee0 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_render.py +++ b/openpype/hosts/unreal/plugins/publish/extract_render.py @@ -2,10 +2,10 @@ from pathlib import Path import unreal -import openpype.api +from openpype.pipeline import publish -class ExtractRender(openpype.api.Extractor): +class ExtractRender(publish.Extractor): """Extract render.""" label = "Extract Render" diff --git a/openpype/hosts/webpublisher/__init__.py b/openpype/hosts/webpublisher/__init__.py index e69de29bb2..4e918c5d7d 100644 --- a/openpype/hosts/webpublisher/__init__.py +++ b/openpype/hosts/webpublisher/__init__.py @@ -0,0 +1,10 @@ +from .addon import ( + WebpublisherAddon, + WEBPUBLISHER_ROOT_DIR, +) + + +__all__ = ( + "WebpublisherAddon", + "WEBPUBLISHER_ROOT_DIR", +) diff --git a/openpype/hosts/webpublisher/addon.py b/openpype/hosts/webpublisher/addon.py new file mode 100644 index 0000000000..eb7fced2e6 --- /dev/null +++ b/openpype/hosts/webpublisher/addon.py @@ -0,0 +1,105 @@ +import os + +import click + +from openpype.modules import OpenPypeModule, IHostAddon + +WEBPUBLISHER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class WebpublisherAddon(OpenPypeModule, IHostAddon): + name = "webpublisher" + host_name = "webpublisher" + + def initialize(self, module_settings): + self.enabled = True + + def headless_publish(self, log, close_plugin_name=None, is_test=False): + """Runs publish in a opened host with a context. + + Close Python process at the end. + """ + + from openpype.pipeline.publish.lib import remote_publish + from .lib import get_webpublish_conn, publish_and_log + + if is_test: + remote_publish(log, close_plugin_name) + return + + dbcon = get_webpublish_conn() + _id = os.environ.get("BATCH_LOG_ID") + if not _id: + log.warning("Unable to store log records, " + "batch will be unfinished!") + return + + publish_and_log( + dbcon, _id, log, close_plugin_name=close_plugin_name + ) + + def cli(self, click_group): + click_group.add_command(cli_main) + + +@click.group( + WebpublisherAddon.name, + help="Webpublisher related commands.") +def cli_main(): + pass + + +@cli_main.command() +@click.argument("path") +@click.option("-u", "--user", help="User email address") +@click.option("-p", "--project", help="Project") +@click.option("-t", "--targets", help="Targets", default=None, + multiple=True) +def publish(project, path, user=None, targets=None): + """Start publishing (Inner command). + + Publish collects json from paths provided as an argument. + More than one path is allowed. 
+ """ + + from .publish_functions import cli_publish + + cli_publish(project, path, user, targets) + + +@cli_main.command() +@click.argument("path") +@click.option("-p", "--project", help="Project") +@click.option("-h", "--host", help="Host") +@click.option("-u", "--user", help="User email address") +@click.option("-t", "--targets", help="Targets", default=None, + multiple=True) +def publishfromapp(project, path, host, user=None, targets=None): + """Start publishing through application (Inner command). + + Publish collects json from paths provided as an argument. + More than one path is allowed. + """ + + from .publish_functions import cli_publish_from_app + + cli_publish_from_app(project, path, host, user, targets) + + +@cli_main.command() +@click.option("-e", "--executable", help="Executable") +@click.option("-u", "--upload_dir", help="Upload dir") +@click.option("-h", "--host", help="Host", default=None) +@click.option("-p", "--port", help="Port", default=None) +def webserver(executable, upload_dir, host=None, port=None): + """Start service for communication with Webpublish Front end. + + OP must be congigured on a machine, eg. OPENPYPE_MONGO filled AND + FTRACK_BOT_API_KEY provided with api key from Ftrack. + + Expect "pype.club" user created on Ftrack. + """ + + from .webserver_service import run_webserver + + run_webserver(executable, upload_dir, host, port) diff --git a/openpype/hosts/webpublisher/api/__init__.py b/openpype/hosts/webpublisher/api/__init__.py index 18e3a16cf5..afea838e2c 100644 --- a/openpype/hosts/webpublisher/api/__init__.py +++ b/openpype/hosts/webpublisher/api/__init__.py @@ -1,31 +1,23 @@ import os import logging -from pyblish import api as pyblish -import openpype.hosts.webpublisher -from openpype.pipeline import legacy_io +import pyblish.api + +from openpype.host import HostBase +from openpype.hosts.webpublisher import WEBPUBLISHER_ROOT_DIR log = logging.getLogger("openpype.hosts.webpublisher") -HOST_DIR = os.path.dirname(os.path.abspath( - openpype.hosts.webpublisher.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +class WebpublisherHost(HostBase): + name = "webpublisher" -def install(): - print("Installing Pype config...") + def install(self): + print("Installing Pype config...") + pyblish.api.register_host(self.name) - pyblish.register_plugin_path(PUBLISH_PATH) - log.info(PUBLISH_PATH) - - legacy_io.install() - - -def uninstall(): - pyblish.deregister_plugin_path(PUBLISH_PATH) - - -# to have required methods for interface -def ls(): - pass + publish_plugin_dir = os.path.join( + WEBPUBLISHER_ROOT_DIR, "plugins", "publish" + ) + pyblish.api.register_plugin_path(publish_plugin_dir) + self.log.info(publish_plugin_dir) diff --git a/openpype/lib/remote_publish.py b/openpype/hosts/webpublisher/lib.py similarity index 71% rename from openpype/lib/remote_publish.py rename to openpype/hosts/webpublisher/lib.py index 8a42daf4e9..4bc3f1db80 100644 --- a/openpype/lib/remote_publish.py +++ b/openpype/hosts/webpublisher/lib.py @@ -1,14 +1,18 @@ import os from datetime import datetime import collections +import json from bson.objectid import ObjectId import pyblish.util import pyblish.api -from openpype.lib.mongo import OpenPypeMongoConnection -from openpype.lib.plugin_tools import parse_json +from openpype.client.mongo import OpenPypeMongoConnection +from openpype.settings import get_project_settings +from openpype.lib import Logger +from openpype.lib.profiles_filtering import filter_profiles +from 
openpype.pipeline.publish.lib import find_close_plugin ERROR_STATUS = "error" IN_PROGRESS_STATUS = "in_progress" @@ -17,21 +21,51 @@ SENT_REPROCESSING_STATUS = "sent_for_reprocessing" FINISHED_REPROCESS_STATUS = "republishing_finished" FINISHED_OK_STATUS = "finished_ok" +log = Logger.get_logger(__name__) -def headless_publish(log, close_plugin_name=None, is_test=False): - """Runs publish in a opened host with a context and closes Python process. + +def parse_json(path): + """Parses json file at 'path' location + + Returns: + (dict) or None if unparsable + Raises: + AsssertionError if 'path' doesn't exist """ - if not is_test: - dbcon = get_webpublish_conn() - _id = os.environ.get("BATCH_LOG_ID") - if not _id: - log.warning("Unable to store log records, " - "batch will be unfinished!") - return + path = path.strip('\"') + assert os.path.isfile(path), ( + "Path to json file doesn't exist. \"{}\"".format(path) + ) + data = None + with open(path, "r") as json_file: + try: + data = json.load(json_file) + except Exception as exc: + log.error( + "Error loading json: {} - Exception: {}".format(path, exc) + ) + return data - publish_and_log(dbcon, _id, log, close_plugin_name=close_plugin_name) + +def get_batch_asset_task_info(ctx): + """Parses context data from webpublisher's batch metadata + + Returns: + (tuple): asset, task_name (Optional), task_type + """ + task_type = "default_task_type" + task_name = None + asset = None + + if ctx["type"] == "task": + items = ctx["path"].split('/') + asset = items[-2] + task_name = ctx["name"] + task_type = ctx["attributes"]["type"] else: - publish(log, close_plugin_name) + asset = ctx["name"] + + return asset, task_name, task_type def get_webpublish_conn(): @@ -60,38 +94,13 @@ def start_webpublish_log(dbcon, batch_id, user): }).inserted_id -def publish(log, close_plugin_name=None): - """Loops through all plugins, logs to console. Used for tests. - - Args: - log (OpenPypeLogger) - close_plugin_name (str): name of plugin with responsibility to - close host app - """ - # Error exit as soon as any error occurs. - error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" - - close_plugin = _get_close_plugin(close_plugin_name, log) - - for result in pyblish.util.publish_iter(): - for record in result["records"]: - log.info("{}: {}".format( - result["plugin"].label, record.msg)) - - if result["error"]: - log.error(error_format.format(**result)) - if close_plugin: # close host app explicitly after error - context = pyblish.api.Context() - close_plugin().process(context) - - def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): """Loops through all plugins, logs ok and fails into OP DB. 
Args: dbcon (OpenPypeMongoConnection) _id (str) - id of current job in DB - log (OpenPypeLogger) + log (openpype.lib.Logger) batch_id (str) - id sent from frontend close_plugin_name (str): name of plugin with responsibility to close host app @@ -100,7 +109,7 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}\n" error_format += "-" * 80 + "\n" - close_plugin = _get_close_plugin(close_plugin_name, log) + close_plugin = find_close_plugin(close_plugin_name, log) if isinstance(_id, str): _id = ObjectId(_id) @@ -170,14 +179,12 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): ) -def fail_batch(_id, batches_in_progress, dbcon): - """Set current batch as failed as there are some stuck batches.""" - running_batches = [str(batch["_id"]) - for batch in batches_in_progress - if batch["_id"] != _id] - msg = "There are still running batches {}\n". \ - format("\n".join(running_batches)) - msg += "Ask admin to check them and reprocess current batch" +def fail_batch(_id, dbcon, msg): + """Set current batch as failed as there is some problem. + + Raises: + ValueError + """ dbcon.update_one( {"_id": _id}, {"$set": @@ -221,16 +228,6 @@ def find_variant_key(application_manager, host): return found_variant_key -def _get_close_plugin(close_plugin_name, log): - if close_plugin_name: - plugins = pyblish.api.discover() - for plugin in plugins: - if plugin.__name__ == close_plugin_name: - return plugin - - log.warning("Close plugin not found, app might not close.") - - def get_task_data(batch_dir): """Return parsed data from first task manifest.json @@ -254,3 +251,19 @@ def get_task_data(batch_dir): "Cannot parse batch meta in {} folder".format(task_data)) return task_data + + +def get_timeout(project_name, host_name, task_type): + """Returns timeout(seconds) from Setting profile.""" + filter_data = { + "task_types": task_type, + "hosts": host_name + } + timeout_profiles = (get_project_settings(project_name)["webpublisher"] + ["timeout_profiles"]) + matching_item = filter_profiles(timeout_profiles, filter_data) + timeout = 3600 + if matching_item: + timeout = matching_item["timeout"] + + return timeout diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py index 9ff779636a..eb2737b276 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py @@ -13,12 +13,13 @@ import os import pyblish.api -from openpype.lib.plugin_tools import ( - parse_json, - get_batch_asset_task_info -) -from openpype.lib.remote_publish import get_webpublish_conn, IN_PROGRESS_STATUS from openpype.pipeline import legacy_io +from openpype_modules.webpublisher.lib import ( + parse_json, + get_batch_asset_task_info, + get_webpublish_conn, + IN_PROGRESS_STATUS +) class CollectBatchData(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py index bdd3caccfd..79ed499a20 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py @@ -13,17 +13,18 @@ import tempfile import math import pyblish.api + +from openpype.client import ( + get_asset_by_name, + get_last_version_by_subset_name +) from openpype.lib import ( 
prepare_template_data, - get_asset, get_ffprobe_streams, convert_ffprobe_fps_value, ) -from openpype.lib.plugin_tools import ( - parse_json, - get_subset_name_with_asset_doc -) -from openpype.pipeline import legacy_io +from openpype.pipeline.create import get_subset_name +from openpype_modules.webpublisher.lib import parse_json class CollectPublishedFiles(pyblish.api.ContextPlugin): @@ -36,6 +37,15 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): This is not applicable for 'studio' processing where host application is called to process uploaded workfile and render frames itself. + + For each task configure what properties should resulting instance have + based on uploaded files: + - uploading sequence of 'png' >> create instance of 'render' family, + by adding 'review' to 'Families' and 'Create review' to Tags it will + produce review. + + There might be difference between single(>>image) and sequence(>>render) + uploaded files. """ # must be really early, context values are only in json file order = pyblish.api.CollectorOrder - 0.490 @@ -45,6 +55,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): # from Settings task_type_to_family = [] + sync_next_version = False # find max version to be published, use for all def process(self, context): batch_dir = context.data["batchDir"] @@ -56,36 +67,52 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): self.log.info("task_sub:: {}".format(task_subfolders)) + project_name = context.data["project_name"] asset_name = context.data["asset"] - asset_doc = get_asset() + asset_doc = get_asset_by_name(project_name, asset_name) task_name = context.data["task"] task_type = context.data["taskType"] project_name = context.data["project_name"] variant = context.data["variant"] + + next_versions = [] + instances = [] for task_dir in task_subfolders: task_data = parse_json(os.path.join(task_dir, "manifest.json")) self.log.info("task_data:: {}".format(task_data)) is_sequence = len(task_data["files"]) > 1 + first_file = task_data["files"][0] - _, extension = os.path.splitext(task_data["files"][0]) + _, extension = os.path.splitext(first_file) + extension = extension.lower() family, families, tags = self._get_family( self.task_type_to_family, task_type, is_sequence, extension.replace(".", '')) - subset_name = get_subset_name_with_asset_doc( - family, variant, task_name, asset_doc, - project_name=project_name, host_name="webpublisher" + subset_name = get_subset_name( + family, + variant, + task_name, + asset_doc, + project_name=project_name, + host_name="webpublisher", + project_settings=context.data["project_settings"] ) - version = self._get_last_version(asset_name, subset_name) + 1 + version = self._get_next_version( + project_name, asset_doc, subset_name + ) + next_versions.append(version) instance = context.create_instance(subset_name) instance.data["asset"] = asset_name instance.data["subset"] = subset_name + # set configurable result family instance.data["family"] = family + # set configurable additional families instance.data["families"] = families instance.data["version"] = version instance.data["stagingDir"] = tempfile.mkdtemp() @@ -124,12 +151,25 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): self.log.warning("Unable to count frames " "duration {}".format(no_of_frames)) - # raise ValueError("STOP") instance.data["handleStart"] = asset_doc["data"]["handleStart"] instance.data["handleEnd"] = asset_doc["data"]["handleEnd"] + if "review" in tags: + first_file_path = os.path.join(task_dir, first_file) + 
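# Illustrative only: a hypothetical entry of the Settings-driven
# 'task_type_to_family' profiles consumed by _get_family(). The keys
# ('is_sequence', 'extensions', 'result_family', 'families', 'tags') follow
# the lookups in this collector; the values are made up.
example_family_profile = {
    "is_sequence": True,
    "extensions": ["png", "exr"],
    "result_family": "render",
    "families": ["review"],
    "tags": ["review"],
}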
instance.data["thumbnailSource"] = first_file_path + + instances.append(instance) self.log.info("instance.data:: {}".format(instance.data)) + if not self.sync_next_version: + return + + # overwrite specific version with same version for all + max_next_version = max(next_versions) + for inst in instances: + inst.data["version"] = max_next_version + self.log.debug("overwritten version:: {}".format(max_next_version)) + def _get_subset_name(self, family, subset_template, task_name, variant): fill_pairs = { "variant": variant, @@ -141,6 +181,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): def _get_single_repre(self, task_dir, files, tags): _, ext = os.path.splitext(files[0]) + ext = ext.lower() repre_data = { "name": ext[1:], "ext": ext[1:], @@ -160,6 +201,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): frame_start = list(collections[0].indexes)[0] frame_end = list(collections[0].indexes)[-1] ext = collections[0].tail + ext = ext.lower() repre_data = { "frameStart": frame_start, "frameEnd": frame_end, @@ -167,7 +209,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): "ext": ext[1:], "files": files, "stagingDir": task_dir, - "tags": tags + "tags": tags # configurable tags from Settings } self.log.info("sequences repre_data.data:: {}".format(repre_data)) return [repre_data] @@ -205,8 +247,17 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): for config in families_config: if is_sequence != config["is_sequence"]: continue - if (extension in config["extensions"] or - '' in config["extensions"]): # all extensions setting + extensions = config.get("extensions") or [] + lower_extensions = set() + for ext in extensions: + if ext: + ext = ext.lower() + if ext.startswith("."): + ext = ext[1:] + lower_extensions.add(ext) + + # all extensions setting + if not lower_extensions or extension in lower_extensions: found_family = config["result_family"] break @@ -219,55 +270,19 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): config["families"], config["tags"]) - def _get_last_version(self, asset_name, subset_name): - """Returns version number or 0 for 'asset' and 'subset'""" - query = [ - { - "$match": {"type": "asset", "name": asset_name} - }, - { - "$lookup": - { - "from": os.environ["AVALON_PROJECT"], - "localField": "_id", - "foreignField": "parent", - "as": "subsets" - } - }, - { - "$unwind": "$subsets" - }, - { - "$match": {"subsets.type": "subset", - "subsets.name": subset_name}}, - { - "$lookup": - { - "from": os.environ["AVALON_PROJECT"], - "localField": "subsets._id", - "foreignField": "parent", - "as": "versions" - } - }, - { - "$unwind": "$versions" - }, - { - "$group": { - "_id": { - "asset_name": "$name", - "subset_name": "$subsets.name" - }, - 'version': {'$max': "$versions.name"} - } - } - ] - version = list(legacy_io.aggregate(query)) + def _get_next_version(self, project_name, asset_doc, subset_name): + """Returns version number or 1 for 'asset' and 'subset'""" - if version: - return version[0].get("version") or 0 - else: - return 0 + version_doc = get_last_version_by_subset_name( + project_name, + subset_name, + asset_doc["_id"], + fields=["name"] + ) + version = 1 + if version_doc: + version += int(version_doc["name"]) + return version def _get_number_of_frames(self, file_url): """Return duration in frames""" diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py index 92f581be5f..948e86c23e 100644 --- 
a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py @@ -10,7 +10,7 @@ import re import copy import pyblish.api -from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline.create import get_subset_name class CollectTVPaintInstances(pyblish.api.ContextPlugin): @@ -47,13 +47,14 @@ class CollectTVPaintInstances(pyblish.api.ContextPlugin): new_instances = [] # Workfile instance - workfile_subset_name = get_subset_name_with_asset_doc( + workfile_subset_name = get_subset_name( self.workfile_family, self.workfile_variant, task_name, asset_doc, project_name, - host_name + host_name, + project_settings=context.data["project_settings"] ) workfile_instance = self._create_workfile_instance( context, workfile_subset_name @@ -61,13 +62,14 @@ class CollectTVPaintInstances(pyblish.api.ContextPlugin): new_instances.append(workfile_instance) # Review instance - review_subset_name = get_subset_name_with_asset_doc( + review_subset_name = get_subset_name( self.review_family, self.review_variant, task_name, asset_doc, project_name, - host_name + host_name, + project_settings=context.data["project_settings"] ) review_instance = self._create_review_instance( context, review_subset_name @@ -114,14 +116,15 @@ class CollectTVPaintInstances(pyblish.api.ContextPlugin): "family": "render" } - subset_name = get_subset_name_with_asset_doc( + subset_name = get_subset_name( self.render_pass_family, render_pass, task_name, asset_doc, project_name, host_name, - dynamic_data=dynamic_data + dynamic_data=dynamic_data, + project_settings=context.data["project_settings"] ) instance = self._create_render_pass_instance( @@ -137,14 +140,15 @@ class CollectTVPaintInstances(pyblish.api.ContextPlugin): # Override family for subset name "family": "render" } - subset_name = get_subset_name_with_asset_doc( + subset_name = get_subset_name( self.render_layer_family, variant, task_name, asset_doc, project_name, host_name, - dynamic_data=dynamic_data + dynamic_data=dynamic_data, + project_settings=context.data["project_settings"] ) instance = self._create_render_layer_instance( context, layers, subset_name diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py index f0f29260a2..b5f8ed9c8f 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py @@ -16,11 +16,11 @@ import uuid import json import shutil import pyblish.api -from openpype.lib.plugin_tools import parse_json from openpype.hosts.tvpaint.worker import ( SenderTVPaintCommands, CollectSceneData ) +from openpype_modules.webpublisher.lib import parse_json class CollectTVPaintWorkfileData(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py deleted file mode 100644 index a56521891b..0000000000 --- a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py +++ /dev/null @@ -1,137 +0,0 @@ -import os -import shutil - -import pyblish.api -from openpype.lib import ( - get_ffmpeg_tool_path, - - run_subprocess, - - get_transcode_temp_directory, - convert_input_paths_for_ffmpeg, - should_convert_for_ffmpeg -) - - -class ExtractThumbnail(pyblish.api.InstancePlugin): - """Create jpg thumbnail from input using ffmpeg.""" - - 
label = "Extract Thumbnail" - order = pyblish.api.ExtractorOrder - families = [ - "render", - "image" - ] - hosts = ["webpublisher"] - targets = ["filespublish"] - - def process(self, instance): - self.log.info("subset {}".format(instance.data['subset'])) - - filtered_repres = self._get_filtered_repres(instance) - for repre in filtered_repres: - repre_files = repre["files"] - if not isinstance(repre_files, (list, tuple)): - input_file = repre_files - else: - file_index = int(float(len(repre_files)) * 0.5) - input_file = repre_files[file_index] - - stagingdir = os.path.normpath(repre["stagingDir"]) - - full_input_path = os.path.join(stagingdir, input_file) - self.log.info("Input filepath: {}".format(full_input_path)) - - do_convert = should_convert_for_ffmpeg(full_input_path) - # If result is None the requirement of conversion can't be - # determined - if do_convert is None: - self.log.info(( - "Can't determine if representation requires conversion." - " Skipped." - )) - continue - - # Do conversion if needed - # - change staging dir of source representation - # - must be set back after output definitions processing - convert_dir = None - if do_convert: - convert_dir = get_transcode_temp_directory() - filename = os.path.basename(full_input_path) - convert_input_paths_for_ffmpeg( - [full_input_path], - convert_dir, - self.log - ) - full_input_path = os.path.join(convert_dir, filename) - - filename = os.path.splitext(input_file)[0] - while filename.endswith("."): - filename = filename[:-1] - thumbnail_filename = filename + "_thumbnail.jpg" - full_output_path = os.path.join(stagingdir, thumbnail_filename) - - self.log.info("output {}".format(full_output_path)) - - ffmpeg_args = [ - get_ffmpeg_tool_path("ffmpeg"), - "-y", - "-i", full_input_path, - "-vframes", "1", - full_output_path - ] - - # run subprocess - self.log.debug("{}".format(" ".join(ffmpeg_args))) - try: # temporary until oiiotool is supported cross platform - run_subprocess( - ffmpeg_args, logger=self.log - ) - except RuntimeError as exp: - if "Compression" in str(exp): - self.log.debug( - "Unsupported compression on input files. Skipping!!!" - ) - return - self.log.warning("Conversion crashed", exc_info=True) - raise - - new_repre = { - "name": "thumbnail", - "ext": "jpg", - "files": thumbnail_filename, - "stagingDir": stagingdir, - "thumbnail": True, - "tags": ["thumbnail"] - } - - # adding representation - self.log.debug("Adding: {}".format(new_repre)) - instance.data["representations"].append(new_repre) - - # Cleanup temp folder - if convert_dir is not None and os.path.exists(convert_dir): - shutil.rmtree(convert_dir) - - def _get_filtered_repres(self, instance): - filtered_repres = [] - repres = instance.data.get("representations") or [] - for repre in repres: - self.log.debug(repre) - tags = repre.get("tags") or [] - # Skip instance if already has thumbnail representation - if "thumbnail" in tags: - return [] - - if "review" not in tags: - continue - - if not repre.get("files"): - self.log.info(( - "Representation \"{}\" don't have files. 
Skipping" - ).format(repre["name"])) - continue - - filtered_repres.append(repre) - return filtered_repres diff --git a/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py b/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py index a5e4868411..d8b7bb9078 100644 --- a/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py @@ -13,7 +13,7 @@ class ValidateWorkfileData(pyblish.api.ContextPlugin): targets = ["tvpaint_worker"] def process(self, context): - # Data collected in `CollectAvalonEntities` + # Data collected in `CollectContextEntities` frame_start = context.data["frameStart"] frame_end = context.data["frameEnd"] handle_start = context.data["handleStart"] diff --git a/openpype/hosts/webpublisher/publish_functions.py b/openpype/hosts/webpublisher/publish_functions.py new file mode 100644 index 0000000000..83f53ced68 --- /dev/null +++ b/openpype/hosts/webpublisher/publish_functions.py @@ -0,0 +1,205 @@ +import os +import time +import pyblish.api +import pyblish.util + +from openpype.lib import Logger +from openpype.lib.applications import ( + ApplicationManager, + get_app_environments_for_context, +) +from openpype.pipeline import install_host +from openpype.hosts.webpublisher.api import WebpublisherHost + +from .lib import ( + get_batch_asset_task_info, + get_webpublish_conn, + start_webpublish_log, + publish_and_log, + fail_batch, + find_variant_key, + get_task_data, + get_timeout, + IN_PROGRESS_STATUS +) + + +def cli_publish(project_name, batch_path, user_email, targets): + """Start headless publishing. + + Used to publish rendered assets, workfiles etc via Webpublisher. + Eventually should be yanked out to Webpublisher cli. + + Publish use json from passed paths argument. + + Args: + project_name (str): project to publish (only single context is + expected per call of remotepublish + batch_path (str): Path batch folder. Contains subfolders with + resources (workfile, another subfolder 'renders' etc.) + user_email (string): email address for webpublisher - used to + find Ftrack user with same email + targets (list): Pyblish targets + (to choose validator for example) + + Raises: + RuntimeError: When there is no path to process. + """ + + if not batch_path: + raise RuntimeError("No publish paths specified") + + log = Logger.get_logger("remotepublish") + log.info("remotepublish command") + + # Register target and host + webpublisher_host = WebpublisherHost() + + os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path + os.environ["AVALON_PROJECT"] = project_name + os.environ["AVALON_APP"] = webpublisher_host.name + os.environ["USER_EMAIL"] = user_email + os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib + + if targets: + if isinstance(targets, str): + targets = [targets] + for target in targets: + pyblish.api.register_target(target) + + install_host(webpublisher_host) + + log.info("Running publish ...") + + _, batch_id = os.path.split(batch_path) + dbcon = get_webpublish_conn() + _id = start_webpublish_log(dbcon, batch_id, user_email) + + task_data = get_task_data(batch_path) + if not task_data["context"]: + msg = "Batch manifest must contain context data" + msg += "Create new batch and set context properly." 
+        fail_batch(_id, dbcon, msg)
+
+    publish_and_log(dbcon, _id, log, batch_id=batch_id)
+
+    log.info("Publish finished.")
+
+
+def cli_publish_from_app(
+    project_name, batch_path, host_name, user_email, targets
+):
+    """Opens installed variant of 'host' and runs remote publish there.
+
+    Eventually should be yanked out to Webpublisher cli.
+
+    Currently implemented and tested for Photoshop, where a customer
+    wants to process an uploaded .psd file and publish the collected
+    layers from there. Triggered by Webpublisher.
+
+    Checks if no other batches are running (status == 'in_progress').
+    If so, it sleeps for SLEEP (this is a separate process) and waits
+    for WAIT_FOR seconds altogether.
+
+    Requires the host application to be installed on the machine.
+
+    Runs the publish process as a user would, in an automatic fashion.
+
+    Args:
+        project_name (str): project to publish (only a single context is
+            expected per call of remotepublish)
+        batch_path (str): Path to batch folder. Contains subfolders with
+            resources (workfile, another subfolder 'renders' etc.)
+        host_name (str): 'photoshop'
+        user_email (string): email address for webpublisher - used to
+            find Ftrack user with same email
+        targets (list): Pyblish targets
+            (to choose validator for example)
+    """
+
+    log = Logger.get_logger("RemotePublishFromApp")
+
+    log.info("remotepublishphotoshop command")
+
+    task_data = get_task_data(batch_path)
+
+    workfile_path = os.path.join(batch_path,
+                                 task_data["task"],
+                                 task_data["files"][0])
+
+    print("workfile_path {}".format(workfile_path))
+
+    batch_id = task_data["batch"]
+    dbcon = get_webpublish_conn()
+    # safer to start logging here, launch might be broken altogether
+    _id = start_webpublish_log(dbcon, batch_id, user_email)
+
+    batches_in_progress = list(dbcon.find({"status": IN_PROGRESS_STATUS}))
+    if len(batches_in_progress) > 1:
+        running_batches = [str(batch["_id"])
+                           for batch in batches_in_progress
+                           if batch["_id"] != _id]
+        msg = "There are still running batches {}\n". \
+            format("\n".join(running_batches))
+        msg += "Ask admin to check them and reprocess current batch"
+        fail_batch(_id, dbcon, msg)
+
+    if not task_data["context"]:
+        msg = "Batch manifest must contain context data. "
+        msg += "Create new batch and set context properly."
+ fail_batch(_id, dbcon, msg) + + asset_name, task_name, task_type = get_batch_asset_task_info( + task_data["context"]) + + application_manager = ApplicationManager() + found_variant_key = find_variant_key(application_manager, host_name) + app_name = "{}/{}".format(host_name, found_variant_key) + + # must have for proper launch of app + env = get_app_environments_for_context( + project_name, + asset_name, + task_name, + app_name + ) + print("env:: {}".format(env)) + os.environ.update(env) + + os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path + # must pass identifier to update log lines for a batch + os.environ["BATCH_LOG_ID"] = str(_id) + os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib + os.environ["USER_EMAIL"] = user_email + + pyblish.api.register_host(host_name) + if targets: + if isinstance(targets, str): + targets = [targets] + current_targets = os.environ.get("PYBLISH_TARGETS", "").split( + os.pathsep) + for target in targets: + current_targets.append(target) + + os.environ["PYBLISH_TARGETS"] = os.pathsep.join( + set(current_targets)) + + data = { + "last_workfile_path": workfile_path, + "start_last_workfile": True, + "project_name": project_name, + "asset_name": asset_name, + "task_name": task_name + } + + launched_app = application_manager.launch(app_name, **data) + + timeout = get_timeout(project_name, host_name, task_type) + + time_start = time.time() + while launched_app.poll() is None: + time.sleep(0.5) + if time.time() - time_start > timeout: + launched_app.terminate() + msg = "Timeout reached" + fail_batch(_id, dbcon, msg) diff --git a/openpype/hosts/webpublisher/webserver_service/__init__.py b/openpype/hosts/webpublisher/webserver_service/__init__.py new file mode 100644 index 0000000000..73111d286e --- /dev/null +++ b/openpype/hosts/webpublisher/webserver_service/__init__.py @@ -0,0 +1,6 @@ +from .webserver import run_webserver + + +__all__ = ( + "run_webserver", +) diff --git a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py index e82ba7f2b8..4039d2c8ec 100644 --- a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py +++ b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py @@ -2,44 +2,46 @@ import os import json import datetime -from bson.objectid import ObjectId import collections -from aiohttp.web_response import Response import subprocess +from bson.objectid import ObjectId +from aiohttp.web_response import Response -from openpype.lib import ( - OpenPypeMongoConnection, - PypeLogger, +from openpype.client import ( + get_projects, + get_assets, ) -from openpype.lib.remote_publish import ( +from openpype.lib import Logger +from openpype.settings import get_project_settings +from openpype_modules.webserver.base_routes import RestApiEndpoint +from openpype_modules.webpublisher import WebpublisherAddon +from openpype_modules.webpublisher.lib import ( + get_webpublish_conn, get_task_data, ERROR_STATUS, REPROCESS_STATUS ) -from openpype.pipeline import AvalonMongoDB -from openpype_modules.avalon_apps.rest_api import _RestApiEndpoint -from openpype.settings import get_project_settings + +log = Logger.get_logger("WebpublishRoutes") - -log = PypeLogger.get_logger("WebServer") +class ResourceRestApiEndpoint(RestApiEndpoint): + def __init__(self, resource): + self.resource = resource + super(ResourceRestApiEndpoint, self).__init__() -class RestApiResource: - """Resource carrying needed info and Avalon DB connection for publish.""" - def __init__(self, 
server_manager, executable, upload_dir, - studio_task_queue=None): - self.server_manager = server_manager - self.upload_dir = upload_dir - self.executable = executable +class WebpublishApiEndpoint(ResourceRestApiEndpoint): + @property + def dbcon(self): + return self.resource.dbcon - if studio_task_queue is None: - studio_task_queue = collections.deque().dequeu - self.studio_task_queue = studio_task_queue - self.dbcon = AvalonMongoDB() - self.dbcon.install() +class JsonApiResource: + """Resource for json manipulation. + All resources handling sending output to REST should inherit from + """ @staticmethod def json_dump_handler(value): if isinstance(value, datetime.datetime): @@ -59,28 +61,36 @@ class RestApiResource: ).encode("utf-8") -class OpenPypeRestApiResource(RestApiResource): +class RestApiResource(JsonApiResource): + """Resource carrying needed info and Avalon DB connection for publish.""" + def __init__(self, server_manager, executable, upload_dir, + studio_task_queue=None): + self.server_manager = server_manager + self.upload_dir = upload_dir + self.executable = executable + + if studio_task_queue is None: + studio_task_queue = collections.deque().dequeu + self.studio_task_queue = studio_task_queue + + +class WebpublishRestApiResource(JsonApiResource): """Resource carrying OP DB connection for storing batch info into DB.""" - def __init__(self, ): - mongo_client = OpenPypeMongoConnection.get_mongo_client() - database_name = os.environ["OPENPYPE_DATABASE_NAME"] - self.dbcon = mongo_client[database_name]["webpublishes"] + + def __init__(self): + self.dbcon = get_webpublish_conn() -class ProjectsEndpoint(_RestApiEndpoint): +class ProjectsEndpoint(ResourceRestApiEndpoint): """Returns list of dict with project info (id, name).""" async def get(self) -> Response: output = [] - for project_name in self.dbcon.database.collection_names(): - project_doc = self.dbcon.database[project_name].find_one({ - "type": "project" - }) - if project_doc: - ret_val = { - "id": project_doc["_id"], - "name": project_doc["name"] - } - output.append(ret_val) + for project_doc in get_projects(): + ret_val = { + "id": project_doc["_id"], + "name": project_doc["name"] + } + output.append(ret_val) return Response( status=200, body=self.resource.encode(output), @@ -88,7 +98,7 @@ class ProjectsEndpoint(_RestApiEndpoint): ) -class HiearchyEndpoint(_RestApiEndpoint): +class HiearchyEndpoint(ResourceRestApiEndpoint): """Returns dictionary with context tree from assets.""" async def get(self, project_name) -> Response: query_projection = { @@ -100,10 +110,7 @@ class HiearchyEndpoint(_RestApiEndpoint): "type": 1, } - asset_docs = self.dbcon.database[project_name].find( - {"type": "asset"}, - query_projection - ) + asset_docs = get_assets(project_name, fields=query_projection.keys()) asset_docs_by_id = { asset_doc["_id"]: asset_doc for asset_doc in asset_docs @@ -187,7 +194,7 @@ class TaskNode(Node): self["attributes"] = {} -class BatchPublishEndpoint(_RestApiEndpoint): +class BatchPublishEndpoint(WebpublishApiEndpoint): """Triggers headless publishing of batch.""" async def post(self, request) -> Response: # Validate existence of openpype executable @@ -207,7 +214,7 @@ class BatchPublishEndpoint(_RestApiEndpoint): # TVPaint filter { "extensions": [".tvpp"], - "command": "remotepublish", + "command": "publish", "arguments": { "targets": ["tvpaint_worker"] }, @@ -216,13 +223,13 @@ class BatchPublishEndpoint(_RestApiEndpoint): # Photoshop filter { "extensions": [".psd", ".psb"], - "command": "remotepublishfromapp", + 
"command": "publishfromapp", "arguments": { - # Command 'remotepublishfromapp' requires --host argument + # Command 'publishfromapp' requires --host argument "host": "photoshop", # Make sure targets are set to None for cases that default # would change - # - targets argument is not used in 'remotepublishfromapp' + # - targets argument is not used in 'publishfromapp' "targets": ["remotepublish"] }, # does publish need to be handled by a queue, eg. only @@ -234,7 +241,7 @@ class BatchPublishEndpoint(_RestApiEndpoint): batch_dir = os.path.join(self.resource.upload_dir, content["batch"]) # Default command and arguments - command = "remotepublish" + command = "publish" add_args = { # All commands need 'project' and 'user' "project": content["project_name"], @@ -265,6 +272,8 @@ class BatchPublishEndpoint(_RestApiEndpoint): args = [ openpype_app, + "module", + WebpublisherAddon.name, command, batch_dir ] @@ -292,7 +301,7 @@ class BatchPublishEndpoint(_RestApiEndpoint): ) -class TaskPublishEndpoint(_RestApiEndpoint): +class TaskPublishEndpoint(WebpublishApiEndpoint): """Prepared endpoint triggered after each task - for future development.""" async def post(self, request) -> Response: return Response( @@ -302,8 +311,12 @@ class TaskPublishEndpoint(_RestApiEndpoint): ) -class BatchStatusEndpoint(_RestApiEndpoint): - """Returns dict with info for batch_id.""" +class BatchStatusEndpoint(WebpublishApiEndpoint): + """Returns dict with info for batch_id. + + Uses 'WebpublishRestApiResource'. + """ + async def get(self, batch_id) -> Response: output = self.dbcon.find_one({"batch_id": batch_id}) @@ -322,8 +335,12 @@ class BatchStatusEndpoint(_RestApiEndpoint): ) -class UserReportEndpoint(_RestApiEndpoint): - """Returns list of dict with batch info for user (email address).""" +class UserReportEndpoint(WebpublishApiEndpoint): + """Returns list of dict with batch info for user (email address). + + Uses 'WebpublishRestApiResource'. + """ + async def get(self, user) -> Response: output = list(self.dbcon.find({"user": user}, projection={"log": False})) @@ -342,7 +359,7 @@ class UserReportEndpoint(_RestApiEndpoint): ) -class ConfiguredExtensionsEndpoint(_RestApiEndpoint): +class ConfiguredExtensionsEndpoint(WebpublishApiEndpoint): """Returns dict of extensions which have mapping to family. Returns: @@ -382,8 +399,12 @@ class ConfiguredExtensionsEndpoint(_RestApiEndpoint): ) -class BatchReprocessEndpoint(_RestApiEndpoint): - """Marks latest 'batch_id' for reprocessing, returns 404 if not found.""" +class BatchReprocessEndpoint(WebpublishApiEndpoint): + """Marks latest 'batch_id' for reprocessing, returns 404 if not found. + + Uses 'WebpublishRestApiResource'. 
+ """ + async def post(self, batch_id) -> Response: batches = self.dbcon.find({"batch_id": batch_id, "status": ERROR_STATUS}).sort("_id", -1) diff --git a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py b/openpype/hosts/webpublisher/webserver_service/webserver.py similarity index 80% rename from openpype/hosts/webpublisher/webserver_service/webserver_cli.py rename to openpype/hosts/webpublisher/webserver_service/webserver.py index 909ea38bc6..093b53d9d3 100644 --- a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py +++ b/openpype/hosts/webpublisher/webserver_service/webserver.py @@ -6,11 +6,19 @@ import requests import json import subprocess -from openpype.lib import PypeLogger +from openpype.client import OpenPypeMongoConnection +from openpype.modules import ModulesManager +from openpype.lib import Logger + +from openpype_modules.webpublisher.lib import ( + ERROR_STATUS, + REPROCESS_STATUS, + SENT_REPROCESSING_STATUS +) from .webpublish_routes import ( RestApiResource, - OpenPypeRestApiResource, + WebpublishRestApiResource, HiearchyEndpoint, ProjectsEndpoint, ConfiguredExtensionsEndpoint, @@ -20,32 +28,29 @@ from .webpublish_routes import ( TaskPublishEndpoint, UserReportEndpoint ) -from openpype.lib.remote_publish import ( - ERROR_STATUS, - REPROCESS_STATUS, - SENT_REPROCESSING_STATUS -) + +log = Logger.get_logger("webserver_gui") -log = PypeLogger().get_logger("webserver_gui") - - -def run_webserver(*args, **kwargs): +def run_webserver(executable, upload_dir, host=None, port=None): """Runs webserver in command line, adds routes.""" - from openpype.modules import ModulesManager + + if not host: + host = "localhost" + if not port: + port = 8079 manager = ModulesManager() webserver_module = manager.modules_by_name["webserver"] - host = kwargs.get("host") or "localhost" - port = kwargs.get("port") or 8079 + server_manager = webserver_module.create_new_server_manager(port, host) webserver_url = server_manager.url # queue for remotepublishfromapp tasks studio_task_queue = collections.deque() resource = RestApiResource(server_manager, - upload_dir=kwargs["upload_dir"], - executable=kwargs["executable"], + upload_dir=upload_dir, + executable=executable, studio_task_queue=studio_task_queue) projects_endpoint = ProjectsEndpoint(resource) server_manager.add_route( @@ -69,16 +74,14 @@ def run_webserver(*args, **kwargs): ) # triggers publish - webpublisher_task_publish_endpoint = \ - BatchPublishEndpoint(resource) + webpublisher_task_publish_endpoint = BatchPublishEndpoint(resource) server_manager.add_route( "POST", "/api/webpublish/batch", webpublisher_task_publish_endpoint.dispatch ) - webpublisher_batch_publish_endpoint = \ - TaskPublishEndpoint(resource) + webpublisher_batch_publish_endpoint = TaskPublishEndpoint(resource) server_manager.add_route( "POST", "/api/webpublish/task", @@ -86,34 +89,33 @@ def run_webserver(*args, **kwargs): ) # reporting - openpype_resource = OpenPypeRestApiResource() - batch_status_endpoint = BatchStatusEndpoint(openpype_resource) + webpublish_resource = WebpublishRestApiResource() + batch_status_endpoint = BatchStatusEndpoint(webpublish_resource) server_manager.add_route( "GET", "/api/batch_status/{batch_id}", batch_status_endpoint.dispatch ) - user_status_endpoint = UserReportEndpoint(openpype_resource) + user_status_endpoint = UserReportEndpoint(webpublish_resource) server_manager.add_route( "GET", "/api/publishes/{user}", user_status_endpoint.dispatch ) - webpublisher_batch_reprocess_endpoint = \ - 
BatchReprocessEndpoint(openpype_resource) + batch_reprocess_endpoint = BatchReprocessEndpoint(webpublish_resource) server_manager.add_route( "POST", "/api/webpublish/reprocess/{batch_id}", - webpublisher_batch_reprocess_endpoint.dispatch + batch_reprocess_endpoint.dispatch ) server_manager.start_server() last_reprocessed = time.time() while True: if time.time() - last_reprocessed > 20: - reprocess_failed(kwargs["upload_dir"], webserver_url) + reprocess_failed(upload_dir, webserver_url) last_reprocessed = time.time() if studio_task_queue: args = studio_task_queue.popleft() @@ -124,8 +126,6 @@ def run_webserver(*args, **kwargs): def reprocess_failed(upload_dir, webserver_url): # log.info("check_reprocesable_records") - from openpype.lib import OpenPypeMongoConnection - mongo_client = OpenPypeMongoConnection.get_mongo_client() database_name = os.environ["OPENPYPE_DATABASE_NAME"] dbcon = mongo_client[database_name]["webpublishes"] diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index 3c1d71ecd5..a64b7c2911 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -63,7 +63,10 @@ from .execute import ( path_to_subprocess_arg, CREATE_NO_WINDOW ) -from .log import PypeLogger, timeit +from .log import ( + Logger, + PypeLogger, +) from .path_templates import ( merge_dict, @@ -83,8 +86,9 @@ from .anatomy import ( Anatomy ) -from .config import ( +from .dateutils import ( get_datetime_data, + get_timestamp, get_formatted_current_time ) @@ -111,6 +115,7 @@ from .transcoding import ( get_ffmpeg_codec_args, get_ffmpeg_format_args, convert_ffprobe_fps_value, + convert_ffprobe_fps_to_float, ) from .avalon_context import ( CURRENT_DOC_SCHEMAS, @@ -120,7 +125,6 @@ from .avalon_context import ( is_latest, any_outdated, get_asset, - get_hierarchy, get_linked_assets, get_latest_version, get_system_general_anatomy_data, @@ -185,11 +189,11 @@ from .plugin_tools import ( filter_pyblish_plugins, set_plugin_attributes_from_settings, source_hash, - get_unique_layer_name, - get_background_layers, ) from .path_tools import ( + format_file_size, + collect_frames, create_hard_link, version_up, get_version_from_path, @@ -199,18 +203,6 @@ from .path_tools import ( get_project_basic_paths, ) -from .editorial import ( - is_overlapping_otio_ranges, - otio_range_to_frame_range, - otio_range_with_handles, - convert_to_padded_path, - trim_media_range, - range_from_frames, - frames_to_secons, - frames_to_timecode, - make_sequence_collection -) - from .openpype_version import ( op_version_control_available, get_openpype_version, @@ -283,6 +275,7 @@ __all__ = [ "get_ffmpeg_codec_args", "get_ffmpeg_format_args", "convert_ffprobe_fps_value", + "convert_ffprobe_fps_to_float", "CURRENT_DOC_SCHEMAS", "PROJECT_NAME_ALLOWED_SYMBOLS", @@ -291,7 +284,6 @@ __all__ = [ "is_latest", "any_outdated", "get_asset", - "get_hierarchy", "get_linked_assets", "get_latest_version", "get_system_general_anatomy_data", @@ -349,9 +341,9 @@ __all__ = [ "filter_pyblish_plugins", "set_plugin_attributes_from_settings", "source_hash", - "get_unique_layer_name", - "get_background_layers", + "format_file_size", + "collect_frames", "create_hard_link", "version_up", "get_version_from_path", @@ -371,22 +363,13 @@ __all__ = [ "get_datetime_data", "get_formatted_current_time", + "Logger", "PypeLogger", + "get_default_components", "validate_mongo_connection", "OpenPypeMongoConnection", - "timeit", - - "is_overlapping_otio_ranges", - "otio_range_with_handles", - "convert_to_padded_path", - "otio_range_to_frame_range", - "trim_media_range", 
- "range_from_frames", - "frames_to_secons", - "frames_to_timecode", - "make_sequence_collection", "create_project_folders", "create_workdir_extra_folders", "get_project_basic_paths", diff --git a/openpype/lib/abstract_metaplugins.py b/openpype/lib/abstract_metaplugins.py deleted file mode 100644 index f8163956ad..0000000000 --- a/openpype/lib/abstract_metaplugins.py +++ /dev/null @@ -1,10 +0,0 @@ -from abc import ABCMeta -from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin - - -class AbstractMetaInstancePlugin(ABCMeta, MetaPlugin): - pass - - -class AbstractMetaContextPlugin(ABCMeta, ExplicitMetaPlugin): - pass diff --git a/openpype/lib/anatomy.py b/openpype/lib/anatomy.py index 3fbc05ee88..6d339f058f 100644 --- a/openpype/lib/anatomy.py +++ b/openpype/lib/anatomy.py @@ -1,1260 +1,38 @@ -import os -import re -import copy -import platform -import collections -import numbers +"""Code related to project Anatomy was moved +to 'openpype.pipeline.anatomy' please change your imports as soon as +possible. File will be probably removed in OpenPype 3.14.* +""" -from openpype.settings.lib import ( - get_default_anatomy_settings, - get_anatomy_settings -) -from .path_templates import ( - TemplateUnsolved, - TemplateResult, - TemplatesDict, - FormatObject, -) -from .log import PypeLogger - -log = PypeLogger().get_logger(__name__) - -try: - StringType = basestring -except NameError: - StringType = str +import warnings +import functools -class ProjectNotSet(Exception): - """Exception raised when is created Anatomy without project name.""" +class AnatomyDeprecatedWarning(DeprecationWarning): + pass -class RootCombinationError(Exception): - """This exception is raised when templates has combined root types.""" +def anatomy_deprecated(func): + """Mark functions as deprecated. - def __init__(self, roots): - joined_roots = ", ".join( - ["\"{}\"".format(_root) for _root in roots] - ) - # TODO better error message - msg = ( - "Combination of root with and" - " without root name in AnatomyTemplates. {}" - ).format(joined_roots) - - super(RootCombinationError, self).__init__(msg) - - -class Anatomy: - """Anatomy module helps to keep project settings. - - Wraps key project specifications, AnatomyTemplates and Roots. - - Args: - project_name (str): Project name to look on overrides. + It will result in a warning being emitted when the function is used. """ - root_key_regex = re.compile(r"{(root?[^}]+)}") - root_name_regex = re.compile(r"root\[([^]]+)\]") - - def __init__(self, project_name=None, site_name=None): - if not project_name: - project_name = os.environ.get("AVALON_PROJECT") - - if not project_name: - raise ProjectNotSet(( - "Implementation bug: Project name is not set. Anatomy requires" - " to load data for specific project." - )) - - self.project_name = project_name - - self._data = self._prepare_anatomy_data( - get_anatomy_settings(project_name, site_name) + @functools.wraps(func) + def new_func(*args, **kwargs): + warnings.simplefilter("always", AnatomyDeprecatedWarning) + warnings.warn( + ( + "Deprecated import of 'Anatomy'." + " Class was moved to 'openpype.pipeline.anatomy'." + " Please change your imports of Anatomy in codebase." 
+ ), + category=AnatomyDeprecatedWarning ) - self._site_name = site_name - self._templates_obj = AnatomyTemplates(self) - self._roots_obj = Roots(self) + return func(*args, **kwargs) + return new_func - # Anatomy used as dictionary - # - implemented only getters returning copy - def __getitem__(self, key): - return copy.deepcopy(self._data[key]) - def get(self, key, default=None): - return copy.deepcopy(self._data).get(key, default) - - def keys(self): - return copy.deepcopy(self._data).keys() - - def values(self): - return copy.deepcopy(self._data).values() - - def items(self): - return copy.deepcopy(self._data).items() - - @staticmethod - def default_data(): - """Default project anatomy data. - - Always return fresh loaded data. May be used as data for new project. - - Not used inside Anatomy itself. - """ - return get_default_anatomy_settings(clear_metadata=False) - - @staticmethod - def _prepare_anatomy_data(anatomy_data): - """Prepare anatomy data for further processing. - - Method added to replace `{task}` with `{task[name]}` in templates. - """ - templates_data = anatomy_data.get("templates") - if templates_data: - # Replace `{task}` with `{task[name]}` in templates - value_queue = collections.deque() - value_queue.append(templates_data) - while value_queue: - item = value_queue.popleft() - if not isinstance(item, dict): - continue - - for key in tuple(item.keys()): - value = item[key] - if isinstance(value, dict): - value_queue.append(value) - - elif isinstance(value, StringType): - item[key] = value.replace("{task}", "{task[name]}") - return anatomy_data - - def reset(self): - """Reset values of cached data in templates and roots objects.""" - self._data = self._prepare_anatomy_data( - get_anatomy_settings(self.project_name, self._site_name) - ) - self.templates_obj.reset() - self.roots_obj.reset() - - @property - def templates(self): - """Wrap property `templates` of Anatomy's AnatomyTemplates instance.""" - return self._templates_obj.templates - - @property - def templates_obj(self): - """Return `AnatomyTemplates` object of current Anatomy instance.""" - return self._templates_obj - - def format(self, *args, **kwargs): - """Wrap `format` method of Anatomy's `templates_obj`.""" - return self._templates_obj.format(*args, **kwargs) - - def format_all(self, *args, **kwargs): - """Wrap `format_all` method of Anatomy's `templates_obj`.""" - return self._templates_obj.format_all(*args, **kwargs) - - @property - def roots(self): - """Wrap `roots` property of Anatomy's `roots_obj`.""" - return self._roots_obj.roots - - @property - def roots_obj(self): - """Return `Roots` object of current Anatomy instance.""" - return self._roots_obj - - def root_environments(self): - """Return OPENPYPE_ROOT_* environments for current project in dict.""" - return self._roots_obj.root_environments() - - def root_environmets_fill_data(self, template=None): - """Environment variable values in dictionary for rootless path. - - Args: - template (str): Template for environment variable key fill. - By default is set to `"${}"`. 
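A minimal migration sketch for the deprecation shim introduced above (a configured OpenPype environment and a placeholder project name are assumed). Importing `Anatomy` from `openpype.lib.anatomy` keeps working, but calling the wrapper now emits `AnatomyDeprecatedWarning`, so imports should move to the new module:

from openpype.lib.anatomy import Anatomy as DeprecatedAnatomy  # warns when called

# Preferred import after this change:
from openpype.pipeline.anatomy import Anatomy

# "my_project" is a hypothetical project name; constructing the object needs a
# reachable project database, so this call is only illustrative.
anatomy = Anatomy("my_project")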
- """ - return self.roots_obj.root_environmets_fill_data(template) - - def find_root_template_from_path(self, *args, **kwargs): - """Wrapper for Roots `find_root_template_from_path`.""" - return self.roots_obj.find_root_template_from_path(*args, **kwargs) - - def path_remapper(self, *args, **kwargs): - """Wrapper for Roots `path_remapper`.""" - return self.roots_obj.path_remapper(*args, **kwargs) - - def all_root_paths(self): - """Wrapper for Roots `all_root_paths`.""" - return self.roots_obj.all_root_paths() - - def set_root_environments(self): - """Set OPENPYPE_ROOT_* environments for current project.""" - self._roots_obj.set_root_environments() - - def root_names(self): - """Return root names for current project.""" - return self.root_names_from_templates(self.templates) - - def _root_keys_from_templates(self, data): - """Extract root key from templates in data. - - Args: - data (dict): Data that may contain templates as string. - - Return: - set: Set of all root names from templates as strings. - - Output example: `{"root[work]", "root[publish]"}` - """ - - output = set() - if isinstance(data, dict): - for value in data.values(): - for root in self._root_keys_from_templates(value): - output.add(root) - - elif isinstance(data, str): - for group in re.findall(self.root_key_regex, data): - output.add(group) - - return output - - def root_value_for_template(self, template): - """Returns value of root key from template.""" - root_templates = [] - for group in re.findall(self.root_key_regex, template): - root_templates.append("{" + group + "}") - - if not root_templates: - return None - - return root_templates[0].format(**{"root": self.roots}) - - def root_names_from_templates(self, templates): - """Extract root names form anatomy templates. - - Returns None if values in templates contain only "{root}". - Empty list is returned if there is no "root" in templates. - Else returns all root names from templates in list. - - RootCombinationError is raised when templates contain both root types, - basic "{root}" and with root name specification "{root[work]}". - - Args: - templates (dict): Anatomy templates where roots are not filled. - - Return: - list/None: List of all root names from templates as strings when - multiroot setup is used, otherwise None is returned. - """ - roots = list(self._root_keys_from_templates(templates)) - # Return empty list if no roots found in templates - if not roots: - return roots - - # Raise exception when root keys have roots with and without root name. - # Invalid output example: ["root", "root[project]", "root[render]"] - if len(roots) > 1 and "root" in roots: - raise RootCombinationError(roots) - - # Return None if "root" without root name in templates - if len(roots) == 1 and roots[0] == "root": - return None - - names = set() - for root in roots: - for group in re.findall(self.root_name_regex, root): - names.add(group) - return list(names) - - def fill_root(self, template_path): - """Fill template path where is only "root" key unfilled. - - Args: - template_path (str): Path with "root" key in. - Example path: "{root}/projects/MyProject/Shot01/Lighting/..." - - Return: - str: formatted path - """ - # NOTE does not care if there are different keys than "root" - return template_path.format(**{"root": self.roots}) - - @classmethod - def fill_root_with_path(cls, rootless_path, root_path): - """Fill path without filled "root" key with passed path. 
- - This is helper to fill root with different directory path than anatomy - has defined no matter if is single or multiroot. - - Output path is same as input path if `rootless_path` does not contain - unfilled root key. - - Args: - rootless_path (str): Path without filled "root" key. Example: - "{root[work]}/MyProject/..." - root_path (str): What should replace root key in `rootless_path`. - - Returns: - str: Path with filled root. - """ - output = str(rootless_path) - for group in re.findall(cls.root_key_regex, rootless_path): - replacement = "{" + group + "}" - output = output.replace(replacement, root_path) - - return output - - def replace_root_with_env_key(self, filepath, template=None): - """Replace root of path with environment key. - - # Example: - ## Project with roots: - ``` - { - "nas": { - "windows": P:/projects", - ... - } - ... - } - ``` - - ## Entered filepath - "P:/projects/project/asset/task/animation_v001.ma" - - ## Entered template - "<{}>" - - ## Output - "/project/asset/task/animation_v001.ma" - - Args: - filepath (str): Full file path where root should be replaced. - template (str): Optional template for environment key. Must - have one index format key. - Default value if not entered: "${}" - - Returns: - str: Path where root is replaced with environment root key. - - Raise: - ValueError: When project's roots were not found in entered path. - """ - success, rootless_path = self.find_root_template_from_path(filepath) - if not success: - raise ValueError( - "{}: Project's roots were not found in path: {}".format( - self.project_name, filepath - ) - ) - - data = self.root_environmets_fill_data(template) - return rootless_path.format(**data) - - -class AnatomyTemplateUnsolved(TemplateUnsolved): - """Exception for unsolved template when strict is set to True.""" - - msg = "Anatomy template \"{0}\" is unsolved.{1}{2}" - - -class AnatomyTemplateResult(TemplateResult): - rootless = None - - def __new__(cls, result, rootless_path): - new_obj = super(AnatomyTemplateResult, cls).__new__( - cls, - str(result), - result.template, - result.solved, - result.used_values, - result.missing_keys, - result.invalid_types - ) - new_obj.rootless = rootless_path - return new_obj - - def validate(self): - if not self.solved: - raise AnatomyTemplateUnsolved( - self.template, - self.missing_keys, - self.invalid_types - ) - - -class AnatomyTemplates(TemplatesDict): - inner_key_pattern = re.compile(r"(\{@.*?[^{}0]*\})") - inner_key_name_pattern = re.compile(r"\{@(.*?[^{}0]*)\}") - - def __init__(self, anatomy): - super(AnatomyTemplates, self).__init__() - self.anatomy = anatomy - self.loaded_project = None - - def __getitem__(self, key): - return self.templates[key] - - def get(self, key, default=None): - return self.templates.get(key, default) - - def reset(self): - self._raw_templates = None - self._templates = None - self._objected_templates = None - - @property - def project_name(self): - return self.anatomy.project_name - - @property - def roots(self): - return self.anatomy.roots - - @property - def templates(self): - self._validate_discovery() - return self._templates - - @property - def objected_templates(self): - self._validate_discovery() - return self._objected_templates - - def _validate_discovery(self): - if self.project_name != self.loaded_project: - self.reset() - - if self._templates is None: - self._discover() - self.loaded_project = self.project_name - - def _format_value(self, value, data): - if isinstance(value, RootItem): - return self._solve_dict(value, data) - - result = 
super(AnatomyTemplates, self)._format_value(value, data) - if isinstance(result, TemplateResult): - rootless_path = self._rootless_path(result, data) - result = AnatomyTemplateResult(result, rootless_path) - return result - - def set_templates(self, templates): - if not templates: - self.reset() - return - - self._raw_templates = copy.deepcopy(templates) - templates = copy.deepcopy(templates) - v_queue = collections.deque() - v_queue.append(templates) - while v_queue: - item = v_queue.popleft() - if not isinstance(item, dict): - continue - - for key in tuple(item.keys()): - value = item[key] - if isinstance(value, dict): - v_queue.append(value) - - elif ( - isinstance(value, StringType) - and "{task}" in value - ): - item[key] = value.replace("{task}", "{task[name]}") - - solved_templates = self.solve_template_inner_links(templates) - self._templates = solved_templates - self._objected_templates = self.create_ojected_templates( - solved_templates - ) - - def default_templates(self): - """Return default templates data with solved inner keys.""" - return self.solve_template_inner_links( - self.anatomy["templates"] - ) - - def _discover(self): - """ Loads anatomy templates from yaml. - Default templates are loaded if project is not set or project does - not have set it's own. - TODO: create templates if not exist. - - Returns: - TemplatesResultDict: Contain templates data for current project of - default templates. - """ - - if self.project_name is None: - # QUESTION create project specific if not found? - raise AssertionError(( - "Project \"{0}\" does not have his own templates." - " Trying to use default." - ).format(self.project_name)) - - self.set_templates(self.anatomy["templates"]) - - @classmethod - def replace_inner_keys(cls, matches, value, key_values, key): - """Replacement of inner keys in template values.""" - for match in matches: - anatomy_sub_keys = ( - cls.inner_key_name_pattern.findall(match) - ) - if key in anatomy_sub_keys: - raise ValueError(( - "Unsolvable recursion in inner keys, " - "key: \"{}\" is in his own value." - " Can't determine source, please check Anatomy templates." - ).format(key)) - - for anatomy_sub_key in anatomy_sub_keys: - replace_value = key_values.get(anatomy_sub_key) - if replace_value is None: - raise KeyError(( - "Anatomy templates can't be filled." - " Anatomy key `{0}` has" - " invalid inner key `{1}`." - ).format(key, anatomy_sub_key)) - - valid = isinstance(replace_value, (numbers.Number, StringType)) - if not valid: - raise ValueError(( - "Anatomy templates can't be filled." - " Anatomy key `{0}` has" - " invalid inner key `{1}`" - " with value `{2}`." - ).format(key, anatomy_sub_key, str(replace_value))) - - value = value.replace(match, str(replace_value)) - - return value - - @classmethod - def prepare_inner_keys(cls, key_values): - """Check values of inner keys. - - Check if inner key exist in template group and has valid value. - It is also required to avoid infinite loop with unsolvable recursion - when first inner key's value refers to second inner key's value where - first is used. 
- """ - keys_to_solve = set(key_values.keys()) - while True: - found = False - for key in tuple(keys_to_solve): - value = key_values[key] - - if isinstance(value, StringType): - matches = cls.inner_key_pattern.findall(value) - if not matches: - keys_to_solve.remove(key) - continue - - found = True - key_values[key] = cls.replace_inner_keys( - matches, value, key_values, key - ) - continue - - elif not isinstance(value, dict): - keys_to_solve.remove(key) - continue - - subdict_found = False - for _key, _value in tuple(value.items()): - matches = cls.inner_key_pattern.findall(_value) - if not matches: - continue - - subdict_found = True - found = True - key_values[key][_key] = cls.replace_inner_keys( - matches, _value, key_values, - "{}.{}".format(key, _key) - ) - - if not subdict_found: - keys_to_solve.remove(key) - - if not found: - break - - return key_values - - @classmethod - def solve_template_inner_links(cls, templates): - """Solve templates inner keys identified by "{@*}". - - Process is split into 2 parts. - First is collecting all global keys (keys in top hierarchy where value - is not dictionary). All global keys are set for all group keys (keys - in top hierarchy where value is dictionary). Value of a key is not - overridden in group if already contain value for the key. - - In second part all keys with "at" symbol in value are replaced with - value of the key afterward "at" symbol from the group. - - Args: - templates (dict): Raw templates data. - - Example: - templates:: - key_1: "value_1", - key_2: "{@key_1}/{filling_key}" - - group_1: - key_3: "value_3/{@key_2}" - - group_2: - key_2": "value_2" - key_4": "value_4/{@key_2}" - - output:: - key_1: "value_1" - key_2: "value_1/{filling_key}" - - group_1: { - key_1: "value_1" - key_2: "value_1/{filling_key}" - key_3: "value_3/value_1/{filling_key}" - - group_2: { - key_1: "value_1" - key_2: "value_2" - key_4: "value_3/value_2" - """ - default_key_values = templates.pop("defaults", {}) - for key, value in tuple(templates.items()): - if isinstance(value, dict): - continue - default_key_values[key] = templates.pop(key) - - # Pop "others" key before before expected keys are processed - other_templates = templates.pop("others") or {} - - keys_by_subkey = {} - for sub_key, sub_value in templates.items(): - key_values = {} - key_values.update(default_key_values) - key_values.update(sub_value) - keys_by_subkey[sub_key] = cls.prepare_inner_keys(key_values) - - for sub_key, sub_value in other_templates.items(): - if sub_key in keys_by_subkey: - log.warning(( - "Key \"{}\" is duplicated in others. Skipping." 
- ).format(sub_key)) - continue - - key_values = {} - key_values.update(default_key_values) - key_values.update(sub_value) - keys_by_subkey[sub_key] = cls.prepare_inner_keys(key_values) - - default_keys_by_subkeys = cls.prepare_inner_keys(default_key_values) - - for key, value in default_keys_by_subkeys.items(): - keys_by_subkey[key] = value - - return keys_by_subkey - - def _dict_to_subkeys_list(self, subdict, pre_keys=None): - if pre_keys is None: - pre_keys = [] - output = [] - for key in subdict: - value = subdict[key] - result = list(pre_keys) - result.append(key) - if isinstance(value, dict): - for item in self._dict_to_subkeys_list(value, result): - output.append(item) - else: - output.append(result) - return output - - def _keys_to_dicts(self, key_list, value): - if not key_list: - return None - if len(key_list) == 1: - return {key_list[0]: value} - return {key_list[0]: self._keys_to_dicts(key_list[1:], value)} - - def _rootless_path(self, result, final_data): - used_values = result.used_values - missing_keys = result.missing_keys - template = result.template - invalid_types = result.invalid_types - if ( - "root" not in used_values - or "root" in missing_keys - or "{root" not in template - ): - return - - for invalid_type in invalid_types: - if "root" in invalid_type: - return - - root_keys = self._dict_to_subkeys_list({"root": used_values["root"]}) - if not root_keys: - return - - output = str(result) - for used_root_keys in root_keys: - if not used_root_keys: - continue - - used_value = used_values - root_key = None - for key in used_root_keys: - used_value = used_value[key] - if root_key is None: - root_key = key - else: - root_key += "[{}]".format(key) - - root_key = "{" + root_key + "}" - output = output.replace(str(used_value), root_key) - - return output - - def format(self, data, strict=True): - copy_data = copy.deepcopy(data) - roots = self.roots - if roots: - copy_data["root"] = roots - result = super(AnatomyTemplates, self).format(copy_data) - result.strict = strict - return result - - def format_all(self, in_data, only_keys=True): - """ Solves templates based on entered data. - - Args: - data (dict): Containing keys to be filled into template. - - Returns: - TemplatesResultDict: Output `TemplateResult` have `strict` - attribute set to False so accessing unfilled keys in templates - won't raise any exceptions. - """ - return self.format(in_data, strict=False) - - -class RootItem(FormatObject): - """Represents one item or roots. - - Holds raw data of root item specification. Raw data contain value - for each platform, but current platform value is used when object - is used for formatting of template. - - Args: - root_raw_data (dict): Dictionary containing root values by platform - names. ["windows", "linux" and "darwin"] - name (str, optional): Root name which is representing. Used with - multi root setup otherwise None value is expected. - parent_keys (list, optional): All dictionary parent keys. Values of - `parent_keys` are used for get full key which RootItem is - representing. Used for replacing root value in path with - formattable key. e.g. parent_keys == ["work"] -> {root[work]} - parent (object, optional): It is expected to be `Roots` object. - Value of `parent` won't affect code logic much. 
- """ - - def __init__( - self, root_raw_data, name=None, parent_keys=None, parent=None - ): - lowered_platform_keys = {} - for key, value in root_raw_data.items(): - lowered_platform_keys[key.lower()] = value - self.raw_data = lowered_platform_keys - self.cleaned_data = self._clean_roots(lowered_platform_keys) - self.name = name - self.parent_keys = parent_keys or [] - self.parent = parent - - self.available_platforms = list(lowered_platform_keys.keys()) - self.value = lowered_platform_keys.get(platform.system().lower()) - self.clean_value = self.clean_root(self.value) - - def __format__(self, *args, **kwargs): - return self.value.__format__(*args, **kwargs) - - def __str__(self): - return str(self.value) - - def __repr__(self): - return self.__str__() - - def __getitem__(self, key): - if isinstance(key, numbers.Number): - return self.value[key] - - additional_info = "" - if self.parent and self.parent.project_name: - additional_info += " for project \"{}\"".format( - self.parent.project_name - ) - - raise AssertionError( - "Root key \"{}\" is missing{}.".format( - key, additional_info - ) - ) - - def full_key(self): - """Full key value for dictionary formatting in template. - - Returns: - str: Return full replacement key for formatting. This helps when - multiple roots are set. In that case e.g. `"root[work]"` is - returned. - """ - if not self.name: - return "root" - - joined_parent_keys = "".join( - ["[{}]".format(key) for key in self.parent_keys] - ) - return "root{}".format(joined_parent_keys) - - def clean_path(self, path): - """Just replace backslashes with forward slashes.""" - return str(path).replace("\\", "/") - - def clean_root(self, root): - """Makes sure root value does not end with slash.""" - if root: - root = self.clean_path(root) - while root.endswith("/"): - root = root[:-1] - return root - - def _clean_roots(self, raw_data): - """Clean all values of raw root item values.""" - cleaned = {} - for key, value in raw_data.items(): - cleaned[key] = self.clean_root(value) - return cleaned - - def path_remapper(self, path, dst_platform=None, src_platform=None): - """Remap path for specific platform. - - Args: - path (str): Source path which need to be remapped. - dst_platform (str, optional): Specify destination platform - for which remapping should happen. - src_platform (str, optional): Specify source platform. This is - recommended to not use and keep unset until you really want - to use specific platform. - roots (dict/RootItem/None, optional): It is possible to remap - path with different roots then instance where method was - called has. - - Returns: - str/None: When path does not contain known root then - None is returned else returns remapped path with "{root}" - or "{root[]}". 
- """ - cleaned_path = self.clean_path(path) - if dst_platform: - dst_root_clean = self.cleaned_data.get(dst_platform) - if not dst_root_clean: - key_part = "" - full_key = self.full_key() - if full_key != "root": - key_part += "\"{}\" ".format(full_key) - - log.warning( - "Root {}miss platform \"{}\" definition.".format( - key_part, dst_platform - ) - ) - return None - - if cleaned_path.startswith(dst_root_clean): - return cleaned_path - - if src_platform: - src_root_clean = self.cleaned_data.get(src_platform) - if src_root_clean is None: - log.warning( - "Root \"{}\" miss platform \"{}\" definition.".format( - self.full_key(), src_platform - ) - ) - return None - - if not cleaned_path.startswith(src_root_clean): - return None - - subpath = cleaned_path[len(src_root_clean):] - if dst_platform: - # `dst_root_clean` is used from upper condition - return dst_root_clean + subpath - return self.clean_value + subpath - - result, template = self.find_root_template_from_path(path) - if not result: - return None - - def parent_dict(keys, value): - if not keys: - return value - - key = keys.pop(0) - return {key: parent_dict(keys, value)} - - if dst_platform: - format_value = parent_dict(list(self.parent_keys), dst_root_clean) - else: - format_value = parent_dict(list(self.parent_keys), self.value) - - return template.format(**{"root": format_value}) - - def find_root_template_from_path(self, path): - """Replaces known root value with formattable key in path. - - All platform values are checked for this replacement. - - Args: - path (str): Path where root value should be found. - - Returns: - tuple: Tuple contain 2 values: `success` (bool) and `path` (str). - When success it True then path should contain replaced root - value with formattable key. - - Example: - When input path is:: - "C:/windows/path/root/projects/my_project/file.ext" - - And raw data of item looks like:: - { - "windows": "C:/windows/path/root", - "linux": "/mount/root" - } - - Output will be:: - (True, "{root}/projects/my_project/file.ext") - - If any of raw data value wouldn't match path's root output is:: - (False, "C:/windows/path/root/projects/my_project/file.ext") - """ - result = False - output = str(path) - - root_paths = list(self.cleaned_data.values()) - mod_path = self.clean_path(path) - for root_path in root_paths: - # Skip empty paths - if not root_path: - continue - - if mod_path.startswith(root_path): - result = True - replacement = "{" + self.full_key() + "}" - output = replacement + mod_path[len(root_path):] - break - - return (result, output) - - -class Roots: - """Object which should be used for formatting "root" key in templates. - - Args: - anatomy Anatomy: Anatomy object created for a specific project. - """ - - env_prefix = "OPENPYPE_PROJECT_ROOT" - roots_filename = "roots.json" - - def __init__(self, anatomy): - self.anatomy = anatomy - self.loaded_project = None - self._roots = None - - def __format__(self, *args, **kwargs): - return self.roots.__format__(*args, **kwargs) - - def __getitem__(self, key): - return self.roots[key] - - def reset(self): - """Reset current roots value.""" - self._roots = None - - def path_remapper( - self, path, dst_platform=None, src_platform=None, roots=None - ): - """Remap path for specific platform. - - Args: - path (str): Source path which need to be remapped. - dst_platform (str, optional): Specify destination platform - for which remapping should happen. - src_platform (str, optional): Specify source platform. 
This is - recommended to not use and keep unset until you really want - to use specific platform. - roots (dict/RootItem/None, optional): It is possible to remap - path with different roots then instance where method was - called has. - - Returns: - str/None: When path does not contain known root then - None is returned else returns remapped path with "{root}" - or "{root[]}". - """ - if roots is None: - roots = self.roots - - if roots is None: - raise ValueError("Roots are not set. Can't find path.") - - if "{root" in path: - path = path.format(**{"root": roots}) - # If `dst_platform` is not specified then return else continue. - if not dst_platform: - return path - - if isinstance(roots, RootItem): - return roots.path_remapper(path, dst_platform, src_platform) - - for _root in roots.values(): - result = self.path_remapper( - path, dst_platform, src_platform, _root - ) - if result is not None: - return result - - def find_root_template_from_path(self, path, roots=None): - """Find root value in entered path and replace it with formatting key. - - Args: - path (str): Source path where root will be searched. - roots (Roots/dict, optional): It is possible to use different - roots than instance where method was triggered has. - - Returns: - tuple: Output contains tuple with bool representing success as - first value and path with or without replaced root with - formatting key as second value. - - Raises: - ValueError: When roots are not entered and can't be loaded. - """ - if roots is None: - log.debug( - "Looking for matching root in path \"{}\".".format(path) - ) - roots = self.roots - - if roots is None: - raise ValueError("Roots are not set. Can't find path.") - - if isinstance(roots, RootItem): - return roots.find_root_template_from_path(path) - - for root_name, _root in roots.items(): - success, result = self.find_root_template_from_path(path, _root) - if success: - log.info("Found match in root \"{}\".".format(root_name)) - return success, result - - log.warning("No matching root was found in current setting.") - return (False, path) - - def set_root_environments(self): - """Set root environments for current project.""" - for key, value in self.root_environments().items(): - os.environ[key] = value - - def root_environments(self): - """Use root keys to create unique keys for environment variables. - - Concatenates prefix "OPENPYPE_ROOT" with root keys to create unique - keys. - - Returns: - dict: Result is `{(str): (str)}` dicitonary where key represents - unique key concatenated by keys and value is root value of - current platform root. 
- - Example: - With raw root values:: - "work": { - "windows": "P:/projects/work", - "linux": "/mnt/share/projects/work", - "darwin": "/darwin/path/work" - }, - "publish": { - "windows": "P:/projects/publish", - "linux": "/mnt/share/projects/publish", - "darwin": "/darwin/path/publish" - } - - Result on windows platform:: - { - "OPENPYPE_ROOT_WORK": "P:/projects/work", - "OPENPYPE_ROOT_PUBLISH": "P:/projects/publish" - } - - Short example when multiroot is not used:: - { - "OPENPYPE_ROOT": "P:/projects" - } - """ - return self._root_environments() - - def all_root_paths(self, roots=None): - """Return all paths for all roots of all platforms.""" - if roots is None: - roots = self.roots - - output = [] - if isinstance(roots, RootItem): - for value in roots.raw_data.values(): - output.append(value) - return output - - for _roots in roots.values(): - output.extend(self.all_root_paths(_roots)) - return output - - def _root_environments(self, keys=None, roots=None): - if not keys: - keys = [] - if roots is None: - roots = self.roots - - if isinstance(roots, RootItem): - key_items = [self.env_prefix] - for _key in keys: - key_items.append(_key.upper()) - - key = "_".join(key_items) - # Make sure key and value does not contain unicode - # - can happen in Python 2 hosts - return {str(key): str(roots.value)} - - output = {} - for _key, _value in roots.items(): - _keys = list(keys) - _keys.append(_key) - output.update(self._root_environments(_keys, _value)) - return output - - def root_environmets_fill_data(self, template=None): - """Environment variable values in dictionary for rootless path. - - Args: - template (str): Template for environment variable key fill. - By default is set to `"${}"`. - """ - if template is None: - template = "${}" - return self._root_environmets_fill_data(template) - - def _root_environmets_fill_data(self, template, keys=None, roots=None): - if keys is None and roots is None: - return { - "root": self._root_environmets_fill_data( - template, [], self.roots - ) - } - - if isinstance(roots, RootItem): - key_items = [Roots.env_prefix] - for _key in keys: - key_items.append(_key.upper()) - key = "_".join(key_items) - return template.format(key) - - output = {} - for key, value in roots.items(): - _keys = list(keys) - _keys.append(key) - output[key] = self._root_environmets_fill_data( - template, _keys, value - ) - return output - - @property - def project_name(self): - """Return project name which will be used for loading root values.""" - return self.anatomy.project_name - - @property - def roots(self): - """Property for filling "root" key in templates. - - This property returns roots for current project or default root values. - Warning: - Default roots value may cause issues when project use different - roots settings. That may happen when project use multiroot - templates but default roots miss their keys. - """ - if self.project_name != self.loaded_project: - self._roots = None - - if self._roots is None: - self._roots = self._discover() - self.loaded_project = self.project_name - return self._roots - - def _discover(self): - """ Loads current project's roots or default. - - Default roots are loaded if project override's does not contain roots. - - Returns: - `RootItem` or `dict` with multiple `RootItem`s when multiroot - setting is used. - """ - - return self._parse_dict(self.anatomy["roots"], parent=self) - - @staticmethod - def _parse_dict(data, key=None, parent_keys=None, parent=None): - """Parse roots raw data into RootItem or dictionary with RootItems. 
- - Converting raw roots data to `RootItem` helps to handle platform keys. - This method is recursive to be able handle multiroot setup and - is static to be able to load default roots without creating new object. - - Args: - data (dict): Should contain raw roots data to be parsed. - key (str, optional): Current root key. Set by recursion. - parent_keys (list): Parent dictionary keys. Set by recursion. - parent (Roots, optional): Parent object set in `RootItem` - helps to keep RootItem instance updated with `Roots` object. - - Returns: - `RootItem` or `dict` with multiple `RootItem`s when multiroot - setting is used. - """ - if not parent_keys: - parent_keys = [] - is_last = False - for value in data.values(): - if isinstance(value, StringType): - is_last = True - break - - if is_last: - return RootItem(data, key, parent_keys, parent=parent) - - output = {} - for _key, value in data.items(): - _parent_keys = list(parent_keys) - _parent_keys.append(_key) - output[_key] = Roots._parse_dict(value, _key, _parent_keys, parent) - return output +@anatomy_deprecated +def Anatomy(*args, **kwargs): + from openpype.pipeline.anatomy import Anatomy + return Anatomy(*args, **kwargs) diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index 6ade33b59c..990dc7495a 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -11,6 +11,10 @@ from abc import ABCMeta, abstractmethod import six +from openpype.client import ( + get_project, + get_asset_by_name, +) from openpype.settings import ( get_system_settings, get_project_settings, @@ -20,18 +24,9 @@ from openpype.settings.constants import ( METADATA_KEYS, M_DYNAMIC_KEY_LABEL ) -from . import ( - PypeLogger, - Anatomy -) +from .log import Logger from .profiles_filtering import filter_profiles from .local_settings import get_openpype_username -from .avalon_context import ( - get_workdir_data, - get_workdir_with_workdir_data, - get_workfile_template_key, - get_last_workfile -) from .python_module_tools import ( modules_from_path, @@ -143,7 +138,7 @@ def get_logger(): """Global lib.applications logger getter.""" global _logger if _logger is None: - _logger = PypeLogger.get_logger(__name__) + _logger = Logger.get_logger(__name__) return _logger @@ -378,7 +373,7 @@ class ApplicationManager: """ def __init__(self, system_settings=None): - self.log = PypeLogger.get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) self.app_groups = {} self.applications = {} @@ -474,6 +469,19 @@ class ApplicationManager: for tool in group: self.tools[tool.full_name] = tool + def find_latest_available_variant_for_group(self, group_name): + group = self.app_groups.get(group_name) + if group is None or not group.enabled: + return None + + output = None + for _, variant in reversed(sorted(group.variants.items())): + executable = variant.find_executable() + if executable: + output = variant + break + return output + def launch(self, app_name, **data): """Launch procedure. 
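A short usage sketch for the `find_latest_available_variant_for_group` helper added above; the "maya" group name is a placeholder and the result depends on which executables are installed on the machine:

from openpype.lib.applications import ApplicationManager

manager = ApplicationManager()
# Highest variant of the group whose executable exists on this machine,
# or None when the group is disabled or nothing is installed.
variant = manager.find_latest_available_variant_for_group("maya")
if variant is not None:
    print(variant.full_name)  # e.g. "maya/2023"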
@@ -664,7 +672,11 @@ class ApplicationExecutable: if os.path.exists(plist_filepath): import plistlib - parsed_plist = plistlib.readPlist(plist_filepath) + if hasattr(plistlib, "load"): + with open(plist_filepath, "rb") as stream: + parsed_plist = plistlib.load(stream) + else: + parsed_plist = plistlib.readPlist(plist_filepath) executable_filename = parsed_plist.get("CFBundleExecutable") if executable_filename: @@ -736,7 +748,7 @@ class LaunchHook: Always should be called """ - self.log = PypeLogger().get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) self.launch_context = launch_context @@ -878,7 +890,7 @@ class ApplicationLaunchContext: # Logger logger_name = "{}-{}".format(self.__class__.__name__, self.app_name) - self.log = PypeLogger.get_logger(logger_name) + self.log = Logger.get_logger(logger_name) self.executable = executable @@ -951,6 +963,63 @@ class ApplicationLaunchContext: ) self.kwargs["env"] = value + def _collect_addons_launch_hook_paths(self): + """Helper to collect application launch hooks from addons. + + Module have to have implemented 'get_launch_hook_paths' method which + can expect appliction as argument or nothing. + + Returns: + List[str]: Paths to launch hook directories. + """ + + expected_types = (list, tuple, set) + + output = [] + for module in self.modules_manager.get_enabled_modules(): + # Skip module if does not have implemented 'get_launch_hook_paths' + func = getattr(module, "get_launch_hook_paths", None) + if func is None: + continue + + func = module.get_launch_hook_paths + if hasattr(inspect, "signature"): + sig = inspect.signature(func) + expect_args = len(sig.parameters) > 0 + else: + expect_args = len(inspect.getargspec(func)[0]) > 0 + + # Pass application argument if method expect it. + try: + if expect_args: + hook_paths = func(self.application) + else: + hook_paths = func() + except Exception: + self.log.warning( + "Failed to call 'get_launch_hook_paths'", + exc_info=True + ) + continue + + if not hook_paths: + continue + + # Convert string to list + if isinstance(hook_paths, six.string_types): + hook_paths = [hook_paths] + + # Skip invalid types + if not isinstance(hook_paths, expected_types): + self.log.warning(( + "Result of `get_launch_hook_paths`" + " has invalid type {}. Expected {}" + ).format(type(hook_paths), expected_types)) + continue + + output.extend(hook_paths) + return output + def paths_to_launch_hooks(self): """Directory paths where to look for launch hooks.""" # This method has potential to be part of application manager (maybe). 
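A sketch of the addon-side contract consumed by `_collect_addons_launch_hook_paths` above, assuming an addon derived from `OpenPypeModule`; the addon name, host filter and directory are hypothetical:

import os

from openpype.modules import OpenPypeModule


class ExampleAddon(OpenPypeModule):
    """Hypothetical addon exposing launch hook directories."""

    name = "example_addon"

    def initialize(self, module_settings):
        self.enabled = True

    def get_launch_hook_paths(self, application):
        # May also be defined without arguments; the collector inspects the
        # signature and only passes the application when it is expected.
        if application.host_name != "maya":
            return []
        return [os.path.join(os.path.dirname(__file__), "launch_hooks")]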
@@ -958,32 +1027,24 @@ class ApplicationLaunchContext: # TODO load additional studio paths from settings import openpype - pype_dir = os.path.dirname(os.path.abspath(openpype.__file__)) + openpype_dir = os.path.dirname(os.path.abspath(openpype.__file__)) - # --- START: Backwards compatibility --- - hooks_dir = os.path.join(pype_dir, "hooks") + global_hooks_dir = os.path.join(openpype_dir, "hooks") - subfolder_names = ["global"] - if self.host_name: - subfolder_names.append(self.host_name) - for subfolder_name in subfolder_names: - path = os.path.join(hooks_dir, subfolder_name) - if ( - os.path.exists(path) - and os.path.isdir(path) - and path not in paths - ): - paths.append(path) - # --- END: Backwards compatibility --- - - subfolders_list = [ - ["hooks"] + hooks_dirs = [ + global_hooks_dir ] if self.host_name: - subfolders_list.append(["hosts", self.host_name, "hooks"]) + # If host requires launch hooks and is module then launch hooks + # should be collected using 'collect_launch_hook_paths' + # - module have to implement 'get_launch_hook_paths' + host_module = self.modules_manager.get_host_module(self.host_name) + if not host_module: + hooks_dirs.append(os.path.join( + openpype_dir, "hosts", self.host_name, "hooks" + )) - for subfolders in subfolders_list: - path = os.path.join(pype_dir, *subfolders) + for path in hooks_dirs: if ( os.path.exists(path) and os.path.isdir(path) @@ -992,7 +1053,7 @@ class ApplicationLaunchContext: paths.append(path) # Load modules paths - paths.extend(self.modules_manager.collect_launch_hook_paths()) + paths.extend(self._collect_addons_launch_hook_paths()) return paths @@ -1282,7 +1343,13 @@ class EnvironmentPrepData(dict): def get_app_environments_for_context( - project_name, asset_name, task_name, app_name, env_group=None, env=None + project_name, + asset_name, + task_name, + app_name, + env_group=None, + env=None, + modules_manager=None ): """Prepare environment variables by context. Args: @@ -1293,11 +1360,14 @@ def get_app_environments_for_context( by ApplicationManager. env (dict): Initial environment variables. `os.environ` is used when not passed. + modules_manager (ModulesManager): Initialized modules manager. Returns: dict: Environments for passed context and application. 
""" - from openpype.pipeline import AvalonMongoDB + + from openpype.modules import ModulesManager + from openpype.pipeline import AvalonMongoDB, Anatomy # Avalon database connection dbcon = AvalonMongoDB() @@ -1305,11 +1375,11 @@ def get_app_environments_for_context( dbcon.install() # Project document - project_doc = dbcon.find_one({"type": "project"}) - asset_doc = dbcon.find_one({ - "type": "asset", - "name": asset_name - }) + project_doc = get_project(project_name) + asset_doc = get_asset_by_name(project_name, asset_name) + + if modules_manager is None: + modules_manager = ModulesManager() # Prepare app object which can be obtained only from ApplciationManager app_manager = ApplicationManager() @@ -1333,9 +1403,10 @@ def get_app_environments_for_context( "env": env }) + data["env"].update(anatomy.root_environments()) - prepare_app_environments(data, env_group) - prepare_context_environments(data, env_group) + prepare_app_environments(data, env_group, modules_manager) + prepare_context_environments(data, env_group, modules_manager) # Discard avalon connection dbcon.uninstall() @@ -1355,9 +1426,12 @@ def _merge_env(env, current_env): return result -def _add_python_version_paths(app, env, logger): +def _add_python_version_paths(app, env, logger, modules_manager): """Add vendor packages specific for a Python version.""" + for module in modules_manager.get_enabled_modules(): + module.modify_application_launch_arguments(app, env) + # Skip adding if host name is not set if not app.host_name: return @@ -1390,7 +1464,9 @@ def _add_python_version_paths(app, env, logger): env["PYTHONPATH"] = os.pathsep.join(python_paths) -def prepare_app_environments(data, env_group=None, implementation_envs=True): +def prepare_app_environments( + data, env_group=None, implementation_envs=True, modules_manager=None +): """Modify launch environments based on launched app and context. Args: @@ -1403,7 +1479,12 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): log = data["log"] source_env = data["env"].copy() - _add_python_version_paths(app, source_env, log) + if modules_manager is None: + from openpype.modules import ModulesManager + + modules_manager = ModulesManager() + + _add_python_version_paths(app, source_env, log, modules_manager) # Use environments from local settings filtered_local_envs = {} @@ -1484,8 +1565,10 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): final_env = None # Add host specific environments if app.host_name and implementation_envs: - module = __import__("openpype.hosts", fromlist=[app.host_name]) - host_module = getattr(module, app.host_name, None) + host_module = modules_manager.get_host_module(app.host_name) + if not host_module: + module = __import__("openpype.hosts", fromlist=[app.host_name]) + host_module = getattr(module, app.host_name, None) add_implementation_envs = None if host_module: add_implementation_envs = getattr( @@ -1544,13 +1627,16 @@ def apply_project_environments_value( return env -def prepare_context_environments(data, env_group=None): +def prepare_context_environments(data, env_group=None, modules_manager=None): """Modify launch environments with context data for launched host. Args: data (EnvironmentPrepData): Dictionary where result and intermediate result will be stored. 
""" + + from openpype.pipeline.template_data import get_template_data + # Context environments log = data["log"] @@ -1571,7 +1657,9 @@ def prepare_context_environments(data, env_group=None): # Load project specific environments project_name = project_doc["name"] project_settings = get_project_settings(project_name) + system_settings = get_system_settings() data["project_settings"] = project_settings + data["system_settings"] = system_settings # Apply project specific environments on current env value apply_project_environments_value( project_name, data["env"], project_settings, env_group @@ -1594,8 +1682,8 @@ def prepare_context_environments(data, env_group=None): if not app.is_host: return - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, app.host_name + workdir_data = get_template_data( + project_doc, asset_doc, task_name, app.host_name, system_settings ) data["workdir_data"] = workdir_data @@ -1606,7 +1694,14 @@ def prepare_context_environments(data, env_group=None): data["task_type"] = task_type try: - workdir = get_workdir_with_workdir_data(workdir_data, anatomy) + from openpype.pipeline.workfile import get_workdir_with_workdir_data + + workdir = get_workdir_with_workdir_data( + workdir_data, + anatomy.project_name, + anatomy, + project_settings=project_settings + ) except Exception as exc: raise ApplicationLaunchFailed( @@ -1627,10 +1722,10 @@ def prepare_context_environments(data, env_group=None): data["env"]["AVALON_APP"] = app.host_name data["env"]["AVALON_WORKDIR"] = workdir - _prepare_last_workfile(data, workdir) + _prepare_last_workfile(data, workdir, modules_manager) -def _prepare_last_workfile(data, workdir): +def _prepare_last_workfile(data, workdir, modules_manager): """last workfile workflow preparation. Function check if should care about last workfile workflow and tries @@ -1645,8 +1740,13 @@ def _prepare_last_workfile(data, workdir): result will be stored. workdir (str): Path to folder where workfiles should be stored. 
""" + + from openpype.modules import ModulesManager from openpype.pipeline import HOST_WORKFILE_EXTENSIONS + if not modules_manager: + modules_manager = ModulesManager() + log = data["log"] _workdir_data = data.get("workdir_data") @@ -1694,13 +1794,26 @@ def _prepare_last_workfile(data, workdir): # Last workfile path last_workfile_path = data.get("last_workfile_path") or "" if not last_workfile_path: - extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name) + host_module = modules_manager.get_host_module(app.host_name) + if host_module: + extensions = host_module.get_workfile_extensions() + else: + extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name) + if extensions: + from openpype.pipeline.workfile import ( + get_workfile_template_key, + get_last_workfile + ) + anatomy = data["anatomy"] project_settings = data["project_settings"] task_type = workdir_data["task"]["type"] template_key = get_workfile_template_key( - task_type, app.host_name, project_settings=project_settings + task_type, + app.host_name, + project_name, + project_settings=project_settings ) # Find last workfile file_template = str(anatomy.templates[template_key]["file"]) diff --git a/openpype/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py index a1f7c1e0f4..6baeaec045 100644 --- a/openpype/lib/attribute_definitions.py +++ b/openpype/lib/attribute_definitions.py @@ -3,17 +3,83 @@ import re import collections import uuid import json -from abc import ABCMeta, abstractmethod +from abc import ABCMeta, abstractmethod, abstractproperty import six import clique +# Global variable which store attribude definitions by type +# - default types are registered on import +_attr_defs_by_type = {} + + +def register_attr_def_class(cls): + """Register attribute definition. + + Currently are registered definitions used to deserialize data to objects. + + Attrs: + cls (AbtractAttrDef): Non-abstract class to be registered with unique + 'type' attribute. + + Raises: + KeyError: When type was already registered. + """ + + if cls.type in _attr_defs_by_type: + raise KeyError("Type \"{}\" was already registered".format(cls.type)) + _attr_defs_by_type[cls.type] = cls + + +def get_attributes_keys(attribute_definitions): + """Collect keys from list of attribute definitions. + + Args: + attribute_definitions (List[AbtractAttrDef]): Objects of attribute + definitions. + + Returns: + Set[str]: Keys that will be created using passed attribute definitions. + """ + + keys = set() + if not attribute_definitions: + return keys + + for attribute_def in attribute_definitions: + if not isinstance(attribute_def, UIDef): + keys.add(attribute_def.key) + return keys + + +def get_default_values(attribute_definitions): + """Receive default values for attribute definitions. + + Args: + attribute_definitions (List[AbtractAttrDef]): Attribute definitions for + which default values should be collected. + + Returns: + Dict[str, Any]: Default values for passet attribute definitions. + """ + + output = {} + if not attribute_definitions: + return output + + for attr_def in attribute_definitions: + # Skip UI definitions + if not isinstance(attr_def, UIDef): + output[attr_def.key] = attr_def.default + return output + class AbstractAttrDefMeta(ABCMeta): """Meta class to validate existence of 'key' attribute. Each object of `AbtractAttrDef` mus have defined 'key' attribute. 
""" + def __call__(self, *args, **kwargs): obj = super(AbstractAttrDefMeta, self).__call__(*args, **kwargs) init_class = getattr(obj, "__init__class__", None) @@ -25,7 +91,7 @@ class AbstractAttrDefMeta(ABCMeta): @six.add_metaclass(AbstractAttrDefMeta) -class AbtractAttrDef: +class AbtractAttrDef(object): """Abstraction of attribute definiton. Each attribute definition must have implemented validation and @@ -45,6 +111,9 @@ class AbtractAttrDef: is_label_horizontal(bool): UI specific argument. Specify if label is next to value input or ahead. """ + + type_attributes = [] + is_value_def = True def __init__( @@ -70,6 +139,16 @@ class AbtractAttrDef: return False return self.key == other.key + @abstractproperty + def type(self): + """Attribute definition type also used as identifier of class. + + Returns: + str: Type of attribute definition. + """ + + pass + @abstractmethod def convert_value(self, value): """Convert value to a valid one. @@ -77,8 +156,38 @@ class AbtractAttrDef: Convert passed value to a valid type. Use default if value can't be converted. """ + pass + def serialize(self): + """Serialize object to data so it's possible to recreate it. + + Returns: + Dict[str, Any]: Serialized object that can be passed to + 'deserialize' method. + """ + + data = { + "type": self.type, + "key": self.key, + "label": self.label, + "tooltip": self.tooltip, + "default": self.default, + "is_label_horizontal": self.is_label_horizontal + } + for attr in self.type_attributes: + data[attr] = getattr(self, attr) + return data + + @classmethod + def deserialize(cls, data): + """Recreate object from data. + + Data can be received using 'serialize' method. + """ + + return cls(**data) + # ----------------------------------------- # UI attribute definitoins won't hold value @@ -95,10 +204,12 @@ class UIDef(AbtractAttrDef): class UISeparatorDef(UIDef): - pass + type = "separator" class UILabelDef(UIDef): + type = "label" + def __init__(self, label): super(UILabelDef, self).__init__(label=label) @@ -113,6 +224,9 @@ class UnknownDef(AbtractAttrDef): This attribute can be used to keep existing data unchanged but does not have known definition of type. """ + + type = "unknown" + def __init__(self, key, default=None, **kwargs): kwargs["default"] = default super(UnknownDef, self).__init__(key, **kwargs) @@ -134,6 +248,13 @@ class NumberDef(AbtractAttrDef): default(int, float): Default value for conversion. """ + type = "number" + type_attributes = [ + "minimum", + "maximum", + "decimals" + ] + def __init__( self, key, minimum=None, maximum=None, decimals=None, default=None, **kwargs @@ -204,6 +325,13 @@ class TextDef(AbtractAttrDef): placeholder(str): UI placeholder for attribute. default(str, None): Default value. Empty string used when not defined. """ + + type = "text" + type_attributes = [ + "multiline", + "placeholder", + ] + def __init__( self, key, multiline=None, regex=None, placeholder=None, default=None, **kwargs @@ -242,6 +370,11 @@ class TextDef(AbtractAttrDef): return value return self.default + def serialize(self): + data = super(TextDef, self).serialize() + data["regex"] = self.regex.pattern + return data + class EnumDef(AbtractAttrDef): """Enumeration of single item from items. @@ -253,6 +386,8 @@ class EnumDef(AbtractAttrDef): default: Default value. Must be one key(value) from passed items. 
""" + type = "enum" + def __init__(self, key, items, default=None, **kwargs): if not items: raise ValueError(( @@ -287,6 +422,11 @@ class EnumDef(AbtractAttrDef): return value return self.default + def serialize(self): + data = super(TextDef, self).serialize() + data["items"] = list(self.items) + return data + class BoolDef(AbtractAttrDef): """Boolean representation. @@ -295,6 +435,8 @@ class BoolDef(AbtractAttrDef): default(bool): Default value. Set to `False` if not defined. """ + type = "bool" + def __init__(self, key, default=None, **kwargs): if default is None: default = False @@ -399,6 +541,13 @@ class FileDefItem(object): return ext return None + @property + def lower_ext(self): + ext = self.ext + if ext is not None: + return ext.lower() + return ext + @property def is_dir(self): if self.is_empty: @@ -531,14 +680,24 @@ class FileDef(AbtractAttrDef): Args: single_item(bool): Allow only single path item. folders(bool): Allow folder paths. - extensions(list): Allow files with extensions. Empty list will + extensions(List[str]): Allow files with extensions. Empty list will allow all extensions and None will disable files completely. - default(str, list): Defautl value. + extensions_label(str): Custom label shown instead of extensions in UI. + default(str, List[str]): Default value. """ + type = "path" + type_attributes = [ + "single_item", + "folders", + "extensions", + "allow_sequences", + "extensions_label", + ] + def __init__( self, key, single_item=True, folders=None, extensions=None, - allow_sequences=True, default=None, **kwargs + allow_sequences=True, extensions_label=None, default=None, **kwargs ): if folders is None and extensions is None: folders = True @@ -578,6 +737,7 @@ class FileDef(AbtractAttrDef): self.folders = folders self.extensions = set(extensions) self.allow_sequences = allow_sequences + self.extensions_label = extensions_label super(FileDef, self).__init__(key, default=default, **kwargs) def __eq__(self, other): @@ -625,3 +785,71 @@ class FileDef(AbtractAttrDef): if self.single_item: return FileDefItem.create_empty_item().to_dict() return [] + + +def serialize_attr_def(attr_def): + """Serialize attribute definition to data. + + Args: + attr_def (AbtractAttrDef): Attribute definition to serialize. + + Returns: + Dict[str, Any]: Serialized data. + """ + + return attr_def.serialize() + + +def serialize_attr_defs(attr_defs): + """Serialize attribute definitions to data. + + Args: + attr_defs (List[AbtractAttrDef]): Attribute definitions to serialize. + + Returns: + List[Dict[str, Any]]: Serialized data. + """ + + return [ + serialize_attr_def(attr_def) + for attr_def in attr_defs + ] + + +def deserialize_attr_def(attr_def_data): + """Deserialize attribute definition from data. + + Args: + attr_def (Dict[str, Any]): Attribute definition data to deserialize. + """ + + attr_type = attr_def_data.pop("type") + cls = _attr_defs_by_type[attr_type] + return cls.deserialize(attr_def_data) + + +def deserialize_attr_defs(attr_defs_data): + """Deserialize attribute definitions. + + Args: + List[Dict[str, Any]]: List of attribute definitions. 
+ """ + + return [ + deserialize_attr_def(attr_def_data) + for attr_def_data in attr_defs_data + ] + + +# Register attribute definitions +for _attr_class in ( + UISeparatorDef, + UILabelDef, + UnknownDef, + NumberDef, + TextDef, + EnumDef, + BoolDef, + FileDef +): + register_attr_def_class(_attr_class) diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 9d8a92cfe9..12f4a5198b 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -1,23 +1,28 @@ """Should be used only inside of hosts.""" import os -import json -import re import copy import platform import logging -import collections import functools -import getpass +import warnings -from bson.objectid import ObjectId +import six -from openpype.settings import ( - get_project_settings, - get_system_settings +from openpype.client import ( + get_project, + get_assets, + get_asset_by_name, + get_last_version_by_subset_name, + get_workfile_info, +) +from openpype.client.operations import ( + CURRENT_ASSET_DOC_SCHEMA, + CURRENT_PROJECT_SCHEMA, + CURRENT_PROJECT_CONFIG_SCHEMA, + PROJECT_NAME_ALLOWED_SYMBOLS, + PROJECT_NAME_REGEX, ) -from .anatomy import Anatomy from .profiles_filtering import filter_profiles -from .events import emit_event from .path_templates import StringTemplate legacy_io = None @@ -25,17 +30,61 @@ legacy_io = None log = logging.getLogger("AvalonContext") +# Backwards compatibility - should not be used anymore +# - Will be removed in OP 3.16.* CURRENT_DOC_SCHEMAS = { - "project": "openpype:project-3.0", - "asset": "openpype:asset-3.0", - "config": "openpype:config-2.0" + "project": CURRENT_PROJECT_SCHEMA, + "asset": CURRENT_ASSET_DOC_SCHEMA, + "config": CURRENT_PROJECT_CONFIG_SCHEMA } -PROJECT_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_" -PROJECT_NAME_REGEX = re.compile( - "^[{}]+$".format(PROJECT_NAME_ALLOWED_SYMBOLS) -) +class AvalonContextDeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", AvalonContextDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=AvalonContextDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + +@deprecated("openpype.client.operations.create_project") def create_project( project_name, project_code, library_project=False, dbcon=None ): @@ -59,63 +108,14 @@ def create_project( Returns: dict: Created project document. 
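With the default classes registered above, attribute definitions can round-trip through plain data; a sketch using illustrative keys:

from openpype.lib.attribute_definitions import (
    BoolDef,
    NumberDef,
    serialize_attr_defs,
    deserialize_attr_defs,
)

original = [
    NumberDef("priority", minimum=0, maximum=100, default=50),
    BoolDef("review", default=True),
]
data = serialize_attr_defs(original)      # list of plain, JSON-friendly dicts
restored = deserialize_attr_defs(data)    # recreated definition objects
assert [attr_def.key for attr_def in restored] == ["priority", "review"]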
+ + Deprecated: + Function will be removed after release version 3.16.* """ - from openpype.settings import ProjectSettings, SaveWarningExc - from openpype.pipeline import AvalonMongoDB - from openpype.pipeline.schema import validate + from openpype.client.operations import create_project - if dbcon is None: - dbcon = AvalonMongoDB() - - if not PROJECT_NAME_REGEX.match(project_name): - raise ValueError(( - "Project name \"{}\" contain invalid characters" - ).format(project_name)) - - database = dbcon.database - project_doc = database[project_name].find_one( - {"type": "project"}, - {"name": 1} - ) - if project_doc: - raise ValueError("Project with name \"{}\" already exists".format( - project_name - )) - - project_doc = { - "type": "project", - "name": project_name, - "data": { - "code": project_code, - "library_project": library_project - }, - "schema": CURRENT_DOC_SCHEMAS["project"] - } - # Insert document with basic data - database[project_name].insert_one(project_doc) - # Load ProjectSettings for the project and save it to store all attributes - # and Anatomy - try: - project_settings_entity = ProjectSettings(project_name) - project_settings_entity.save() - except SaveWarningExc as exc: - print(str(exc)) - except Exception: - database[project_name].delete_one({"type": "project"}) - raise - - project_doc = database[project_name].find_one({"type": "project"}) - - try: - # Validate created project document - validate(project_doc) - except Exception: - # Remove project if is not valid - database[project_name].delete_one({"type": "project"}) - raise - - return project_doc + return create_project(project_name, project_code, library_project) def with_pipeline_io(func): @@ -128,7 +128,7 @@ def with_pipeline_io(func): return wrapped -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.is_representation_from_latest") def is_latest(representation): """Return whether the representation is from latest version @@ -138,144 +138,61 @@ def is_latest(representation): Returns: bool: Whether the representation is of latest version. + Deprecated: + Function will be removed after release version 3.15.* """ - version = legacy_io.find_one({"_id": representation['parent']}) - if version["type"] == "hero_version": - return True + from openpype.pipeline.context_tools import is_representation_from_latest - # Get highest version under the parent - highest_version = legacy_io.find_one({ - "type": "version", - "parent": version["parent"] - }, sort=[("name", -1)], projection={"name": True}) - - if version['name'] == highest_version['name']: - return True - else: - return False + return is_representation_from_latest(representation) -@with_pipeline_io +@deprecated("openpype.pipeline.load.any_outdated_containers") def any_outdated(): - """Return whether the current scene has any outdated content""" - from openpype.pipeline import registered_host + """Return whether the current scene has any outdated content. 
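# Replacement call for the 'any_outdated' wrapper above, as named in its
# deprecation message; it checks the containers of the currently opened
# workfile in the registered host.
from openpype.pipeline.load import any_outdated_containers

if any_outdated_containers():
    print("Current workfile contains outdated containers")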
- checked = set() - host = registered_host() - for container in host.ls(): - representation = container['representation'] - if representation in checked: - continue + Deprecated: + Function will be removed after release version 3.15.* + """ - representation_doc = legacy_io.find_one( - { - "_id": ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} - ) - if representation_doc and not is_latest(representation_doc): - return True - elif not representation_doc: - log.debug("Container '{objectName}' has an invalid " - "representation, it is missing in the " - "database".format(**container)) + from openpype.pipeline.load import any_outdated_containers - checked.add(representation) - - return False + return any_outdated_containers() -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_current_project_asset") def get_asset(asset_name=None): """ Returning asset document from database by its name. - Doesn't count with duplicities on asset names! - - Args: - asset_name (str) - - Returns: - (MongoDB document) - """ - if not asset_name: - asset_name = legacy_io.Session["AVALON_ASSET"] - - asset_document = legacy_io.find_one({ - "name": asset_name, - "type": "asset" - }) - - if not asset_document: - raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) - - return asset_document - - -@with_pipeline_io -def get_hierarchy(asset_name=None): - """ - Obtain asset hierarchy path string from mongo db + Doesn't count with duplicities on asset names! Args: asset_name (str) Returns: - (string): asset hierarchy path + (MongoDB document) + Deprecated: + Function will be removed after release version 3.15.* """ - if not asset_name: - asset_name = legacy_io.Session.get( - "AVALON_ASSET", - os.environ["AVALON_ASSET"] - ) - asset_entity = legacy_io.find_one({ - "type": 'asset', - "name": asset_name - }) + from openpype.pipeline.context_tools import get_current_project_asset - not_set = "PARENTS_NOT_SET" - entity_parents = asset_entity.get("data", {}).get("parents", not_set) - - # If entity already have parents then just return joined - if entity_parents != not_set: - return "/".join(entity_parents) - - # Else query parents through visualParents and store result to entity - hierarchy_items = [] - entity = asset_entity - while True: - parent_id = entity.get("data", {}).get("visualParent") - if not parent_id: - break - entity = legacy_io.find_one({"_id": parent_id}) - hierarchy_items.append(entity["name"]) - - # Add parents to entity data for next query - entity_data = asset_entity.get("data", {}) - entity_data["parents"] = hierarchy_items - legacy_io.update_many( - {"_id": asset_entity["_id"]}, - {"$set": {"data": entity_data}} - ) - - return "/".join(hierarchy_items) + return get_current_project_asset(asset_name=asset_name) -def get_system_general_anatomy_data(): - system_settings = get_system_settings() - studio_name = system_settings["general"]["studio_name"] - studio_code = system_settings["general"]["studio_code"] - return { - "studio": { - "name": studio_name, - "code": studio_code - } - } +@deprecated("openpype.pipeline.template_data.get_general_template_data") +def get_system_general_anatomy_data(system_settings=None): + """ + Deprecated: + Function will be removed after release version 3.15.* + """ + from openpype.pipeline.template_data import get_general_template_data + + return get_general_template_data(system_settings) +@deprecated("openpype.client.get_linked_asset_ids") def get_linked_asset_ids(asset_doc): """Return linked asset ids for 
`asset_doc` from DB @@ -284,26 +201,20 @@ def get_linked_asset_ids(asset_doc): Returns: (list): MongoDB ids of input links. + + Deprecated: + Function will be removed after release version 3.16.* """ - output = [] - if not asset_doc: - return output - input_links = asset_doc["data"].get("inputLinks") or [] - if input_links: - for item in input_links: - # Backwards compatibility for "_id" key which was replaced with - # "id" - if "_id" in item: - link_id = item["_id"] - else: - link_id = item["id"] - output.append(link_id) + from openpype.client import get_linked_asset_ids + from openpype.pipeline import legacy_io - return output + project_name = legacy_io.active_project() + + return get_linked_asset_ids(project_name, asset_doc=asset_doc) -@with_pipeline_io +@deprecated("openpype.client.get_linked_assets") def get_linked_assets(asset_doc): """Return linked assets for `asset_doc` from DB @@ -312,15 +223,20 @@ def get_linked_assets(asset_doc): Returns: (list) Asset documents of input links for passed asset doc. + + Deprecated: + Function will be removed after release version 3.15.* """ - link_ids = get_linked_asset_ids(asset_doc) - if not link_ids: - return [] - return list(legacy_io.find({"_id": {"$in": link_ids}})) + from openpype.pipeline import legacy_io + from openpype.client import get_linked_assets + + project_name = legacy_io.active_project() + + return get_linked_assets(project_name, asset_doc=asset_doc) -@with_pipeline_io +@deprecated("openpype.client.get_last_version_by_subset_name") def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): """Retrieve latest version from `asset_name`, and `subset_name`. @@ -336,62 +252,30 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): Returns: None: If asset, subset or version were not found. - dict: Last version document for entered . + dict: Last version document for entered. 
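# Direct replacement for this deprecated 'get_latest_version' wrapper, as
# named in its deprecation message; project, subset and asset names below
# are placeholders.
from openpype.client import get_last_version_by_subset_name

last_version_doc = get_last_version_by_subset_name(
    "my_project", "modelMain", asset_name="characterA"
)
if last_version_doc is not None:
    print(last_version_doc["name"])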
+ + Deprecated: + Function will be removed after release version 3.15.* """ - if not dbcon: - log.debug("Using `legacy_io` for query.") - dbcon = legacy_io - # Make sure is installed - dbcon.install() + if not project_name: + if not dbcon: + from openpype.pipeline import legacy_io - if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"): - # `legacy_io` has only `_database` attribute - # but `AvalonMongoDB` has `database` - database = getattr(dbcon, "database", dbcon._database) - collection = database[project_name] - else: - project_name = dbcon.Session.get("AVALON_PROJECT") - collection = dbcon + log.debug("Using `legacy_io` for query.") + dbcon = legacy_io + # Make sure is installed + dbcon.install() - log.debug(( - "Getting latest version for Project: \"{}\" Asset: \"{}\"" - " and Subset: \"{}\"" - ).format(project_name, asset_name, subset_name)) + project_name = dbcon.active_project() - # Query asset document id by asset name - asset_doc = collection.find_one( - {"type": "asset", "name": asset_name}, - {"_id": True} + return get_last_version_by_subset_name( + project_name, subset_name, asset_name=asset_name ) - if not asset_doc: - log.info( - "Asset \"{}\" was not found in Database.".format(asset_name) - ) - return None - - subset_doc = collection.find_one( - {"type": "subset", "name": subset_name, "parent": asset_doc["_id"]}, - {"_id": True} - ) - if not subset_doc: - log.info( - "Subset \"{}\" was not found in Database.".format(subset_name) - ) - return None - - version_doc = collection.find_one( - {"type": "version", "parent": subset_doc["_id"]}, - sort=[("name", -1)], - ) - if not version_doc: - log.info( - "Subset \"{}\" does not have any version yet.".format(subset_name) - ) - return None - return version_doc +@deprecated( + "openpype.pipeline.workfile.get_workfile_template_key_from_context") def get_workfile_template_key_from_context( asset_name, task_name, host_name, project_name=None, dbcon=None, project_settings=None @@ -419,39 +303,30 @@ def get_workfile_template_key_from_context( Raises: ValueError: When both 'dbcon' and 'project_name' were not passed. + + Deprecated: + Function will be removed after release version 3.16.* """ - if not dbcon: - if not project_name: + + from openpype.pipeline.workfile import ( + get_workfile_template_key_from_context + ) + + if not project_name: + if not dbcon: raise ValueError(( "`get_workfile_template_key_from_context` requires to pass" " one of 'dbcon' or 'project_name' arguments." )) - from openpype.pipeline import AvalonMongoDB + project_name = dbcon.active_project() - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name - - elif not project_name: - project_name = dbcon.Session["AVALON_PROJECT"] - - asset_doc = dbcon.find_one( - { - "type": "asset", - "name": asset_name - }, - { - "data.tasks": 1 - } - ) - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") - - return get_workfile_template_key( - task_type, host_name, project_name, project_settings + return get_workfile_template_key_from_context( + asset_name, task_name, host_name, project_name, project_settings ) +@deprecated( + "openpype.pipeline.workfile.get_workfile_template_key") def get_workfile_template_key( task_type, host_name, project_name=None, project_settings=None ): @@ -474,44 +349,19 @@ def get_workfile_template_key( Raises: ValueError: When both 'project_name' and 'project_settings' were not passed. 
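# Direct call to the relocated function this wrapper forwards to; the task
# type, host name and project name are illustrative values and the keyword
# form of 'project_name' is an assumption.
from openpype.pipeline.workfile import get_workfile_template_key

template_key = get_workfile_template_key(
    "Modeling", "maya", project_name="my_project"
)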
+ + Deprecated: + Function will be removed after release version 3.16.* """ - default = "work" - if not task_type or not host_name: - return default - if not project_settings: - if not project_name: - raise ValueError(( - "`get_workfile_template_key` requires to pass" - " one of 'project_name' or 'project_settings' arguments." - )) - project_settings = get_project_settings(project_name) + from openpype.pipeline.workfile import get_workfile_template_key - try: - profiles = ( - project_settings - ["global"] - ["tools"] - ["Workfiles"] - ["workfile_template_profiles"] - ) - except Exception: - profiles = [] - - if not profiles: - return default - - profile_filter = { - "task_types": task_type, - "hosts": host_name - } - profile = filter_profiles(profiles, profile_filter) - if profile: - return profile["workfile_template"] or default - return default + return get_workfile_template_key( + task_type, host_name, project_name, project_settings + ) -# TODO rename function as is not just "work" specific +@deprecated("openpype.pipeline.template_data.get_template_data") def get_workdir_data(project_doc, asset_doc, task_name, host_name): """Prepare data for workdir template filling from entered information. @@ -524,42 +374,19 @@ def get_workdir_data(project_doc, asset_doc, task_name, host_name): Returns: dict: Data prepared for filling workdir template. + + Deprecated: + Function will be removed after release version 3.15.* """ - task_type = asset_doc['data']['tasks'].get(task_name, {}).get('type') - project_task_types = project_doc["config"]["tasks"] - task_code = project_task_types.get(task_type, {}).get("short_name") + from openpype.pipeline.template_data import get_template_data - asset_parents = asset_doc["data"]["parents"] - hierarchy = "/".join(asset_parents) - - parent_name = project_doc["name"] - if asset_parents: - parent_name = asset_parents[-1] - - data = { - "project": { - "name": project_doc["name"], - "code": project_doc["data"].get("code") - }, - "task": { - "name": task_name, - "type": task_type, - "short": task_code, - }, - "asset": asset_doc["name"], - "parent": parent_name, - "app": host_name, - "user": getpass.getuser(), - "hierarchy": hierarchy, - } - - system_general_data = get_system_general_anatomy_data() - data.update(system_general_data) - - return data + return get_template_data( + project_doc, asset_doc, task_name, host_name + ) +@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data") def get_workdir_with_workdir_data( workdir_data, anatomy=None, project_name=None, template_key=None ): @@ -585,28 +412,28 @@ def get_workdir_with_workdir_data( Raises: ValueError: When both `anatomy` and `project_name` are set to None. + + Deprecated: + Function will be removed after release version 3.15.* """ + if not anatomy and not project_name: raise ValueError(( "Missing required arguments one of `project_name` or `anatomy`" " must be entered." 
)) - if not anatomy: - anatomy = Anatomy(project_name) + if not project_name: + project_name = anatomy.project_name - if not template_key: - template_key = get_workfile_template_key( - workdir_data["task"]["type"], - workdir_data["app"], - project_name=workdir_data["project"]["name"] - ) + from openpype.pipeline.workfile import get_workdir_with_workdir_data - anatomy_filled = anatomy.format(workdir_data) - # Output is TemplateResult object which contain useful data - return anatomy_filled[template_key]["folder"] + return get_workdir_with_workdir_data( + workdir_data, project_name, anatomy, template_key + ) +@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data") def get_workdir( project_doc, asset_doc, @@ -633,47 +460,44 @@ def get_workdir( Returns: TemplateResult: Workdir path. + + Deprecated: + Function will be removed after release version 3.15.* """ - if not anatomy: - anatomy = Anatomy(project_doc["name"]) - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, host_name - ) + from openpype.pipeline.workfile import get_workdir # Output is TemplateResult object which contain useful data - return get_workdir_with_workdir_data( - workdir_data, anatomy, template_key=template_key + return get_workdir( + project_doc, + asset_doc, + task_name, + host_name, + anatomy, + template_key ) -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_template_data_from_session") def template_data_from_session(session=None): """ Return dictionary with template from session keys. Args: session (dict, Optional): The Session to use. If not provided use the currently active global Session. + Returns: dict: All available data from session. + + Deprecated: + Function will be removed after release version 3.15.* """ - if session is None: - session = legacy_io.Session + from openpype.pipeline.context_tools import get_template_data_from_session - project_name = session["AVALON_PROJECT"] - project_doc = legacy_io.database[project_name].find_one({ - "type": "project" - }) - asset_doc = legacy_io.database[project_name].find_one({ - "type": "asset", - "name": session["AVALON_ASSET"] - }) - task_name = session["AVALON_TASK"] - host_name = session["AVALON_APP"] - return get_workdir_data(project_doc, asset_doc, task_name, host_name) + return get_template_data_from_session(session) -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.compute_session_changes") def compute_session_changes( session, task=None, asset=None, app=None, template_key=None ): @@ -695,77 +519,48 @@ def compute_session_changes( Returns: dict: The required changes in the Session dictionary. 
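# Sketch of the relocated workdir flow that the wrappers above forward to:
# template data is built from documents first, then the work directory is
# resolved. Project, asset, task and host values are placeholders.
from openpype.client import get_project, get_asset_by_name
from openpype.pipeline.template_data import get_template_data
from openpype.pipeline.workfile import get_workdir_with_workdir_data

project_name = "my_project"
project_doc = get_project(project_name)
asset_doc = get_asset_by_name(project_name, "characterA")

workdir_data = get_template_data(project_doc, asset_doc, "modeling", "maya")
workdir = get_workdir_with_workdir_data(workdir_data, project_name)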
+ Deprecated: + Function will be removed after release version 3.16.* """ - changes = dict() - # If no changes, return directly - if not any([task, asset, app]): - return changes + from openpype.pipeline import legacy_io + from openpype.pipeline.context_tools import compute_session_changes - # Get asset document and asset - asset_document = None - asset_tasks = None - if isinstance(asset, dict): - # Assume asset database document - asset_document = asset - asset_tasks = asset_document.get("data", {}).get("tasks") - asset = asset["name"] + if isinstance(asset, six.string_types): + project_name = legacy_io.active_project() + asset = get_asset_by_name(project_name, asset) - if not asset_document or not asset_tasks: - # Assume asset name - asset_document = legacy_io.find_one( - { - "name": asset, - "type": "asset" - }, - {"data.tasks": True} - ) - assert asset_document, "Asset must exist" - - # Detect any changes compared session - mapping = { - "AVALON_ASSET": asset, - "AVALON_TASK": task, - "AVALON_APP": app, - } - changes = { - key: value - for key, value in mapping.items() - if value and value != session.get(key) - } - if not changes: - return changes - - # Compute work directory (with the temporary changed session so far) - _session = session.copy() - _session.update(changes) - - changes["AVALON_WORKDIR"] = get_workdir_from_session(_session) - - return changes + return compute_session_changes( + session, + asset, + task, + template_key + ) -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_workdir_from_session") def get_workdir_from_session(session=None, template_key=None): - if session is None: - session = legacy_io.Session - project_name = session["AVALON_PROJECT"] - host_name = session["AVALON_APP"] - anatomy = Anatomy(project_name) - template_data = template_data_from_session(session) - anatomy_filled = anatomy.format(template_data) + """Calculate workdir path based on session data. - if not template_key: - task_type = template_data["task"]["type"] - template_key = get_workfile_template_key( - task_type, - host_name, - project_name=project_name - ) - return anatomy_filled[template_key]["folder"] + Args: + session (Union[None, Dict[str, str]]): Session to use. If not passed + current context session is used (from legacy_io). + template_key (Union[str, None]): Precalculate template key to define + workfile template name in Anatomy. + + Returns: + str: Workdir path. + + Deprecated: + Function will be removed after release version 3.16.* + """ + + from openpype.pipeline.context_tools import get_workdir_from_session + + return get_workdir_from_session(session, template_key) -@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.change_current_context") def update_current_task(task=None, asset=None, app=None, template_key=None): """Update active Session to a new task work area. @@ -779,31 +574,21 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): Returns: dict: The changed key, values in the current Session. + Deprecated: + Function will be removed after release version 3.16.* """ - changes = compute_session_changes( - legacy_io.Session, - task=task, - asset=asset, - app=app, - template_key=template_key - ) - # Update the Session and environments. Pop from environments all keys with - # value set to None. 
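# New-style context switch that 'update_current_task' now forwards to; the
# asset and task names are examples only.
from openpype.client import get_asset_by_name
from openpype.pipeline import legacy_io
from openpype.pipeline.context_tools import change_current_context

project_name = legacy_io.active_project()
asset_doc = get_asset_by_name(project_name, "characterA")
changes = change_current_context(asset_doc, "modeling")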
- for key, value in changes.items(): - legacy_io.Session[key] = value - if value is None: - os.environ.pop(key, None) - else: - os.environ[key] = value + from openpype.pipeline import legacy_io + from openpype.pipeline.context_tools import change_current_context - # Emit session change - emit_event("taskChanged", changes.copy()) + project_name = legacy_io.active_project() + if isinstance(asset, six.string_types): + asset = get_asset_by_name(project_name, asset) - return changes + return change_current_context(asset, task, template_key) -@with_pipeline_io +@deprecated("openpype.client.get_workfile_info") def get_workfile_doc(asset_id, task_name, filename, dbcon=None): """Return workfile document for entered context. @@ -819,20 +604,21 @@ def get_workfile_doc(asset_id, task_name, filename, dbcon=None): Returns: dict: Workfile document or None. + + Deprecated: + Function will be removed after release version 3.15.* """ + # Use legacy_io if dbcon is not entered if not dbcon: + from openpype.pipeline import legacy_io dbcon = legacy_io - return dbcon.find_one({ - "type": "workfile", - "parent": asset_id, - "task_name": task_name, - "filename": filename - }) + project_name = dbcon.active_project() + return get_workfile_info(project_name, asset_id, task_name, filename) -@with_pipeline_io +@deprecated def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): """Creates or replace workfile document in mongo. @@ -847,8 +633,13 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and `legacy_io` is used if not entered. """ + + from openpype.pipeline import Anatomy + from openpype.pipeline.template_data import get_template_data + # Use legacy_io if dbcon is not entered if not dbcon: + from openpype.pipeline import legacy_io dbcon = legacy_io # Filter of workfile document @@ -862,12 +653,13 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): doc_data = copy.deepcopy(doc_filter) # Prepare project for workdir data - project_doc = dbcon.find_one({"type": "project"}) - workdir_data = get_workdir_data( + project_name = dbcon.active_project() + project_doc = get_project(project_name) + workdir_data = get_template_data( project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"] ) # Prepare anatomy - anatomy = Anatomy(project_doc["name"]) + anatomy = Anatomy(project_name) # Get workdir path (result is anatomy.TemplateResult) template_workdir = get_workdir_with_workdir_data( workdir_data, anatomy @@ -894,7 +686,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): ) -@with_pipeline_io +@deprecated def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): if not workfile_doc: # TODO add log message @@ -905,6 +697,7 @@ def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): # Use legacy_io if dbcon is not entered if not dbcon: + from openpype.pipeline import legacy_io dbcon = legacy_io # Convert data to mongo modification keys/values @@ -922,673 +715,19 @@ def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): ) -class BuildWorkfile: - """Wrapper for build workfile process. +@deprecated("openpype.pipeline.workfile.BuildWorkfile") +def BuildWorkfile(): + """Build workfile class was moved to workfile pipeline. - Load representations for current context by build presets. Build presets - are host related, since each host has it's loaders. 
+ Deprecated: + Function will be removed after release version 3.16.* """ + from openpype.pipeline.workfile import BuildWorkfile - log = logging.getLogger("BuildWorkfile") + return BuildWorkfile() - @staticmethod - def map_subsets_by_family(subsets): - subsets_by_family = collections.defaultdict(list) - for subset in subsets: - family = subset["data"].get("family") - if not family: - families = subset["data"].get("families") - if not families: - continue - family = families[0] - subsets_by_family[family].append(subset) - return subsets_by_family - - def process(self): - """Main method of this wrapper. - - Building of workfile is triggered and is possible to implement - post processing of loaded containers if necessary. - """ - containers = self.build_workfile() - - return containers - - @with_pipeline_io - def build_workfile(self): - """Prepares and load containers into workfile. - - Loads latest versions of current and linked assets to workfile by logic - stored in Workfile profiles from presets. Profiles are set by host, - filtered by current task name and used by families. - - Each family can specify representation names and loaders for - representations and first available and successful loaded - representation is returned as container. - - At the end you'll get list of loaded containers per each asset. - - loaded_containers [{ - "asset_entity": , - "containers": [, , ...] - }, { - "asset_entity": , - "containers": [, ...] - }, { - ... - }] - """ - from openpype.pipeline import discover_loader_plugins - - # Get current asset name and entity - current_asset_name = legacy_io.Session["AVALON_ASSET"] - current_asset_entity = legacy_io.find_one({ - "type": "asset", - "name": current_asset_name - }) - - # Skip if asset was not found - if not current_asset_entity: - print("Asset entity with name `{}` was not found".format( - current_asset_name - )) - return - - # Prepare available loaders - loaders_by_name = {} - for loader in discover_loader_plugins(): - loader_name = loader.__name__ - if loader_name in loaders_by_name: - raise KeyError( - "Duplicated loader name {0}!".format(loader_name) - ) - loaders_by_name[loader_name] = loader - - # Skip if there are any loaders - if not loaders_by_name: - self.log.warning("There are no registered loaders.") - return - - # Get current task name - current_task_name = legacy_io.Session["AVALON_TASK"] - - # Load workfile presets for task - self.build_presets = self.get_build_presets( - current_task_name, current_asset_entity - ) - - # Skip if there are any presets for task - if not self.build_presets: - self.log.warning( - "Current task `{}` does not have any loading preset.".format( - current_task_name - ) - ) - return - - # Get presets for loading current asset - current_context_profiles = self.build_presets.get("current_context") - # Get presets for loading linked assets - link_context_profiles = self.build_presets.get("linked_assets") - # Skip if both are missing - if not current_context_profiles and not link_context_profiles: - self.log.warning( - "Current task `{}` has empty loading preset.".format( - current_task_name - ) - ) - return - - elif not current_context_profiles: - self.log.warning(( - "Current task `{}` doesn't have any loading" - " preset for it's context." - ).format(current_task_name)) - - elif not link_context_profiles: - self.log.warning(( - "Current task `{}` doesn't have any" - "loading preset for it's linked assets." 
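# The thin 'BuildWorkfile' wrapper above just returns the relocated builder;
# a typical in-host usage sketch, assuming the moved class keeps the same
# 'process' entry point as the legacy implementation removed here.
from openpype.pipeline.workfile import BuildWorkfile

builder = BuildWorkfile()
loaded_containers = builder.process()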
- ).format(current_task_name)) - - # Prepare assets to process by workfile presets - assets = [] - current_asset_id = None - if current_context_profiles: - # Add current asset entity if preset has current context set - assets.append(current_asset_entity) - current_asset_id = current_asset_entity["_id"] - - if link_context_profiles: - # Find and append linked assets if preset has set linked mapping - link_assets = get_linked_assets(current_asset_entity) - if link_assets: - assets.extend(link_assets) - - # Skip if there are no assets. This can happen if only linked mapping - # is set and there are no links for his asset. - if not assets: - self.log.warning( - "Asset does not have linked assets. Nothing to process." - ) - return - - # Prepare entities from database for assets - prepared_entities = self._collect_last_version_repres(assets) - - # Load containers by prepared entities and presets - loaded_containers = [] - # - Current asset containers - if current_asset_id and current_asset_id in prepared_entities: - current_context_data = prepared_entities.pop(current_asset_id) - loaded_data = self.load_containers_by_asset_data( - current_context_data, current_context_profiles, loaders_by_name - ) - if loaded_data: - loaded_containers.append(loaded_data) - - # - Linked assets container - for linked_asset_data in prepared_entities.values(): - loaded_data = self.load_containers_by_asset_data( - linked_asset_data, link_context_profiles, loaders_by_name - ) - if loaded_data: - loaded_containers.append(loaded_data) - - # Return list of loaded containers - return loaded_containers - - @with_pipeline_io - def get_build_presets(self, task_name, asset_doc): - """ Returns presets to build workfile for task name. - - Presets are loaded for current project set in - io.Session["AVALON_PROJECT"], filtered by registered host - and entered task name. - - Args: - task_name (str): Task name used for filtering build presets. - - Returns: - (dict): preset per entered task name - """ - host_name = os.environ["AVALON_APP"] - project_settings = get_project_settings( - legacy_io.Session["AVALON_PROJECT"] - ) - - host_settings = project_settings.get(host_name) or {} - # Get presets for host - wb_settings = host_settings.get("workfile_builder") - if not wb_settings: - # backward compatibility - wb_settings = host_settings.get("workfile_build") or {} - - builder_profiles = wb_settings.get("profiles") - if not builder_profiles: - return None - - task_type = ( - asset_doc - .get("data", {}) - .get("tasks", {}) - .get(task_name, {}) - .get("type") - ) - filter_data = { - "task_types": task_type, - "tasks": task_name - } - return filter_profiles(builder_profiles, filter_data) - - def _filter_build_profiles(self, build_profiles, loaders_by_name): - """ Filter build profiles by loaders and prepare process data. - - Valid profile must have "loaders", "families" and "repre_names" keys - with valid values. - - "loaders" expects list of strings representing possible loaders. - - "families" expects list of strings for filtering - by main subset family. - - "repre_names" expects list of strings for filtering by - representation name. - - Lowered "families" and "repre_names" are prepared for each profile with - all required keys. - - Args: - build_profiles (dict): Profiles for building workfile. - loaders_by_name (dict): Available loaders per name. - - Returns: - (list): Filtered and prepared profiles. 
- """ - valid_profiles = [] - for profile in build_profiles: - # Check loaders - profile_loaders = profile.get("loaders") - if not profile_loaders: - self.log.warning(( - "Build profile has missing loaders configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check if any loader is available - loaders_match = False - for loader_name in profile_loaders: - if loader_name in loaders_by_name: - loaders_match = True - break - - if not loaders_match: - self.log.warning(( - "All loaders from Build profile are not available: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check families - profile_families = profile.get("families") - if not profile_families: - self.log.warning(( - "Build profile is missing families configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check representation names - profile_repre_names = profile.get("repre_names") - if not profile_repre_names: - self.log.warning(( - "Build profile is missing" - " representation names filtering: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Prepare lowered families and representation names - profile["families_lowered"] = [ - fam.lower() for fam in profile_families - ] - profile["repre_names_lowered"] = [ - name.lower() for name in profile_repre_names - ] - - valid_profiles.append(profile) - - return valid_profiles - - def _prepare_profile_for_subsets(self, subsets, profiles): - """Select profile for each subset by it's data. - - Profiles are filtered for each subset individually. - Profile is filtered by subset's family, optionally by name regex and - representation names set in profile. - It is possible to not find matching profile for subset, in that case - subset is skipped and it is possible that none of subsets have - matching profile. - - Args: - subsets (list): Subset documents. - profiles (dict): Build profiles. - - Returns: - (dict) Profile by subset's id. - """ - # Prepare subsets - subsets_by_family = self.map_subsets_by_family(subsets) - - profiles_per_subset_id = {} - for family, subsets in subsets_by_family.items(): - family_low = family.lower() - for profile in profiles: - # Skip profile if does not contain family - if family_low not in profile["families_lowered"]: - continue - - # Precompile name filters as regexes - profile_regexes = profile.get("subset_name_filters") - if profile_regexes: - _profile_regexes = [] - for regex in profile_regexes: - _profile_regexes.append(re.compile(regex)) - profile_regexes = _profile_regexes - - # TODO prepare regex compilation - for subset in subsets: - # Verify regex filtering (optional) - if profile_regexes: - valid = False - for pattern in profile_regexes: - if re.match(pattern, subset["name"]): - valid = True - break - - if not valid: - continue - - profiles_per_subset_id[subset["_id"]] = profile - - # break profiles loop on finding the first matching profile - break - return profiles_per_subset_id - - def load_containers_by_asset_data( - self, asset_entity_data, build_profiles, loaders_by_name - ): - """Load containers for entered asset entity by Build profiles. - - Args: - asset_entity_data (dict): Prepared data with subsets, last version - and representations for specific asset. - build_profiles (dict): Build profiles. - loaders_by_name (dict): Available loaders per name. - - Returns: - (dict) Output contains asset document and loaded containers. 
- """ - - # Make sure all data are not empty - if not asset_entity_data or not build_profiles or not loaders_by_name: - return - - asset_entity = asset_entity_data["asset_entity"] - - valid_profiles = self._filter_build_profiles( - build_profiles, loaders_by_name - ) - if not valid_profiles: - self.log.warning( - "There are not valid Workfile profiles. Skipping process." - ) - return - - self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) - - subsets_by_id = {} - version_by_subset_id = {} - repres_by_version_id = {} - for subset_id, in_data in asset_entity_data["subsets"].items(): - subset_entity = in_data["subset_entity"] - subsets_by_id[subset_entity["_id"]] = subset_entity - - version_data = in_data["version"] - version_entity = version_data["version_entity"] - version_by_subset_id[subset_id] = version_entity - repres_by_version_id[version_entity["_id"]] = ( - version_data["repres"] - ) - - if not subsets_by_id: - self.log.warning("There are not subsets for asset {0}".format( - asset_entity["name"] - )) - return - - profiles_per_subset_id = self._prepare_profile_for_subsets( - subsets_by_id.values(), valid_profiles - ) - if not profiles_per_subset_id: - self.log.warning("There are not valid subsets.") - return - - valid_repres_by_subset_id = collections.defaultdict(list) - for subset_id, profile in profiles_per_subset_id.items(): - profile_repre_names = profile["repre_names_lowered"] - - version_entity = version_by_subset_id[subset_id] - version_id = version_entity["_id"] - repres = repres_by_version_id[version_id] - for repre in repres: - repre_name_low = repre["name"].lower() - if repre_name_low in profile_repre_names: - valid_repres_by_subset_id[subset_id].append(repre) - - # DEBUG message - msg = "Valid representations for Asset: `{}`".format( - asset_entity["name"] - ) - for subset_id, repres in valid_repres_by_subset_id.items(): - subset = subsets_by_id[subset_id] - msg += "\n# Subset Name/ID: `{}`/{}".format( - subset["name"], subset_id - ) - for repre in repres: - msg += "\n## Repre name: `{}`".format(repre["name"]) - - self.log.debug(msg) - - containers = self._load_containers( - valid_repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ) - - return { - "asset_entity": asset_entity, - "containers": containers - } - - @with_pipeline_io - def _load_containers( - self, repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ): - """Real load by collected data happens here. - - Loading of representations per subset happens here. Each subset can - loads one representation. Loading is tried in specific order. - Representations are tried to load by names defined in configuration. - If subset has representation matching representation name each loader - is tried to load it until any is successful. If none of them was - successful then next representation name is tried. - Subset process loop ends when any representation is loaded or - all matching representations were already tried. - - Args: - repres_by_subset_id (dict): Available representations mapped - by their parent (subset) id. - subsets_by_id (dict): Subset documents mapped by their id. - profiles_per_subset_id (dict): Build profiles mapped by subset id. - loaders_by_name (dict): Available loaders per name. - - Returns: - (list) Objects of loaded containers. - """ - from openpype.pipeline import ( - IncompatibleLoaderError, - load_container, - ) - - loaded_containers = [] - - # Get subset id order from build presets. 
- build_presets = self.build_presets.get("current_context", []) - build_presets += self.build_presets.get("linked_assets", []) - subset_ids_ordered = [] - for preset in build_presets: - for preset_family in preset["families"]: - for id, subset in subsets_by_id.items(): - if preset_family not in subset["data"].get("families", []): - continue - - subset_ids_ordered.append(id) - - # Order representations from subsets. - print("repres_by_subset_id", repres_by_subset_id) - representations_ordered = [] - representations = [] - for id in subset_ids_ordered: - for subset_id, repres in repres_by_subset_id.items(): - if repres in representations: - continue - - if id == subset_id: - representations_ordered.append((subset_id, repres)) - representations.append(repres) - - print("representations", representations) - - # Load ordered representations. - for subset_id, repres in representations_ordered: - subset_name = subsets_by_id[subset_id]["name"] - - profile = profiles_per_subset_id[subset_id] - loaders_last_idx = len(profile["loaders"]) - 1 - repre_names_last_idx = len(profile["repre_names_lowered"]) - 1 - - repre_by_low_name = { - repre["name"].lower(): repre for repre in repres - } - - is_loaded = False - for repre_name_idx, profile_repre_name in enumerate( - profile["repre_names_lowered"] - ): - # Break iteration if representation was already loaded - if is_loaded: - break - - repre = repre_by_low_name.get(profile_repre_name) - if not repre: - continue - - for loader_idx, loader_name in enumerate(profile["loaders"]): - if is_loaded: - break - - loader = loaders_by_name.get(loader_name) - if not loader: - continue - try: - container = load_container( - loader, - repre["_id"], - name=subset_name - ) - loaded_containers.append(container) - is_loaded = True - - except Exception as exc: - if exc == IncompatibleLoaderError: - self.log.info(( - "Loader `{}` is not compatible with" - " representation `{}`" - ).format(loader_name, repre["name"])) - - else: - self.log.error( - "Unexpected error happened during loading", - exc_info=True - ) - - msg = "Loading failed." - if loader_idx < loaders_last_idx: - msg += " Trying next loader." - elif repre_name_idx < repre_names_last_idx: - msg += ( - " Loading of subset `{}` was not successful." - ).format(subset_name) - else: - msg += " Trying next representation." - self.log.info(msg) - - return loaded_containers - - @with_pipeline_io - def _collect_last_version_repres(self, asset_entities): - """Collect subsets, versions and representations for asset_entities. - - Args: - asset_entities (list): Asset entities for which want to find data - - Returns: - (dict): collected entities - - Example output: - ``` - { - {Asset ID}: { - "asset_entity": , - "subsets": { - {Subset ID}: { - "subset_entity": , - "version": { - "version_entity": , - "repres": [ - , , ... - ] - } - }, - ... - } - }, - ... 
- } - output[asset_id]["subsets"][subset_id]["version"]["repres"] - ``` - """ - - if not asset_entities: - return {} - - asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities} - - subsets = list(legacy_io.find({ - "type": "subset", - "parent": {"$in": list(asset_entity_by_ids.keys())} - })) - subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} - - sorted_versions = list(legacy_io.find({ - "type": "version", - "parent": {"$in": list(subset_entity_by_ids.keys())} - }).sort("name", -1)) - - subset_id_with_latest_version = [] - last_versions_by_id = {} - for version in sorted_versions: - subset_id = version["parent"] - if subset_id in subset_id_with_latest_version: - continue - subset_id_with_latest_version.append(subset_id) - last_versions_by_id[version["_id"]] = version - - repres = legacy_io.find({ - "type": "representation", - "parent": {"$in": list(last_versions_by_id.keys())} - }) - - output = {} - for repre in repres: - version_id = repre["parent"] - version = last_versions_by_id[version_id] - - subset_id = version["parent"] - subset = subset_entity_by_ids[subset_id] - - asset_id = subset["parent"] - asset = asset_entity_by_ids[asset_id] - - if asset_id not in output: - output[asset_id] = { - "asset_entity": asset, - "subsets": {} - } - - if subset_id not in output[asset_id]["subsets"]: - output[asset_id]["subsets"][subset_id] = { - "subset_entity": subset, - "version": { - "version_entity": version, - "repres": [] - } - } - - output[asset_id]["subsets"][subset_id]["version"]["repres"].append( - repre - ) - - return output - - -@with_pipeline_io +@deprecated("openpype.pipeline.create.get_legacy_creator_by_name") def get_creator_by_name(creator_name, case_sensitive=False): """Find creator plugin by name. @@ -1599,32 +738,27 @@ def get_creator_by_name(creator_name, case_sensitive=False): Returns: Creator: Return first matching plugin or `None`. + + Deprecated: + Function will be removed after release version 3.16.* """ - from openpype.pipeline import discover_legacy_creator_plugins + from openpype.pipeline.create import get_legacy_creator_by_name - # Lower input creator name if is not case sensitive - if not case_sensitive: - creator_name = creator_name.lower() - - for creator_plugin in discover_legacy_creator_plugins(): - _creator_name = creator_plugin.__name__ - - # Lower creator plugin name if is not case sensitive - if not case_sensitive: - _creator_name = _creator_name.lower() - - if _creator_name == creator_name: - return creator_plugin - return None + return get_legacy_creator_by_name(creator_name, case_sensitive) -@with_pipeline_io +@deprecated def change_timer_to_current_context(): """Called after context change to change timers. - TODO: - - use TimersManager's static method instead of reimplementing it here + Deprecated: + This method is specific for TimersManager module so please use the + functionality from there. 
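# Direct call to the relocated creator lookup used by 'get_creator_by_name'
# above; the plugin name is an example only.
from openpype.pipeline.create import get_legacy_creator_by_name

creator_plugin = get_legacy_creator_by_name(
    "CreateRender", case_sensitive=False
)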
Function will be removed after release + version 3.15.* """ + + from openpype.pipeline import legacy_io + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") if not webserver_url: log.warning("Couldn't find webserver url")
@@ -1667,6 +801,7 @@ def _get_task_context_data_for_anatomy( """ if anatomy is None: + from openpype.pipeline import Anatomy anatomy = Anatomy(project_doc["name"]) asset_name = asset_doc["name"]
@@ -1711,6 +846,8 @@ def _get_task_context_data_for_anatomy( return data +@deprecated( + "openpype.pipeline.workfile.get_custom_workfile_template_by_context") def get_custom_workfile_template_by_context( template_profiles, project_doc, asset_doc, task_name, anatomy=None ):
@@ -1732,9 +869,13 @@ def get_custom_workfile_template_by_context( Returns: str: Path to template or None if none of profiles match current context. (Existence of formatted path is not validated.) + + Deprecated: + Function will be removed after release version 3.16.* """ if anatomy is None: + from openpype.pipeline import Anatomy anatomy = Anatomy(project_doc["name"]) # get project, asset, task anatomy context data
@@ -1763,6 +904,9 @@ def get_custom_workfile_template_by_context( return None +@deprecated( + "openpype.pipeline.workfile.get_custom_workfile_template_by_string_context" +) def get_custom_workfile_template_by_string_context( template_profiles, project_name, asset_name, task_name, dbcon=None, anatomy=None
@@ -1786,44 +930,31 @@ def get_custom_workfile_template_by_string_context( Returns: str: Path to template or None if none of profiles match current context. (Existence of formatted path is not validated.) + + Deprecated: + Function will be removed after release version 3.16.* """ - if dbcon is None: - from openpype.pipeline import AvalonMongoDB + if not project_name and anatomy is not None: + project_name = anatomy.project_name - dbcon = AvalonMongoDB() + if not project_name and dbcon is not None: + project_name = dbcon.active_project() - dbcon.install() + if not project_name: + raise ValueError("Can't determine project") - if dbcon.Session["AVALON_PROJECT"] != project_name: - dbcon.Session["AVALON_PROJECT"] = project_name - - project_doc = dbcon.find_one( - {"type": "project"}, - # All we need is "name" and "data.code" keys - { - "name": 1, - "data.code": 1 - } - ) - asset_doc = dbcon.find_one( - { - "type": "asset", - "name": asset_name - }, - # All we need is "name" and "data.tasks" keys - { - "name": 1, - "data.tasks": 1 - } - ) + project_doc = get_project(project_name, fields=["name", "data.code"]) + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["name", "data.tasks"]) return get_custom_workfile_template_by_context( template_profiles, project_doc, asset_doc, task_name, anatomy )
-@with_pipeline_io +@deprecated("openpype.pipeline.context_tools.get_custom_workfile_template") def get_custom_workfile_template(template_profiles): """Filter and fill workfile template profiles by current context.
@@ -1836,8 +967,13 @@ def get_custom_workfile_template( Returns: str: Path to template or None if none of profiles match current context. (Existence of formatted path is not validated.)
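# Rough sketch of calling the relocated function that these wrappers forward
# to; 'template_profiles' would come from project settings and is left empty
# here, the other values are placeholders.
from openpype.client import get_project, get_asset_by_name
from openpype.pipeline.workfile import get_custom_workfile_template_by_context

project_name = "my_project"
project_doc = get_project(project_name, fields=["name", "data.code"])
asset_doc = get_asset_by_name(
    project_name, "characterA", fields=["name", "data.tasks"]
)
template_profiles = []
template_path = get_custom_workfile_template_by_context(
    template_profiles, project_doc, asset_doc, "modeling"
)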
+ + Deprecated: + Function will be removed after release version 3.16.* """ + from openpype.pipeline import legacy_io + return get_custom_workfile_template_by_string_context( template_profiles, legacy_io.Session["AVALON_PROJECT"], @@ -1847,6 +983,7 @@ def get_custom_workfile_template(template_profiles): ) +@deprecated("openpype.pipeline.workfile.get_last_workfile_with_version") def get_last_workfile_with_version( workdir, file_template, fill_data, extensions ): @@ -1861,79 +998,19 @@ def get_last_workfile_with_version( Returns: tuple: Last workfile with version if there is any otherwise returns (None, None). + + Deprecated: + Function will be removed after release version 3.16.* """ - if not os.path.exists(workdir): - return None, None - # Fast match on extension - filenames = [ - filename - for filename in os.listdir(workdir) - if os.path.splitext(filename)[1] in extensions - ] + from openpype.pipeline.workfile import get_last_workfile_with_version - # Build template without optionals, version to digits only regex - # and comment to any definable value. - _ext = [] - for ext in extensions: - if not ext.startswith("."): - ext = "." + ext - # Escape dot for regex - ext = "\\" + ext - _ext.append(ext) - ext_expression = "(?:" + "|".join(_ext) + ")" - - # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end - file_template = re.sub(r"\.?{ext}", ext_expression, file_template) - # Replace optional keys with optional content regex - file_template = re.sub(r"<.*?>", r".*?", file_template) - # Replace `{version}` with group regex - file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) - file_template = re.sub(r"{comment.*?}", r".+?", file_template) - file_template = StringTemplate.format_strict_template( - file_template, fill_data + return get_last_workfile_with_version( + workdir, file_template, fill_data, extensions ) - # Match with ignore case on Windows due to the Windows - # OS not being case-sensitive. This avoids later running - # into the error that the file did exist if it existed - # with a different upper/lower-case. - kwargs = {} - if platform.system().lower() == "windows": - kwargs["flags"] = re.IGNORECASE - - # Get highest version among existing matching files - version = None - output_filenames = [] - for filename in sorted(filenames): - match = re.match(file_template, filename, **kwargs) - if not match: - continue - - file_version = int(match.group(1)) - if version is None or file_version > version: - output_filenames[:] = [] - version = file_version - - if file_version == version: - output_filenames.append(filename) - - output_filename = None - if output_filenames: - if len(output_filenames) == 1: - output_filename = output_filenames[0] - else: - last_time = None - for _output_filename in output_filenames: - full_path = os.path.join(workdir, _output_filename) - mod_time = os.path.getmtime(full_path) - if last_time is None or last_time < mod_time: - output_filename = _output_filename - last_time = mod_time - - return output_filename, version - +@deprecated("openpype.pipeline.workfile.get_last_workfile") def get_last_workfile( workdir, file_template, fill_data, extensions, full_path=False ): @@ -1950,28 +1027,22 @@ def get_last_workfile( Returns: str: Last or first workfile as filename of full path to filename. 
+ + Deprecated: + Function will be removed after release version 3.16.* """ - filename, version = get_last_workfile_with_version( - workdir, file_template, fill_data, extensions + + from openpype.pipeline.workfile import get_last_workfile + + return get_last_workfile( + workdir, file_template, fill_data, extensions, full_path ) - if filename is None: - data = copy.deepcopy(fill_data) - data["version"] = 1 - data.pop("comment", None) - if not data.get("ext"): - data["ext"] = extensions[0] - data["ext"] = data["ext"].replace('.', '') - filename = StringTemplate.format_strict_template(file_template, data) - - if full_path: - return os.path.normpath(os.path.join(workdir, filename)) - - return filename -@with_pipeline_io -def get_linked_ids_for_representations(project_name, repre_ids, dbcon=None, - link_type=None, max_depth=0): +@deprecated("openpype.client.get_linked_representation_id") +def get_linked_ids_for_representations( + project_name, repre_ids, dbcon=None, link_type=None, max_depth=0 +): """Returns list of linked ids of particular type (if provided). Goes from representations to version, back to representations @@ -1982,104 +1053,25 @@ def get_linked_ids_for_representations(project_name, repre_ids, dbcon=None, with Session. link_type (str): ['reference', '..] max_depth (int): limit how many levels of recursion + Returns: (list) of ObjectId - linked representations + + Deprecated: + Function will be removed after release version 3.16.* """ - # Create new dbcon if not passed and use passed project name - if not dbcon: - from openpype.pipeline import AvalonMongoDB - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name - # Validate that passed dbcon has same project - elif dbcon.Session["AVALON_PROJECT"] != project_name: - raise ValueError("Passed connection does not have right project") + + from openpype.client import get_linked_representation_id if not isinstance(repre_ids, list): repre_ids = [repre_ids] - version_ids = dbcon.distinct("parent", { - "_id": {"$in": repre_ids}, - "type": "representation" - }) - - match = { - "_id": {"$in": version_ids}, - "type": "version" - } - - graph_lookup = { - "from": project_name, - "startWith": "$data.inputLinks.id", - "connectFromField": "data.inputLinks.id", - "connectToField": "_id", - "as": "outputs_recursive", - "depthField": "depth" - } - if max_depth != 0: - # We offset by -1 since 0 basically means no recursion - # but the recursion only happens after the initial lookup - # for outputs. - graph_lookup["maxDepth"] = max_depth - 1 - - pipeline_ = [ - # Match - {"$match": match}, - # Recursive graph lookup for inputs - {"$graphLookup": graph_lookup} - ] - - result = dbcon.aggregate(pipeline_) - referenced_version_ids = _process_referenced_pipeline_result(result, - link_type) - - ref_ids = dbcon.distinct( - "_id", - filter={ - "parent": {"$in": list(referenced_version_ids)}, - "type": "representation" - } - ) - - return list(ref_ids) - - -def _process_referenced_pipeline_result(result, link_type): - """Filters result from pipeline for particular link_type. - - Pipeline cannot use link_type directly in a query. 
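# Example values for the two relocated workfile helpers above; the template
# and fill data follow the usual Anatomy conventions but are simplified here.
from openpype.pipeline.workfile import (
    get_last_workfile,
    get_last_workfile_with_version,
)

workdir = "/projects/my_project/characterA/work/modeling"
file_template = "{asset}_{task[name]}_v{version:0>3}<_{comment}>.{ext}"
fill_data = {
    "asset": "characterA",
    "task": {"name": "modeling"},
    "ext": "ma",
}
extensions = [".ma", ".mb"]

filename, version = get_last_workfile_with_version(
    workdir, file_template, fill_data, extensions
)
# Full-path variant; falls back to a first-version filename when the work
# directory holds no matching workfile yet.
last_path = get_last_workfile(
    workdir, file_template, fill_data, extensions, full_path=True
)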
- Returns: - (list) - """ - referenced_version_ids = set() - correctly_linked_ids = set() - for item in result: - input_links = item["data"].get("inputLinks", []) - correctly_linked_ids = _filter_input_links(input_links, - link_type, - correctly_linked_ids) - - # outputs_recursive in random order, sort by depth - outputs_recursive = sorted(item.get("outputs_recursive", []), - key=lambda d: d["depth"]) - - for output in outputs_recursive: - if output["_id"] not in correctly_linked_ids: # leaf - continue - - correctly_linked_ids = _filter_input_links( - output["data"].get("inputLinks", []), - link_type, - correctly_linked_ids) - - referenced_version_ids.add(output["_id"]) - - return referenced_version_ids - - -def _filter_input_links(input_links, link_type, correctly_linked_ids): - for input_link in input_links: - if not link_type or input_link["type"] == link_type: - correctly_linked_ids.add(input_link.get("id") or - input_link.get("_id")) # legacy - - return correctly_linked_ids + output = [] + for repre_id in repre_ids: + output.extend(get_linked_representation_id( + project_name, + repre_id=repre_id, + link_type=link_type, + max_depth=max_depth + )) + return output diff --git a/openpype/lib/config.py b/openpype/lib/dateutils.py similarity index 88% rename from openpype/lib/config.py rename to openpype/lib/dateutils.py index 57e8efa57d..68cd1d1c5b 100644 --- a/openpype/lib/config.py +++ b/openpype/lib/dateutils.py @@ -76,7 +76,20 @@ def get_datetime_data(datetime_obj=None): } -def get_formatted_current_time(): - return datetime.datetime.now().strftime( +def get_timestamp(datetime_obj=None): + """Get standardized timestamp from datetime object. + + Args: + datetime_obj (datetime.datetime): Object of datetime. Current time + is used if not passed. + """ + + if datetime_obj is None: + datetime_obj = datetime.datetime.now() + return datetime_obj.strftime( "%Y%m%dT%H%M%SZ" ) + + +def get_formatted_current_time(): + return get_timestamp() diff --git a/openpype/lib/delivery.py b/openpype/lib/delivery.py index ffcfe9fa4d..efb542de75 100644 --- a/openpype/lib/delivery.py +++ b/openpype/lib/delivery.py @@ -1,81 +1,113 @@ """Functions useful for delivery action or loader""" import os import shutil -import glob -import clique -import collections - -from .path_templates import ( - StringTemplate, - TemplateUnsolved, -) +import functools +import warnings +class DeliveryDeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." 
+ ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", DeliveryDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=DeliveryDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + +@deprecated("openpype.lib.path_tools.collect_frames") def collect_frames(files): + """Returns dict of source path and its frame, if from sequence + + Uses clique as most precise solution, used when anatomy template that + created files is not known. + + Assumption is that frames are separated by '.', negative frames are not + allowed. + + Args: + files(list) or (set with single value): list of source paths + + Returns: + (dict): {'/asset/subset_v001.0001.png': '0001', ....} + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. """ - Returns dict of source path and its frame, if from sequence - Uses clique as most precise solution, used when anatomy template that - created files is not known. + from .path_tools import collect_frames - Assumption is that frames are separated by '.', negative frames are not - allowed. + return collect_frames(files) - Args: - files(list) or (set with single value): list of source paths - Returns: - (dict): {'/asset/subset_v001.0001.png': '0001', ....} + +@deprecated("openpype.lib.path_tools.format_file_size") +def sizeof_fmt(num, suffix=None): + """Returns formatted string with size in appropriate unit + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. """ - patterns = [clique.PATTERNS["frames"]] - collections, remainder = clique.assemble(files, minimum_items=1, - patterns=patterns) - sources_and_frames = {} - if collections: - for collection in collections: - src_head = collection.head - src_tail = collection.tail - - for index in collection.indexes: - src_frame = collection.format("{padding}") % index - src_file_name = "{}{}{}".format(src_head, src_frame, - src_tail) - sources_and_frames[src_file_name] = src_frame - else: - sources_and_frames[remainder.pop()] = None - - return sources_and_frames - - -def sizeof_fmt(num, suffix='B'): - """Returns formatted string with size in appropriate unit""" - for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: - if abs(num) < 1024.0: - return "%3.1f%s%s" % (num, unit, suffix) - num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) + from .path_tools import format_file_size + return format_file_size(num, suffix) +@deprecated("openpype.pipeline.load.get_representation_path_with_anatomy") def path_from_representation(representation, anatomy): - try: - template = representation["data"]["template"] + """Get representation path using representation document and anatomy. - except KeyError: - return None + Args: + representation (Dict[str, Any]): Representation document. + anatomy (Anatomy): Project anatomy. - try: - context = representation["context"] - context["root"] = anatomy.roots - path = StringTemplate.format_strict_template(template, context) - return os.path.normpath(path) + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. 
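# The path helpers above now live in 'openpype.lib.path_tools'; a small
# sketch of the direct calls named in the deprecation messages.
from openpype.lib.path_tools import collect_frames, format_file_size

frames_by_path = collect_frames([
    "/asset/subset_v001.0001.png",
    "/asset/subset_v001.0002.png",
])
# -> {"/asset/subset_v001.0001.png": "0001", ...}

readable_size = format_file_size(123456789)  # human readable size string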
+ """ - except TemplateUnsolved: - # Template references unavailable data - return None + from openpype.pipeline.load import get_representation_path_with_anatomy - return path + return get_representation_path_with_anatomy(representation, anatomy) +@deprecated def copy_file(src_path, dst_path): """Hardlink file if possible(to save space), copy if not""" from openpype.lib import create_hard_link # safer importing @@ -91,131 +123,96 @@ def copy_file(src_path, dst_path): shutil.copyfile(src_path, dst_path) +@deprecated("openpype.pipeline.delivery.get_format_dict") def get_format_dict(anatomy, location_path): """Returns replaced root values from user provider value. - Args: - anatomy (Anatomy) - location_path (str): user provided value - Returns: - (dict): prepared for formatting of a template + Args: + anatomy (Anatomy) + location_path (str): user provided value + + Returns: + (dict): prepared for formatting of a template + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. """ - format_dict = {} - if location_path: - location_path = location_path.replace("\\", "/") - root_names = anatomy.root_names_from_templates( - anatomy.templates["delivery"] - ) - if root_names is None: - format_dict["root"] = location_path - else: - format_dict["root"] = {} - for name in root_names: - format_dict["root"][name] = location_path - return format_dict + + from openpype.pipeline.delivery import get_format_dict + + return get_format_dict(anatomy, location_path) +@deprecated("openpype.pipeline.delivery.check_destination_path") def check_destination_path(repre_id, anatomy, anatomy_data, datetime_data, template_name): """ Try to create destination path based on 'template_name'. - In the case that path cannot be filled, template contains unmatched - keys, provide error message to filter out repre later. + In the case that path cannot be filled, template contains unmatched + keys, provide error message to filter out repre later. - Args: - anatomy (Anatomy) - anatomy_data (dict): context to fill anatomy - datetime_data (dict): values with actual date - template_name (str): to pick correct delivery template - Returns: - (collections.defauldict): {"TYPE_OF_ERROR":"ERROR_DETAIL"} + Args: + anatomy (Anatomy) + anatomy_data (dict): context to fill anatomy + datetime_data (dict): values with actual date + template_name (str): to pick correct delivery template + + Returns: + (collections.defauldict): {"TYPE_OF_ERROR":"ERROR_DETAIL"} + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. """ - anatomy_data.update(datetime_data) - anatomy_filled = anatomy.format_all(anatomy_data) - dest_path = anatomy_filled["delivery"][template_name] - report_items = collections.defaultdict(list) - if not dest_path.solved: - msg = ( - "Missing keys in Representation's context" - " for anatomy template \"{}\"." - ).format(template_name) + from openpype.pipeline.delivery import check_destination_path - sub_msg = ( - "Representation: {}
" - ).format(repre_id) - - if dest_path.missing_keys: - keys = ", ".join(dest_path.missing_keys) - sub_msg += ( - "- Missing keys: \"{}\"
" - ).format(keys) - - if dest_path.invalid_types: - items = [] - for key, value in dest_path.invalid_types.items(): - items.append("\"{}\" {}".format(key, str(value))) - - keys = ", ".join(items) - sub_msg += ( - "- Invalid value DataType: \"{}\"
" - ).format(keys) - - report_items[msg].append(sub_msg) - - return report_items + return check_destination_path( + repre_id, + anatomy, + anatomy_data, + datetime_data, + template_name + ) +@deprecated("openpype.pipeline.delivery.deliver_single_file") def process_single_file( src_path, repre, anatomy, template_name, anatomy_data, format_dict, report_items, log ): """Copy single file to calculated path based on template - Args: - src_path(str): path of source representation file - _repre (dict): full repre, used only in process_sequence, here only - as to share same signature - anatomy (Anatomy) - template_name (string): user selected delivery template name - anatomy_data (dict): data from repre to fill anatomy with - format_dict (dict): root dictionary with names and values - report_items (collections.defaultdict): to return error messages - log (Logger): for log printing - Returns: - (collections.defaultdict , int) + Args: + src_path(str): path of source representation file + _repre (dict): full repre, used only in process_sequence, here only + as to share same signature + anatomy (Anatomy) + template_name (string): user selected delivery template name + anatomy_data (dict): data from repre to fill anatomy with + format_dict (dict): root dictionary with names and values + report_items (collections.defaultdict): to return error messages + log (Logger): for log printing + + Returns: + (collections.defaultdict , int) + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. """ - # Make sure path is valid for all platforms - src_path = os.path.normpath(src_path.replace("\\", "/")) - if not os.path.exists(src_path): - msg = "{} doesn't exist for {}".format(src_path, repre["_id"]) - report_items["Source file was not found"].append(msg) - return report_items, 0 + from openpype.pipeline.delivery import deliver_single_file - anatomy_filled = anatomy.format(anatomy_data) - if format_dict: - template_result = anatomy_filled["delivery"][template_name] - delivery_path = template_result.rootless.format(**format_dict) - else: - delivery_path = anatomy_filled["delivery"][template_name] - - # Backwards compatibility when extension contained `.` - delivery_path = delivery_path.replace("..", ".") - # Make sure path is valid for all platforms - delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) - - delivery_folder = os.path.dirname(delivery_path) - if not os.path.exists(delivery_folder): - os.makedirs(delivery_folder) - - log.debug("Copying single: {} -> {}".format(src_path, delivery_path)) - copy_file(src_path, delivery_path) - - return report_items, 1 + return deliver_single_file( + src_path, repre, anatomy, template_name, anatomy_data, format_dict, + report_items, log + ) +@deprecated("openpype.pipeline.delivery.deliver_sequence") def process_sequence( src_path, repre, anatomy, template_name, anatomy_data, format_dict, report_items, log @@ -223,128 +220,33 @@ def process_sequence( """ For Pype2(mainly - works in 3 too) where representation might not contain files. - Uses listing physical files (not 'files' on repre as a)might not be - present, b)might not be reliable for representation and copying them. + Uses listing physical files (not 'files' on repre as a)might not be + present, b)might not be reliable for representation and copying them. - TODO Should be refactored when files are sufficient to drive all - representations. + TODO Should be refactored when files are sufficient to drive all + representations. 
- Args: - src_path(str): path of source representation file - repre (dict): full representation - anatomy (Anatomy) - template_name (string): user selected delivery template name - anatomy_data (dict): data from repre to fill anatomy with - format_dict (dict): root dictionary with names and values - report_items (collections.defaultdict): to return error messages - log (Logger): for log printing - Returns: - (collections.defaultdict , int) + Args: + src_path(str): path of source representation file + repre (dict): full representation + anatomy (Anatomy) + template_name (string): user selected delivery template name + anatomy_data (dict): data from repre to fill anatomy with + format_dict (dict): root dictionary with names and values + report_items (collections.defaultdict): to return error messages + log (Logger): for log printing + + Returns: + (collections.defaultdict , int) + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. """ - src_path = os.path.normpath(src_path.replace("\\", "/")) - def hash_path_exist(myPath): - res = myPath.replace('#', '*') - glob_search_results = glob.glob(res) - if len(glob_search_results) > 0: - return True - return False + from openpype.pipeline.delivery import deliver_sequence - if not hash_path_exist(src_path): - msg = "{} doesn't exist for {}".format(src_path, - repre["_id"]) - report_items["Source file was not found"].append(msg) - return report_items, 0 - - delivery_templates = anatomy.templates.get("delivery") or {} - delivery_template = delivery_templates.get(template_name) - if delivery_template is None: - msg = ( - "Delivery template \"{}\" in anatomy of project \"{}\"" - " was not found" - ).format(template_name, anatomy.project_name) - report_items[""].append(msg) - return report_items, 0 - - # Check if 'frame' key is available in template which is required - # for sequence delivery - if "{frame" not in delivery_template: - msg = ( - "Delivery template \"{}\" in anatomy of project \"{}\"" - "does not contain '{{frame}}' key to fill. Delivery of sequence" - " can't be processed." - ).format(template_name, anatomy.project_name) - report_items[""].append(msg) - return report_items, 0 - - dir_path, file_name = os.path.split(str(src_path)) - - context = repre["context"] - ext = context.get("ext", context.get("representation")) - - if not ext: - msg = "Source extension not found, cannot find collection" - report_items[msg].append(src_path) - log.warning("{} <{}>".format(msg, context)) - return report_items, 0 - - ext = "." 
+ ext - # context.representation could be .psd - ext = ext.replace("..", ".") - - src_collections, remainder = clique.assemble(os.listdir(dir_path)) - src_collection = None - for col in src_collections: - if col.tail != ext: - continue - - src_collection = col - break - - if src_collection is None: - msg = "Source collection of files was not found" - report_items[msg].append(src_path) - log.warning("{} <{}>".format(msg, src_path)) - return report_items, 0 - - frame_indicator = "@####@" - - anatomy_data["frame"] = frame_indicator - anatomy_filled = anatomy.format(anatomy_data) - - if format_dict: - template_result = anatomy_filled["delivery"][template_name] - delivery_path = template_result.rootless.format(**format_dict) - else: - delivery_path = anatomy_filled["delivery"][template_name] - - delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) - delivery_folder = os.path.dirname(delivery_path) - dst_head, dst_tail = delivery_path.split(frame_indicator) - dst_padding = src_collection.padding - dst_collection = clique.Collection( - head=dst_head, - tail=dst_tail, - padding=dst_padding + return deliver_sequence( + src_path, repre, anatomy, template_name, anatomy_data, format_dict, + report_items, log ) - - if not os.path.exists(delivery_folder): - os.makedirs(delivery_folder) - - src_head = src_collection.head - src_tail = src_collection.tail - uploaded = 0 - for index in src_collection.indexes: - src_padding = src_collection.format("{padding}") % index - src_file_name = "{}{}{}".format(src_head, src_padding, src_tail) - src = os.path.normpath( - os.path.join(dir_path, src_file_name) - ) - - dst_padding = dst_collection.format("{padding}") % index - dst = "{}{}{}".format(dst_head, dst_padding, dst_tail) - log.debug("Copying single: {} -> {}".format(src, dst)) - copy_file(src, dst) - uploaded += 1 - - return report_items, uploaded diff --git a/openpype/lib/events.py b/openpype/lib/events.py index 7bec6ee30d..747761fb3e 100644 --- a/openpype/lib/events.py +++ b/openpype/lib/events.py @@ -1,6 +1,7 @@ """Events holding data about specific event.""" import os import re +import copy import inspect import logging import weakref @@ -11,6 +12,10 @@ except Exception: from openpype.lib.python_2_comp import WeakMethod +class MissingEventSystem(Exception): + pass + + class EventCallback(object): """Callback registered to a topic. @@ -176,16 +181,20 @@ class Event(object): topic (str): Identifier of event. data (Any): Data specific for event. Dictionary is recommended. source (str): Identifier of source. + event_system (EventSystem): Event system in which can be event + triggered. """ + _data = {} - def __init__(self, topic, data=None, source=None): + def __init__(self, topic, data=None, source=None, event_system=None): self._id = str(uuid4()) self._topic = topic if data is None: data = {} self._data = data self._source = source + self._event_system = event_system def __getitem__(self, key): return self._data[key] @@ -199,6 +208,12 @@ class Event(object): @property def source(self): + """Event's source used for triggering callbacks. + + Returns: + Union[str, None]: Source string or None. Source is optional. + """ + return self._source @property @@ -207,32 +222,164 @@ class Event(object): @property def topic(self): + """Event's topic used for triggering callbacks. + + Returns: + str: Topic string. 
+ """ + return self._topic def emit(self): """Emit event and trigger callbacks.""" - StoredCallbacks.emit_event(self) + if self._event_system is None: + raise MissingEventSystem( + "Can't emit event {}. Does not have set event system.".format( + str(repr(self)) + ) + ) + self._event_system.emit_event(self) + def to_data(self): + """Convert Event object to data. -class StoredCallbacks: - _registered_callbacks = [] + Returns: + Dict[str, Any]: Event data. + """ + + return { + "id": self.id, + "topic": self.topic, + "source": self.source, + "data": copy.deepcopy(self.data) + } @classmethod - def add_callback(cls, topic, callback): + def from_data(cls, event_data, event_system=None): + """Create event from data. + + Args: + event_data (Dict[str, Any]): Event data with defined keys. Can be + created using 'to_data' method. + event_system (EventSystem): System to which the event belongs. + + Returns: + Event: Event with attributes from passed data. + """ + + obj = cls( + event_data["topic"], + event_data["data"], + event_data["source"], + event_system + ) + obj._id = event_data["id"] + return obj + + +class EventSystem(object): + """Encapsulate event handling into an object. + + System wraps registered callbacks and triggered events into single object + so it is possible to create mutltiple independent systems that have their + topics and callbacks. + + + """ + + def __init__(self): + self._registered_callbacks = [] + + def add_callback(self, topic, callback): + """Register callback in event system. + + Args: + topic (str): Topic for EventCallback. + callback (Callable): Function or method that will be called + when topic is triggered. + + Returns: + EventCallback: Created callback object which can be used to + stop listening. + """ + callback = EventCallback(topic, callback) - cls._registered_callbacks.append(callback) + self._registered_callbacks.append(callback) return callback - @classmethod - def emit_event(cls, event): + def create_event(self, topic, data, source): + """Create new event which is bound to event system. + + Args: + topic (str): Event topic. + data (dict): Data related to event. + source (str): Source of event. + + Returns: + Event: Object of event. + """ + + return Event(topic, data, source, self) + + def emit(self, topic, data, source): + """Create event based on passed data and emit it. + + This is easiest way how to trigger event in an event system. + + Args: + topic (str): Event topic. + data (dict): Data related to event. + source (str): Source of event. + + Returns: + Event: Created and emitted event. + """ + + event = self.create_event(topic, data, source) + event.emit() + return event + + def emit_event(self, event): + """Emit event object. + + Args: + event (Event): Prepared event with topic and data. + """ + invalid_callbacks = [] - for callback in cls._registered_callbacks: + for callback in self._registered_callbacks: callback.process_event(event) if not callback.is_ref_valid: invalid_callbacks.append(callback) for callback in invalid_callbacks: - cls._registered_callbacks.remove(callback) + self._registered_callbacks.remove(callback) + + +class GlobalEventSystem: + """Event system living in global scope of process. + + This is primarily used in host implementation to trigger events + related to DCC changes or changes of context in the host implementation. 
+ """ + + _global_event_system = None + + @classmethod + def get_global_event_system(cls): + if cls._global_event_system is None: + cls._global_event_system = EventSystem() + return cls._global_event_system + + @classmethod + def add_callback(cls, topic, callback): + event_system = cls.get_global_event_system() + return event_system.add_callback(topic, callback) + + @classmethod + def emit(cls, topic, data, source): + event_system = cls.get_global_event_system() + return event_system.emit(topic, data, source) def register_event_callback(topic, callback): @@ -249,7 +396,8 @@ def register_event_callback(topic, callback): enable/disable listening to a topic or remove the callback from the topic completely. """ - return StoredCallbacks.add_callback(topic, callback) + + return GlobalEventSystem.add_callback(topic, callback) def emit_event(topic, data=None, source=None): @@ -263,6 +411,5 @@ def emit_event(topic, data=None, source=None): Returns: Event: Object of event that was emitted. """ - event = Event(topic, data, source) - event.emit() - return event + + return GlobalEventSystem.emit(topic, data, source) diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py index c3e35772f3..f1f2a4fa0a 100644 --- a/openpype/lib/execute.py +++ b/openpype/lib/execute.py @@ -5,7 +5,7 @@ import platform import json import tempfile -from .log import PypeLogger as Logger +from .log import Logger from .vendor_bin_utils import find_executable # MSDN process creation flag (Windows only) @@ -40,7 +40,7 @@ def execute(args, log_levels = ['DEBUG:', 'INFO:', 'ERROR:', 'WARNING:', 'CRITICAL:'] - log = Logger().get_logger('execute') + log = Logger.get_logger('execute') log.info("Executing ({})".format(" ".join(args))) popen = subprocess.Popen( args, diff --git a/openpype/lib/file_transaction.py b/openpype/lib/file_transaction.py new file mode 100644 index 0000000000..1626bec6b6 --- /dev/null +++ b/openpype/lib/file_transaction.py @@ -0,0 +1,171 @@ +import os +import logging +import sys +import errno +import six + +from openpype.lib import create_hard_link + +# this is needed until speedcopy for linux is fixed +if sys.platform == "win32": + from speedcopy import copyfile +else: + from shutil import copyfile + + +class FileTransaction(object): + """ + + The file transaction is a three step process. + + 1) Rename any existing files to a "temporary backup" during `process()` + 2) Copy the files to final destination during `process()` + 3) Remove any backed up files (*no rollback possible!) during `finalize()` + + Step 3 is done during `finalize()`. If not called the .bak files will + remain on disk. + + These steps try to ensure that we don't overwrite half of any existing + files e.g. if they are currently in use. + + Note: + A regular filesystem is *not* a transactional file system and even + though this implementation tries to produce a 'safe copy' with a + potential rollback do keep in mind that it's inherently unsafe due + to how filesystem works and a myriad of things could happen during + the transaction that break the logic. A file storage could go down, + permissions could be changed, other machines could be moving or writing + files. A lot can happen. + + Warning: + Any folders created during the transfer will not be removed. + + """ + + MODE_COPY = 0 + MODE_HARDLINK = 1 + + def __init__(self, log=None): + + if log is None: + log = logging.getLogger("FileTransaction") + + self.log = log + + # The transfer queue + # todo: make this an actual FIFO queue? 
+ self._transfers = {} + + # Destination file paths that a file was transferred to + self._transferred = [] + + # Backup file location mapping to original locations + self._backup_to_original = {} + + def add(self, src, dst, mode=MODE_COPY): + """Add a new file to transfer queue""" + opts = {"mode": mode} + + src = os.path.abspath(src) + dst = os.path.abspath(dst) + + if dst in self._transfers: + queued_src = self._transfers[dst][0] + if src == queued_src: + self.log.debug("File transfer was already " + "in queue: {} -> {}".format(src, dst)) + return + else: + self.log.warning("File transfer in queue replaced..") + self.log.debug("Removed from queue: " + "{} -> {}".format(queued_src, dst)) + self.log.debug("Added to queue: {} -> {}".format(src, dst)) + + self._transfers[dst] = (src, opts) + + def process(self): + + # Backup any existing files + for dst in self._transfers.keys(): + if os.path.exists(dst): + # Backup original file + # todo: add timestamp or uuid to ensure unique + backup = dst + ".bak" + self._backup_to_original[backup] = dst + self.log.debug("Backup existing file: " + "{} -> {}".format(dst, backup)) + os.rename(dst, backup) + + # Copy the files to transfer + for dst, (src, opts) in self._transfers.items(): + self._create_folder_for_file(dst) + + if opts["mode"] == self.MODE_COPY: + self.log.debug("Copying file ... {} -> {}".format(src, dst)) + copyfile(src, dst) + elif opts["mode"] == self.MODE_HARDLINK: + self.log.debug("Hardlinking file ... {} -> {}".format(src, + dst)) + create_hard_link(src, dst) + + self._transferred.append(dst) + + def finalize(self): + # Delete any backed up files + for backup in self._backup_to_original.keys(): + try: + os.remove(backup) + except OSError: + self.log.error("Failed to remove backup file: " + "{}".format(backup), + exc_info=True) + + def rollback(self): + + errors = 0 + + # Rollback any transferred files + for path in self._transferred: + try: + os.remove(path) + except OSError: + errors += 1 + self.log.error("Failed to rollback created file: " + "{}".format(path), + exc_info=True) + + # Rollback the backups + for backup, original in self._backup_to_original.items(): + try: + os.rename(backup, original) + except OSError: + errors += 1 + self.log.error("Failed to restore original file: " + "{} -> {}".format(backup, original), + exc_info=True) + + if errors: + self.log.error("{} errors occurred during " + "rollback.".format(errors), exc_info=True) + six.reraise(*sys.exc_info()) + + @property + def transferred(self): + """Return the processed transfers destination paths""" + return list(self._transferred) + + @property + def backups(self): + """Return the backup file paths""" + return list(self._backup_to_original.keys()) + + def _create_folder_for_file(self, path): + dirname = os.path.dirname(path) + try: + os.makedirs(dirname) + except OSError as e: + if e.errno == errno.EEXIST: + pass + else: + self.log.critical("An unexpected error occurred.") + six.reraise(*sys.exc_info()) diff --git a/openpype/lib/git_progress.py b/openpype/lib/git_progress.py deleted file mode 100644 index 331b7b6745..0000000000 --- a/openpype/lib/git_progress.py +++ /dev/null @@ -1,86 +0,0 @@ -import git -from tqdm import tqdm - - -class _GitProgress(git.remote.RemoteProgress): - """ Class handling displaying progress during git operations. - - This is using **tqdm** for showing progress bars. 
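Returning to the new FileTransaction class above, a minimal usage sketch (illustrative; the paths are hypothetical):

from openpype.lib.file_transaction import FileTransaction

transaction = FileTransaction()
transaction.add("/tmp/work/render.0001.exr", "/tmp/publish/render.0001.exr")
transaction.add(
    "/tmp/work/render.0002.exr",
    "/tmp/publish/render.0002.exr",
    mode=FileTransaction.MODE_HARDLINK,
)
try:
    transaction.process()
except Exception:
    # Remove transferred files and restore the .bak originals
    transaction.rollback()
    raise
else:
    transaction.finalize()  # removes the temporary .bak backups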
As **GitPython** - is parsing progress directly from git command, it is somehow unreliable - as in some operations it is difficult to get total count of iterations - to display meaningful progress bar. - - """ - _t = None - _code = 0 - _current_status = '' - _current_max = '' - - _description = { - 256: "Checking out files", - 4: "Counting objects", - 128: "Finding sources", - 32: "Receiving objects", - 64: "Resolving deltas", - 16: "Writing objects" - } - - def __init__(self): - super().__init__() - - def __del__(self): - if self._t is not None: - self._t.close() - - def _detroy_tqdm(self): - """ Used to close tqdm when operation ended. - - """ - if self._t is not None: - self._t.close() - self._t = None - - def _check_mask(self, opcode: int) -> bool: - """" Add meaningful description to **GitPython** opcodes. - - :param opcode: OP_MASK opcode - :type opcode: int - :return: String description of opcode - :rtype: str - - .. seealso:: For opcodes look at :class:`git.RemoteProgress` - - """ - if opcode & self.COUNTING: - return self._description.get(self.COUNTING) - elif opcode & self.CHECKING_OUT: - return self._description.get(self.CHECKING_OUT) - elif opcode & self.WRITING: - return self._description.get(self.WRITING) - elif opcode & self.RECEIVING: - return self._description.get(self.RECEIVING) - elif opcode & self.RESOLVING: - return self._description.get(self.RESOLVING) - elif opcode & self.FINDING_SOURCES: - return self._description.get(self.FINDING_SOURCES) - else: - return "Processing" - - def update(self, op_code, cur_count, max_count=None, message=''): - """ Called when git operation update progress. - - .. seealso:: For more details see - :func:`git.objects.submodule.base.Submodule.update` - `Documentation `_ - - """ - code = self._check_mask(op_code) - if self._current_status != code or self._current_max != max_count: - self._current_max = max_count - self._current_status = code - self._detroy_tqdm() - self._t = tqdm(total=max_count) - self._t.set_description(" . {}".format(code)) - - self._t.update(cur_count) diff --git a/openpype/lib/import_utils.py b/openpype/lib/import_utils.py deleted file mode 100644 index e88c07fca6..0000000000 --- a/openpype/lib/import_utils.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -import sys -import importlib -from .log import PypeLogger as Logger - -log = Logger().get_logger(__name__) - - -def discover_host_vendor_module(module_name): - host = os.environ["AVALON_APP"] - pype_root = os.environ["OPENPYPE_REPOS_ROOT"] - main_module = module_name.split(".")[0] - module_path = os.path.join( - pype_root, "hosts", host, "vendor", main_module) - - log.debug( - "Importing module from host vendor path: `{}`".format(module_path)) - - if not os.path.exists(module_path): - log.warning( - "Path not existing: `{}`".format(module_path)) - return None - - sys.path.insert(1, module_path) - return importlib.import_module(module_name) diff --git a/openpype/lib/local_settings.py b/openpype/lib/local_settings.py index 97e99b4b5a..c6c9699240 100644 --- a/openpype/lib/local_settings.py +++ b/openpype/lib/local_settings.py @@ -34,7 +34,7 @@ from openpype.settings import ( get_system_settings ) -from .import validate_mongo_connection +from openpype.client.mongo import validate_mongo_connection _PLACEHOLDER = object() diff --git a/openpype/lib/log.py b/openpype/lib/log.py index 2cdb7ec8e4..26dcd86eec 100644 --- a/openpype/lib/log.py +++ b/openpype/lib/log.py @@ -24,12 +24,13 @@ import traceback import threading import copy -from . 
import Terminal -from .mongo import ( +from openpype.client.mongo import ( MongoEnvNotSet, get_default_components, - OpenPypeMongoConnection + OpenPypeMongoConnection, ) +from . import Terminal + try: import log4mongo from log4mongo.handlers import MongoHandler @@ -41,13 +42,13 @@ except ImportError: USE_UNICODE = hasattr(__builtins__, "unicode") -class PypeStreamHandler(logging.StreamHandler): +class LogStreamHandler(logging.StreamHandler): """ StreamHandler class designed to handle utf errors in python 2.x hosts. """ def __init__(self, stream=None): - super(PypeStreamHandler, self).__init__(stream) + super(LogStreamHandler, self).__init__(stream) self.enabled = True def enable(self): @@ -56,7 +57,6 @@ class PypeStreamHandler(logging.StreamHandler): Used to silence output """ self.enabled = True - pass def disable(self): """ Disable StreamHandler @@ -107,13 +107,13 @@ class PypeStreamHandler(logging.StreamHandler): self.handleError(record) -class PypeFormatter(logging.Formatter): +class LogFormatter(logging.Formatter): DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ]' default_formatter = logging.Formatter(DFT) def __init__(self, formats): - super(PypeFormatter, self).__init__() + super(LogFormatter, self).__init__() self.formatters = {} for loglevel in formats: self.formatters[loglevel] = logging.Formatter(formats[loglevel]) @@ -141,7 +141,7 @@ class PypeFormatter(logging.Formatter): return out -class PypeMongoFormatter(logging.Formatter): +class MongoFormatter(logging.Formatter): DEFAULT_PROPERTIES = logging.LogRecord( '', '', '', '', '', '', '', '').__dict__.keys() @@ -161,7 +161,7 @@ class PypeMongoFormatter(logging.Formatter): 'method': record.funcName, 'lineNumber': record.lineno } - document.update(PypeLogger.get_process_data()) + document.update(Logger.get_process_data()) # Standard document decorated with exception info if record.exc_info is not None: @@ -181,7 +181,7 @@ class PypeMongoFormatter(logging.Formatter): return document -class PypeLogger: +class Logger: DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ] ' DBG = " - { %(name)s }: [ %(message)s ] " INF = ">>> [ %(message)s ] " @@ -239,7 +239,7 @@ class PypeLogger: for handler in logger.handlers: if isinstance(handler, MongoHandler): add_mongo_handler = False - elif isinstance(handler, PypeStreamHandler): + elif isinstance(handler, LogStreamHandler): add_console_handler = False if add_console_handler: @@ -292,7 +292,7 @@ class PypeLogger: "username": components["username"], "password": components["password"], "capped": True, - "formatter": PypeMongoFormatter() + "formatter": MongoFormatter() } if components["port"] is not None: kwargs["port"] = int(components["port"]) @@ -303,10 +303,10 @@ class PypeLogger: @classmethod def _get_console_handler(cls): - formatter = PypeFormatter(cls.FORMAT_FILE) - console_handler = PypeStreamHandler() + formatter = LogFormatter(cls.FORMAT_FILE) + console_handler = LogStreamHandler() - console_handler.set_name("PypeStreamHandler") + console_handler.set_name("LogStreamHandler") console_handler.setFormatter(formatter) return console_handler @@ -417,9 +417,9 @@ class PypeLogger: def get_process_name(cls): """Process name that is like "label" of a process. - Pype's logging can be used from pype itseld of from hosts. Even in Pype - it's good to know if logs are from Pype tray or from pype's event - server. This should help to identify that information. + OpenPype's logging can be used from OpenPyppe itself of from hosts. 
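The PypeLogger to Logger rename above keeps the classmethod interface, so call sites only need to swap the class name; a short sketch:

from openpype.lib.log import Logger

log = Logger.get_logger(__name__)
log.info("Using the renamed logger")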
+ Even in OpenPype process it's good to know if logs are from tray or + from other cli commands. This should help to identify that information. """ if cls._process_name is not None: return cls._process_name @@ -485,23 +485,19 @@ class PypeLogger: return OpenPypeMongoConnection.get_mongo_client() -def timeit(method): - """Print time in function. - - For debugging. +class PypeLogger(Logger): + """Duplicate of 'Logger'. + Deprecated: + Class will be removed after release version 3.16.* """ - log = logging.getLogger() - def timed(*args, **kw): - ts = time.time() - result = method(*args, **kw) - te = time.time() - if 'log_time' in kw: - name = kw.get('log_name', method.__name__.upper()) - kw['log_time'][name] = int((te - ts) * 1000) - else: - log.debug('%r %2.2f ms' % (method.__name__, (te - ts) * 1000)) - print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000)) - return result - return timed + @classmethod + def get_logger(cls, *args, **kwargs): + logger = Logger.get_logger(*args, **kwargs) + # TODO uncomment when replaced most of places + logger.warning(( + "'openpype.lib.PypeLogger' is deprecated class." + " Please use 'openpype.lib.Logger' instead." + )) + return logger diff --git a/openpype/lib/mongo.py b/openpype/lib/mongo.py index c08e76c75c..bb2ee6016a 100644 --- a/openpype/lib/mongo.py +++ b/openpype/lib/mongo.py @@ -1,206 +1,61 @@ -import os -import sys -import time -import logging -import pymongo -import certifi - -if sys.version_info[0] == 2: - from urlparse import urlparse, parse_qs -else: - from urllib.parse import urlparse, parse_qs +import warnings +import functools +from openpype.client.mongo import ( + MongoEnvNotSet, + OpenPypeMongoConnection, +) -class MongoEnvNotSet(Exception): +class MongoDeprecatedWarning(DeprecationWarning): pass -def _decompose_url(url): - """Decompose mongo url to basic components. +def mongo_deprecated(func): + """Mark functions as deprecated. - Used for creation of MongoHandler which expect mongo url components as - separated kwargs. Components are at the end not used as we're setting - connection directly this is just a dumb components for MongoHandler - validation pass. + It will result in a warning being emitted when the function is used. """ - # Use first url from passed url - # - this is because it is possible to pass multiple urls for multiple - # replica sets which would crash on urlparse otherwise - # - please don't use comma in username of password - url = url.split(",")[0] - components = { - "scheme": None, - "host": None, - "port": None, - "username": None, - "password": None, - "auth_db": None - } - result = urlparse(url) - if result.scheme is None: - _url = "mongodb://{}".format(url) - result = urlparse(_url) - - components["scheme"] = result.scheme - components["host"] = result.hostname - try: - components["port"] = result.port - except ValueError: - raise RuntimeError("invalid port specified") - components["username"] = result.username - components["password"] = result.password - - try: - components["auth_db"] = parse_qs(result.query)['authSource'][0] - except KeyError: - # no auth db provided, mongo will use the one we are connecting to - pass - - return components - - -def get_default_components(): - mongo_url = os.environ.get("OPENPYPE_MONGO") - if mongo_url is None: - raise MongoEnvNotSet( - "URL for Mongo logging connection is not set." + @functools.wraps(func) + def new_func(*args, **kwargs): + warnings.simplefilter("always", MongoDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'." 
+ " Function was moved to 'openpype.client.mongo'." + ).format(func.__name__), + category=MongoDeprecatedWarning, + stacklevel=2 ) - return _decompose_url(mongo_url) + return func(*args, **kwargs) + return new_func +@mongo_deprecated +def get_default_components(): + from openpype.client.mongo import get_default_components + + return get_default_components() + + +@mongo_deprecated def should_add_certificate_path_to_mongo_url(mongo_url): - """Check if should add ca certificate to mongo url. + from openpype.client.mongo import should_add_certificate_path_to_mongo_url - Since 30.9.2021 cloud mongo requires newer certificates that are not - available on most of workstation. This adds path to certifi certificate - which is valid for it. To add the certificate path url must have scheme - 'mongodb+srv' or has 'ssl=true' or 'tls=true' in url query. - """ - parsed = urlparse(mongo_url) - query = parse_qs(parsed.query) - lowered_query_keys = set(key.lower() for key in query.keys()) - add_certificate = False - # Check if url 'ssl' or 'tls' are set to 'true' - for key in ("ssl", "tls"): - if key in query and "true" in query["ssl"]: - add_certificate = True - break - - # Check if url contains 'mongodb+srv' - if not add_certificate and parsed.scheme == "mongodb+srv": - add_certificate = True - - # Check if url does already contain certificate path - if add_certificate and "tlscafile" in lowered_query_keys: - add_certificate = False - - return add_certificate + return should_add_certificate_path_to_mongo_url(mongo_url) +@mongo_deprecated def validate_mongo_connection(mongo_uri): - """Check if provided mongodb URL is valid. + from openpype.client.mongo import validate_mongo_connection - Args: - mongo_uri (str): URL to validate. - - Raises: - ValueError: When port in mongo uri is not valid. - pymongo.errors.InvalidURI: If passed mongo is invalid. - pymongo.errors.ServerSelectionTimeoutError: If connection timeout - passed so probably couldn't connect to mongo server. - - """ - client = OpenPypeMongoConnection.create_connection( - mongo_uri, retry_attempts=1 - ) - client.close() + return validate_mongo_connection(mongo_uri) -class OpenPypeMongoConnection: - """Singleton MongoDB connection. - - Keeps MongoDB connections by url. 
- """ - mongo_clients = {} - log = logging.getLogger("OpenPypeMongoConnection") - - @staticmethod - def get_default_mongo_url(): - return os.environ["OPENPYPE_MONGO"] - - @classmethod - def get_mongo_client(cls, mongo_url=None): - if mongo_url is None: - mongo_url = cls.get_default_mongo_url() - - connection = cls.mongo_clients.get(mongo_url) - if connection: - # Naive validation of existing connection - try: - connection.server_info() - with connection.start_session(): - pass - except Exception: - connection = None - - if not connection: - cls.log.debug("Creating mongo connection to {}".format(mongo_url)) - connection = cls.create_connection(mongo_url) - cls.mongo_clients[mongo_url] = connection - - return connection - - @classmethod - def create_connection(cls, mongo_url, timeout=None, retry_attempts=None): - parsed = urlparse(mongo_url) - # Force validation of scheme - if parsed.scheme not in ["mongodb", "mongodb+srv"]: - raise pymongo.errors.InvalidURI(( - "Invalid URI scheme:" - " URI must begin with 'mongodb://' or 'mongodb+srv://'" - )) - - if timeout is None: - timeout = int(os.environ.get("AVALON_TIMEOUT") or 1000) - - kwargs = { - "serverSelectionTimeoutMS": timeout - } - if should_add_certificate_path_to_mongo_url(mongo_url): - kwargs["ssl_ca_certs"] = certifi.where() - - mongo_client = pymongo.MongoClient(mongo_url, **kwargs) - - if retry_attempts is None: - retry_attempts = 3 - - elif not retry_attempts: - retry_attempts = 1 - - last_exc = None - valid = False - t1 = time.time() - for attempt in range(1, retry_attempts + 1): - try: - mongo_client.server_info() - with mongo_client.start_session(): - pass - valid = True - break - - except Exception as exc: - last_exc = exc - if attempt < retry_attempts: - cls.log.warning( - "Attempt {} failed. Retrying... 
".format(attempt) - ) - time.sleep(1) - - if not valid: - raise last_exc - - cls.log.info("Connected to {}, delay {:.3f}s".format( - mongo_url, time.time() - t1 - )) - return mongo_client +__all__ = ( + "MongoEnvNotSet", + "OpenPypeMongoConnection", + "get_default_components", + "should_add_certificate_path_to_mongo_url", + "validate_mongo_connection", +) diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py index 5c40aa4549..b160054e38 100644 --- a/openpype/lib/path_templates.py +++ b/openpype/lib/path_templates.py @@ -6,11 +6,6 @@ import collections import six -from .log import PypeLogger - -log = PypeLogger.get_logger(__name__) - - KEY_PATTERN = re.compile(r"(\{.*?[^{0]*\})") KEY_PADDING_PATTERN = re.compile(r"([^:]+)\S+[><]\S+") SUB_DICT_PATTERN = re.compile(r"([^\[\]]+)") @@ -211,15 +206,28 @@ class StringTemplate(object): if counted_symb > -1: parts = tmp_parts.pop(counted_symb) counted_symb -= 1 + # If part contains only single string keep value + # unchanged if parts: # Remove optional start char parts.pop(0) - if counted_symb < 0: - out_parts = new_parts - else: - out_parts = tmp_parts[counted_symb] - # Store temp parts - out_parts.append(OptionalPart(parts)) + + if not parts: + value = "<>" + elif ( + len(parts) == 1 + and isinstance(parts[0], six.string_types) + ): + value = "<{}>".format(parts[0]) + else: + value = OptionalPart(parts) + + if counted_symb < 0: + out_parts = new_parts + else: + out_parts = tmp_parts[counted_symb] + # Store value + out_parts.append(value) continue if counted_symb < 0: @@ -409,6 +417,19 @@ class TemplateResult(str): self.invalid_types ) + def normalized(self): + """Convert to normalized path.""" + + cls = self.__class__ + return cls( + os.path.normpath(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + class TemplatesResultDict(dict): """Holds and wrap TemplateResults for easy bug report.""" @@ -780,6 +801,7 @@ class OptionalPart: parts(list): Parts of template. Can contain 'str', 'OptionalPart' or 'FormattingPart'. """ + def __init__(self, parts): self._parts = parts diff --git a/openpype/lib/path_tools.py b/openpype/lib/path_tools.py index 851bc872fb..0b6d0a3391 100644 --- a/openpype/lib/path_tools.py +++ b/openpype/lib/path_tools.py @@ -1,19 +1,81 @@ import os import re -import abc -import json import logging -import six import platform +import functools +import warnings -from openpype.settings import get_project_settings - -from .anatomy import Anatomy -from .profiles_filtering import filter_profiles +import clique log = logging.getLogger(__name__) +class PathToolsDeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." 
+ ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", PathToolsDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=PathToolsDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + +def format_file_size(file_size, suffix=None): + """Returns formatted string with size in appropriate unit. + + Args: + file_size (int): Size of file in bytes. + suffix (str): Suffix for formatted size. Default is 'B' (as bytes). + + Returns: + str: Formatted size using proper unit and passed suffix (e.g. 7 MiB). + """ + + if suffix is None: + suffix = "B" + + for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]: + if abs(file_size) < 1024.0: + return "%3.1f%s%s" % (file_size, unit, suffix) + file_size /= 1024.0 + return "%.1f%s%s" % (file_size, "Yi", suffix) + + def create_hard_link(src_path, dst_path): """Create hardlink of file. @@ -50,6 +112,43 @@ def create_hard_link(src_path, dst_path): ) +def collect_frames(files): + """Returns dict of source path and its frame, if from sequence + + Uses clique as most precise solution, used when anatomy template that + created files is not known. + + Assumption is that frames are separated by '.', negative frames are not + allowed. + + Args: + files(list) or (set with single value): list of source paths + + Returns: + (dict): {'/asset/subset_v001.0001.png': '0001', ....} + """ + + patterns = [clique.PATTERNS["frames"]] + collections, remainder = clique.assemble( + files, minimum_items=1, patterns=patterns) + + sources_and_frames = {} + if collections: + for collection in collections: + src_head = collection.head + src_tail = collection.tail + + for index in collection.indexes: + src_frame = collection.format("{padding}") % index + src_file_name = "{}{}{}".format( + src_head, src_frame, src_tail) + sources_and_frames[src_file_name] = src_frame + else: + sources_and_frames[remainder.pop()] = None + + return sources_and_frames + + def _rreplace(s, a, b, n=1): """Replace a with b in string s from right side n times.""" return b.join(s.rsplit(a, n)) @@ -119,12 +218,12 @@ def get_version_from_path(file): """Find version number in file path string. Args: - file (string): file path + file (str): file path Returns: - v: version number in string ('001') - + str: version number in string ('001') """ + pattern = re.compile(r"[\._]v([0-9]+)", re.IGNORECASE) try: return pattern.findall(file)[-1] @@ -140,16 +239,17 @@ def get_last_version_from_path(path_dir, filter): """Find last version of given directory content. 
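For the helpers above, a small illustrative sketch of the expected results (the file names are made up; collect_frames follows the example in its own docstring):

from openpype.lib.path_tools import collect_frames, format_file_size

print(format_file_size(7 * 1024 * 1024))  # "7.0MiB"

frames = collect_frames([
    "/asset/subset_v001.0001.png",
    "/asset/subset_v001.0002.png",
])
# {"/asset/subset_v001.0001.png": "0001",
#  "/asset/subset_v001.0002.png": "0002"}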
Args: - path_dir (string): directory path + path_dir (str): directory path filter (list): list of strings used as file name filter Returns: - string: file name with last version + str: file name with last version Example: last_version_file = get_last_version_from_path( "/project/shots/shot01/work", ["shot01", "compositing", "nk"]) """ + assert os.path.isdir(path_dir), "`path_dir` argument needs to be directory" assert isinstance(filter, list) and ( len(filter) != 0), "`filter` argument needs to be list and not empty" @@ -171,78 +271,69 @@ def get_last_version_from_path(path_dir, filter): return None -def compute_paths(basic_paths_items, project_root): - pattern_array = re.compile(r"\[.*\]") - project_root_key = "__project_root__" - output = [] - for path_items in basic_paths_items: - clean_items = [] - for path_item in path_items: - matches = re.findall(pattern_array, path_item) - if len(matches) > 0: - path_item = path_item.replace(matches[0], "") - if path_item == project_root_key: - path_item = project_root - clean_items.append(path_item) - output.append(os.path.normpath(os.path.sep.join(clean_items))) - return output +@deprecated("openpype.pipeline.project_folders.concatenate_splitted_paths") +def concatenate_splitted_paths(split_paths, anatomy): + """ + Deprecated: + Function will be removed after release version 3.16.* + """ + + from openpype.pipeline.project_folders import concatenate_splitted_paths + + return concatenate_splitted_paths(split_paths, anatomy) +@deprecated +def get_format_data(anatomy): + """ + Deprecated: + Function will be removed after release version 3.16.* + """ + + from openpype.pipeline.template_data import get_project_template_data + + data = get_project_template_data(project_name=anatomy.project_name) + data["root"] = anatomy.roots + return data + + +@deprecated("openpype.pipeline.project_folders.fill_paths") +def fill_paths(path_list, anatomy): + """ + Deprecated: + Function will be removed after release version 3.16.* + """ + + from openpype.pipeline.project_folders import fill_paths + + return fill_paths(path_list, anatomy) + + +@deprecated("openpype.pipeline.project_folders.create_project_folders") def create_project_folders(basic_paths, project_name): - anatomy = Anatomy(project_name) - roots_paths = [] - if isinstance(anatomy.roots, dict): - for root in anatomy.roots.values(): - roots_paths.append(root.value) - else: - roots_paths.append(anatomy.roots.value) + """ + Deprecated: + Function will be removed after release version 3.16.* + """ - for root_path in roots_paths: - project_root = os.path.join(root_path, project_name) - full_paths = compute_paths(basic_paths, project_root) - # Create folders - for path in full_paths: - full_path = path.format(project_root=project_root) - if os.path.exists(full_path): - log.debug( - "Folder already exists: {}".format(full_path) - ) - else: - log.debug("Creating folder: {}".format(full_path)) - os.makedirs(full_path) - - -def _list_path_items(folder_structure): - output = [] - for key, value in folder_structure.items(): - if not value: - output.append(key) - else: - paths = _list_path_items(value) - for path in paths: - if not isinstance(path, (list, tuple)): - path = [path] - - item = [key] - item.extend(path) - output.append(item) - - return output + from openpype.pipeline.project_folders import create_project_folders + + return create_project_folders(project_name, basic_paths) +@deprecated("openpype.pipeline.project_folders.get_project_basic_paths") def get_project_basic_paths(project_name): - project_settings = 
get_project_settings(project_name) - folder_structure = ( - project_settings["global"]["project_folder_structure"] - ) - if not folder_structure: - return [] + """ + Deprecated: + Function will be removed after release version 3.16.* + """ - if isinstance(folder_structure, str): - folder_structure = json.loads(folder_structure) - return _list_path_items(folder_structure) + from openpype.pipeline.project_folders import get_project_basic_paths + + return get_project_basic_paths(project_name) +@deprecated("openpype.pipeline.workfile.create_workdir_extra_folders") def create_workdir_extra_folders( workdir, host_name, task_type, task_name, project_name, project_settings=None @@ -259,192 +350,18 @@ def create_workdir_extra_folders( project_name (str): Name of project on which task is. project_settings (dict): Prepared project settings. Are loaded if not passed. - """ - # Load project settings if not set - if not project_settings: - project_settings = get_project_settings(project_name) - # Load extra folders profiles - extra_folders_profiles = ( - project_settings["global"]["tools"]["Workfiles"]["extra_folders"] + Deprecated: + Function will be removed after release version 3.16.* + """ + + from openpype.pipeline.project_folders import create_workdir_extra_folders + + return create_workdir_extra_folders( + workdir, + host_name, + task_type, + task_name, + project_name, + project_settings ) - # Skip if are empty - if not extra_folders_profiles: - return - - # Prepare profiles filters - filter_data = { - "task_types": task_type, - "task_names": task_name, - "hosts": host_name - } - profile = filter_profiles(extra_folders_profiles, filter_data) - if profile is None: - return - - for subfolder in profile["folders"]: - # Make sure backslashes are converted to forwards slashes - # and does not start with slash - subfolder = subfolder.replace("\\", "/").lstrip("/") - # Skip empty strings - if not subfolder: - continue - - fullpath = os.path.join(workdir, subfolder) - if not os.path.exists(fullpath): - os.makedirs(fullpath) - - -@six.add_metaclass(abc.ABCMeta) -class HostDirmap: - """ - Abstract class for running dirmap on a workfile in a host. - - Dirmap is used to translate paths inside of host workfile from one - OS to another. (Eg. arstist created workfile on Win, different artists - opens same file on Linux.) - - Expects methods to be implemented inside of host: - on_dirmap_enabled: run host code for enabling dirmap - do_dirmap: run host code to do actual remapping - """ - def __init__(self, host_name, project_settings, sync_module=None): - self.host_name = host_name - self.project_settings = project_settings - self.sync_module = sync_module # to limit reinit of Modules - - self._mapping = None # cache mapping - - @abc.abstractmethod - def on_enable_dirmap(self): - """ - Run host dependent operation for enabling dirmap if necessary. - """ - - @abc.abstractmethod - def dirmap_routine(self, source_path, destination_path): - """ - Run host dependent remapping from source_path to destination_path - """ - - def process_dirmap(self): - # type: (dict) -> None - """Go through all paths in Settings and set them using `dirmap`. - - If artists has Site Sync enabled, take dirmap mapping directly from - Local Settings when artist is syncing workfile locally. - - Args: - project_settings (dict): Settings for current project. 
- - """ - if not self._mapping: - self._mapping = self.get_mappings(self.project_settings) - if not self._mapping: - return - - log.info("Processing directory mapping ...") - self.on_enable_dirmap() - log.info("mapping:: {}".format(self._mapping)) - - for k, sp in enumerate(self._mapping["source-path"]): - try: - print("{} -> {}".format(sp, - self._mapping["destination-path"][k])) - self.dirmap_routine(sp, - self._mapping["destination-path"][k]) - except IndexError: - # missing corresponding destination path - log.error(("invalid dirmap mapping, missing corresponding" - " destination directory.")) - break - except RuntimeError: - log.error("invalid path {} -> {}, mapping not registered".format( # noqa: E501 - sp, self._mapping["destination-path"][k] - )) - continue - - def get_mappings(self, project_settings): - """Get translation from source-path to destination-path. - - It checks if Site Sync is enabled and user chose to use local - site, in that case configuration in Local Settings takes precedence - """ - local_mapping = self._get_local_sync_dirmap(project_settings) - dirmap_label = "{}-dirmap".format(self.host_name) - if not self.project_settings[self.host_name].get(dirmap_label) and \ - not local_mapping: - return [] - mapping = local_mapping or \ - self.project_settings[self.host_name][dirmap_label]["paths"] or {} - enbled = self.project_settings[self.host_name][dirmap_label]["enabled"] - mapping_enabled = enbled or bool(local_mapping) - - if not mapping or not mapping_enabled or \ - not mapping.get("destination-path") or \ - not mapping.get("source-path"): - return [] - return mapping - - def _get_local_sync_dirmap(self, project_settings): - """ - Returns dirmap if synch to local project is enabled. - - Only valid mapping is from roots of remote site to local site set - in Local Settings. 
- - Args: - project_settings (dict) - Returns: - dict : { "source-path": [XXX], "destination-path": [YYYY]} - """ - import json - mapping = {} - - if not project_settings["global"]["sync_server"]["enabled"]: - return mapping - - from openpype.settings.lib import get_site_local_overrides - - if not self.sync_module: - from openpype.modules import ModulesManager - manager = ModulesManager() - self.sync_module = manager.modules_by_name["sync_server"] - - project_name = os.getenv("AVALON_PROJECT") - - active_site = self.sync_module.get_local_normalized_site( - self.sync_module.get_active_site(project_name)) - remote_site = self.sync_module.get_local_normalized_site( - self.sync_module.get_remote_site(project_name)) - log.debug("active {} - remote {}".format(active_site, remote_site)) - - if active_site == "local" \ - and project_name in self.sync_module.get_enabled_projects()\ - and active_site != remote_site: - - sync_settings = self.sync_module.get_sync_project_setting( - os.getenv("AVALON_PROJECT"), exclude_locals=False, - cached=False) - - active_overrides = get_site_local_overrides( - os.getenv("AVALON_PROJECT"), active_site) - remote_overrides = get_site_local_overrides( - os.getenv("AVALON_PROJECT"), remote_site) - - log.debug("local overrides".format(active_overrides)) - log.debug("remote overrides".format(remote_overrides)) - for root_name, active_site_dir in active_overrides.items(): - remote_site_dir = remote_overrides.get(root_name) or\ - sync_settings["sites"][remote_site]["root"][root_name] - if os.path.isdir(active_site_dir): - if not mapping.get("destination-path"): - mapping["destination-path"] = [] - mapping["destination-path"].append(active_site_dir) - - if not mapping.get("source-path"): - mapping["source-path"] = [] - mapping["source-path"].append(remote_site_dir) - - log.debug("local sync mapping:: {}".format(mapping)) - return mapping diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index bcbf06a0e8..1e157dfbfd 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -1,29 +1,71 @@ # -*- coding: utf-8 -*- """Avalon/Pyblish plugin tools.""" import os -import inspect import logging import re -import json -from .profiles_filtering import filter_profiles +import warnings +import functools +from openpype.client import get_asset_by_id from openpype.settings import get_project_settings - log = logging.getLogger(__name__) -# Subset name template used when plugin does not have defined any -DEFAULT_SUBSET_TEMPLATE = "{family}{Variant}" + +class PluginToolsDeprecatedWarning(DeprecationWarning): + pass -class TaskNotSetError(KeyError): - def __init__(self, msg=None): - if not msg: - msg = "Creator's subset name template requires task name." - super(TaskNotSetError, self).__init__(msg) +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." 
+ ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", PluginToolsDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=PluginToolsDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) +@deprecated("openpype.pipeline.create.TaskNotSetError") +def TaskNotSetError(*args, **kwargs): + from openpype.pipeline.create import TaskNotSetError + + return TaskNotSetError(*args, **kwargs) + + +@deprecated("openpype.pipeline.create.get_subset_name") def get_subset_name_with_asset_doc( family, variant, @@ -62,61 +104,22 @@ def get_subset_name_with_asset_doc( dbcon (AvalonMongoDB): Mongo connection to be able query asset document if 'asset_doc' is not passed. """ - if not family: - return "" - if not host_name: - host_name = os.environ["AVALON_APP"] + from openpype.pipeline.create import get_subset_name - # Use only last part of class family value split by dot (`.`) - family = family.rsplit(".", 1)[-1] - - if project_name is None: - from openpype.pipeline import legacy_io - - project_name = legacy_io.Session["AVALON_PROJECT"] - - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") - - # Get settings - tools_settings = get_project_settings(project_name)["global"]["tools"] - profiles = tools_settings["creator"]["subset_name_profiles"] - filtering_criteria = { - "families": family, - "hosts": host_name, - "tasks": task_name, - "task_types": task_type - } - - matching_profile = filter_profiles(profiles, filtering_criteria) - template = None - if matching_profile: - template = matching_profile["template"] - - # Make sure template is set (matching may have empty string) - if not template: - template = default_template or DEFAULT_SUBSET_TEMPLATE - - # Simple check of task name existence for template with {task} in - # - missing task should be possible only in Standalone publisher - if not task_name and "{task" in template.lower(): - raise TaskNotSetError() - - fill_pairs = { - "variant": variant, - "family": family, - "task": task_name - } - if dynamic_data: - # Dynamic data may override default values - for key, value in dynamic_data.items(): - fill_pairs[key] = value - - return template.format(**prepare_template_data(fill_pairs)) + return get_subset_name( + family, + variant, + task_name, + asset_doc, + project_name, + host_name, + default_template, + dynamic_data + ) +@deprecated def get_subset_name( family, variant, @@ -135,20 +138,15 @@ def get_subset_name( This is legacy function should be replaced with `get_subset_name_with_asset_doc` where asset document is expected. 
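As the docstrings above suggest, new code should call the pipeline variant directly instead of these wrappers. A sketch under the assumption that the new get_subset_name accepts the same arguments the wrapper forwards, and that get_asset_by_name is available next to get_asset_by_id in openpype.client (project, asset and task names are hypothetical):

from openpype.client import get_asset_by_name
from openpype.pipeline.create import get_subset_name

asset_doc = get_asset_by_name("demo_project", "sh010")
subset_name = get_subset_name(
    "render", "Main", "compositing", asset_doc, "demo_project", "nuke")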
""" - if dbcon is None: - from openpype.pipeline import AvalonMongoDB - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name + from openpype.pipeline.create import get_subset_name - dbcon.install() + if project_name is None: + project_name = dbcon.project_name - asset_doc = dbcon.find_one( - {"_id": asset_id}, - {"data.tasks": True} - ) or {} + asset_doc = get_asset_by_id(project_name, asset_id, fields=["data.tasks"]) - return get_subset_name_with_asset_doc( + return get_subset_name( family, variant, task_name, @@ -204,6 +202,7 @@ def prepare_template_data(fill_pairs): return fill_data +@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins") def filter_pyblish_plugins(plugins): """Filter pyblish plugins by presets. @@ -214,56 +213,16 @@ def filter_pyblish_plugins(plugins): plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base` `discover()` method. + Deprecated: + Function will be removed after release version 3.15.* """ - from pyblish import api - host = api.current_host() + from openpype.pipeline.publish.lib import filter_pyblish_plugins - presets = get_project_settings(os.environ['AVALON_PROJECT']) or {} - # skip if there are no presets to process - if not presets: - return - - # iterate over plugins - for plugin in plugins[:]: - - try: - config_data = presets[host]["publish"][plugin.__name__] - except KeyError: - # host determined from path - file = os.path.normpath(inspect.getsourcefile(plugin)) - file = os.path.normpath(file) - - split_path = file.split(os.path.sep) - if len(split_path) < 4: - log.warning( - 'plugin path too short to extract host {}'.format(file) - ) - continue - - host_from_file = split_path[-4] - plugin_kind = split_path[-2] - - # TODO: change after all plugins are moved one level up - if host_from_file == "openpype": - host_from_file = "global" - - try: - config_data = presets[host_from_file][plugin_kind][plugin.__name__] # noqa: E501 - except KeyError: - continue - - for option, value in config_data.items(): - if option == "enabled" and value is False: - log.info('removing plugin {}'.format(plugin.__name__)) - plugins.remove(plugin) - else: - log.info('setting {}:{} on plugin {}'.format( - option, value, plugin.__name__)) - - setattr(plugin, option, value) + filter_pyblish_plugins(plugins) +@deprecated def set_plugin_attributes_from_settings( plugins, superclass, host_name=None, project_name=None ): @@ -279,7 +238,12 @@ def set_plugin_attributes_from_settings( Value from environment `AVALON_APP` is used if not entered. project_name (str): Name of project for which settings will be loaded. Value from environment `AVALON_PROJECT` is used if not entered. + + Deprecated: + Function will be removed after release version 3.15.* """ + + # Function is not used anymore from openpype.pipeline import LegacyCreator, LoaderPlugin # determine host application to use for finding presets @@ -373,102 +337,3 @@ def source_hash(filepath, *args): time = str(os.path.getmtime(filepath)) size = str(os.path.getsize(filepath)) return "|".join([file_name, time, size] + list(args)).replace(".", ",") - - -def get_unique_layer_name(layers, name): - """ - Gets all layer names and if 'name' is present in them, increases - suffix by 1 (eg. 
creates unique layer name - for Loader) - Args: - layers (list): of strings, names only - name (string): checked value - - Returns: - (string): name_00X (without version) - """ - names = {} - for layer in layers: - layer_name = re.sub(r'_\d{3}$', '', layer) - if layer_name in names.keys(): - names[layer_name] = names[layer_name] + 1 - else: - names[layer_name] = 1 - occurrences = names.get(name, 0) - - return "{}_{:0>3d}".format(name, occurrences + 1) - - -def get_background_layers(file_url): - """ - Pulls file name from background json file, enrich with folder url for - AE to be able import files. - - Order is important, follows order in json. - - Args: - file_url (str): abs url of background json - - Returns: - (list): of abs paths to images - """ - with open(file_url) as json_file: - data = json.load(json_file) - - layers = list() - bg_folder = os.path.dirname(file_url) - for child in data['children']: - if child.get("filename"): - layers.append(os.path.join(bg_folder, child.get("filename")). - replace("\\", "/")) - else: - for layer in child['children']: - if layer.get("filename"): - layers.append(os.path.join(bg_folder, - layer.get("filename")). - replace("\\", "/")) - return layers - - -def parse_json(path): - """Parses json file at 'path' location - - Returns: - (dict) or None if unparsable - Raises: - AsssertionError if 'path' doesn't exist - """ - path = path.strip('\"') - assert os.path.isfile(path), ( - "Path to json file doesn't exist. \"{}\"".format(path) - ) - data = None - with open(path, "r") as json_file: - try: - data = json.load(json_file) - except Exception as exc: - log.error( - "Error loading json: " - "{} - Exception: {}".format(path, exc) - ) - return data - - -def get_batch_asset_task_info(ctx): - """Parses context data from webpublisher's batch metadata - - Returns: - (tuple): asset, task_name (Optional), task_type - """ - task_type = "default_task_type" - task_name = None - asset = None - - if ctx["type"] == "task": - items = ctx["path"].split('/') - asset = items[-2] - task_name = ctx["name"] - task_type = ctx["attributes"]["type"] - else: - asset = ctx["name"] - - return asset, task_name, task_type diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py index 396479c725..ff2f1d4b88 100644 --- a/openpype/lib/project_backpack.py +++ b/openpype/lib/project_backpack.py @@ -24,7 +24,10 @@ from bson.json_util import ( dumps, CANONICAL_JSON_OPTIONS ) - +from openpype.client import ( + get_project, + get_whole_project, +) from openpype.pipeline import AvalonMongoDB DOCUMENTS_FILE_NAME = "database" @@ -50,14 +53,12 @@ def pack_project(project_name, destination_dir=None): Args: project_name(str): Project that should be packaged. - destination_dir(str): Optinal path where zip will be stored. Project's + destination_dir(str): Optional path where zip will be stored. Project's root is used if not passed. 
""" print("Creating package of project \"{}\"".format(project_name)) # Validate existence of project - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) if not project_doc: raise ValueError("Project \"{}\" was not found in database".format( project_name @@ -118,7 +119,7 @@ def pack_project(project_name, destination_dir=None): temp_docs_json = s.name # Query all project documents and store them to temp json - docs = list(dbcon.find({})) + docs = list(get_whole_project(project_name)) data = dumps( docs, json_options=CANONICAL_JSON_OPTIONS ) @@ -147,7 +148,7 @@ def pack_project(project_name, destination_dir=None): # Cleanup os.remove(temp_docs_json) os.remove(temp_metadata_json) - dbcon.uninstall() + print("*** Packing finished ***") @@ -207,7 +208,7 @@ def unpack_project(path_to_zip, new_root=None): print("Using different root path {}".format(new_root)) root_path = new_root - project_doc = collection.find_one({"type": "project"}) + project_doc = get_project(project_name) roots = project_doc["config"]["roots"] key = tuple(roots.keys())[0] update_key = "config.roots.{}.{}".format(key, low_platform) diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index adb9bb2c3a..0bfccd3443 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -42,6 +42,28 @@ XML_CHAR_REF_REGEX_HEX = re.compile(r"&#x?[0-9a-fA-F]+;") # Regex to parse array attributes ARRAY_TYPE_REGEX = re.compile(r"^(int|float|string)\[\d+\]$") +IMAGE_EXTENSIONS = { + ".ani", ".anim", ".apng", ".art", ".bmp", ".bpg", ".bsave", ".cal", + ".cin", ".cpc", ".cpt", ".dds", ".dpx", ".ecw", ".exr", ".fits", + ".flic", ".flif", ".fpx", ".gif", ".hdri", ".hevc", ".icer", + ".icns", ".ico", ".cur", ".ics", ".ilbm", ".jbig", ".jbig2", + ".jng", ".jpeg", ".jpeg-ls", ".jpeg", ".2000", ".jpg", ".xr", + ".jpeg", ".xt", ".jpeg-hdr", ".kra", ".mng", ".miff", ".nrrd", + ".ora", ".pam", ".pbm", ".pgm", ".ppm", ".pnm", ".pcx", ".pgf", + ".pictor", ".png", ".psb", ".psp", ".qtvr", ".ras", + ".rgbe", ".logluv", ".tiff", ".sgi", ".tga", ".tiff", ".tiff/ep", + ".tiff/it", ".ufo", ".ufp", ".wbmp", ".webp", ".xbm", ".xcf", + ".xpm", ".xwd" +} + +VIDEO_EXTENSIONS = { + ".3g2", ".3gp", ".amv", ".asf", ".avi", ".drc", ".f4a", ".f4b", + ".f4p", ".f4v", ".flv", ".gif", ".gifv", ".m2v", ".m4p", ".m4v", + ".mkv", ".mng", ".mov", ".mp2", ".mp4", ".mpe", ".mpeg", ".mpg", + ".mpv", ".mxf", ".nsv", ".ogg", ".ogv", ".qt", ".rm", ".rmvb", + ".roq", ".svi", ".vob", ".webm", ".wmv", ".yuv" +} + def get_transcode_temp_directory(): """Creates temporary folder for transcoding. @@ -139,7 +161,7 @@ def convert_value_by_type_name(value_type, value, logger=None): return float(value) # Vectors will probably have more types - if value_type == "vec2f": + if value_type in ("vec2f", "float2"): return [float(item) for item in value.split(",")] # Matrix should be always have square size of element 3x3, 4x4 @@ -154,7 +176,7 @@ def convert_value_by_type_name(value_type, value, logger=None): elif parts_len == 4: divisor = 2 elif parts_len == 9: - divisor == 3 + divisor = 3 elif parts_len == 16: divisor = 4 else: @@ -204,8 +226,8 @@ def convert_value_by_type_name(value_type, value, logger=None): ) return output - logger.info(( - "MISSING IMPLEMENTATION:" + logger.debug(( + "Dev note (missing implementation):" " Unknown attrib type \"{}\". 
Value: {}" ).format(value_type, value)) return value @@ -263,8 +285,8 @@ def parse_oiio_xml_output(xml_string, logger=None): # - feel free to add more tags else: value = child.text - logger.info(( - "MISSING IMPLEMENTATION:" + logger.debug(( + "Dev note (missing implementation):" " Unknown tag \"{}\". Value \"{}\"" ).format(tag_name, value)) @@ -533,7 +555,7 @@ def convert_input_paths_for_ffmpeg( output_dir, logger=None ): - """Contert source file to format supported in ffmpeg. + """Convert source file to format supported in ffmpeg. Currently can convert only exrs. The input filepaths should be files with same type. Information about input is loaded only from first found @@ -938,3 +960,40 @@ def convert_ffprobe_fps_value(str_value): fps = int(fps) return str(fps) + + +def convert_ffprobe_fps_to_float(value): + """Convert string value of frame rate to float. + + Copy of 'convert_ffprobe_fps_value' which raises exceptions on invalid + value, does not convert value to string and does not return "Unknown" + string. + + Args: + value (str): Value to be converted. + + Returns: + Float: Converted frame rate in float. If divisor in value is '0' then + '0.0' is returned. + + Raises: + ValueError: Passed value is invalid for conversion. + """ + + if not value: + raise ValueError("Got empty value.") + + items = value.split("/") + if len(items) == 1: + return float(items[0]) + + if len(items) > 2: + raise ValueError(( + "FPS expression contains multiple dividers \"{}\"." + ).format(value)) + + dividend = float(items.pop(0)) + divisor = float(items.pop(0)) + if divisor == 0.0: + return 0.0 + return dividend / divisor diff --git a/openpype/lib/usdlib.py b/openpype/lib/usdlib.py index 86de19b4be..20703ee308 100644 --- a/openpype/lib/usdlib.py +++ b/openpype/lib/usdlib.py @@ -8,10 +8,8 @@ except ImportError: # Allow to fall back on Multiverse 6.3.0+ pxr usd library from mvpxr import Usd, UsdGeom, Sdf, Kind -from openpype.pipeline import ( - registered_root, - legacy_io, -) +from openpype.client import get_project, get_asset_by_name +from openpype.pipeline import legacy_io, Anatomy log = logging.getLogger(__name__) @@ -128,7 +126,8 @@ def create_model(filename, asset, variant_subsets): """ - asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) + project_name = legacy_io.active_project() + asset_doc = get_asset_by_name(project_name, asset) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -178,7 +177,8 @@ def create_shade(filename, asset, variant_subsets): """ - asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) + project_name = legacy_io.active_project() + asset_doc = get_asset_by_name(project_name, asset) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -213,7 +213,8 @@ def create_shade_variation(filename, asset, model_variant, shade_variants): """ - asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) + project_name = legacy_io.active_project() + asset_doc = get_asset_by_name(project_name, asset) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -313,21 +314,25 @@ def get_usd_master_path(asset, subset, representation): """ - project = legacy_io.find_one( - {"type": "project"}, projection={"config.template.publish": True} + project_name = legacy_io.active_project() + anatomy = Anatomy(project_name) + project_doc = get_project( + project_name, + fields=["name", "data.code"] ) - template = project["config"]["template"]["publish"] if isinstance(asset, dict) and "name" in asset: # Allow explicitly passing asset document 
asset_doc = asset else: - asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) + asset_doc = get_asset_by_name(project_name, asset, fields=["name"]) - path = template.format( - **{ - "root": registered_root(), - "project": legacy_io.Session["AVALON_PROJECT"], + formatted_result = anatomy.format( + { + "project": { + "name": project_name, + "code": project_doc.get("data", {}).get("code") + }, "asset": asset_doc["name"], "subset": subset, "representation": representation, @@ -335,6 +340,7 @@ def get_usd_master_path(asset, subset, representation): } ) + path = formatted_result["publish"]["path"] # Remove the version folder subset_folder = os.path.dirname(os.path.dirname(path)) master_folder = os.path.join(subset_folder, "master") diff --git a/openpype/lib/vendor_bin_utils.py b/openpype/lib/vendor_bin_utils.py index 23e28ea304..099f9a34ba 100644 --- a/openpype/lib/vendor_bin_utils.py +++ b/openpype/lib/vendor_bin_utils.py @@ -1,10 +1,33 @@ import os import logging import platform +import subprocess log = logging.getLogger("Vendor utils") +class CachedToolPaths: + """Cache already used and discovered tools and their executables. + + Discovering path can take some time and can trigger subprocesses so it's + better to cache the paths on first get. + """ + + _cached_paths = {} + + @classmethod + def is_tool_cached(cls, tool): + return tool in cls._cached_paths + + @classmethod + def get_executable_path(cls, tool): + return cls._cached_paths.get(tool) + + @classmethod + def cache_executable_path(cls, tool, path): + cls._cached_paths[tool] = path + + def is_file_executable(filepath): """Filepath lead to executable file. @@ -98,6 +121,7 @@ def get_vendor_bin_path(bin_app): Returns: str: Path to vendorized binaries folder. """ + return os.path.join( os.environ["OPENPYPE_ROOT"], "vendor", @@ -107,6 +131,123 @@ def get_vendor_bin_path(bin_app): ) +def find_tool_in_custom_paths(paths, tool, validation_func=None): + """Find a tool executable in custom paths. + + Args: + paths (Iterable[str]): Iterable of paths where to look for tool. + tool (str): Name of tool (binary file) to find in passed paths. + validation_func (Function): Custom validation function of path. + Function must expect one argument which is path to executable. + If not passed only 'find_executable' is used to be able identify + if path is valid. + + Reuturns: + Union[str, None]: Path to validated executable or None if was not + found. 
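CachedToolPaths introduced above in vendor_bin_utils.py is a plain class-level cache; the lookup helpers below follow a check-discover-cache pattern. A minimal sketch of that pattern (the discovery call is a hypothetical placeholder):

    from openpype.lib.vendor_bin_utils import CachedToolPaths

    def get_example_tool_path(tool="oiiotool"):
        # Return early when a previous call already resolved the executable.
        if CachedToolPaths.is_tool_cached(tool):
            return CachedToolPaths.get_executable_path(tool)

        executable_path = discover_executable_somehow(tool)  # placeholder

        # Even a None result is cached so discovery is not retried each call.
        CachedToolPaths.cache_executable_path(tool, executable_path)
        return executable_path
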
+    """
+
+    for path in paths:
+        # Skip empty strings
+        if not path:
+            continue
+
+        # Handle cases when path is just an executable
+        # - it allows to use executable from PATH
+        # - basename must match 'tool' value (without extension)
+        extless_path, ext = os.path.splitext(path)
+        if extless_path == tool:
+            executable_path = find_executable(tool)
+            if executable_path and (
+                validation_func is None
+                or validation_func(executable_path)
+            ):
+                return executable_path
+            continue
+
+        # Normalize path because it should be a path and check if exists
+        normalized = os.path.normpath(path)
+        if not os.path.exists(normalized):
+            continue
+
+        # Note: Path can be both file and directory
+
+        # If path is a file validate it
+        if os.path.isfile(normalized):
+            basename, ext = os.path.splitext(os.path.basename(path))
+            # Check if the filename actually has the same base name as 'tool'
+            if basename == tool:
+                executable_path = find_executable(normalized)
+                if executable_path and (
+                    validation_func is None
+                    or validation_func(executable_path)
+                ):
+                    return executable_path
+
+        # Check if path is a directory and look for tool inside the dir
+        if os.path.isdir(normalized):
+            executable_path = find_executable(os.path.join(normalized, tool))
+            if executable_path and (
+                validation_func is None
+                or validation_func(executable_path)
+            ):
+                return executable_path
+    return None
+
+
+def _check_args_returncode(args):
+    try:
+        # Python 2 compatibility where DEVNULL is not available
+        if hasattr(subprocess, "DEVNULL"):
+            proc = subprocess.Popen(
+                args,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+            )
+            proc.wait()
+        else:
+            with open(os.devnull, "w") as devnull:
+                proc = subprocess.Popen(
+                    args, stdout=devnull, stderr=devnull,
+                )
+                proc.wait()
+
+    except Exception:
+        return False
+    return proc.returncode == 0
+
+
+def _oiio_executable_validation(filepath):
+    """Validate oiio tool executable if can be executed.
+
+    Validation has 2 steps. First is using 'find_executable' to fill possible
+    missing extension or fill directory then launch executable and validate
+    that it can be executed. For that is used '--help' argument which is fast
+    and does not need any other inputs.
+
+    Any possible crash from missing libraries or an invalid build should be
+    caught.
+
+    The main reason is to validate that the executable can be launched on the
+    current OS at all, which can be an issue on linux machines.
+
+    Note:
+        It does not validate if the executable is really an oiio tool which
+        should be used.
+
+    Args:
+        filepath (str): Path to executable.
+
+    Returns:
+        bool: Filepath is valid executable.
+    """
+
+    filepath = find_executable(filepath)
+    if not filepath:
+        return False
+
+    return _check_args_returncode([filepath, "--help"])
+
+
 def get_oiio_tools_path(tool="oiiotool"):
     """Path to vendorized OpenImageIO tool executables.
 
@@ -116,8 +257,63 @@ def get_oiio_tools_path(tool="oiiotool"):
         tool (string): Tool name (oiiotool, maketx, ...).
            Default is "oiiotool". 
""" - oiio_dir = get_vendor_bin_path("oiio") - return find_executable(os.path.join(oiio_dir, tool)) + + if CachedToolPaths.is_tool_cached(tool): + return CachedToolPaths.get_executable_path(tool) + + custom_paths_str = os.environ.get("OPENPYPE_OIIO_PATHS") or "" + tool_executable_path = find_tool_in_custom_paths( + custom_paths_str.split(os.pathsep), + tool, + _oiio_executable_validation + ) + + if not tool_executable_path: + oiio_dir = get_vendor_bin_path("oiio") + if platform.system().lower() == "linux": + oiio_dir = os.path.join(oiio_dir, "bin") + default_path = os.path.join(oiio_dir, tool) + if _oiio_executable_validation(default_path): + tool_executable_path = default_path + + # Look to PATH for the tool + if not tool_executable_path: + from_path = find_executable(tool) + if from_path and _oiio_executable_validation(from_path): + tool_executable_path = from_path + + CachedToolPaths.cache_executable_path(tool, tool_executable_path) + return tool_executable_path + + +def _ffmpeg_executable_validation(filepath): + """Validate ffmpeg tool executable if can be executed. + + Validation has 2 steps. First is using 'find_executable' to fill possible + missing extension or fill directory then launch executable and validate + that it can be executed. For that is used '-version' argument which is fast + and does not need any other inputs. + + Any possible crash of missing libraries or invalid build should be catched. + + Main reason is to validate if executable can be executed on OS just running + which can be issue ob linux machines. + + Note: + It does not validate if the executable is really a ffmpeg tool. + + Args: + filepath (str): Path to executable. + + Returns: + bool: Filepath is valid executable. + """ + + filepath = find_executable(filepath) + if not filepath: + return False + + return _check_args_returncode([filepath, "-version"]) def get_ffmpeg_tool_path(tool="ffmpeg"): @@ -130,10 +326,33 @@ def get_ffmpeg_tool_path(tool="ffmpeg"): Returns: str: Full path to ffmpeg executable. 
""" - ffmpeg_dir = get_vendor_bin_path("ffmpeg") - if platform.system().lower() == "windows": - ffmpeg_dir = os.path.join(ffmpeg_dir, "bin") - return find_executable(os.path.join(ffmpeg_dir, tool)) + + if CachedToolPaths.is_tool_cached(tool): + return CachedToolPaths.get_executable_path(tool) + + custom_paths_str = os.environ.get("OPENPYPE_FFMPEG_PATHS") or "" + tool_executable_path = find_tool_in_custom_paths( + custom_paths_str.split(os.pathsep), + tool, + _ffmpeg_executable_validation + ) + + if not tool_executable_path: + ffmpeg_dir = get_vendor_bin_path("ffmpeg") + if platform.system().lower() == "windows": + ffmpeg_dir = os.path.join(ffmpeg_dir, "bin") + tool_path = find_executable(os.path.join(ffmpeg_dir, tool)) + if tool_path and _ffmpeg_executable_validation(tool_path): + tool_executable_path = tool_path + + # Look to PATH for the tool + if not tool_executable_path: + from_path = find_executable(tool) + if from_path and _oiio_executable_validation(from_path): + tool_executable_path = from_path + + CachedToolPaths.cache_executable_path(tool, tool_executable_path) + return tool_executable_path def is_oiio_supported(): diff --git a/openpype/modules/__init__.py b/openpype/modules/__init__.py index 68b5f6c247..1f345feea9 100644 --- a/openpype/modules/__init__.py +++ b/openpype/modules/__init__.py @@ -1,8 +1,17 @@ # -*- coding: utf-8 -*- +from .interfaces import ( + ILaunchHookPaths, + IPluginPaths, + ITrayModule, + ITrayAction, + ITrayService, + ISettingsChangeListener, + IHostAddon, +) + from .base import ( OpenPypeModule, OpenPypeAddOn, - OpenPypeInterface, load_modules, @@ -18,9 +27,16 @@ from .base import ( __all__ = ( + "ILaunchHookPaths", + "IPluginPaths", + "ITrayModule", + "ITrayAction", + "ITrayService", + "ISettingsChangeListener", + "IHostAddon", + "OpenPypeModule", "OpenPypeAddOn", - "OpenPypeInterface", "load_modules", diff --git a/openpype/modules/avalon_apps/avalon_app.py b/openpype/modules/avalon_apps/avalon_app.py index 1d21de129b..f9085522b0 100644 --- a/openpype/modules/avalon_apps/avalon_app.py +++ b/openpype/modules/avalon_apps/avalon_app.py @@ -1,7 +1,6 @@ import os -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayModule +from openpype.modules import OpenPypeModule, ITrayModule class AvalonModule(OpenPypeModule, ITrayModule): diff --git a/openpype/modules/avalon_apps/rest_api.py b/openpype/modules/avalon_apps/rest_api.py index b35f5bf357..a52ce1b6df 100644 --- a/openpype/modules/avalon_apps/rest_api.py +++ b/openpype/modules/avalon_apps/rest_api.py @@ -5,7 +5,12 @@ from bson.objectid import ObjectId from aiohttp.web_response import Response -from openpype.pipeline import AvalonMongoDB +from openpype.client import ( + get_projects, + get_project, + get_assets, + get_asset_by_name, +) from openpype_modules.webserver.base_routes import RestApiEndpoint @@ -14,19 +19,13 @@ class _RestApiEndpoint(RestApiEndpoint): self.resource = resource super(_RestApiEndpoint, self).__init__() - @property - def dbcon(self): - return self.resource.dbcon - class AvalonProjectsEndpoint(_RestApiEndpoint): async def get(self) -> Response: - output = [] - for project_name in self.dbcon.database.collection_names(): - project_doc = self.dbcon.database[project_name].find_one({ - "type": "project" - }) - output.append(project_doc) + output = [ + project_doc + for project_doc in get_projects() + ] return Response( status=200, body=self.resource.encode(output), @@ -36,9 +35,7 @@ class AvalonProjectsEndpoint(_RestApiEndpoint): class 
AvalonProjectEndpoint(_RestApiEndpoint): async def get(self, project_name) -> Response: - project_doc = self.dbcon.database[project_name].find_one({ - "type": "project" - }) + project_doc = get_project(project_name) if project_doc: return Response( status=200, @@ -53,9 +50,7 @@ class AvalonProjectEndpoint(_RestApiEndpoint): class AvalonAssetsEndpoint(_RestApiEndpoint): async def get(self, project_name) -> Response: - asset_docs = list(self.dbcon.database[project_name].find({ - "type": "asset" - })) + asset_docs = list(get_assets(project_name)) return Response( status=200, body=self.resource.encode(asset_docs), @@ -65,10 +60,7 @@ class AvalonAssetsEndpoint(_RestApiEndpoint): class AvalonAssetEndpoint(_RestApiEndpoint): async def get(self, project_name, asset_name) -> Response: - asset_doc = self.dbcon.database[project_name].find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) if asset_doc: return Response( status=200, @@ -88,9 +80,6 @@ class AvalonRestApiResource: self.module = avalon_module self.server_manager = server_manager - self.dbcon = AvalonMongoDB() - self.dbcon.install() - self.prefix = "/avalon" self.endpoint_defs = ( diff --git a/openpype/modules/base.py b/openpype/modules/base.py index 0dd512ee8b..4761462df0 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -9,11 +9,11 @@ import logging import platform import threading import collections +import traceback from uuid import uuid4 from abc import ABCMeta, abstractmethod import six -import openpype from openpype.settings import ( get_system_settings, SYSTEM_SETTINGS_KEY, @@ -26,7 +26,20 @@ from openpype.settings.lib import ( get_studio_system_settings_overrides, load_json_file ) -from openpype.lib import PypeLogger + +from openpype.lib import ( + Logger, + import_filepath, + import_module_from_dirpath +) + +from .interfaces import ( + OpenPypeInterface, + IPluginPaths, + IHostAddon, + ITrayModule, + ITrayService +) # Files that will be always ignored on modules import IGNORED_FILENAMES = ( @@ -49,6 +62,7 @@ class _ModuleClass(object): Object of this class can be stored to `sys.modules` and used for storing dynamically imported modules. """ + def __init__(self, name): # Call setattr on super class super(_ModuleClass, self).__setattr__("name", name) @@ -92,7 +106,7 @@ class _ModuleClass(object): def log(self): if self._log is None: super(_ModuleClass, self).__setattr__( - "_log", PypeLogger.get_logger(self.name) + "_log", Logger.get_logger(self.name) ) return self._log @@ -116,15 +130,25 @@ class _InterfacesClass(_ModuleClass): - this is because interfaces must be available even if are missing implementation """ + def __getattr__(self, attr_name): if attr_name not in self.__attributes__: if attr_name in ("__path__", "__file__"): return None - raise ImportError(( + raise AttributeError(( "cannot import name '{}' from 'openpype_interfaces'" ).format(attr_name)) + if _LoadCache.interfaces_loaded and attr_name != "log": + stack = list(traceback.extract_stack()) + stack.pop(-1) + self.log.warning(( + "Using deprecated import of \"{}\" from 'openpype_interfaces'." 
+ " Please switch to use import" + " from 'openpype.modules.interfaces'" + " (will be removed after 3.16.x).{}" + ).format(attr_name, "".join(traceback.format_list(stack)))) return self.__attributes__[attr_name] @@ -138,7 +162,7 @@ class _LoadCache: def get_default_modules_dir(): """Path to default OpenPype modules.""" - current_dir = os.path.abspath(os.path.dirname(__file__)) + current_dir = os.path.dirname(os.path.abspath(__file__)) output = [] for folder_name in ("default_modules", ): @@ -276,19 +300,13 @@ def load_modules(force=False): def _load_modules(): - # Import helper functions from lib - from openpype.lib import ( - import_filepath, - import_module_from_dirpath - ) - # Key under which will be modules imported in `sys.modules` modules_key = "openpype_modules" # Change `sys.modules` sys.modules[modules_key] = openpype_modules = _ModuleClass(modules_key) - log = PypeLogger.get_logger("ModulesLoader") + log = Logger.get_logger("ModulesLoader") # Look for OpenPype modules in paths defined with `get_module_dirs` # - dynamically imported OpenPype modules and addons @@ -296,6 +314,8 @@ def _load_modules(): # Add current directory at first place # - has small differences in import logic current_dir = os.path.abspath(os.path.dirname(__file__)) + hosts_dir = os.path.join(os.path.dirname(current_dir), "hosts") + module_dirs.insert(0, hosts_dir) module_dirs.insert(0, current_dir) processed_paths = set() @@ -312,6 +332,7 @@ def _load_modules(): continue is_in_current_dir = dirpath == current_dir + is_in_host_dir = dirpath == hosts_dir for filename in os.listdir(dirpath): # Ignore filenames if filename in IGNORED_FILENAMES: @@ -351,6 +372,24 @@ def _load_modules(): sys.modules[new_import_str] = default_module setattr(openpype_modules, basename, default_module) + elif is_in_host_dir: + import_str = "openpype.hosts.{}".format(basename) + new_import_str = "{}.{}".format(modules_key, basename) + # Until all hosts are converted to be able use them as + # modules is this error check needed + try: + default_module = __import__( + import_str, fromlist=("", ) + ) + sys.modules[new_import_str] = default_module + setattr(openpype_modules, basename, default_module) + + except Exception: + log.warning( + "Failed to import host folder {}".format(basename), + exc_info=True + ) + elif os.path.isdir(fullpath): import_module_from_dirpath(dirpath, filename, modules_key) @@ -368,29 +407,6 @@ def _load_modules(): log.error(msg, exc_info=True) -class _OpenPypeInterfaceMeta(ABCMeta): - """OpenPypeInterface meta class to print proper string.""" - def __str__(self): - return "<'OpenPypeInterface.{}'>".format(self.__name__) - - def __repr__(self): - return str(self) - - -@six.add_metaclass(_OpenPypeInterfaceMeta) -class OpenPypeInterface: - """Base class of Interface that can be used as Mixin with abstract parts. - - This is way how OpenPype module or addon can tell that has implementation - for specific part or for other module/addon. - - Child classes of OpenPypeInterface may be used as mixin in different - OpenPype modules which means they have to have implemented methods defined - in the interface. By default interface does not have any abstract parts. - """ - pass - - @six.add_metaclass(ABCMeta) class OpenPypeModule: """Base class of pype module. 
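The warning added to _InterfacesClass means imports from the dynamic 'openpype_interfaces' module keep working but are reported as deprecated. A short sketch of the old and the now preferred import style:

    # Deprecated - still functional, but logs a warning with a traceback
    # and is scheduled for removal after 3.16.x:
    from openpype_interfaces import ITrayModule

    # Preferred - interfaces are re-exported from the modules package:
    from openpype.modules import ITrayModule
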
@@ -415,7 +431,7 @@ class OpenPypeModule: def __init__(self, manager, settings): self.manager = manager - self.log = PypeLogger.get_logger(self.name) + self.log = Logger.get_logger(self.name) self.initialize(settings) @@ -432,10 +448,12 @@ class OpenPypeModule: It is not recommended to override __init__ that's why specific method was implemented. """ + pass def connect_with_modules(self, enabled_modules): """Connect with other enabled modules.""" + pass def get_global_environments(self): @@ -443,8 +461,41 @@ class OpenPypeModule: Environment variables that can be get only from system settings. """ + return {} + def modify_application_launch_arguments(self, application, env): + """Give option to modify launch environments before application launch. + + Implementation is optional. To change environments modify passed + dictionary of environments. + + Args: + application (Application): Application that is launched. + env (dict): Current environemnt variables. + """ + + pass + + def on_host_install(self, host, host_name, project_name): + """Host was installed which gives option to handle in-host logic. + + It is a good option to register in-host event callbacks which are + specific for the module. The module is kept in memory for rest of + the process. + + Arguments may change in future. E.g. 'host_name' should be possible + to receive from 'host' object. + + Args: + host (ModuleType): Access to installed/registered host object. + host_name (str): Name of host. + project_name (str): Project name which is main part of host + context. + """ + + pass + def cli(self, module_click_group): """Add commands to click group. @@ -465,6 +516,7 @@ class OpenPypeModule: def mycommand(): print("my_command") """ + pass @@ -501,6 +553,40 @@ class ModulesManager: self.initialize_modules() self.connect_modules() + def __getitem__(self, module_name): + return self.modules_by_name[module_name] + + def get(self, module_name, default=None): + """Access module by name. + + Args: + module_name (str): Name of module which should be returned. + default (Any): Default output if module is not available. + + Returns: + Union[OpenPypeModule, None]: Module found by name or None. + """ + return self.modules_by_name.get(module_name, default) + + def get_enabled_module(self, module_name, default=None): + """Fast access to enabled module. + + If module is available but is not enabled default value is returned. + + Args: + module_name (str): Name of module which should be returned. + default (Any): Default output if module is not available or is + not enabled. + + Returns: + Union[OpenPypeModule, None]: Enabled module found by name or None. + """ + + module = self.get(module_name) + if module is not None and module.enabled: + return module + return default + def initialize_modules(self): """Import and initialize modules.""" # Make sure modules are loaded @@ -654,8 +740,6 @@ class ModulesManager: and "actions" each containing list of paths. """ # Output structure - from openpype_interfaces import IPluginPaths - output = { "publish": [], "create": [], @@ -702,42 +786,65 @@ class ModulesManager: ).format(expected_keys, " | ".join(msg_items))) return output - def collect_launch_hook_paths(self): - """Helper to collect hooks from modules inherited ILaunchHookPaths. + def collect_creator_plugin_paths(self, host_name): + """Helper to collect creator plugin paths from modules. + + Args: + host_name (str): For which host are creators meants. Returns: - list: Paths to launch hook directories. + list: List of creator plugin paths. 
""" - from openpype_interfaces import ILaunchHookPaths - - str_type = type("") - expected_types = (list, tuple, set) - + # Output structure output = [] for module in self.get_enabled_modules(): - # Skip module that do not inherit from `ILaunchHookPaths` - if not isinstance(module, ILaunchHookPaths): + # Skip module that do not inherit from `IPluginPaths` + if not isinstance(module, IPluginPaths): continue - hook_paths = module.get_launch_hook_paths() - if not hook_paths: - continue - - # Convert string to list - if isinstance(hook_paths, str_type): - hook_paths = [hook_paths] - - # Skip invalid types - if not isinstance(hook_paths, expected_types): - self.log.warning(( - "Result of `get_launch_hook_paths`" - " has invalid type {}. Expected {}" - ).format(type(hook_paths), expected_types)) - continue - - output.extend(hook_paths) + paths = module.get_creator_plugin_paths(host_name) + if paths: + # Convert to list if value is not list + if not isinstance(paths, (list, tuple, set)): + paths = [paths] + output.extend(paths) return output + def get_host_module(self, host_name): + """Find host module by host name. + + Args: + host_name (str): Host name for which is found host module. + + Returns: + OpenPypeModule: Found host module by name. + None: There was not found module inheriting IHostAddon which has + host name set to passed 'host_name'. + """ + + for module in self.get_enabled_modules(): + if ( + isinstance(module, IHostAddon) + and module.host_name == host_name + ): + return module + return None + + def get_host_names(self): + """List of available host names based on host modules. + + Returns: + Iterable[str]: All available host names based on enabled modules + inheriting 'IHostAddon'. + """ + + host_names = { + module.host_name + for module in self.get_enabled_modules() + if isinstance(module, IHostAddon) + } + return host_names + def print_report(self): """Print out report of time spent on modules initialization parts. 
@@ -860,6 +967,7 @@ class TrayModulesManager(ModulesManager): modules_menu_order = ( "user", "ftrack", + "kitsu", "muster", "launcher_tool", "avalon", @@ -872,7 +980,7 @@ class TrayModulesManager(ModulesManager): ) def __init__(self): - self.log = PypeLogger.get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) self.modules = [] self.modules_by_id = {} @@ -911,8 +1019,6 @@ class TrayModulesManager(ModulesManager): self.tray_menu(tray_menu) def get_enabled_tray_modules(self): - from openpype_interfaces import ITrayModule - output = [] for module in self.modules: if module.enabled and isinstance(module, ITrayModule): @@ -988,8 +1094,6 @@ class TrayModulesManager(ModulesManager): self._report["Tray menu"] = report def start_modules(self): - from openpype_interfaces import ITrayService - report = {} time_start = time.time() prev_start_time = time_start @@ -1048,7 +1152,7 @@ def get_module_settings_defs(): settings_defs = [] - log = PypeLogger.get_logger("ModuleSettingsLoad") + log = Logger.get_logger("ModuleSettingsLoad") for raw_module in openpype_modules: for attr_name in dir(raw_module): diff --git a/openpype/modules/clockify/clockify_module.py b/openpype/modules/clockify/clockify_module.py index 932ce87c36..14fcb01f67 100644 --- a/openpype/modules/clockify/clockify_module.py +++ b/openpype/modules/clockify/clockify_module.py @@ -2,16 +2,17 @@ import os import threading import time +from openpype.modules import ( + OpenPypeModule, + ITrayModule, + IPluginPaths +) + from .clockify_api import ClockifyAPI from .constants import ( CLOCKIFY_FTRACK_USER_PATH, CLOCKIFY_FTRACK_SERVER_PATH ) -from openpype.modules import OpenPypeModule -from openpype_interfaces import ( - ITrayModule, - IPluginPaths -) class ClockifyModule( diff --git a/openpype/modules/clockify/launcher_actions/ClockifyStart.py b/openpype/modules/clockify/launcher_actions/ClockifyStart.py index 4669f98b01..7663aecc31 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifyStart.py +++ b/openpype/modules/clockify/launcher_actions/ClockifyStart.py @@ -1,16 +1,9 @@ -from openpype.api import Logger -from openpype.pipeline import ( - legacy_io, - LauncherAction, -) +from openpype.client import get_asset_by_name +from openpype.pipeline import LauncherAction from openpype_modules.clockify.clockify_api import ClockifyAPI -log = Logger.get_logger(__name__) - - class ClockifyStart(LauncherAction): - name = "clockify_start_timer" label = "Clockify - Start Timer" icon = "clockify_icon" @@ -24,20 +17,19 @@ class ClockifyStart(LauncherAction): return False def process(self, session, **kwargs): - project_name = session['AVALON_PROJECT'] - asset_name = session['AVALON_ASSET'] - task_name = session['AVALON_TASK'] + project_name = session["AVALON_PROJECT"] + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] description = asset_name - asset = legacy_io.find_one({ - 'type': 'asset', - 'name': asset_name - }) - if asset is not None: - desc_items = asset.get('data', {}).get('parents', []) + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.parents"] + ) + if asset_doc is not None: + desc_items = asset_doc.get("data", {}).get("parents", []) desc_items.append(asset_name) desc_items.append(task_name) - description = '/'.join(desc_items) + description = "/".join(desc_items) project_id = self.clockapi.get_project_id(project_name) tag_ids = [] diff --git a/openpype/modules/clockify/launcher_actions/ClockifySync.py 
b/openpype/modules/clockify/launcher_actions/ClockifySync.py index 356bbd0306..c346a1b4f6 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifySync.py +++ b/openpype/modules/clockify/launcher_actions/ClockifySync.py @@ -1,11 +1,6 @@ +from openpype.client import get_projects, get_project from openpype_modules.clockify.clockify_api import ClockifyAPI -from openpype.api import Logger -from openpype.pipeline import ( - legacy_io, - LauncherAction, -) - -log = Logger.get_logger(__name__) +from openpype.pipeline import LauncherAction class ClockifySync(LauncherAction): @@ -22,39 +17,36 @@ class ClockifySync(LauncherAction): return self.have_permissions def process(self, session, **kwargs): - project_name = session.get('AVALON_PROJECT', None) + project_name = session.get("AVALON_PROJECT") or "" projects_to_sync = [] - if project_name.strip() == '' or project_name is None: - for project in legacy_io.projects(): - projects_to_sync.append(project) + if project_name.strip(): + projects_to_sync = [get_project(project_name)] else: - project = legacy_io.find_one({'type': 'project'}) - projects_to_sync.append(project) + projects_to_sync = get_projects() projects_info = {} for project in projects_to_sync: - task_types = project['config']['tasks'].keys() - projects_info[project['name']] = task_types + task_types = project["config"]["tasks"].keys() + projects_info[project["name"]] = task_types clockify_projects = self.clockapi.get_projects() for project_name, task_types in projects_info.items(): - if project_name not in clockify_projects: - response = self.clockapi.add_project(project_name) - if 'id' not in response: - self.log.error('Project {} can\'t be created'.format( - project_name - )) - continue - project_id = response['id'] - else: - project_id = clockify_projects[project_name] + if project_name in clockify_projects: + continue + + response = self.clockapi.add_project(project_name) + if "id" not in response: + self.log.error("Project {} can't be created".format( + project_name + )) + continue clockify_workspace_tags = self.clockapi.get_tags() for task_type in task_types: if task_type not in clockify_workspace_tags: response = self.clockapi.add_tag(task_type) - if 'id' not in response: + if "id" not in response: self.log.error('Task {} can\'t be created'.format( task_type )) diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index 22902d79ea..512ff800ee 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -4,10 +4,12 @@ It provides Deadline JobInfo data class. """ +import json.decoder import os from abc import abstractmethod import platform import getpass +from functools import partial from collections import OrderedDict import six @@ -15,7 +17,12 @@ import attr import requests import pyblish.api -from openpype.lib.abstract_metaplugins import AbstractMetaInstancePlugin +from openpype.pipeline.publish import ( + AbstractMetaInstancePlugin, + KnownPublishError +) + +JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError) def requests_post(*args, **kwargs): @@ -60,6 +67,96 @@ def requests_get(*args, **kwargs): return requests.get(*args, **kwargs) +class DeadlineKeyValueVar(dict): + """ + + Serializes dictionary key values as "{key}={value}" like Deadline uses + for EnvironmentKeyValue. 
+ + As an example: + EnvironmentKeyValue0="A_KEY=VALUE_A" + EnvironmentKeyValue1="OTHER_KEY=VALUE_B" + + The keys are serialized in alphabetical order (sorted). + + Example: + >>> var = DeadlineKeyValueVar("EnvironmentKeyValue") + >>> var["my_var"] = "hello" + >>> var["my_other_var"] = "hello2" + >>> var.serialize() + + + """ + def __init__(self, key): + super(DeadlineKeyValueVar, self).__init__() + self.__key = key + + def serialize(self): + key = self.__key + + # Allow custom location for index in serialized string + if "{}" not in key: + key = key + "{}" + + return { + key.format(index): "{}={}".format(var_key, var_value) + for index, (var_key, var_value) in enumerate(sorted(self.items())) + } + + +class DeadlineIndexedVar(dict): + """ + + Allows to set and query values by integer indices: + Query: var[1] or var.get(1) + Set: var[1] = "my_value" + Append: var += "value" + + Note: Iterating the instance is not guarantueed to be the order of the + indices. To do so iterate with `sorted()` + + """ + def __init__(self, key): + super(DeadlineIndexedVar, self).__init__() + self.__key = key + + def serialize(self): + key = self.__key + + # Allow custom location for index in serialized string + if "{}" not in key: + key = key + "{}" + + return { + key.format(index): value for index, value in sorted(self.items()) + } + + def next_available_index(self): + # Add as first unused entry + i = 0 + while i in self.keys(): + i += 1 + return i + + def update(self, data): + # Force the integer key check + for key, value in data.items(): + self.__setitem__(key, value) + + def __iadd__(self, other): + index = self.next_available_index() + self[index] = other + return self + + def __setitem__(self, key, value): + if not isinstance(key, int): + raise TypeError("Key must be an integer: {}".format(key)) + + if key < 0: + raise ValueError("Negative index can't be set: {}".format(key)) + dict.__setitem__(self, key, value) + + @attr.s class DeadlineJobInfo(object): """Mapping of all Deadline *JobInfo* attributes. @@ -212,24 +309,8 @@ class DeadlineJobInfo(object): # Environment # ---------------------------------------------- - _environmentKeyValue = attr.ib(factory=list) - - @property - def EnvironmentKeyValue(self): # noqa: N802 - """Return all environment key values formatted for Deadline. - - Returns: - dict: as `{'EnvironmentKeyValue0', 'key=value'}` - - """ - out = {} - for index, v in enumerate(self._environmentKeyValue): - out["EnvironmentKeyValue{}".format(index)] = v - return out - - @EnvironmentKeyValue.setter - def EnvironmentKeyValue(self, val): # noqa: N802 - self._environmentKeyValue.append(val) + EnvironmentKeyValue = attr.ib(factory=partial(DeadlineKeyValueVar, + "EnvironmentKeyValue")) IncludeEnvironment = attr.ib(default=None) # Default: false UseJobEnvironmentOnly = attr.ib(default=None) # Default: false @@ -237,121 +318,29 @@ class DeadlineJobInfo(object): # Job Extra Info # ---------------------------------------------- - _extraInfos = attr.ib(factory=list) - _extraInfoKeyValues = attr.ib(factory=list) - - @property - def ExtraInfo(self): # noqa: N802 - """Return all ExtraInfo values formatted for Deadline. - - Returns: - dict: as `{'ExtraInfo0': 'value'}` - - """ - out = {} - for index, v in enumerate(self._extraInfos): - out["ExtraInfo{}".format(index)] = v - return out - - @ExtraInfo.setter - def ExtraInfo(self, val): # noqa: N802 - self._extraInfos.append(val) - - @property - def ExtraInfoKeyValue(self): # noqa: N802 - """Return all ExtraInfoKeyValue values formatted for Deadline. 
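The two containers above replace the per-attribute properties that are removed below; they serialize into the numbered keys Deadline expects. A small worked sketch, assuming the dynamic 'openpype_modules' namespace is already loaded; the commented dictionaries are what the current implementation would produce:

    from openpype_modules.deadline.abstract_submit_deadline import (
        DeadlineKeyValueVar,
        DeadlineIndexedVar,
    )

    env = DeadlineKeyValueVar("EnvironmentKeyValue")
    env["AVALON_PROJECT"] = "demo"
    env["AVALON_TASK"] = "compositing"
    # Keys are sorted alphabetically before indices are assigned:
    # {"EnvironmentKeyValue0": "AVALON_PROJECT=demo",
    #  "EnvironmentKeyValue1": "AVALON_TASK=compositing"}
    print(env.serialize())

    output = DeadlineIndexedVar("OutputFilename")
    output += "render.####.exr"   # '+=' appends at the next free index (0)
    output[1] = "preview.mp4"     # explicit integer indices also work
    # {"OutputFilename0": "render.####.exr", "OutputFilename1": "preview.mp4"}
    print(output.serialize())
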
- - Returns: - dict: as {'ExtraInfoKeyValue0': 'key=value'}` - - """ - out = {} - for index, v in enumerate(self._extraInfoKeyValues): - out["ExtraInfoKeyValue{}".format(index)] = v - return out - - @ExtraInfoKeyValue.setter - def ExtraInfoKeyValue(self, val): # noqa: N802 - self._extraInfoKeyValues.append(val) + ExtraInfo = attr.ib(factory=partial(DeadlineIndexedVar, "ExtraInfo")) + ExtraInfoKeyValue = attr.ib(factory=partial(DeadlineKeyValueVar, + "ExtraInfoKeyValue")) # Task Extra Info Names # ---------------------------------------------- OverrideTaskExtraInfoNames = attr.ib(default=None) # Default: false - _taskExtraInfos = attr.ib(factory=list) - - @property - def TaskExtraInfoName(self): # noqa: N802 - """Return all TaskExtraInfoName values formatted for Deadline. - - Returns: - dict: as `{'TaskExtraInfoName0': 'value'}` - - """ - out = {} - for index, v in enumerate(self._taskExtraInfos): - out["TaskExtraInfoName{}".format(index)] = v - return out - - @TaskExtraInfoName.setter - def TaskExtraInfoName(self, val): # noqa: N802 - self._taskExtraInfos.append(val) + TaskExtraInfoName = attr.ib(factory=partial(DeadlineIndexedVar, + "TaskExtraInfoName")) # Output # ---------------------------------------------- - _outputFilename = attr.ib(factory=list) - _outputFilenameTile = attr.ib(factory=list) - _outputDirectory = attr.ib(factory=list) + OutputFilename = attr.ib(factory=partial(DeadlineIndexedVar, + "OutputFilename")) + OutputFilenameTile = attr.ib(factory=partial(DeadlineIndexedVar, + "OutputFilename{}Tile")) + OutputDirectory = attr.ib(factory=partial(DeadlineIndexedVar, + "OutputDirectory")) - @property - def OutputFilename(self): # noqa: N802 - """Return all OutputFilename values formatted for Deadline. - - Returns: - dict: as `{'OutputFilename0': 'filename'}` - - """ - out = {} - for index, v in enumerate(self._outputFilename): - out["OutputFilename{}".format(index)] = v - return out - - @OutputFilename.setter - def OutputFilename(self, val): # noqa: N802 - self._outputFilename.append(val) - - @property - def OutputFilenameTile(self): # noqa: N802 - """Return all OutputFilename#Tile values formatted for Deadline. - - Returns: - dict: as `{'OutputFilenme#Tile': 'tile'}` - - """ - out = {} - for index, v in enumerate(self._outputFilenameTile): - out["OutputFilename{}Tile".format(index)] = v - return out - - @OutputFilenameTile.setter - def OutputFilenameTile(self, val): # noqa: N802 - self._outputFilenameTile.append(val) - - @property - def OutputDirectory(self): # noqa: N802 - """Return all OutputDirectory values formatted for Deadline. 
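Because the old properties are replaced by these attr.ib factories, submit plugins now write into the containers directly, as the updated After Effects submitter further below does. A sketch under the assumption that 'Plugin' is the only required constructor argument:

    job_info = DeadlineJobInfo(Plugin="AfterEffects")

    # Key/value environment entries:
    job_info.EnvironmentKeyValue["AVALON_PROJECT"] = "demo"
    job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1"

    # Indexed entries - '+=' appends at the next free index:
    job_info.OutputFilename += "renderCompositingMain.####.exr"
    job_info.OutputDirectory += "/projects/demo/publish"  # example path only

    payload = job_info.serialize()  # flat dict ready for the Deadline API
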
- - Returns: - dict: as `{'OutputDirectory0': 'dir'}` - - """ - out = {} - for index, v in enumerate(self._outputDirectory): - out["OutputDirectory{}".format(index)] = v - return out - - @OutputDirectory.setter - def OutputDirectory(self, val): # noqa: N802 - self._outputDirectory.append(val) + # Asset Dependency + # ---------------------------------------------- + AssetDependency = attr.ib(factory=partial(DeadlineIndexedVar, + "AssetDependency")) # Tile Job # ---------------------------------------------- @@ -375,7 +364,7 @@ class DeadlineJobInfo(object): """ def filter_data(a, v): - if a.name.startswith("_"): + if isinstance(v, (DeadlineIndexedVar, DeadlineKeyValueVar)): return False if v is None: return False @@ -383,15 +372,27 @@ class DeadlineJobInfo(object): serialized = attr.asdict( self, dict_factory=OrderedDict, filter=filter_data) - serialized.update(self.EnvironmentKeyValue) - serialized.update(self.ExtraInfo) - serialized.update(self.ExtraInfoKeyValue) - serialized.update(self.TaskExtraInfoName) - serialized.update(self.OutputFilename) - serialized.update(self.OutputFilenameTile) - serialized.update(self.OutputDirectory) + + # Custom serialize these attributes + for attribute in [ + self.EnvironmentKeyValue, + self.ExtraInfo, + self.ExtraInfoKeyValue, + self.TaskExtraInfoName, + self.OutputFilename, + self.OutputFilenameTile, + self.OutputDirectory, + self.AssetDependency + ]: + serialized.update(attribute.serialize()) + return serialized + def update(self, data): + """Update instance with data dict""" + for key, value in data.items(): + setattr(self, key, value) + @six.add_metaclass(AbstractMetaInstancePlugin) class AbstractSubmitDeadline(pyblish.api.InstancePlugin): @@ -515,68 +516,72 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): published. """ - anatomy = self._instance.context.data['anatomy'] - file_path = None - for i in self._instance.context: - if "workfile" in i.data["families"] \ - or i.data["family"] == "workfile": - # test if there is instance of workfile waiting - # to be published. - assert i.data["publish"] is True, ( - "Workfile (scene) must be published along") - # determine published path from Anatomy. - template_data = i.data.get("anatomyData") - rep = i.data.get("representations")[0].get("ext") - template_data["representation"] = rep - template_data["ext"] = rep - template_data["comment"] = None - anatomy_filled = anatomy.format(template_data) - template_filled = anatomy_filled["publish"]["path"] - file_path = os.path.normpath(template_filled) - self.log.info("Using published scene for render {}".format( - file_path)) + instance = self._instance + workfile_instance = self._get_workfile_instance(instance.context) + if workfile_instance is None: + return - if not os.path.exists(file_path): - self.log.error("published scene does not exist!") - raise + # determine published path from Anatomy. 
+ template_data = workfile_instance.data.get("anatomyData") + rep = workfile_instance.data.get("representations")[0] + template_data["representation"] = rep.get("name") + template_data["ext"] = rep.get("ext") + template_data["comment"] = None - if not replace_in_path: - return file_path + anatomy = instance.context.data['anatomy'] + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled["publish"]["path"] + file_path = os.path.normpath(template_filled) - # now we need to switch scene in expected files - # because token will now point to published - # scene file and that might differ from current one - new_scene = os.path.splitext( - os.path.basename(file_path))[0] - orig_scene = os.path.splitext( - os.path.basename( - self._instance.context.data["currentFile"]))[0] - exp = self._instance.data.get("expectedFiles") + self.log.info("Using published scene for render {}".format(file_path)) - if isinstance(exp[0], dict): - # we have aovs and we need to iterate over them - new_exp = {} - for aov, files in exp[0].items(): - replaced_files = [] - for f in files: - replaced_files.append( - str(f).replace(orig_scene, new_scene) - ) - new_exp[aov] = replaced_files - # [] might be too much here, TODO - self._instance.data["expectedFiles"] = [new_exp] - else: - new_exp = [] - for f in exp: - new_exp.append( - str(f).replace(orig_scene, new_scene) - ) - self._instance.data["expectedFiles"] = new_exp + if not os.path.exists(file_path): + self.log.error("published scene does not exist!") + raise - self.log.info("Scene name was switched {} -> {}".format( - orig_scene, new_scene - )) + if not replace_in_path: + return file_path + + # now we need to switch scene in expected files + # because token will now point to published + # scene file and that might differ from current one + def _clean_name(path): + return os.path.splitext(os.path.basename(path))[0] + + new_scene = _clean_name(file_path) + orig_scene = _clean_name(instance.context.data["currentFile"]) + expected_files = instance.data.get("expectedFiles") + + if isinstance(expected_files[0], dict): + # we have aovs and we need to iterate over them + new_exp = {} + for aov, files in expected_files[0].items(): + replaced_files = [] + for f in files: + replaced_files.append( + str(f).replace(orig_scene, new_scene) + ) + new_exp[aov] = replaced_files + # [] might be too much here, TODO + instance.data["expectedFiles"] = [new_exp] + else: + new_exp = [] + for f in expected_files: + new_exp.append( + str(f).replace(orig_scene, new_scene) + ) + instance.data["expectedFiles"] = new_exp + + metadata_folder = instance.data.get("publishRenderMetadataFolder") + if metadata_folder: + metadata_folder = metadata_folder.replace(orig_scene, + new_scene) + instance.data["publishRenderMetadataFolder"] = metadata_folder + + self.log.info("Scene name was switched {} -> {}".format( + orig_scene, new_scene + )) return file_path @@ -615,7 +620,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): str: resulting Deadline job id. Throws: - RuntimeError: if submission fails. + KnownPublishError: if submission fails. """ url = "{}/api/jobs".format(self._deadline_url) @@ -625,10 +630,36 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): self.log.error(response.status_code) self.log.error(response.content) self.log.debug(payload) - raise RuntimeError(response.text) + raise KnownPublishError(response.text) + + try: + result = response.json() + except JSONDecodeError: + msg = "Broken response {}. 
".format(response) + msg += "Try restarting the Deadline Webservice." + self.log.warning(msg, exc_info=True) + raise KnownPublishError("Broken response from DL") - result = response.json() # for submit publish job self._instance.data["deadlineSubmissionJob"] = result return result["_id"] + + @staticmethod + def _get_workfile_instance(context): + """Find workfile instance in context""" + for i in context: + + is_workfile = ( + "workfile" in i.data.get("families", []) or + i.data["family"] == "workfile" + ) + if not is_workfile: + continue + + # test if there is instance of workfile waiting + # to be published. + assert i.data["publish"] is True, ( + "Workfile (scene) must be published along") + + return i diff --git a/openpype/modules/deadline/deadline_module.py b/openpype/modules/deadline/deadline_module.py index c30db75188..9855f8c1b1 100644 --- a/openpype/modules/deadline/deadline_module.py +++ b/openpype/modules/deadline/deadline_module.py @@ -3,9 +3,8 @@ import requests import six import sys -from openpype.lib import requests_get, PypeLogger -from openpype.modules import OpenPypeModule -from openpype_interfaces import IPluginPaths +from openpype.lib import requests_get, Logger +from openpype.modules import OpenPypeModule, IPluginPaths class DeadlineWebserviceError(Exception): @@ -58,7 +57,7 @@ class DeadlineModule(OpenPypeModule, IPluginPaths): """ if not log: - log = PypeLogger.get_logger(__name__) + log = Logger.get_logger(__name__) argument = "{}/api/pools?NamesOnly=true".format(webservice) try: diff --git a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py index a7035cd99f..9981bead3e 100644 --- a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py +++ b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py @@ -13,7 +13,7 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.415 label = "Deadline Webservice from the Instance" - families = ["rendering"] + families = ["rendering", "renderlayer"] def process(self, instance): instance.data["deadlineUrl"] = self._collect_deadline_url(instance) diff --git a/openpype/modules/deadline/plugins/publish/collect_publishable_instances.py b/openpype/modules/deadline/plugins/publish/collect_publishable_instances.py new file mode 100644 index 0000000000..b00381b6cf --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/collect_publishable_instances.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +"""Collect instances that should be processed and published on DL. + +""" +import os + +import pyblish.api +from openpype.pipeline import PublishValidationError + + +class CollectDeadlinePublishableInstances(pyblish.api.InstancePlugin): + """Collect instances that should be processed and published on DL. + + Some long running publishes (not just renders) could be offloaded to DL, + this plugin compares theirs name against env variable, marks only + publishable by farm. + + Triggered only when running only in headless mode, eg on a farm. 
+ """ + + order = pyblish.api.CollectorOrder + 0.499 + label = "Collect Deadline Publishable Instance" + targets = ["remote"] + + def process(self, instance): + self.log.debug("CollectDeadlinePublishableInstances") + publish_inst = os.environ.get("OPENPYPE_PUBLISH_SUBSET", '') + if not publish_inst: + raise PublishValidationError("OPENPYPE_PUBLISH_SUBSET env var " + "required for remote publishing") + + subset_name = instance.data["subset"] + if subset_name == publish_inst: + self.log.debug("Publish {}".format(subset_name)) + instance.data["publish"] = True + instance.data["farm"] = False + else: + self.log.debug("Skipping {}".format(subset_name)) + instance.data["publish"] = False diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index b6584f239e..0c1ffa6bd7 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -3,8 +3,10 @@ import attr import getpass import pyblish.api -from openpype.lib import env_value_to_bool -from openpype.lib.delivery import collect_frames +from openpype.lib import ( + env_value_to_bool, + collect_frames, +) from openpype.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo @@ -33,6 +35,7 @@ class AfterEffectsSubmitDeadline( hosts = ["aftereffects"] families = ["render.farm"] # cannot be "render' as that is integrated use_published = True + targets = ["local"] priority = 50 chunk_size = 1000000 @@ -64,9 +67,9 @@ class AfterEffectsSubmitDeadline( dln_job_info.Group = self.group dln_job_info.Department = self.department dln_job_info.ChunkSize = self.chunk_size - dln_job_info.OutputFilename = \ + dln_job_info.OutputFilename += \ os.path.basename(self._instance.data["expectedFiles"][0]) - dln_job_info.OutputDirectory = \ + dln_job_info.OutputDirectory += \ os.path.dirname(self._instance.data["expectedFiles"][0]) dln_job_info.JobDelay = "00:00:00" @@ -79,7 +82,8 @@ class AfterEffectsSubmitDeadline( "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): @@ -88,13 +92,12 @@ class AfterEffectsSubmitDeadline( environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) for key in keys: - val = environment.get(key) - if val: - dln_job_info.EnvironmentKeyValue = "{key}={value}".format( - key=key, - value=val) + value = environment.get(key) + if value: + dln_job_info.EnvironmentKeyValue[key] = value + # to recognize job from PYPE for turning Event On/Off - dln_job_info.EnvironmentKeyValue = "OPENPYPE_RENDER_JOB=1" + dln_job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" return dln_job_info diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py index 912f0f4026..6327143623 100644 --- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py @@ -238,6 +238,7 @@ class HarmonySubmitDeadline( order = pyblish.api.IntegratorOrder + 0.1 hosts = ["harmony"] families = ["render.farm"] + targets = ["local"] optional = True use_published = False @@ -273,7 +274,8 
@@ class HarmonySubmitDeadline( "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): @@ -282,14 +284,12 @@ class HarmonySubmitDeadline( environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) for key in keys: - val = environment.get(key) - if val: - job_info.EnvironmentKeyValue = "{key}={value}".format( - key=key, - value=val) + value = environment.get(key) + if value: + job_info.EnvironmentKeyValue[key] = value # to recognize job from PYPE for turning Event On/Off - job_info.EnvironmentKeyValue = "OPENPYPE_RENDER_JOB=1" + job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" return job_info @@ -321,7 +321,9 @@ class HarmonySubmitDeadline( ) unzip_dir = (published_scene.parent / published_scene.stem) with _ZipFile(published_scene, "r") as zip_ref: - zip_ref.extractall(unzip_dir.as_posix()) + # UNC path (//?/) added to minimalize risk with extracting + # to large file paths + zip_ref.extractall("//?/" + str(unzip_dir.as_posix())) # find any xstage files in directory, prefer the one with the same name # as directory (plus extension) diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py index f834ae7e92..95856137e2 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -55,7 +55,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): scenename = os.path.basename(scene) # Get project code - project = legacy_io.find_one({"type": "project"}) + project = context.data["projectEntity"] code = project["data"].get("code", project["name"]) job_name = "{scene} [PUBLISH]".format(scene=scenename) @@ -130,6 +130,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): # this application with so the Render Slave can build its own # similar environment using it, e.g. "houdini17.5;pluginx2.3" "AVALON_TOOLS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index aca88c7440..beda753723 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -101,6 +101,7 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): # this application with so the Render Slave can build its own # similar environment using it, e.g. 
"maya2018;vray4.x;yeti3.1.9" "AVALON_TOOLS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 8562c85f7d..3398e1725e 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -18,7 +18,6 @@ Attributes: from __future__ import print_function import os -import json import getpass import copy import re @@ -27,44 +26,723 @@ from datetime import datetime import itertools from collections import OrderedDict -import clique -import requests +import attr from maya import cmds -import pyblish.api - -from openpype.lib import requests_post -from openpype.hosts.maya.api import lib from openpype.pipeline import legacy_io -# Documentation for keys available at: -# https://docs.thinkboxsoftware.com -# /products/deadline/8.0/1_User%20Manual/manual -# /manual-submission.html#job-info-file-options +from openpype.hosts.maya.api.lib_rendersettings import RenderSettings +from openpype.hosts.maya.api.lib import get_attr_in_layer -payload_skeleton_template = { - "JobInfo": { - "BatchName": None, # Top-level group name - "Name": None, # Job name, as seen in Monitor - "UserName": None, - "Plugin": "MayaBatch", - "Frames": "{start}-{end}x{step}", - "Comment": None, - "Priority": 50, - }, - "PluginInfo": { - "SceneFile": None, # Input - "OutputFilePath": None, # Output directory and filename - "OutputFilePrefix": None, - "Version": cmds.about(version=True), # Mandatory for Deadline - "UsingRenderLayers": True, - "RenderLayer": None, # Render only this layer - "Renderer": None, - "ProjectPath": None, # Resolve relative references - }, - "AuxFiles": [] # Mandatory for Deadline, may be empty -} +from openpype_modules.deadline import abstract_submit_deadline +from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo + + +def _validate_deadline_bool_value(instance, attribute, value): + if not isinstance(value, (str, bool)): + raise TypeError( + "Attribute {} must be str or bool.".format(attribute)) + if value not in {"1", "0", True, False}: + raise ValueError( + ("Value of {} must be one of " + "'0', '1', True, False").format(attribute) + ) + + +@attr.s +class MayaPluginInfo(object): + SceneFile = attr.ib(default=None) # Input + OutputFilePath = attr.ib(default=None) # Output directory and filename + OutputFilePrefix = attr.ib(default=None) + Version = attr.ib(default=None) # Mandatory for Deadline + UsingRenderLayers = attr.ib(default=True) + RenderLayer = attr.ib(default=None) # Render only this layer + Renderer = attr.ib(default=None) + ProjectPath = attr.ib(default=None) # Resolve relative references + # Include all lights flag + RenderSetupIncludeLights = attr.ib( + default="1", validator=_validate_deadline_bool_value) + + +@attr.s +class PythonPluginInfo(object): + ScriptFile = attr.ib() + Version = attr.ib(default="3.6") + Arguments = attr.ib(default=None) + SingleFrameOnly = attr.ib(default=None) + + +@attr.s +class VRayPluginInfo(object): + InputFilename = attr.ib(default=None) # Input + SeparateFilesPerFrame = attr.ib(default=None) + VRayEngine = attr.ib(default="V-Ray") + Width = attr.ib(default=None) + Height = attr.ib(default=None) # Mandatory for Deadline + OutputFilePath = attr.ib(default=True) + OutputFileName = attr.ib(default=None) # Render only this layer + + +@attr.s +class 
ArnoldPluginInfo(object): + ArnoldFile = attr.ib(default=None) + + +class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): + + label = "Submit Render to Deadline" + hosts = ["maya"] + families = ["renderlayer"] + targets = ["local"] + + tile_assembler_plugin = "OpenPypeTileAssembler" + priority = 50 + tile_priority = 50 + limit = [] # limit groups + jobInfo = {} + pluginInfo = {} + group = "none" + + def get_job_info(self): + job_info = DeadlineJobInfo(Plugin="MayaBatch") + + # todo: test whether this works for existing production cases + # where custom jobInfo was stored in the project settings + job_info.update(self.jobInfo) + + instance = self._instance + context = instance.context + + # Always use the original work file name for the Job name even when + # rendering is done from the published Work File. The original work + # file name is clearer because it can also have subversion strings, + # etc. which are stripped for the published file. + src_filepath = context.data["currentFile"] + src_filename = os.path.basename(src_filepath) + + job_info.Name = "%s - %s" % (src_filename, instance.name) + job_info.BatchName = src_filename + job_info.Plugin = instance.data.get("mayaRenderPlugin", "MayaBatch") + job_info.UserName = context.data.get("deadlineUser", getpass.getuser()) + + # Deadline requires integers in frame range + frames = "{start}-{end}x{step}".format( + start=int(instance.data["frameStartHandle"]), + end=int(instance.data["frameEndHandle"]), + step=int(instance.data["byFrameStep"]), + ) + job_info.Frames = frames + + job_info.Pool = instance.data.get("primaryPool") + job_info.SecondaryPool = instance.data.get("secondaryPool") + job_info.ChunkSize = instance.data.get("chunkSize", 10) + job_info.Comment = context.data.get("comment") + job_info.Priority = instance.data.get("priority", self.priority) + job_info.FramesPerTask = instance.data.get("framesPerTask", 1) + + if self.group != "none" and self.group: + job_info.Group = self.group + + if self.limit: + job_info.LimitGroups = ",".join(self.limit) + + # Add options from RenderGlobals + render_globals = instance.data.get("renderGlobals", {}) + job_info.update(render_globals) + + keys = [ + "FTRACK_API_KEY", + "FTRACK_API_USER", + "FTRACK_SERVER", + "OPENPYPE_SG_USER", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK", + "AVALON_APP_NAME", + "OPENPYPE_DEV", + "OPENPYPE_VERSION" + ] + # Add mongo url if it's enabled + if self._instance.context.data.get("deadlinePassMongoUrl"): + keys.append("OPENPYPE_MONGO") + + environment = dict({key: os.environ[key] for key in keys + if key in os.environ}, **legacy_io.Session) + + for key in keys: + value = environment.get(key) + if not value: + continue + job_info.EnvironmentKeyValue[key] = value + + # to recognize job from PYPE for turning Event On/Off + job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" + job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1" + + # Adding file dependencies. 
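        # [Editorial note, not part of the patch] A minimal sketch, assuming
        # DeadlineJobInfo expands its attribute-style containers into the
        # numbered keys of a Deadline job info file: repeated `+=` calls and
        # EnvironmentKeyValue[...] items would end up as the same numbered
        # `EnvironmentKeyValue0=KEY=value` / `AssetDependency0=path` entries
        # that the removed plugin code further below builds by hand.
        #
        #     job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1"
        #     job_info.AssetDependency += "/path/to/workfile.ma"
        #
        #     # would serialize roughly as:
        #     # EnvironmentKeyValue0=OPENPYPE_RENDER_JOB=1
        #     # AssetDependency0=/path/to/workfile.ma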
+ if self.asset_dependencies: + dependencies = instance.context.data["fileDependencies"] + dependencies.append(context.data["currentFile"]) + for dependency in dependencies: + job_info.AssetDependency += dependency + + # Add list of expected files to job + # --------------------------------- + exp = instance.data.get("expectedFiles") + for filepath in self._iter_expected_files(exp): + job_info.OutputDirectory += os.path.dirname(filepath) + job_info.OutputFilename += os.path.basename(filepath) + + return job_info + + def get_plugin_info(self): + + instance = self._instance + context = instance.context + + # Set it to default Maya behaviour if it cannot be determined + # from instance (but it should be, by the Collector). + + default_rs_include_lights = ( + instance.context.data['project_settings'] + ['maya'] + ['RenderSettings'] + ['enable_all_lights'] + ) + + rs_include_lights = instance.data.get( + "renderSetupIncludeLights", default_rs_include_lights) + if rs_include_lights not in {"1", "0", True, False}: + rs_include_lights = default_rs_include_lights + plugin_info = MayaPluginInfo( + SceneFile=self.scene_path, + Version=cmds.about(version=True), + RenderLayer=instance.data['setMembers'], + Renderer=instance.data["renderer"], + RenderSetupIncludeLights=rs_include_lights, # noqa + ProjectPath=context.data["workspaceDir"], + UsingRenderLayers=True, + ) + + plugin_payload = attr.asdict(plugin_info) + + # Patching with pluginInfo from settings + for key, value in self.pluginInfo.items(): + plugin_payload[key] = value + + return plugin_payload + + def process_submission(self): + + instance = self._instance + context = instance.context + + filepath = self.scene_path # publish if `use_publish` else workfile + + # TODO: Avoid the need for this logic here, needed for submit publish + # Store output dir for unified publisher (filesequence) + expected_files = instance.data["expectedFiles"] + first_file = next(self._iter_expected_files(expected_files)) + output_dir = os.path.dirname(first_file) + instance.data["outputDir"] = output_dir + instance.data["toBeRenderedOn"] = "deadline" + + # Patch workfile (only when use_published is enabled) + if self.use_published: + self._patch_workfile() + + # Gather needed data ------------------------------------------------ + workspace = context.data["workspaceDir"] + default_render_file = instance.context.data.get('project_settings')\ + .get('maya')\ + .get('RenderSettings')\ + .get('default_render_image_folder') + filename = os.path.basename(filepath) + dirname = os.path.join(workspace, default_render_file) + + # Fill in common data to payload ------------------------------------ + # TODO: Replace these with collected data from CollectRender + payload_data = { + "filename": filename, + "dirname": dirname, + } + + # Submit preceding export jobs ------------------------------------- + export_job = None + assert not all(x in instance.data["families"] + for x in ['vrayscene', 'assscene']), ( + "Vray Scene and Ass Scene options are mutually exclusive") + + if "vrayscene" in instance.data["families"]: + self.log.debug("Submitting V-Ray scene render..") + vray_export_payload = self._get_vray_export_payload(payload_data) + export_job = self.submit(vray_export_payload) + + payload = self._get_vray_render_payload(payload_data) + + elif "assscene" in instance.data["families"]: + self.log.debug("Submitting Arnold .ass standalone render..") + ass_export_payload = self._get_arnold_export_payload(payload_data) + export_job = self.submit(ass_export_payload) + + payload = 
self._get_arnold_render_payload(payload_data) + else: + self.log.debug("Submitting MayaBatch render..") + payload = self._get_maya_payload(payload_data) + + # Add export job as dependency -------------------------------------- + if export_job: + job_info, _ = payload + job_info.JobDependency = export_job + + if instance.data.get("tileRendering"): + # Prepare tiles data + self._tile_render(payload) + else: + # Submit main render job + job_info, plugin_info = payload + self.submit(self.assemble_payload(job_info, plugin_info)) + + def _tile_render(self, payload): + """Submit as tile render per frame with dependent assembly jobs.""" + + # As collected by super process() + instance = self._instance + + payload_job_info, payload_plugin_info = payload + job_info = copy.deepcopy(payload_job_info) + plugin_info = copy.deepcopy(payload_plugin_info) + + # if we have sequence of files, we need to create tile job for + # every frame + job_info.TileJob = True + job_info.TileJobTilesInX = instance.data.get("tilesX") + job_info.TileJobTilesInY = instance.data.get("tilesY") + + tiles_count = job_info.TileJobTilesInX * job_info.TileJobTilesInY + + plugin_info["ImageHeight"] = instance.data.get("resolutionHeight") + plugin_info["ImageWidth"] = instance.data.get("resolutionWidth") + plugin_info["RegionRendering"] = True + + R_FRAME_NUMBER = re.compile( + r".+\.(?P[0-9]+)\..+") # noqa: N806, E501 + REPL_FRAME_NUMBER = re.compile( + r"(.+\.)([0-9]+)(\..+)") # noqa: N806, E501 + + exp = instance.data["expectedFiles"] + if isinstance(exp[0], dict): + # we have aovs and we need to iterate over them + # get files from `beauty` + files = exp[0].get("beauty") + # assembly files are used for assembly jobs as we need to put + # together all AOVs + assembly_files = list( + itertools.chain.from_iterable( + [f for _, f in exp[0].items()])) + if not files: + # if beauty doesn't exist, use first aov we found + files = exp[0].get(list(exp[0].keys())[0]) + else: + files = exp + assembly_files = files + + # Define frame tile jobs + frame_file_hash = {} + frame_payloads = {} + file_index = 1 + for file in files: + frame = re.search(R_FRAME_NUMBER, file).group("frame") + + new_job_info = copy.deepcopy(job_info) + new_job_info.Name += " (Frame {} - {} tiles)".format(frame, + tiles_count) + new_job_info.TileJobFrame = frame + + new_plugin_info = copy.deepcopy(plugin_info) + + # Add tile data into job info and plugin info + tiles_data = _format_tiles( + file, 0, + instance.data.get("tilesX"), + instance.data.get("tilesY"), + instance.data.get("resolutionWidth"), + instance.data.get("resolutionHeight"), + payload_plugin_info["OutputFilePrefix"] + )[0] + + new_job_info.update(tiles_data["JobInfo"]) + new_plugin_info.update(tiles_data["PluginInfo"]) + + self.log.info("hashing {} - {}".format(file_index, file)) + job_hash = hashlib.sha256( + ("{}_{}".format(file_index, file)).encode("utf-8")) + + file_hash = job_hash.hexdigest() + frame_file_hash[frame] = file_hash + + new_job_info.ExtraInfo[0] = file_hash + new_job_info.ExtraInfo[1] = file + + frame_payloads[frame] = self.assemble_payload( + job_info=new_job_info, + plugin_info=new_plugin_info + ) + file_index += 1 + + self.log.info( + "Submitting tile job(s) [{}] ...".format(len(frame_payloads))) + + # Submit frame tile jobs + frame_tile_job_id = {} + for frame, tile_job_payload in frame_payloads.items(): + job_id = self.submit(tile_job_payload) + frame_tile_job_id[frame] = job_id + + # Define assembly payloads + assembly_job_info = copy.deepcopy(job_info) + assembly_job_info.Plugin 
= self.tile_assembler_plugin + assembly_job_info.Name += " - Tile Assembly Job" + assembly_job_info.Frames = 1 + assembly_job_info.MachineLimit = 1 + assembly_job_info.Priority = instance.data.get("tile_priority", + self.tile_priority) + + assembly_plugin_info = { + "CleanupTiles": 1, + "ErrorOnMissing": True, + "Renderer": self._instance.data["renderer"] + } + + assembly_payloads = [] + output_dir = self.job_info.OutputDirectory[0] + for file in assembly_files: + frame = re.search(R_FRAME_NUMBER, file).group("frame") + + frame_assembly_job_info = copy.deepcopy(assembly_job_info) + frame_assembly_job_info.Name += " (Frame {})".format(frame) + frame_assembly_job_info.OutputFilename[0] = re.sub( + REPL_FRAME_NUMBER, + "\\1{}\\3".format("#" * len(frame)), file) + + file_hash = frame_file_hash[frame] + tile_job_id = frame_tile_job_id[frame] + + frame_assembly_job_info.ExtraInfo[0] = file_hash + frame_assembly_job_info.ExtraInfo[1] = file + frame_assembly_job_info.JobDependency = tile_job_id + + # write assembly job config files + now = datetime.now() + + config_file = os.path.join( + output_dir, + "{}_config_{}.txt".format( + os.path.splitext(file)[0], + now.strftime("%Y_%m_%d_%H_%M_%S") + ) + ) + try: + if not os.path.isdir(output_dir): + os.makedirs(output_dir) + except OSError: + # directory is not available + self.log.warning("Path is unreachable: " + "`{}`".format(output_dir)) + + with open(config_file, "w") as cf: + print("TileCount={}".format(tiles_count), file=cf) + print("ImageFileName={}".format(file), file=cf) + print("ImageWidth={}".format( + instance.data.get("resolutionWidth")), file=cf) + print("ImageHeight={}".format( + instance.data.get("resolutionHeight")), file=cf) + + tiles = _format_tiles( + file, 0, + instance.data.get("tilesX"), + instance.data.get("tilesY"), + instance.data.get("resolutionWidth"), + instance.data.get("resolutionHeight"), + payload_plugin_info["OutputFilePrefix"] + )[1] + for k, v in sorted(tiles.items()): + print("{}={}".format(k, v), file=cf) + + payload = self.assemble_payload( + job_info=frame_assembly_job_info, + plugin_info=assembly_plugin_info.copy(), + # todo: aux file transfers don't work with deadline webservice + # add config file as job auxFile + # aux_files=[config_file] + ) + assembly_payloads.append(payload) + + # Submit assembly jobs + assembly_job_ids = [] + num_assemblies = len(assembly_payloads) + for i, payload in enumerate(assembly_payloads): + self.log.info( + "submitting assembly job {} of {}".format(i + 1, + num_assemblies) + ) + assembly_job_id = self.submit(payload) + assembly_job_ids.append(assembly_job_id) + + instance.data["assemblySubmissionJobs"] = assembly_job_ids + + def _get_maya_payload(self, data): + + job_info = copy.deepcopy(self.job_info) + + if self.asset_dependencies: + # Asset dependency to wait for at least the scene file to sync. + job_info.AssetDependency += self.scene_path + + # Get layer prefix + renderlayer = self._instance.data["setMembers"] + renderer = self._instance.data["renderer"] + layer_prefix_attr = RenderSettings.get_image_prefix_attr(renderer) + layer_prefix = get_attr_in_layer(layer_prefix_attr, layer=renderlayer) + + plugin_info = copy.deepcopy(self.plugin_info) + plugin_info.update({ + # Output directory and filename + "OutputFilePath": data["dirname"].replace("\\", "/"), + "OutputFilePrefix": layer_prefix, + }) + + # This hack is here because of how Deadline handles Renderman version. 
+ # it considers everything with `renderman` set as version older than + # Renderman 22, and so if we are using renderman > 21 we need to set + # renderer string on the job to `renderman22`. We will have to change + # this when Deadline releases new version handling this. + renderer = self._instance.data["renderer"] + if renderer == "renderman": + try: + from rfm2.config import cfg # noqa + except ImportError: + raise Exception("Cannot determine renderman version") + + rman_version = cfg().build_info.version() # type: str + if int(rman_version.split(".")[0]) > 22: + renderer = "renderman22" + + plugin_info["Renderer"] = renderer + + # this is needed because renderman plugin in Deadline + # handles directory and file prefixes separately + plugin_info["OutputFilePath"] = job_info.OutputDirectory[0] + + return job_info, plugin_info + + def _get_vray_export_payload(self, data): + + job_info = copy.deepcopy(self.job_info) + job_info.Name = self._job_info_label("Export") + + # Get V-Ray settings info to compute output path + vray_scene = self.format_vray_output_filename() + + plugin_info = { + "Renderer": "vray", + "SkipExistingFrames": True, + "UseLegacyRenderLayers": True, + "OutputFilePath": os.path.dirname(vray_scene) + } + + return job_info, attr.asdict(plugin_info) + + def _get_arnold_export_payload(self, data): + + try: + from openpype.scripts import export_maya_ass_job + except Exception: + raise AssertionError( + "Expected module 'export_maya_ass_job' to be available") + + module_path = export_maya_ass_job.__file__ + if module_path.endswith(".pyc"): + module_path = module_path[: -len(".pyc")] + ".py" + + script = os.path.normpath(module_path) + + job_info = copy.deepcopy(self.job_info) + job_info.Name = self._job_info_label("Export") + + # Force a single frame Python job + job_info.Plugin = "Python" + job_info.Frames = 1 + + renderlayer = self._instance.data["setMembers"] + + # add required env vars for the export script + envs = { + "AVALON_APP_NAME": os.environ.get("AVALON_APP_NAME"), + "OPENPYPE_ASS_EXPORT_RENDER_LAYER": renderlayer, + "OPENPYPE_ASS_EXPORT_SCENE_FILE": self.scene_path, + "OPENPYPE_ASS_EXPORT_OUTPUT": job_info.OutputFilename[0], + "OPENPYPE_ASS_EXPORT_START": int(self._instance.data["frameStartHandle"]), # noqa + "OPENPYPE_ASS_EXPORT_END": int(self._instance.data["frameEndHandle"]), # noqa + "OPENPYPE_ASS_EXPORT_STEP": 1 + } + for key, value in envs.items(): + if not value: + continue + job_info.EnvironmentKeyValue[key] = value + + plugin_info = PythonPluginInfo( + ScriptFile=script, + Version="3.6", + Arguments="", + SingleFrameOnly="True" + ) + + return job_info, attr.asdict(plugin_info) + + def _get_vray_render_payload(self, data): + + # Job Info + job_info = copy.deepcopy(self.job_info) + job_info.Name = self._job_info_label("Render") + job_info.Plugin = "Vray" + job_info.OverrideTaskExtraInfoNames = False + + # Plugin Info + plugin_info = VRayPluginInfo( + InputFilename=self.format_vray_output_filename(), + SeparateFilesPerFrame=False, + VRayEngine="V-Ray", + Width=self._instance.data["resolutionWidth"], + Height=self._instance.data["resolutionHeight"], + OutputFilePath=job_info.OutputDirectory[0], + OutputFileName=job_info.OutputFilename[0] + ) + + return job_info, attr.asdict(plugin_info) + + def _get_arnold_render_payload(self, data): + + # Job Info + job_info = copy.deepcopy(self.job_info) + job_info.Name = self._job_info_label("Render") + job_info.Plugin = "Arnold" + job_info.OverrideTaskExtraInfoNames = False + + # Plugin Info + ass_file, _ = 
os.path.splitext(data["output_filename_0"]) + ass_filepath = ass_file + ".ass" + + plugin_info = ArnoldPluginInfo( + ArnoldFile=ass_filepath + ) + + return job_info, attr.asdict(plugin_info) + + def format_vray_output_filename(self): + """Format the expected output file of the Export job. + + Example: + /_/ + "shot010_v006/shot010_v006_CHARS/CHARS_0001.vrscene" + Returns: + str + + """ + + # "vrayscene//_/" + vray_settings = cmds.ls(type="VRaySettingsNode") + node = vray_settings[0] + template = cmds.getAttr("{}.vrscene_filename".format(node)) + scene, _ = os.path.splitext(self.scene_path) + + def smart_replace(string, key_values): + new_string = string + for key, value in key_values.items(): + new_string = new_string.replace(key, value) + return new_string + + # Get workfile scene path without extension to format vrscene_filename + scene_filename = os.path.basename(self.scene_path) + scene_filename_no_ext, _ = os.path.splitext(scene_filename) + + layer = self._instance.data['setMembers'] + + # Reformat without tokens + output_path = smart_replace( + template, + {"": scene_filename_no_ext, + "": layer}) + + start_frame = int(self._instance.data["frameStartHandle"]) + workspace = self._instance.context.data["workspace"] + filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame) + filepath_zero = os.path.join(workspace, filename_zero) + + return filepath_zero.replace("\\", "/") + + def _patch_workfile(self): + """Patch Maya scene. + + This will take list of patches (lines to add) and apply them to + *published* Maya scene file (that is used later for rendering). + + Patches are dict with following structure:: + { + "name": "Name of patch", + "regex": "regex of line before patch", + "line": "line to insert" + } + + """ + project_settings = self._instance.context.data["project_settings"] + patches = ( + project_settings.get( + "deadline", {}).get( + "publish", {}).get( + "MayaSubmitDeadline", {}).get( + "scene_patches", {}) + ) + if not patches: + return + + if not os.path.splitext(self.scene_path)[1].lower() != ".ma": + self.log.debug("Skipping workfile patch since workfile is not " + ".ma file") + return + + compiled_regex = [re.compile(p["regex"]) for p in patches] + with open(self.scene_path, "r+") as pf: + scene_data = pf.readlines() + for ln, line in enumerate(scene_data): + for i, r in enumerate(compiled_regex): + if re.match(r, line): + scene_data.insert(ln + 1, patches[i]["line"]) + pf.seek(0) + pf.writelines(scene_data) + pf.truncate() + self.log.info("Applied {} patch to scene.".format( + patches[i]["name"] + )) + + def _job_info_label(self, label): + return "{label} {job.Name} [{start}-{end}]".format( + label=label, + job=self.job_info, + start=int(self._instance.data["frameStartHandle"]), + end=int(self._instance.data["frameEndHandle"]), + ) + + @staticmethod + def _iter_expected_files(exp): + if isinstance(exp[0], dict): + for _aov, files in exp[0].items(): + for file in files: + yield file + else: + for file in exp: + yield file def _format_tiles( @@ -88,12 +766,12 @@ def _format_tiles( Example:: Image prefix is: - `maya///_` + `//_` Result for tile 0 for 4x4 will be: - `maya///_tile_1x1_4x4__` + `//_tile_1x1_4x4__` - Calculating coordinates is tricky as in Job they are defined as top, + Calculating coordinates is tricky as in Job they are defined as top, left, bottom, right with zero being in top-left corner. But Assembler configuration file takes tile coordinates as X, Y, Width and Height and zero is bottom left corner. 
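[Editorial note, not part of the patch] The region arithmetic described above is easy to misread, so here is a minimal, self-contained sketch of the per-tile math used by the rewritten `_format_tiles` below (1-based tile indices, integer division, top-left origin for the Region* values); the numeric check assumes a 1920x1080 frame split into 2x2 tiles:

    def tile_region(tile_x, tile_y, tiles_x, tiles_y, width, height):
        # tile sizes come from integer division, as in _format_tiles
        w_space = width // tiles_x
        h_space = height // tiles_y
        top = height - (tile_y * h_space)
        bottom = height - ((tile_y - 1) * h_space) - 1
        left = (tile_x - 1) * w_space
        right = (tile_x * w_space) - 1
        return top, bottom, left, right

    # tile_y is iterated in reverse, so (tile_x=1, tile_y=2) is the
    # top-left quadrant of a 2x2 split of a 1920x1080 frame
    assert tile_region(1, 2, 2, 2, 1920, 1080) == (0, 539, 0, 959)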
@@ -102,25 +780,32 @@ def _format_tiles( filename (str): Filename to process as tiles. index (int): Index of that file if it is sequence. tiles_x (int): Number of tiles in X. - tiles_y (int): Number if tikes in Y. + tiles_y (int): Number of tiles in Y. width (int): Width resolution of final image. height (int): Height resolution of final image. prefix (str): Image prefix. Returns: - (dict, dict): Tuple of two dictionaires - first can be used to + (dict, dict): Tuple of two dictionaries - first can be used to extend JobInfo, second has tiles x, y, width and height used for assembler configuration. """ - tile = 0 + # Math used requires integers for correct output - as such + # we ensure our inputs are correct. + assert type(tiles_x) is int, "tiles_x must be an integer" + assert type(tiles_y) is int, "tiles_y must be an integer" + assert type(width) is int, "width must be an integer" + assert type(height) is int, "height must be an integer" + out = {"JobInfo": {}, "PluginInfo": {}} cfg = OrderedDict() - w_space = width / tiles_x - h_space = height / tiles_y + w_space = width // tiles_x + h_space = height // tiles_y cfg["TilesCropped"] = "False" + tile = 0 for tile_x in range(1, tiles_x + 1): for tile_y in reversed(range(1, tiles_y + 1)): tile_prefix = "_tile_{}x{}_{}x{}_".format( @@ -128,1029 +813,38 @@ def _format_tiles( tiles_x, tiles_y ) - out_tile_index = "OutputFilename{}Tile{}".format( - str(index), tile - ) + new_filename = "{}/{}{}".format( os.path.dirname(filename), tile_prefix, os.path.basename(filename) ) - out["JobInfo"][out_tile_index] = new_filename + + top = height - (tile_y * h_space) + bottom = height - ((tile_y - 1) * h_space) - 1 + left = (tile_x - 1) * w_space + right = (tile_x * w_space) - 1 + + # Job info + out["JobInfo"]["OutputFilename{}Tile{}".format(index, tile)] = new_filename # noqa: E501 + + # Plugin Info out["PluginInfo"]["RegionPrefix{}".format(str(tile))] = \ "/{}".format(tile_prefix).join(prefix.rsplit("/", 1)) + out["PluginInfo"]["RegionTop{}".format(tile)] = top + out["PluginInfo"]["RegionBottom{}".format(tile)] = bottom + out["PluginInfo"]["RegionLeft{}".format(tile)] = left + out["PluginInfo"]["RegionRight{}".format(tile)] = right - out["PluginInfo"]["RegionTop{}".format(tile)] = int(height) - (tile_y * h_space) # noqa: E501 - out["PluginInfo"]["RegionBottom{}".format(tile)] = int(height) - ((tile_y - 1) * h_space) - 1 # noqa: E501 - out["PluginInfo"]["RegionLeft{}".format(tile)] = (tile_x - 1) * w_space # noqa: E501 - out["PluginInfo"]["RegionRight{}".format(tile)] = (tile_x * w_space) - 1 # noqa: E501 - + # Tile config cfg["Tile{}".format(tile)] = new_filename cfg["Tile{}Tile".format(tile)] = new_filename cfg["Tile{}FileName".format(tile)] = new_filename - cfg["Tile{}X".format(tile)] = (tile_x - 1) * w_space - - cfg["Tile{}Y".format(tile)] = int(height) - (tile_y * h_space) - + cfg["Tile{}X".format(tile)] = left + cfg["Tile{}Y".format(tile)] = top cfg["Tile{}Width".format(tile)] = w_space cfg["Tile{}Height".format(tile)] = h_space tile += 1 + return out, cfg - - -def get_renderer_variables(renderlayer, root): - """Retrieve the extension which has been set in the VRay settings. - - Will return None if the current renderer is not VRay - For Maya 2016.5 and up the renderSetup creates renderSetupLayer node which - start with `rs`. Use the actual node name, do NOT use the `nice name` - - Args: - renderlayer (str): the node name of the renderlayer. 
- root (str): base path to render - - Returns: - dict - - """ - renderer = lib.get_renderer(renderlayer or lib.get_current_renderlayer()) - render_attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS["default"]) - - padding = cmds.getAttr("{}.{}".format(render_attrs["node"], - render_attrs["padding"])) - - filename_0 = cmds.renderSettings( - fullPath=True, - gin="#" * int(padding), - lut=True, - layer=renderlayer or lib.get_current_renderlayer())[0] - filename_0 = re.sub('_', '_beauty', - filename_0, flags=re.IGNORECASE) - prefix_attr = "defaultRenderGlobals.imageFilePrefix" - - scene = cmds.file(query=True, sceneName=True) - scene, _ = os.path.splitext(os.path.basename(scene)) - - if renderer == "vray": - renderlayer = renderlayer.split("_")[-1] - # Maya's renderSettings function does not return V-Ray file extension - # so we get the extension from vraySettings - extension = cmds.getAttr("vraySettings.imageFormatStr") - - # When V-Ray image format has not been switched once from default .png - # the getAttr command above returns None. As such we explicitly set - # it to `.png` - if extension is None: - extension = "png" - - if extension in ["exr (multichannel)", "exr (deep)"]: - extension = "exr" - - prefix_attr = "vraySettings.fileNamePrefix" - filename_prefix = cmds.getAttr(prefix_attr) - # we need to determine path for vray as maya `renderSettings` query - # does not work for vray. - - filename_0 = re.sub('', scene, filename_prefix, flags=re.IGNORECASE) # noqa: E501 - filename_0 = re.sub('', renderlayer, filename_0, flags=re.IGNORECASE) # noqa: E501 - filename_0 = "{}.{}.{}".format( - filename_0, "#" * int(padding), extension) - filename_0 = os.path.normpath(os.path.join(root, filename_0)) - elif renderer == "renderman": - prefix_attr = "rmanGlobals.imageFileFormat" - # NOTE: This is guessing extensions from renderman display types. - # Some of them are just framebuffers, d_texture format can be - # set in display setting. We set those now to None, but it - # should be handled more gracefully. - display_types = { - "d_deepexr": "exr", - "d_it": None, - "d_null": None, - "d_openexr": "exr", - "d_png": "png", - "d_pointcloud": "ptc", - "d_targa": "tga", - "d_texture": None, - "d_tiff": "tif" - } - - extension = display_types.get( - cmds.listConnections("rmanDefaultDisplay.displayType")[0], - "exr" - ) or "exr" - - filename_prefix = "{}/{}".format( - cmds.getAttr("rmanGlobals.imageOutputDir"), - cmds.getAttr("rmanGlobals.imageFileFormat") - ) - - renderlayer = renderlayer.split("_")[-1] - - filename_0 = re.sub('', scene, filename_prefix, flags=re.IGNORECASE) # noqa: E501 - filename_0 = re.sub('', renderlayer, filename_0, flags=re.IGNORECASE) # noqa: E501 - filename_0 = re.sub('', "#" * int(padding), filename_0, flags=re.IGNORECASE) # noqa: E501 - filename_0 = re.sub('', extension, filename_0, flags=re.IGNORECASE) # noqa: E501 - filename_0 = os.path.normpath(os.path.join(root, filename_0)) - elif renderer == "redshift": - # mapping redshift extension dropdown values to strings - ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"] - extension = ext_mapping[ - cmds.getAttr("redshiftOptions.imageFormat") - ] - else: - # Get the extension, getAttr defaultRenderGlobals.imageFormat - # returns an index number. 
- filename_base = os.path.basename(filename_0) - extension = os.path.splitext(filename_base)[-1].strip(".") - - filename_prefix = cmds.getAttr(prefix_attr) - return {"ext": extension, - "filename_prefix": filename_prefix, - "padding": padding, - "filename_0": filename_0} - - -class MayaSubmitDeadline(pyblish.api.InstancePlugin): - """Submit available render layers to Deadline. - - Renders are submitted to a Deadline Web Service as - supplied via settings key "DEADLINE_REST_URL". - - Attributes: - use_published (bool): Use published scene to render instead of the - one in work area. - - """ - - label = "Submit to Deadline" - order = pyblish.api.IntegratorOrder + 0.1 - hosts = ["maya"] - families = ["renderlayer"] - - use_published = True - tile_assembler_plugin = "OpenPypeTileAssembler" - asset_dependencies = False - priority = 50 - tile_priority = 50 - limit_groups = [] - jobInfo = {} - pluginInfo = {} - group = "none" - - def process(self, instance): - """Plugin entry point.""" - instance.data["toBeRenderedOn"] = "deadline" - context = instance.context - - self._instance = instance - self.payload_skeleton = copy.deepcopy(payload_skeleton_template) - - # get default deadline webservice url from deadline module - self.deadline_url = instance.context.data.get("defaultDeadline") - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - self.deadline_url = instance.data.get("deadlineUrl") - assert self.deadline_url, "Requires Deadline Webservice URL" - - # just using existing names from Setting - self._job_info = self.jobInfo - - self._plugin_info = self.pluginInfo - - self.limit_groups = self.limit - - context = instance.context - workspace = context.data["workspaceDir"] - anatomy = context.data['anatomy'] - instance.data["toBeRenderedOn"] = "deadline" - - filepath = None - patches = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "scene_patches", {}) - ) - - # Handle render/export from published scene or not ------------------ - if self.use_published: - patched_files = [] - for i in context: - if "workfile" not in i.data["families"]: - continue - assert i.data["publish"] is True, ( - "Workfile (scene) must be published along") - template_data = i.data.get("anatomyData") - rep = i.data.get("representations")[0].get("name") - template_data["representation"] = rep - template_data["ext"] = rep - template_data["comment"] = None - anatomy_filled = anatomy.format(template_data) - template_filled = anatomy_filled["publish"]["path"] - filepath = os.path.normpath(template_filled) - self.log.info("Using published scene for render {}".format( - filepath)) - - if not os.path.exists(filepath): - self.log.error("published scene does not exist!") - raise - # now we need to switch scene in expected files - # because token will now point to published - # scene file and that might differ from current one - new_scene = os.path.splitext( - os.path.basename(filepath))[0] - orig_scene = os.path.splitext( - os.path.basename(context.data["currentFile"]))[0] - exp = instance.data.get("expectedFiles") - - if isinstance(exp[0], dict): - # we have aovs and we need to iterate over them - new_exp = {} - for aov, files in exp[0].items(): - replaced_files = [] - for f in files: - replaced_files.append( - f.replace(orig_scene, new_scene) - ) - new_exp[aov] = replaced_files - instance.data["expectedFiles"] = [new_exp] - else: - new_exp = [] - for f in exp: - new_exp.append( - f.replace(orig_scene, new_scene) - ) - 
instance.data["expectedFiles"] = [new_exp] - - if instance.data.get("publishRenderMetadataFolder"): - instance.data["publishRenderMetadataFolder"] = \ - instance.data["publishRenderMetadataFolder"].replace( - orig_scene, new_scene) - self.log.info("Scene name was switched {} -> {}".format( - orig_scene, new_scene - )) - # patch workfile is needed - if filepath not in patched_files: - patched_file = self._patch_workfile(filepath, patches) - patched_files.append(patched_file) - - all_instances = [] - for result in context.data["results"]: - if (result["instance"] is not None and - result["instance"] not in all_instances): # noqa: E128 - all_instances.append(result["instance"]) - - # fallback if nothing was set - if not filepath: - self.log.warning("Falling back to workfile") - filepath = context.data["currentFile"] - - self.log.debug(filepath) - - # Gather needed data ------------------------------------------------ - default_render_file = instance.context.data.get('project_settings')\ - .get('maya')\ - .get('create')\ - .get('CreateRender')\ - .get('default_render_image_folder') - filename = os.path.basename(filepath) - comment = context.data.get("comment", "") - dirname = os.path.join(workspace, default_render_file) - renderlayer = instance.data['setMembers'] # rs_beauty - deadline_user = context.data.get("user", getpass.getuser()) - - # Always use the original work file name for the Job name even when - # rendering is done from the published Work File. The original work - # file name is clearer because it can also have subversion strings, - # etc. which are stripped for the published file. - src_filename = os.path.basename(context.data["currentFile"]) - jobname = "%s - %s" % (src_filename, instance.name) - - # Get the variables depending on the renderer - render_variables = get_renderer_variables(renderlayer, dirname) - filename_0 = render_variables["filename_0"] - if self.use_published: - new_scene = os.path.splitext(filename)[0] - orig_scene = os.path.splitext( - os.path.basename(context.data["currentFile"]))[0] - filename_0 = render_variables["filename_0"].replace( - orig_scene, new_scene) - - output_filename_0 = filename_0 - - # this is needed because renderman handles directory and file - # prefixes separately - if self._instance.data["renderer"] == "renderman": - dirname = os.path.dirname(output_filename_0) - - # Create render folder ---------------------------------------------- - try: - # Ensure render folder exists - os.makedirs(dirname) - except OSError: - pass - - # Fill in common data to payload ------------------------------------ - payload_data = {} - payload_data["filename"] = filename - payload_data["filepath"] = filepath - payload_data["jobname"] = jobname - payload_data["deadline_user"] = deadline_user - payload_data["comment"] = comment - payload_data["output_filename_0"] = output_filename_0 - payload_data["render_variables"] = render_variables - payload_data["renderlayer"] = renderlayer - payload_data["workspace"] = workspace - payload_data["dirname"] = dirname - - self.log.info("--- Submission data:") - for k, v in payload_data.items(): - self.log.info("- {}: {}".format(k, v)) - self.log.info("-" * 20) - - frame_pattern = self.payload_skeleton["JobInfo"]["Frames"] - self.payload_skeleton["JobInfo"]["Frames"] = frame_pattern.format( - start=int(self._instance.data["frameStartHandle"]), - end=int(self._instance.data["frameEndHandle"]), - step=int(self._instance.data["byFrameStep"])) - - self.payload_skeleton["JobInfo"]["Plugin"] = self._instance.data.get( - 
"mayaRenderPlugin", "MayaBatch") - - self.payload_skeleton["JobInfo"]["BatchName"] = src_filename - # Job name, as seen in Monitor - self.payload_skeleton["JobInfo"]["Name"] = jobname - # Arbitrary username, for visualisation in Monitor - self.payload_skeleton["JobInfo"]["UserName"] = deadline_user - # Set job priority - self.payload_skeleton["JobInfo"]["Priority"] = \ - self._instance.data.get("priority", self.priority) - - if self.group != "none" and self.group: - self.payload_skeleton["JobInfo"]["Group"] = self.group - - if self.limit_groups: - self.payload_skeleton["JobInfo"]["LimitGroups"] = \ - ",".join(self.limit_groups) - # Optional, enable double-click to preview rendered - # frames from Deadline Monitor - self.payload_skeleton["JobInfo"]["OutputDirectory0"] = \ - os.path.dirname(output_filename_0).replace("\\", "/") - self.payload_skeleton["JobInfo"]["OutputFilename0"] = \ - output_filename_0.replace("\\", "/") - - self.payload_skeleton["JobInfo"]["Comment"] = comment - self.payload_skeleton["PluginInfo"]["RenderLayer"] = renderlayer - - # Adding file dependencies. - dependencies = instance.context.data["fileDependencies"] - dependencies.append(filepath) - if self.asset_dependencies: - for dependency in dependencies: - key = "AssetDependency" + str(dependencies.index(dependency)) - self.payload_skeleton["JobInfo"][key] = dependency - - # Handle environments ----------------------------------------------- - # We need those to pass them to pype for it to set correct context - keys = [ - "FTRACK_API_KEY", - "FTRACK_API_USER", - "FTRACK_SERVER", - "AVALON_PROJECT", - "AVALON_ASSET", - "AVALON_TASK", - "AVALON_APP_NAME", - "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" - ] - # Add mongo url if it's enabled - if instance.context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - - environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **legacy_io.Session) - environment["OPENPYPE_LOG_NO_COLORS"] = "1" - environment["OPENPYPE_MAYA_VERSION"] = cmds.about(v=True) - # to recognize job from PYPE for turning Event On/Off - environment["OPENPYPE_RENDER_JOB"] = "1" - self.payload_skeleton["JobInfo"].update({ - "EnvironmentKeyValue%d" % index: "{key}={value}".format( - key=key, - value=environment[key] - ) for index, key in enumerate(environment) - }) - # Add options from RenderGlobals------------------------------------- - render_globals = instance.data.get("renderGlobals", {}) - self.payload_skeleton["JobInfo"].update(render_globals) - - # Submit preceding export jobs ------------------------------------- - export_job = None - assert not all(x in instance.data["families"] - for x in ['vrayscene', 'assscene']), ( - "Vray Scene and Ass Scene options are mutually exclusive") - if "vrayscene" in instance.data["families"]: - export_job = self._submit_export(payload_data, "vray") - - if "assscene" in instance.data["families"]: - export_job = self._submit_export(payload_data, "arnold") - - # Prepare main render job ------------------------------------------- - if "vrayscene" in instance.data["families"]: - payload = self._get_vray_render_payload(payload_data) - elif "assscene" in instance.data["families"]: - payload = self._get_arnold_render_payload(payload_data) - else: - payload = self._get_maya_payload(payload_data) - - # Add export job as dependency -------------------------------------- - if export_job: - payload["JobInfo"]["JobDependency0"] = export_job - - # Add list of expected files to job --------------------------------- - exp = 
instance.data.get("expectedFiles") - exp_index = 0 - output_filenames = {} - - if isinstance(exp[0], dict): - # we have aovs and we need to iterate over them - for _aov, files in exp[0].items(): - col, rem = clique.assemble(files) - if not col and rem: - # we couldn't find any collections but have - # individual files. - assert len(rem) == 1, ("Found multiple non related files " - "to render, don't know what to do " - "with them.") - output_file = rem[0] - if not instance.data.get("tileRendering"): - payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - else: - output_file = col[0].format('{head}{padding}{tail}') - if not instance.data.get("tileRendering"): - payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - - output_filenames['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - exp_index += 1 - else: - col, rem = clique.assemble(exp) - if not col and rem: - # we couldn't find any collections but have - # individual files. - assert len(rem) == 1, ("Found multiple non related files " - "to render, don't know what to do " - "with them.") - - output_file = rem[0] - if not instance.data.get("tileRendering"): - payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - else: - output_file = col[0].format('{head}{padding}{tail}') - if not instance.data.get("tileRendering"): - payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - - output_filenames['OutputFilename' + str(exp_index)] = output_file - - plugin = payload["JobInfo"]["Plugin"] - self.log.info("using render plugin : {}".format(plugin)) - - # Store output dir for unified publisher (filesequence) - instance.data["outputDir"] = os.path.dirname(output_filename_0) - - self.preflight_check(instance) - - # add jobInfo and pluginInfo variables from Settings - payload["JobInfo"].update(self._job_info) - payload["PluginInfo"].update(self._plugin_info) - - # Prepare tiles data ------------------------------------------------ - if instance.data.get("tileRendering"): - # if we have sequence of files, we need to create tile job for - # every frame - - payload["JobInfo"]["TileJob"] = True - payload["JobInfo"]["TileJobTilesInX"] = instance.data.get("tilesX") - payload["JobInfo"]["TileJobTilesInY"] = instance.data.get("tilesY") - payload["PluginInfo"]["ImageHeight"] = instance.data.get("resolutionHeight") # noqa: E501 - payload["PluginInfo"]["ImageWidth"] = instance.data.get("resolutionWidth") # noqa: E501 - payload["PluginInfo"]["RegionRendering"] = True - - assembly_payload = { - "AuxFiles": [], - "JobInfo": { - "BatchName": payload["JobInfo"]["BatchName"], - "Frames": 1, - "Name": "{} - Tile Assembly Job".format( - payload["JobInfo"]["Name"]), - "OutputDirectory0": - payload["JobInfo"]["OutputDirectory0"].replace( - "\\", "/"), - "Plugin": self.tile_assembler_plugin, - "MachineLimit": 1 - }, - "PluginInfo": { - "CleanupTiles": 1, - "ErrorOnMissing": True - } - } - assembly_payload["JobInfo"].update(output_filenames) - assembly_payload["JobInfo"]["Priority"] = self._instance.data.get( - "tile_priority", self.tile_priority) - assembly_payload["JobInfo"]["UserName"] = deadline_user - - frame_payloads = [] - assembly_payloads = [] - - R_FRAME_NUMBER = re.compile(r".+\.(?P[0-9]+)\..+") # noqa: N806, E501 - REPL_FRAME_NUMBER = re.compile(r"(.+\.)([0-9]+)(\..+)") # noqa: N806, E501 - - if isinstance(exp[0], dict): - # we have aovs and we need to iterate over them - # get files from `beauty` - files = exp[0].get("beauty") - # assembly 
files are used for assembly jobs as we need to put - # together all AOVs - assembly_files = list( - itertools.chain.from_iterable( - [f for _, f in exp[0].items()])) - if not files: - # if beauty doesn't exists, use first aov we found - files = exp[0].get(list(exp[0].keys())[0]) - else: - files = exp - assembly_files = files - - frame_jobs = {} - - file_index = 1 - for file in files: - frame = re.search(R_FRAME_NUMBER, file).group("frame") - new_payload = copy.deepcopy(payload) - new_payload["JobInfo"]["Name"] = \ - "{} (Frame {} - {} tiles)".format( - payload["JobInfo"]["Name"], - frame, - instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501 - ) - self.log.info( - "... preparing job {}".format( - new_payload["JobInfo"]["Name"])) - new_payload["JobInfo"]["TileJobFrame"] = frame - - tiles_data = _format_tiles( - file, 0, - instance.data.get("tilesX"), - instance.data.get("tilesY"), - instance.data.get("resolutionWidth"), - instance.data.get("resolutionHeight"), - payload["PluginInfo"]["OutputFilePrefix"] - )[0] - new_payload["JobInfo"].update(tiles_data["JobInfo"]) - new_payload["PluginInfo"].update(tiles_data["PluginInfo"]) - - job_hash = hashlib.sha256("{}_{}".format(file_index, file)) - frame_jobs[frame] = job_hash.hexdigest() - new_payload["JobInfo"]["ExtraInfo0"] = job_hash.hexdigest() - new_payload["JobInfo"]["ExtraInfo1"] = file - - frame_payloads.append(new_payload) - file_index += 1 - - file_index = 1 - for file in assembly_files: - frame = re.search(R_FRAME_NUMBER, file).group("frame") - new_assembly_payload = copy.deepcopy(assembly_payload) - new_assembly_payload["JobInfo"]["Name"] = \ - "{} (Frame {})".format( - assembly_payload["JobInfo"]["Name"], - frame) - new_assembly_payload["JobInfo"]["OutputFilename0"] = re.sub( - REPL_FRAME_NUMBER, - "\\1{}\\3".format("#" * len(frame)), file) - - new_assembly_payload["PluginInfo"]["Renderer"] = self._instance.data["renderer"] # noqa: E501 - new_assembly_payload["JobInfo"]["ExtraInfo0"] = frame_jobs[frame] # noqa: E501 - new_assembly_payload["JobInfo"]["ExtraInfo1"] = file - assembly_payloads.append(new_assembly_payload) - file_index += 1 - - self.log.info( - "Submitting tile job(s) [{}] ...".format(len(frame_payloads))) - - url = "{}/api/jobs".format(self.deadline_url) - tiles_count = instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501 - - for tile_job in frame_payloads: - response = requests_post(url, json=tile_job) - if not response.ok: - raise Exception(response.text) - - job_id = response.json()["_id"] - hash = response.json()["Props"]["Ex0"] - - for assembly_job in assembly_payloads: - if assembly_job["JobInfo"]["ExtraInfo0"] == hash: - assembly_job["JobInfo"]["JobDependency0"] = job_id - - for assembly_job in assembly_payloads: - file = assembly_job["JobInfo"]["ExtraInfo1"] - # write assembly job config files - now = datetime.now() - - config_file = os.path.join( - os.path.dirname(output_filename_0), - "{}_config_{}.txt".format( - os.path.splitext(file)[0], - now.strftime("%Y_%m_%d_%H_%M_%S") - ) - ) - - try: - if not os.path.isdir(os.path.dirname(config_file)): - os.makedirs(os.path.dirname(config_file)) - except OSError: - # directory is not available - self.log.warning( - "Path is unreachable: `{}`".format( - os.path.dirname(config_file))) - - # add config file as job auxFile - assembly_job["AuxFiles"] = [config_file] - - with open(config_file, "w") as cf: - print("TileCount={}".format(tiles_count), file=cf) - print("ImageFileName={}".format(file), file=cf) - print("ImageWidth={}".format( - 
instance.data.get("resolutionWidth")), file=cf) - print("ImageHeight={}".format( - instance.data.get("resolutionHeight")), file=cf) - - tiles = _format_tiles( - file, 0, - instance.data.get("tilesX"), - instance.data.get("tilesY"), - instance.data.get("resolutionWidth"), - instance.data.get("resolutionHeight"), - payload["PluginInfo"]["OutputFilePrefix"] - )[1] - sorted(tiles) - for k, v in tiles.items(): - print("{}={}".format(k, v), file=cf) - - job_idx = 1 - instance.data["assemblySubmissionJobs"] = [] - for ass_job in assembly_payloads: - self.log.info("submitting assembly job {} of {}".format( - job_idx, len(assembly_payloads) - )) - self.log.debug(json.dumps(ass_job, indent=4, sort_keys=True)) - response = requests_post(url, json=ass_job) - if not response.ok: - raise Exception(response.text) - - instance.data["assemblySubmissionJobs"].append( - response.json()["_id"]) - job_idx += 1 - - instance.data["jobBatchName"] = payload["JobInfo"]["BatchName"] - self.log.info("Setting batch name on instance: {}".format( - instance.data["jobBatchName"])) - else: - # Submit job to farm -------------------------------------------- - self.log.info("Submitting ...") - self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) - - # E.g. http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(self.deadline_url) - response = requests_post(url, json=payload) - if not response.ok: - raise Exception(response.text) - instance.data["deadlineSubmissionJob"] = response.json() - - def _get_maya_payload(self, data): - payload = copy.deepcopy(self.payload_skeleton) - - if not self.asset_dependencies: - job_info_ext = {} - - else: - job_info_ext = { - # Asset dependency to wait for at least the scene file to sync. - "AssetDependency0": data["filepath"], - } - - renderer = self._instance.data["renderer"] - - # This hack is here because of how Deadline handles Renderman version. - # it considers everything with `renderman` set as version older than - # Renderman 22, and so if we are using renderman > 21 we need to set - # renderer string on the job to `renderman22`. We will have to change - # this when Deadline releases new version handling this. 
- if self._instance.data["renderer"] == "renderman": - try: - from rfm2.config import cfg # noqa - except ImportError: - raise Exception("Cannot determine renderman version") - - rman_version = cfg().build_info.version() # type: str - if int(rman_version.split(".")[0]) > 22: - renderer = "renderman22" - - plugin_info = { - "SceneFile": data["filepath"], - # Output directory and filename - "OutputFilePath": data["dirname"].replace("\\", "/"), - "OutputFilePrefix": data["render_variables"]["filename_prefix"], # noqa: E501 - - # Only render layers are considered renderable in this pipeline - "UsingRenderLayers": True, - - # Render only this layer - "RenderLayer": data["renderlayer"], - - # Determine which renderer to use from the file itself - "Renderer": renderer, - - # Resolve relative references - "ProjectPath": data["workspace"], - } - payload["JobInfo"].update(job_info_ext) - payload["PluginInfo"].update(plugin_info) - return payload - - def _get_vray_export_payload(self, data): - payload = copy.deepcopy(self.payload_skeleton) - vray_settings = cmds.ls(type="VRaySettingsNode") - node = vray_settings[0] - template = cmds.getAttr("{}.vrscene_filename".format(node)) - scene, _ = os.path.splitext(data["filename"]) - first_file = self.format_vray_output_filename(scene, template) - first_file = "{}/{}".format(data["workspace"], first_file) - output = os.path.dirname(first_file) - job_info_ext = { - # Job name, as seen in Monitor - "Name": "Export {} [{}-{}]".format( - data["jobname"], - int(self._instance.data["frameStartHandle"]), - int(self._instance.data["frameEndHandle"])), - - "Plugin": self._instance.data.get( - "mayaRenderPlugin", "MayaPype"), - "FramesPerTask": self._instance.data.get("framesPerTask", 1) - } - - plugin_info_ext = { - # Renderer - "Renderer": "vray", - # Input - "SceneFile": data["filepath"], - "SkipExistingFrames": True, - "UsingRenderLayers": True, - "UseLegacyRenderLayers": True, - "RenderLayer": data["renderlayer"], - "ProjectPath": data["workspace"], - "OutputFilePath": output - } - - payload["JobInfo"].update(job_info_ext) - payload["PluginInfo"].update(plugin_info_ext) - return payload - - def _get_arnold_export_payload(self, data): - - try: - from openpype.scripts import export_maya_ass_job - except Exception: - raise AssertionError( - "Expected module 'export_maya_ass_job' to be available") - - module_path = export_maya_ass_job.__file__ - if module_path.endswith(".pyc"): - module_path = module_path[: -len(".pyc")] + ".py" - - script = os.path.normpath(module_path) - - payload = copy.deepcopy(self.payload_skeleton) - job_info_ext = { - # Job name, as seen in Monitor - "Name": "Export {} [{}-{}]".format( - data["jobname"], - int(self._instance.data["frameStartHandle"]), - int(self._instance.data["frameEndHandle"])), - - "Plugin": "Python", - "FramesPerTask": self._instance.data.get("framesPerTask", 1), - "Frames": 1 - } - - plugin_info_ext = { - "Version": "3.6", - "ScriptFile": script, - "Arguments": "", - "SingleFrameOnly": "True", - } - payload["JobInfo"].update(job_info_ext) - payload["PluginInfo"].update(plugin_info_ext) - - envs = [ - v - for k, v in payload["JobInfo"].items() - if k.startswith("EnvironmentKeyValue") - ] - - # add app name to environment - envs.append( - "AVALON_APP_NAME={}".format(os.environ.get("AVALON_APP_NAME"))) - envs.append( - "OPENPYPE_ASS_EXPORT_RENDER_LAYER={}".format(data["renderlayer"])) - envs.append( - "OPENPYPE_ASS_EXPORT_SCENE_FILE={}".format(data["filepath"])) - envs.append( - "OPENPYPE_ASS_EXPORT_OUTPUT={}".format( - 
payload['JobInfo']['OutputFilename0'])) - envs.append( - "OPENPYPE_ASS_EXPORT_START={}".format( - int(self._instance.data["frameStartHandle"]))) - envs.append( - "OPENPYPE_ASS_EXPORT_END={}".format( - int(self._instance.data["frameEndHandle"]))) - envs.append( - "OPENPYPE_ASS_EXPORT_STEP={}".format(1)) - - for i, e in enumerate(envs): - payload["JobInfo"]["EnvironmentKeyValue{}".format(i)] = e - return payload - - def _get_vray_render_payload(self, data): - payload = copy.deepcopy(self.payload_skeleton) - vray_settings = cmds.ls(type="VRaySettingsNode") - node = vray_settings[0] - template = cmds.getAttr("{}.vrscene_filename".format(node)) - # "vrayscene//_/" - - scene, _ = os.path.splitext(data["filename"]) - first_file = self.format_vray_output_filename(scene, template) - first_file = "{}/{}".format(data["workspace"], first_file) - job_info_ext = { - "Name": "Render {} [{}-{}]".format( - data["jobname"], - int(self._instance.data["frameStartHandle"]), - int(self._instance.data["frameEndHandle"])), - - "Plugin": "Vray", - "OverrideTaskExtraInfoNames": False, - } - - plugin_info = { - "InputFilename": first_file, - "SeparateFilesPerFrame": True, - "VRayEngine": "V-Ray", - - "Width": self._instance.data["resolutionWidth"], - "Height": self._instance.data["resolutionHeight"], - "OutputFilePath": payload["JobInfo"]["OutputDirectory0"], - "OutputFileName": payload["JobInfo"]["OutputFilename0"] - } - - payload["JobInfo"].update(job_info_ext) - payload["PluginInfo"].update(plugin_info) - return payload - - def _get_arnold_render_payload(self, data): - payload = copy.deepcopy(self.payload_skeleton) - ass_file, _ = os.path.splitext(data["output_filename_0"]) - first_file = ass_file + ".ass" - job_info_ext = { - "Name": "Render {} [{}-{}]".format( - data["jobname"], - int(self._instance.data["frameStartHandle"]), - int(self._instance.data["frameEndHandle"])), - - "Plugin": "Arnold", - "OverrideTaskExtraInfoNames": False, - } - - plugin_info = { - "ArnoldFile": first_file, - } - - payload["JobInfo"].update(job_info_ext) - payload["PluginInfo"].update(plugin_info) - return payload - - def _submit_export(self, data, format): - if format == "vray": - payload = self._get_vray_export_payload(data) - self.log.info("Submitting vrscene export job.") - elif format == "arnold": - payload = self._get_arnold_export_payload(data) - self.log.info("Submitting ass export job.") - - url = "{}/api/jobs".format(self.deadline_url) - response = requests_post(url, json=payload) - if not response.ok: - self.log.error("Submition failed!") - self.log.error(response.status_code) - self.log.error(response.content) - self.log.debug(payload) - raise RuntimeError(response.text) - - dependency = response.json() - return dependency["_id"] - - def preflight_check(self, instance): - """Ensure the startFrame, endFrame and byFrameStep are integers.""" - for key in ("frameStartHandle", "frameEndHandle", "byFrameStep"): - value = instance.data[key] - - if int(value) == value: - continue - - self.log.warning( - "%f=%d was rounded off to nearest integer" - % (value, int(value)) - ) - - def format_vray_output_filename(self, filename, template, dir=False): - """Format the expected output file of the Export job. 
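A compact sketch of the token substitution this helper performs, assuming V-Ray's default "<Scene>" and "<Layer>" tokens in the vrscene filename template (the token names are an assumption here, not confirmed by this diff):

    def expand_vrscene_template(template, scene, layer, frame=None):
        """Expand a vrscene filename template (sketch).

        Example template: "vrayscene/<Scene>/<Scene>_<Layer>/<Layer>".
        """
        path = template.replace("<Scene>", scene).replace("<Layer>", layer)
        if frame is None:
            # directory-style output, forward slashes for Deadline
            return path.replace("\\", "/")
        return "{}_{:04d}.vrscene".format(path, frame).replace("\\", "/")

    # expand_vrscene_template(
    #     "vrayscene/<Scene>/<Scene>_<Layer>/<Layer>",
    #     "shot010_v006", "CHARS", frame=1001)
    # -> "vrayscene/shot010_v006/shot010_v006_CHARS/CHARS_1001.vrscene"
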
- - Example: - /_/ - "shot010_v006/shot010_v006_CHARS/CHARS" - - Args: - instance: - filename(str): - dir(bool): - - Returns: - str - - """ - def smart_replace(string, key_values): - new_string = string - for key, value in key_values.items(): - new_string = new_string.replace(key, value) - return new_string - - # Ensure filename has no extension - file_name, _ = os.path.splitext(filename) - - layer = self._instance.data['setMembers'] - - # Reformat without tokens - output_path = smart_replace( - template, - {"": file_name, - "": layer}) - - if dir: - return output_path.replace("\\", "/") - - start_frame = int(self._instance.data["frameStartHandle"]) - filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame) - - result = filename_zero.replace("\\", "/") - - return result - - def _patch_workfile(self, file, patches): - # type: (str, dict) -> [str, None] - """Patch Maya scene. - - This will take list of patches (lines to add) and apply them to - *published* Maya scene file (that is used later for rendering). - - Patches are dict with following structure:: - { - "name": "Name of patch", - "regex": "regex of line before patch", - "line": "line to insert" - } - - Args: - file (str): File to patch. - patches (dict): Dictionary defining patches. - - Returns: - str: Patched file path or None - - """ - if os.path.splitext(file)[1].lower() != ".ma" or not patches: - return None - - compiled_regex = [re.compile(p["regex"]) for p in patches] - with open(file, "r+") as pf: - scene_data = pf.readlines() - for ln, line in enumerate(scene_data): - for i, r in enumerate(compiled_regex): - if re.match(r, line): - scene_data.insert(ln + 1, patches[i]["line"]) - pf.seek(0) - pf.writelines(scene_data) - pf.truncate() - self.log.info( - "Applied {} patch to scene.".format( - patches[i]["name"])) - return file diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py new file mode 100644 index 0000000000..38ae5d2f7f --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -0,0 +1,136 @@ +import os +import requests + +from maya import cmds + +from openpype.pipeline import legacy_io, PublishXmlValidationError +from openpype.settings import get_project_settings + +import pyblish.api + + +class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): + """Submit Maya scene to perform a local publish in Deadline. + + Publishing in Deadline can be helpful for scenes that publish very slow. + This way it can process in the background on another machine without the + Artist having to wait for the publish to finish on their local machine. + + Submission is done through the Deadline Web Service. DL then triggers + `openpype/scripts/remote_publish.py`. + + Each publishable instance creates its own full publish job. + + Different from `ProcessSubmittedJobOnFarm` which creates publish job + depending on metadata json containing context and instance data of + rendered files. 
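The submission below relies on Deadline's numbered environment-variable convention; distilled into a standalone helper it looks roughly like this (helper name is illustrative):

    def add_environment(job_info, env):
        """Attach environment variables to a Deadline JobInfo dict.

        Deadline expects numbered keys, each holding one "KEY=value" pair:
        EnvironmentKeyValue0, EnvironmentKeyValue1, ...
        """
        for index, (key, value) in enumerate(env.items()):
            job_info["EnvironmentKeyValue{}".format(index)] = (
                "{}={}".format(key, value))
        return job_info

    # add_environment(payload["JobInfo"],
    #                 {"OPENPYPE_PUBLISH_SUBSET": "renderMain",
    #                  "OPENPYPE_REMOTE_PUBLISH": "1"})
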
+ """ + + label = "Submit Scene to Deadline" + order = pyblish.api.IntegratorOrder + hosts = ["maya"] + families = ["publish.farm"] + targets = ["local"] + + def process(self, instance): + project_name = instance.context.data["projectName"] + # TODO settings can be received from 'context.data["project_settings"]' + settings = get_project_settings(project_name) + # use setting for publish job on farm, no reason to have it separately + deadline_publish_job_sett = (settings["deadline"] + ["publish"] + ["ProcessSubmittedJobOnFarm"]) + + # Ensure no errors so far + if not (all(result["success"] + for result in instance.context.data["results"])): + raise PublishXmlValidationError("Publish process has errors") + + if not instance.data["publish"]: + self.log.warning("No active instances found. " + "Skipping submission..") + return + + scene = instance.context.data["currentFile"] + scenename = os.path.basename(scene) + + job_name = "{scene} [PUBLISH]".format(scene=scenename) + batch_name = "{code} - {scene}".format(code=project_name, + scene=scenename) + + # Generate the payload for Deadline submission + payload = { + "JobInfo": { + "Plugin": "MayaBatch", + "BatchName": batch_name, + "Name": job_name, + "UserName": instance.context.data["user"], + "Comment": instance.context.data.get("comment", ""), + # "InitialStatus": state + "Department": deadline_publish_job_sett["deadline_department"], + "ChunkSize": deadline_publish_job_sett["deadline_chunk_size"], + "Priority": deadline_publish_job_sett["deadline_priority"], + "Group": deadline_publish_job_sett["deadline_group"], + "Pool": deadline_publish_job_sett["deadline_pool"], + }, + "PluginInfo": { + + "Build": None, # Don't force build + "StrictErrorChecking": True, + "ScriptJob": True, + + # Inputs + "SceneFile": scene, + "ScriptFilename": "{OPENPYPE_REPOS_ROOT}/openpype/scripts/remote_publish.py", # noqa + + # Mandatory for Deadline + "Version": cmds.about(version=True), + + # Resolve relative references + "ProjectPath": cmds.workspace(query=True, + rootDirectory=True), + + }, + + # Mandatory for Deadline, may be empty + "AuxFiles": [] + } + + # Include critical environment variables with submission + api.Session + keys = [ + "FTRACK_API_USER", + "FTRACK_API_KEY", + "FTRACK_SERVER", + "OPENPYPE_VERSION" + ] + environment = dict({key: os.environ[key] for key in keys + if key in os.environ}, **legacy_io.Session) + + # TODO replace legacy_io with context.data + environment["AVALON_PROJECT"] = project_name + environment["AVALON_ASSET"] = legacy_io.Session["AVALON_ASSET"] + environment["AVALON_TASK"] = legacy_io.Session["AVALON_TASK"] + environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME") + environment["OPENPYPE_LOG_NO_COLORS"] = "1" + environment["OPENPYPE_REMOTE_JOB"] = "1" + environment["OPENPYPE_USERNAME"] = instance.context.data["user"] + environment["OPENPYPE_PUBLISH_SUBSET"] = instance.data["subset"] + environment["OPENPYPE_REMOTE_PUBLISH"] = "1" + + payload["JobInfo"].update({ + "EnvironmentKeyValue%d" % index: "{key}={value}".format( + key=key, + value=environment[key] + ) for index, key in enumerate(environment) + }) + + self.log.info("Submitting Deadline job ...") + deadline_url = instance.context.data["defaultDeadline"] + # if custom one is set in instance, use that + if instance.data.get("deadlineUrl"): + deadline_url = instance.data.get("deadlineUrl") + assert deadline_url, "Requires Deadline Webservice URL" + url = "{}/api/jobs".format(deadline_url) + response = requests.post(url, json=payload, timeout=10) + if not 
response.ok: + raise Exception(response.text) diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index 94c703d66d..b09d2935ab 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -23,6 +23,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): hosts = ["nuke", "nukestudio"] families = ["render.farm", "prerender.farm"] optional = True + targets = ["local"] # presets priority = 50 @@ -54,8 +55,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): self._ver = re.search(r"\d+\.\d+", context.data.get("hostVersion")) self._deadline_user = context.data.get( "deadlineUser", getpass.getuser()) - self._frame_start = int(instance.data["frameStartHandle"]) - self._frame_end = int(instance.data["frameEndHandle"]) + submit_frame_start = int(instance.data["frameStartHandle"]) + submit_frame_end = int(instance.data["frameEndHandle"]) # get output path render_path = instance.data['path'] @@ -79,15 +80,14 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "Using published scene for render {}".format(script_path) ) - # exception for slate workflow - if "slate" in instance.data["families"]: - self._frame_start -= 1 - - response = self.payload_submit(instance, - script_path, - render_path, - node.name() - ) + response = self.payload_submit( + instance, + script_path, + render_path, + node.name(), + submit_frame_start, + submit_frame_end + ) # Store output dir for unified publisher (filesequence) instance.data["deadlineSubmissionJob"] = response.json() instance.data["outputDir"] = os.path.dirname( @@ -100,15 +100,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): script_path = baking_script["bakeScriptPath"] exe_node_name = baking_script["bakeWriteNodeName"] - # exception for slate workflow - if "slate" in instance.data["families"]: - self._frame_start += 1 - resp = self.payload_submit( instance, script_path, render_path, exe_node_name, + submit_frame_start, + submit_frame_end, response.json() ) @@ -116,6 +114,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["deadlineSubmissionJob"] = resp.json() instance.data["publishJobState"] = "Suspended" + # add to list of job Id + if not instance.data.get("bakingSubmissionJobs"): + instance.data["bakingSubmissionJobs"] = [] + + instance.data["bakingSubmissionJobs"].append( + resp.json()["_id"]) + # redefinition of families if "render.farm" in families: instance.data['family'] = 'write' @@ -125,13 +130,16 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): families.insert(0, "prerender") instance.data["families"] = families - def payload_submit(self, - instance, - script_path, - render_path, - exe_node_name, - responce_data=None - ): + def payload_submit( + self, + instance, + script_path, + render_path, + exe_node_name, + start_frame, + end_frame, + responce_data=None + ): render_dir = os.path.normpath(os.path.dirname(render_path)) script_name = os.path.basename(script_path) jobname = "%s - %s" % (script_name, instance.name) @@ -191,8 +199,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "Plugin": "Nuke", "Frames": "{start}-{end}".format( - start=self._frame_start, - end=self._frame_end + start=start_frame, + end=end_frame ), "Comment": self._comment, @@ -252,7 +260,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "PYBLISHPLUGINPATH", "NUKE_PATH", "TOOL_ENV", - "FOUNDRY_LICENSE" + "FOUNDRY_LICENSE", + 
"OPENPYPE_VERSION" ] # Add mongo url if it's enabled if instance.context.data.get("deadlinePassMongoUrl"): @@ -292,7 +301,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): self.log.info(json.dumps(payload, indent=4, sort_keys=True)) # adding expectied files to instance.data - self.expected_files(instance, render_path) + self.expected_files( + instance, + render_path, + start_frame, + end_frame + ) + self.log.debug("__ expectedFiles: `{}`".format( instance.data["expectedFiles"])) response = requests.post(self.deadline_url, json=payload, timeout=10) @@ -338,15 +353,19 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): self.log.debug("_ path: `{}`".format(path)) return path - def expected_files(self, - instance, - path): + def expected_files( + self, + instance, + path, + start_frame, + end_frame + ): """ Create expected files in instance data """ if not instance.data.get("expectedFiles"): instance.data["expectedFiles"] = [] - dir = os.path.dirname(path) + dirname = os.path.dirname(path) file = os.path.basename(path) if "#" in file: @@ -358,9 +377,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["expectedFiles"].append(path) return - for i in range(self._frame_start, (self._frame_end + 1)): + if instance.data.get("slate"): + start_frame -= 1 + + for i in range(start_frame, (end_frame + 1)): instance.data["expectedFiles"].append( - os.path.join(dir, (file % i)).replace("\\", "/")) + os.path.join(dirname, (file % i)).replace("\\", "/")) def get_limit_groups(self): """Search for limit group nodes and return group name. diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index 306237c78c..35f2532c16 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -10,7 +10,10 @@ import clique import pyblish.api -import openpype.api +from openpype.client import ( + get_last_version_by_subset_name, + get_representations, +) from openpype.pipeline import ( get_representation_path, legacy_io, @@ -18,15 +21,23 @@ from openpype.pipeline import ( from openpype.pipeline.farm.patterning import match_aov_pattern -def get_resources(version, extension=None): +def get_resources(project_name, version, extension=None): """Get the files from the specific version.""" - query = {"type": "representation", "parent": version["_id"]} + + # TODO this functions seems to be weird + # - it's looking for representation with one extension or first (any) + # representation from a version? 
+ # - not sure how this should work, maybe it does for specific use cases + # but probably can't be used for all resources from 2D workflows + extensions = None if extension: - query["name"] = extension - - representation = legacy_io.find_one(query) - assert representation, "This is a bug" + extensions = [extension] + repre_docs = list(get_representations( + project_name, version_ids=[version["_id"]], extensions=extensions + )) + assert repre_docs, "This is a bug" + representation = repre_docs[0] directory = get_representation_path(representation) print("Source: ", directory) resources = sorted( @@ -103,6 +114,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): order = pyblish.api.IntegratorOrder + 0.2 icon = "tractor" deadline_plugin = "OpenPype" + targets = ["local"] hosts = ["fusion", "maya", "nuke", "celaction", "aftereffects", "harmony"] @@ -128,7 +140,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "OPENPYPE_LOG_NO_COLORS", "OPENPYPE_USERNAME", "OPENPYPE_RENDER_JOB", - "OPENPYPE_PUBLISH_JOB" + "OPENPYPE_PUBLISH_JOB", + "OPENPYPE_MONGO", + "OPENPYPE_VERSION" ] # custom deadline attributes @@ -145,7 +159,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # mapping of instance properties to be transfered to new instance for every # specified family instance_transfer = { - "slate": ["slateFrame"], + "slate": ["slateFrames", "slate"], "review": ["lutPath"], "render2d": ["bakingNukeScripts", "version"], "renderlayer": ["convertToScanline"] @@ -282,6 +296,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): for assembly_id in instance.data.get("assemblySubmissionJobs"): payload["JobInfo"]["JobDependency{}".format(job_index)] = assembly_id # noqa: E501 job_index += 1 + elif instance.data.get("bakingSubmissionJobs"): + self.log.info("Adding baking submission jobs as dependencies...") + job_index = 0 + for assembly_id in instance.data["bakingSubmissionJobs"]: + payload["JobInfo"]["JobDependency{}".format(job_index)] = assembly_id # noqa: E501 + job_index += 1 else: payload["JobInfo"]["JobDependency0"] = job["_id"] @@ -328,13 +348,21 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.log.info("Preparing to copy ...") start = instance.data.get("frameStart") end = instance.data.get("frameEnd") + project_name = legacy_io.active_project() # get latest version of subset # this will stop if subset wasn't published yet - version = openpype.api.get_latest_version(instance.data.get("asset"), - instance.data.get("subset")) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + instance.data.get("subset"), + asset_name=instance.data.get("asset") + ) + # get its files based on extension - subset_resources = get_resources(version, representation.get("ext")) + subset_resources = get_resources( + project_name, version, representation.get("ext") + ) r_col, _ = clique.assemble(subset_resources) # if override remove all frames we are expecting to be rendered @@ -429,9 +457,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): cam = [c for c in cameras if c in col.head] if cam: - subset_name = '{}_{}_{}'.format(group_name, cam, aov) + if aov: + subset_name = '{}_{}_{}'.format(group_name, cam, aov) + else: + subset_name = '{}_{}'.format(group_name, cam) else: - subset_name = '{}_{}'.format(group_name, aov) + if aov: + subset_name = '{}_{}'.format(group_name, aov) + else: + subset_name = '{}'.format(group_name) if isinstance(col, (list, tuple)): staging = 
os.path.dirname(col[0]) @@ -466,7 +500,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if instance_data.get("multipartExr"): preview = True - new_instance = copy(instance_data) + new_instance = deepcopy(instance_data) new_instance["subset"] = subset_name new_instance["subsetGroup"] = group_name if preview: @@ -564,11 +598,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): " This may cause issues on farm." ).format(staging)) + frame_start = int(instance.get("frameStartHandle")) + if instance.get("slate"): + frame_start -= 1 + rep = { "name": ext, "ext": ext, "files": [os.path.basename(f) for f in list(collection)], - "frameStart": int(instance.get("frameStartHandle")), + "frameStart": frame_start, "frameEnd": int(instance.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": staging, @@ -640,6 +678,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): def _solve_families(self, instance, preview=False): families = instance.get("families") + # if we have one representation with preview tag # flag whole instance for review and for ftrack if preview: @@ -667,9 +706,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.context = context self.anatomy = instance.context.data["anatomy"] - if hasattr(instance, "_log"): - data['_log'] = instance._log - asset = data.get("asset") or legacy_io.Session["AVALON_ASSET"] subset = data.get("subset") @@ -719,10 +755,17 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): " This may cause issues." ).format(source)) - families = ["render"] + family = "render" + if "prerender" in instance.data["families"]: + family = "prerender" + families = [family] + + # pass review to families if marked as review + if data.get("review"): + families.append("review") instance_skeleton_data = { - "family": "render", + "family": family, "subset": subset, "families": families, "asset": asset, @@ -741,14 +784,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "resolutionHeight": data.get("resolutionHeight", 1080), "multipartExr": data.get("multipartExr", False), "jobBatchName": data.get("jobBatchName", ""), - "useSequenceForReview": data.get("useSequenceForReview", True) + "useSequenceForReview": data.get("useSequenceForReview", True), + # map inputVersions `ObjectId` -> `str` so json supports it + "inputVersions": list(map(str, data.get("inputVersions", []))) } - if "prerender" in instance.data["families"]: - instance_skeleton_data.update({ - "family": "prerender", - "families": []}) - # skip locking version if we are creating v01 instance_version = instance.data.get("version") # take this if exists if instance_version != 1: @@ -883,8 +923,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): new_i = copy(i) new_i["version"] = at.get("version") new_i["subset"] = at.get("subset") + new_i["family"] = at.get("family") new_i["append"] = True - new_i["families"].append(at.get("family")) + # don't set subsetGroup if we are attaching + new_i.pop("subsetGroup") new_instances.append(new_i) self.log.info(" - {} / v{}".format( at.get("subset"), at.get("version"))) @@ -1006,9 +1048,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): prev_start = None prev_end = None - version = openpype.api.get_latest_version(asset_name=asset, - subset_name=subset - ) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + subset, + asset_name=asset + ) # Set prev start / end frames for comparison if not 
prev_start and not prev_end: @@ -1038,7 +1083,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): get publish_path Args: - anatomy (pype.lib.anatomy.Anatomy): + anatomy (openpype.pipeline.anatomy.Anatomy): template_data (dict): pre-calculated collected data for process asset (string): asset name subset (string): subset name (actually group name of subset) @@ -1053,7 +1098,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): based on 'publish' template """ if not version: - version = openpype.api.get_latest_version(asset, subset) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + subset, + asset_name=asset + ) if version: version = int(version["name"]) + 1 else: diff --git a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py index c2426e0d78..f0a3ddd246 100644 --- a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py +++ b/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py @@ -3,7 +3,7 @@ import requests import pyblish.api -from openpype.lib.delivery import collect_frames +from openpype.lib import collect_frames from openpype_modules.deadline.abstract_submit_deadline import requests_get diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index eeb1f7744c..9b35c9502d 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -6,13 +6,57 @@ import subprocess import json import platform import uuid -from Deadline.Scripting import RepositoryUtils, FileUtils +import re +from Deadline.Scripting import ( + RepositoryUtils, + FileUtils, + DirectoryUtils, + ProcessUtils, +) + + +def get_openpype_version_from_path(path, build=True): + """Get OpenPype version from provided path. + path (str): Path to scan. + build (bool, optional): Get only builds, not sources + + Returns: + str or None: version of OpenPype if found. + + """ + # fix path for application bundle on macos + if platform.system().lower() == "darwin": + path = os.path.join(path, "Contents", "MacOS", "lib", "Python") + + version_file = os.path.join(path, "openpype", "version.py") + if not os.path.isfile(version_file): + return None + + # skip if the version is not build + exe = os.path.join(path, "openpype_console.exe") + if platform.system().lower() in ["linux", "darwin"]: + exe = os.path.join(path, "openpype_console") + + # if only builds are requested + if build and not os.path.isfile(exe): # noqa: E501 + print(" ! 
path is not a build: {}".format(path)) + return None + + version = {} + with open(version_file, "r") as vf: + exec(vf.read(), version) + + version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"]) + return version_match[1] def get_openpype_executable(): """Return OpenPype Executable from Event Plug-in Settings""" config = RepositoryUtils.GetPluginConfig("OpenPype") - return config.GetConfigEntryWithDefault("OpenPypeExecutable", "") + exe_list = config.GetConfigEntryWithDefault("OpenPypeExecutable", "") + dir_list = config.GetConfigEntryWithDefault( + "OpenPypeInstallationDirs", "") + return exe_list, dir_list def inject_openpype_environment(deadlinePlugin): @@ -25,16 +69,94 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Injecting OpenPype environments ...") try: print(">>> Getting OpenPype executable ...") - exe_list = get_openpype_executable() - openpype_app = FileUtils.SearchFileList(exe_list) - if openpype_app == "": + exe_list, dir_list = get_openpype_executable() + openpype_versions = [] + # if the job requires specific OpenPype version, + # lets go over all available and find compatible build. + requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION") + if requested_version: + print(( + ">>> Scanning for compatible requested version {}" + ).format(requested_version)) + install_dir = DirectoryUtils.SearchDirectoryList(dir_list) + if install_dir: + print("--- Looking for OpenPype at: {}".format(install_dir)) + sub_dirs = [ + f.path for f in os.scandir(install_dir) + if f.is_dir() + ] + for subdir in sub_dirs: + version = get_openpype_version_from_path(subdir) + if not version: + continue + print(" - found: {} - {}".format(version, subdir)) + openpype_versions.append((version, subdir)) + + exe = FileUtils.SearchFileList(exe_list) + if openpype_versions: + # if looking for requested compatible version, + # add the implicitly specified to the list too. + print("Looking for OpenPype at: {}".format(os.path.dirname(exe))) + version = get_openpype_version_from_path( + os.path.dirname(exe)) + if version: + print(" - found: {} - {}".format( + version, os.path.dirname(exe) + )) + openpype_versions.append((version, os.path.dirname(exe))) + + if requested_version: + # sort detected versions + if openpype_versions: + # use natural sorting + openpype_versions.sort( + key=lambda ver: [ + int(t) if t.isdigit() else t.lower() + for t in re.split(r"(\d+)", ver[0]) + ]) + print(( + "*** Latest available version found is {}" + ).format(openpype_versions[-1][0])) + requested_major, requested_minor, _ = requested_version.split(".")[:3] # noqa: E501 + compatible_versions = [] + for version in openpype_versions: + v = version[0].split(".")[:3] + if v[0] == requested_major and v[1] == requested_minor: + compatible_versions.append(version) + if not compatible_versions: + raise RuntimeError( + ("Cannot find compatible version available " + "for version {} requested by the job. " + "Please add it through plugin configuration " + "in Deadline or install it to configured " + "directory.").format(requested_version)) + # sort compatible versions nad pick the last one + compatible_versions.sort( + key=lambda ver: [ + int(t) if t.isdigit() else t.lower() + for t in re.split(r"(\d+)", ver[0]) + ]) + print(( + "*** Latest compatible version found is {}" + ).format(compatible_versions[-1][0])) + # create list of executables for different platform and let + # Deadline decide. 
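The selection logic above, reduced to a standalone helper (a sketch mirroring the natural sort and the major.minor match used in the pre-load script; names are illustrative):

    import re

    def natural_key(version):
        """Sort key so that "3.14.10" orders after "3.14.9"."""
        return [int(t) if t.isdigit() else t.lower()
                for t in re.split(r"(\d+)", version)]

    def pick_compatible_build(requested, found):
        """Pick the newest found build matching the requested major.minor.

        "found" is a list of (version, directory) tuples, "requested" a
        version string such as "3.14.2". Returns a tuple or None.
        """
        req_major, req_minor = requested.split(".")[:2]
        compatible = [item for item in found
                      if item[0].split(".")[:2] == [req_major, req_minor]]
        if not compatible:
            return None
        return max(compatible, key=lambda item: natural_key(item[0]))

    # pick_compatible_build("3.14.2", [("3.13.0", "/a"), ("3.14.10", "/b")])
    # -> ("3.14.10", "/b")
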
+ exe_list = [ + os.path.join( + compatible_versions[-1][1], "openpype_console.exe"), + os.path.join( + compatible_versions[-1][1], "openpype_console") + ] + exe = FileUtils.SearchFileList(";".join(exe_list)) + if exe == "": raise RuntimeError( "OpenPype executable was not found " + - "in the semicolon separated list \"" + exe_list + "\". " + + "in the semicolon separated list " + + "\"" + ";".join(exe_list) + "\". " + "The path to the render executable can be configured " + "from the Plugin Configuration in the Deadline Monitor.") - print("--- OpenPype executable: {}".format(openpype_app)) + print("--- OpenPype executable: {}".format(exe)) # tempfile.TemporaryFile cannot be used because of locking temp_file_name = "{}_{}.json".format( @@ -45,9 +167,8 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Temporary path: {}".format(export_url)) args = [ - openpype_app, "--headless", - 'extractenvironments', + "extractenvironments", export_url ] @@ -71,15 +192,18 @@ def inject_openpype_environment(deadlinePlugin): if not os.environ.get("OPENPYPE_MONGO"): print(">>> Missing OPENPYPE_MONGO env var, process won't work") - env = os.environ - env["OPENPYPE_HEADLESS_MODE"] = "1" - env["AVALON_TIMEOUT"] = "5000" + os.environ["AVALON_TIMEOUT"] = "5000" - print(">>> Executing: {}".format(args)) - std_output = subprocess.check_output(args, - cwd=os.path.dirname(openpype_app), - env=env) - print(">>> Process result {}".format(std_output)) + args_str = subprocess.list2cmdline(args) + print(">>> Executing: {} {}".format(exe, args_str)) + process = ProcessUtils.SpawnProcess( + exe, args_str, os.path.dirname(exe) + ) + ProcessUtils.WaitForExit(process, -1) + if process.ExitCode != 0: + raise RuntimeError( + "Failed to run OpenPype process to extract environments." + ) print(">>> Loading file ...") with open(export_url) as fp: @@ -87,6 +211,13 @@ def inject_openpype_environment(deadlinePlugin): for key, value in contents.items(): deadlinePlugin.SetProcessEnvironmentVariable(key, value) + script_url = job.GetJobPluginInfoKeyValue("ScriptFilename") + if script_url: + + script_url = script_url.format(**contents).replace("\\", "/") + print(">>> Setting script path {}".format(script_url)) + job.SetJobPluginInfoKeyValue("ScriptFilename", script_url) + print(">>> Removing temporary file") os.remove(export_url) @@ -115,78 +246,6 @@ def inject_render_job_id(deadlinePlugin): print(">>> Injection end.") -def pype_command_line(executable, arguments, workingDirectory): - """Remap paths in comand line argument string. - - Using Deadline rempper it will remap all path found in command-line. - - Args: - executable (str): path to executable - arguments (str): arguments passed to executable - workingDirectory (str): working directory path - - Returns: - Tuple(executable, arguments, workingDirectory) - - """ - print("-" * 40) - print("executable: {}".format(executable)) - print("arguments: {}".format(arguments)) - print("workingDirectory: {}".format(workingDirectory)) - print("-" * 40) - print("Remapping arguments ...") - arguments = RepositoryUtils.CheckPathMapping(arguments) - print("* {}".format(arguments)) - print("-" * 40) - return executable, arguments, workingDirectory - - -def pype(deadlinePlugin): - """Remaps `PYPE_METADATA_FILE` and `PYPE_PYTHON_EXE` environment vars. - - `PYPE_METADATA_FILE` is used on farm to point to rendered data. This path - originates on platform from which this job was published. To be able to - publish on different platform, this path needs to be remapped. 
- - `PYPE_PYTHON_EXE` can be used to specify custom location of python - interpreter to use for Pype. This is remappeda also if present even - though it probably doesn't make much sense. - - Arguments: - deadlinePlugin: Deadline job plugin passed by Deadline - - """ - print(">>> Getting job ...") - job = deadlinePlugin.GetJob() - # PYPE should be here, not OPENPYPE - backward compatibility!! - pype_metadata = job.GetJobEnvironmentKeyValue("PYPE_METADATA_FILE") - pype_python = job.GetJobEnvironmentKeyValue("PYPE_PYTHON_EXE") - print(">>> Having backward compatible env vars {}/{}".format(pype_metadata, - pype_python)) - # test if it is pype publish job. - if pype_metadata: - pype_metadata = RepositoryUtils.CheckPathMapping(pype_metadata) - if platform.system().lower() == "linux": - pype_metadata = pype_metadata.replace("\\", "/") - - print("- remapping PYPE_METADATA_FILE: {}".format(pype_metadata)) - job.SetJobEnvironmentKeyValue("PYPE_METADATA_FILE", pype_metadata) - deadlinePlugin.SetProcessEnvironmentVariable( - "PYPE_METADATA_FILE", pype_metadata) - - if pype_python: - pype_python = RepositoryUtils.CheckPathMapping(pype_python) - if platform.system().lower() == "linux": - pype_python = pype_python.replace("\\", "/") - - print("- remapping PYPE_PYTHON_EXE: {}".format(pype_python)) - job.SetJobEnvironmentKeyValue("PYPE_PYTHON_EXE", pype_python) - deadlinePlugin.SetProcessEnvironmentVariable( - "PYPE_PYTHON_EXE", pype_python) - - deadlinePlugin.ModifyCommandLineCallback += pype_command_line - - def __main__(deadlinePlugin): print("*** GlobalJobPreload start ...") print(">>> Getting job ...") @@ -196,16 +255,17 @@ def __main__(deadlinePlugin): job.GetJobEnvironmentKeyValue('OPENPYPE_RENDER_JOB') or '0' openpype_publish_job = \ job.GetJobEnvironmentKeyValue('OPENPYPE_PUBLISH_JOB') or '0' + openpype_remote_job = \ + job.GetJobEnvironmentKeyValue('OPENPYPE_REMOTE_JOB') or '0' print("--- Job type - render {}".format(openpype_render_job)) print("--- Job type - publish {}".format(openpype_publish_job)) + print("--- Job type - remote {}".format(openpype_remote_job)) if openpype_publish_job == '1' and openpype_render_job == '1': raise RuntimeError("Misconfiguration. Job couldn't be both " + "render and publish.") if openpype_publish_job == '1': inject_render_job_id(deadlinePlugin) - elif openpype_render_job == '1': + elif openpype_render_job == '1' or openpype_remote_job == '1': inject_openpype_environment(deadlinePlugin) - else: - pype(deadlinePlugin) # backward compatibility with Pype2 diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param index 8bd6dce12d..b3ac18e20c 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param @@ -7,11 +7,20 @@ Index=0 Default=OpenPype Plugin for Deadline Description=Not configurable +[OpenPypeInstallationDirs] +Type=multilinemultifolder +Label=Directories where OpenPype versions are installed +Category=OpenPype Installation Directories +CategoryOrder=0 +Index=0 +Default=C:\Program Files (x86)\OpenPype +Description=Path or paths to directories where multiple versions of OpenPype might be installed. Enter every such path on separate lines. + [OpenPypeExecutable] Type=multilinemultifilename Label=OpenPype Executable Category=OpenPype Executables -CategoryOrder=0 +CategoryOrder=1 Index=0 Default= Description=The path to the OpenPype executable. 
Enter alternative paths on separate lines. diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py index 451d71fb63..6b0f69d98f 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py @@ -1,10 +1,19 @@ +#!/usr/bin/env python3 + from System.IO import Path from System.Text.RegularExpressions import Regex from Deadline.Plugins import PluginType, DeadlinePlugin -from Deadline.Scripting import StringUtils, FileUtils, RepositoryUtils +from Deadline.Scripting import ( + StringUtils, + FileUtils, + DirectoryUtils, + RepositoryUtils +) import re +import os +import platform ###################################################################### @@ -52,13 +61,115 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): self.AddStdoutHandlerCallback( ".*Progress: (\d+)%.*").HandleCallback += self.HandleProgress + @staticmethod + def get_openpype_version_from_path(path, build=True): + """Get OpenPype version from provided path. + path (str): Path to scan. + build (bool, optional): Get only builds, not sources + + Returns: + str or None: version of OpenPype if found. + + """ + # fix path for application bundle on macos + if platform.system().lower() == "darwin": + path = os.path.join(path, "Contents", "MacOS", "lib", "Python") + + version_file = os.path.join(path, "openpype", "version.py") + if not os.path.isfile(version_file): + return None + + # skip if the version is not build + exe = os.path.join(path, "openpype_console.exe") + if platform.system().lower() in ["linux", "darwin"]: + exe = os.path.join(path, "openpype_console") + + # if only builds are requested + if build and not os.path.isfile(exe): # noqa: E501 + print(f" ! path is not a build: {path}") + return None + + version = {} + with open(version_file, "r") as vf: + exec(vf.read(), version) + + version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"]) + return version_match[1] + def RenderExecutable(self): - exeList = self.GetConfigEntry("OpenPypeExecutable") - exe = FileUtils.SearchFileList(exeList) + job = self.GetJob() + openpype_versions = [] + # if the job requires specific OpenPype version, + # lets go over all available and find compatible build. + requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION") + if requested_version: + self.LogInfo(( + "Scanning for compatible requested " + f"version {requested_version}")) + dir_list = self.GetConfigEntry("OpenPypeInstallationDirs") + install_dir = DirectoryUtils.SearchDirectoryList(dir_list) + if dir: + sub_dirs = [ + f.path for f in os.scandir(install_dir) + if f.is_dir() + ] + for subdir in sub_dirs: + version = self.get_openpype_version_from_path(subdir) + if not version: + continue + openpype_versions.append((version, subdir)) + + exe_list = self.GetConfigEntry("OpenPypeExecutable") + exe = FileUtils.SearchFileList(exe_list) + if openpype_versions: + # if looking for requested compatible version, + # add the implicitly specified to the list too. 
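Both the pre-load script and this plugin read the version straight out of a build directory; the core of that lookup is roughly the following (a sketch that skips the macOS bundle fix-up and the build-only check; the path in the usage comment is illustrative):

    import os
    import re

    def read_build_version(build_dir):
        """Return "major.minor.patch" from <build_dir>/openpype/version.py, or None."""
        version_file = os.path.join(build_dir, "openpype", "version.py")
        if not os.path.isfile(version_file):
            return None
        namespace = {}
        with open(version_file, "r") as vf:
            exec(vf.read(), namespace)
        match = re.search(r"(\d+\.\d+\.\d+)", namespace.get("__version__", ""))
        return match[1] if match else None

    # read_build_version("C:/Program Files (x86)/OpenPype/3.14.1")  -> "3.14.1"
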
+ version = self.get_openpype_version_from_path( + os.path.dirname(exe)) + if version: + openpype_versions.append((version, os.path.dirname(exe))) + + if requested_version: + # sort detected versions + if openpype_versions: + openpype_versions.sort( + key=lambda ver: [ + int(t) if t.isdigit() else t.lower() + for t in re.split(r"(\d+)", ver[0]) + ]) + requested_major, requested_minor, _ = requested_version.split(".")[:3] # noqa: E501 + compatible_versions = [] + for version in openpype_versions: + v = version[0].split(".")[:3] + if v[0] == requested_major and v[1] == requested_minor: + compatible_versions.append(version) + if not compatible_versions: + self.FailRender(("Cannot find compatible version available " + "for version {} requested by the job. " + "Please add it through plugin configuration " + "in Deadline or install it to configured " + "directory.").format(requested_version)) + # sort compatible versions nad pick the last one + compatible_versions.sort( + key=lambda ver: [ + int(t) if t.isdigit() else t.lower() + for t in re.split(r"(\d+)", ver[0]) + ]) + # create list of executables for different platform and let + # Deadline decide. + exe_list = [ + os.path.join( + compatible_versions[-1][1], "openpype_console.exe"), + os.path.join( + compatible_versions[-1][1], "openpype_console") + ] + exe = FileUtils.SearchFileList(";".join(exe_list)) + if exe == "": self.FailRender( "OpenPype executable was not found " + - "in the semicolon separated list \"" + exeList + "\". " + + "in the semicolon separated list " + + "\"" + ";".join(exe_list) + "\". " + "The path to the render executable can be configured " + "from the Plugin Configuration in the Deadline Monitor.") return exe diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py index 9fca1b5391..625a3f1a28 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py @@ -56,7 +56,7 @@ def convert_value_by_type_name(value_type, value): return float(value) # Vectors will probably have more types - if value_type == "vec2f": + if value_type in ("vec2f", "float2"): return [float(item) for item in value.split(",")] # Matrix should be always have square size of element 3x3, 4x4 @@ -71,7 +71,7 @@ def convert_value_by_type_name(value_type, value): elif parts_len == 4: divisor = 2 elif parts_len == 9: - divisor == 3 + divisor = 3 elif parts_len == 16: divisor = 4 else: @@ -127,7 +127,7 @@ def convert_value_by_type_name(value_type, value): return output print(( - "MISSING IMPLEMENTATION:" + "Dev note (missing implementation):" " Unknown attrib type \"{}\". Value: {}" ).format(value_type, value)) return value @@ -183,7 +183,7 @@ def parse_oiio_xml_output(xml_string): else: value = child.text print(( - "MISSING IMPLEMENTATION:" + "Dev note (missing implementation):" " Unknown tag \"{}\". 
Value \"{}\"" ).format(tag_name, value)) @@ -453,7 +453,7 @@ class OpenPypeTileAssembler(DeadlinePlugin): # Swap to have input as foreground args.append("--swap") # Paste foreground to background - args.append("--paste +{}+{}".format(pos_x, pos_y)) + args.append("--paste {x:+d}{y:+d}".format(x=pos_x, y=pos_y)) args.append("-o") args.append(output_path) diff --git a/openpype/modules/example_addons/example_addon/addon.py b/openpype/modules/example_addons/example_addon/addon.py index 50554b1e43..ead647b41d 100644 --- a/openpype/modules/example_addons/example_addon/addon.py +++ b/openpype/modules/example_addons/example_addon/addon.py @@ -13,10 +13,7 @@ import click from openpype.modules import ( JsonFilesSettingsDef, OpenPypeAddOn, - ModulesManager -) -# Import interface defined by this addon to be able find other addons using it -from openpype_interfaces import ( + ModulesManager, IPluginPaths, ITrayAction ) diff --git a/openpype/modules/ftrack/__init__.py b/openpype/modules/ftrack/__init__.py index 7261254c6f..e520f08337 100644 --- a/openpype/modules/ftrack/__init__.py +++ b/openpype/modules/ftrack/__init__.py @@ -1,9 +1,13 @@ from .ftrack_module import ( FtrackModule, - FTRACK_MODULE_DIR + FTRACK_MODULE_DIR, + + resolve_ftrack_url, ) __all__ = ( "FtrackModule", - "FTRACK_MODULE_DIR" + "FTRACK_MODULE_DIR", + + "resolve_ftrack_url", ) diff --git a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py new file mode 100644 index 0000000000..21382007a0 --- /dev/null +++ b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py @@ -0,0 +1,311 @@ +import threading +import datetime +import copy +import collections + +import ftrack_api + +from openpype.lib import get_datetime_data +from openpype.settings.lib import ( + get_project_settings, + get_default_project_settings +) +from openpype_modules.ftrack.lib import ServerAction + + +class CreateDailyReviewSessionServerAction(ServerAction): + """Create daily review session object per project. + + Action creates review sessions based on settings. Settings define if is + action enabled and what is a template for review session name. Logic works + in a way that if review session with the name already exists then skip + process. If review session for current day does not exist but yesterdays + review exists and is empty then yesterdays is renamed otherwise creates + new review session. + + Also contains cycle creation of dailies which is triggered each morning. + This option must be enabled in project settings. Cycle creation is also + checked on registration of action. + """ + + identifier = "create.daily.review.session" + #: Action label. + label = "OpenPype Admin" + variant = "- Create Daily Review Session (Server)" + #: Action description. 
+ description = "Manually create daily review session" + role_list = {"Pypeclub", "Administrator", "Project Manager"} + + settings_key = "create_daily_review_session" + default_template = "{yy}{mm}{dd}" + + def __init__(self, *args, **kwargs): + super(CreateDailyReviewSessionServerAction, self).__init__( + *args, **kwargs + ) + + self._cycle_timer = None + self._last_cyle_time = None + self._day_delta = datetime.timedelta(days=1) + + def discover(self, session, entities, event): + """Show action only on AssetVersions.""" + + valid_selection = False + for ent in event["data"]["selection"]: + # Ignore entities that are not tasks or projects + if ent["entityType"].lower() in ( + "show", "task", "reviewsession", "assetversion" + ): + valid_selection = True + break + + if not valid_selection: + return False + return self.valid_roles(session, entities, event) + + def launch(self, session, entities, event): + project_entity = self.get_project_from_entity(entities[0], session) + project_name = project_entity["full_name"] + project_settings = self.get_project_settings_from_event( + event, project_name + ) + action_settings = self._extract_action_settings(project_settings) + project_name_by_id = { + project_entity["id"]: project_name + } + settings_by_project_id = { + project_entity["id"]: action_settings + } + self._process_review_session( + session, settings_by_project_id, project_name_by_id + ) + return True + + def _calculate_next_cycle_delta(self): + studio_default_settings = get_default_project_settings() + action_settings = ( + studio_default_settings + ["ftrack"] + [self.settings_frack_subkey] + [self.settings_key] + ) + cycle_hour_start = action_settings.get("cycle_hour_start") + if not cycle_hour_start: + h = m = s = 0 + else: + h, m, s = cycle_hour_start + + # Create threading timer which will trigger creation of report + # at the 00:00:01 of next day + # - callback will trigger another timer which will have 1 day offset + now = datetime.datetime.now() + # Create object of today morning + expected_next_trigger = datetime.datetime( + now.year, now.month, now.day, h, m, s + ) + if expected_next_trigger > now: + seconds = (expected_next_trigger - now).total_seconds() + else: + expected_next_trigger += self._day_delta + seconds = (expected_next_trigger - now).total_seconds() + return seconds, expected_next_trigger + + def register(self, *args, **kwargs): + """Override register to be able trigger """ + # Register server action as would be normally + super(CreateDailyReviewSessionServerAction, self).register( + *args, **kwargs + ) + + seconds_delta, cycle_time = self._calculate_next_cycle_delta() + + # Store cycle time which will be used to create next timer + self._last_cyle_time = cycle_time + # Create timer thread + self._cycle_timer = threading.Timer( + seconds_delta, self._timer_callback + ) + self._cycle_timer.start() + + self._check_review_session() + + def _timer_callback(self): + if ( + self._cycle_timer is not None + and self._last_cyle_time is not None + ): + seconds_delta, cycle_time = self._calculate_next_cycle_delta() + self._last_cyle_time = cycle_time + + self._cycle_timer = threading.Timer( + seconds_delta, self._timer_callback + ) + self._cycle_timer.start() + self._check_review_session() + + def _check_review_session(self): + session = ftrack_api.Session( + server_url=self.session.server_url, + api_key=self.session.api_key, + api_user=self.session.api_user, + auto_connect_event_hub=False + ) + project_entities = session.query( + "select id, full_name from Project" + ).all() + 
project_names_by_id = { + project_entity["id"]: project_entity["full_name"] + for project_entity in project_entities + } + + action_settings_by_project_id = self._get_action_settings( + project_names_by_id + ) + enabled_action_settings_by_project_id = {} + for item in action_settings_by_project_id.items(): + project_id, action_settings = item + if action_settings.get("cycle_enabled"): + enabled_action_settings_by_project_id[project_id] = ( + action_settings + ) + + if not enabled_action_settings_by_project_id: + self.log.info(( + "There are no projects that have enabled" + " cycle review sesison creation" + )) + + else: + self._process_review_session( + session, + enabled_action_settings_by_project_id, + project_names_by_id + ) + + session.close() + + def _process_review_session( + self, session, settings_by_project_id, project_names_by_id + ): + review_sessions = session.query(( + "select id, name, project_id" + " from ReviewSession where project_id in ({})" + ).format(self.join_query_keys(settings_by_project_id))).all() + + review_sessions_by_project_id = collections.defaultdict(list) + for review_session in review_sessions: + project_id = review_session["project_id"] + review_sessions_by_project_id[project_id].append(review_session) + + # Prepare fill data for today's review sesison and yesterdays + now = datetime.datetime.now() + today_obj = datetime.datetime( + now.year, now.month, now.day, 0, 0, 0 + ) + yesterday_obj = today_obj - self._day_delta + + today_fill_data = get_datetime_data(today_obj) + yesterday_fill_data = get_datetime_data(yesterday_obj) + + # Loop through projects and try to create daily reviews + for project_id, action_settings in settings_by_project_id.items(): + review_session_template = ( + action_settings["review_session_template"] + ).strip() or self.default_template + + today_project_fill_data = copy.deepcopy(today_fill_data) + yesterday_project_fill_data = copy.deepcopy(yesterday_fill_data) + project_name = project_names_by_id[project_id] + today_project_fill_data["project_name"] = project_name + yesterday_project_fill_data["project_name"] = project_name + + today_session_name = self._fill_review_template( + review_session_template, today_project_fill_data + ) + yesterday_session_name = self._fill_review_template( + review_session_template, yesterday_project_fill_data + ) + # Skip if today's session name could not be filled + if not today_session_name: + continue + + # Find matchin review session + project_review_sessions = review_sessions_by_project_id[project_id] + todays_session = None + yesterdays_session = None + for review_session in project_review_sessions: + session_name = review_session["name"] + if session_name == today_session_name: + todays_session = review_session + break + elif session_name == yesterday_session_name: + yesterdays_session = review_session + + # Skip if today's session already exist + if todays_session is not None: + self.log.debug(( + "Todays ReviewSession \"{}\"" + " in project \"{}\" already exists" + ).format(today_session_name, project_name)) + continue + + # Check if there is yesterday's session and is empty + # - in that case just rename it + if ( + yesterdays_session is not None + and len(yesterdays_session["review_session_objects"]) == 0 + ): + self.log.debug(( + "Renaming yesterdays empty review session \"{}\" to \"{}\"" + " in project \"{}\"" + ).format( + yesterday_session_name, today_session_name, project_name + )) + yesterdays_session["name"] = today_session_name + session.commit() + continue + + # Create new review 
session with new name + self.log.debug(( + "Creating new review session \"{}\" in project \"{}\"" + ).format(today_session_name, project_name)) + session.create("ReviewSession", { + "project_id": project_id, + "name": today_session_name + }) + session.commit() + + def _get_action_settings(self, project_names_by_id): + settings_by_project_id = {} + for project_id, project_name in project_names_by_id.items(): + project_settings = get_project_settings(project_name) + action_settings = self._extract_action_settings(project_settings) + settings_by_project_id[project_id] = action_settings + return settings_by_project_id + + def _extract_action_settings(self, project_settings): + return ( + project_settings + .get("ftrack", {}) + .get(self.settings_frack_subkey, {}) + .get(self.settings_key) + ) or {} + + def _fill_review_template(self, template, data): + output = None + try: + output = template.format(**data) + except Exception: + self.log.warning( + ( + "Failed to fill review session template {} with data {}" + ).format(template, data), + exc_info=True + ) + return output + + +def register(session): + '''Register plugin. Called when used as an plugin.''' + CreateDailyReviewSessionServerAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py index 975e49cb28..332648cd02 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py @@ -1,9 +1,8 @@ import json +import copy -from openpype.api import ProjectSettings -from openpype.lib import create_project -from openpype.pipeline import AvalonMongoDB -from openpype.settings import SaveWarningExc +from openpype.client import get_project, create_project +from openpype.settings import ProjectSettings, SaveWarningExc from openpype_modules.ftrack.lib import ( ServerAction, @@ -363,12 +362,8 @@ class PrepareProjectServer(ServerAction): project_name = project_entity["full_name"] # Try to find project document - dbcon = AvalonMongoDB() - dbcon.install() - dbcon.Session["AVALON_PROJECT"] = project_name - project_doc = dbcon.find_one({ - "type": "project" - }) + project_doc = get_project(project_name) + # Create project if is not available # - creation is required to be able set project anatomy and attributes if not project_doc: @@ -376,9 +371,11 @@ class PrepareProjectServer(ServerAction): self.log.info("Creating project \"{} [{}]\"".format( project_name, project_code )) - create_project(project_name, project_code, dbcon=dbcon) - - dbcon.uninstall() + create_project(project_name, project_code) + self.trigger_event( + "openpype.project.created", + {"project_name": project_name} + ) project_settings = ProjectSettings(project_name) project_anatomy_settings = project_settings["project_anatomy"] @@ -406,6 +403,10 @@ class PrepareProjectServer(ServerAction): self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value)) session.commit() + event_data = copy.deepcopy(in_data) + event_data["project_name"] = project_name + self.trigger_event("openpype.project.prepared", event_data) + return True diff --git a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py index 868bbb8463..1209375f82 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py +++ 
b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py @@ -356,7 +356,7 @@ class PushHierValuesToNonHier(ServerAction): values_per_entity_id[entity_id][key] = None values = query_custom_attributes( - session, all_ids_with_parents, hier_attr_ids, True + session, hier_attr_ids, all_ids_with_parents, True ) for item in values: entity_id = item["entity_id"] diff --git a/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py index 58f79e8a2b..df9147bdf7 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py @@ -1,7 +1,8 @@ import time import sys import json -import traceback + +import ftrack_api from openpype_modules.ftrack.lib import ServerAction from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory @@ -180,6 +181,13 @@ class SyncToAvalonServer(ServerAction): "* Total time: {}".format(time_7 - time_start) ) + if self.entities_factory.project_created: + event = ftrack_api.event.base.Event( + topic="openpype.project.created", + data={"project_name": project_name} + ) + self.session.event_hub.publish(event) + report = self.entities_factory.report() if report and report.get("items"): default_title = "Synchronization report ({}):".format( diff --git a/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py b/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py new file mode 100644 index 0000000000..d160b7200d --- /dev/null +++ b/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py @@ -0,0 +1,346 @@ +import copy +import json +import collections + +import ftrack_api + +from openpype_modules.ftrack.lib import ( + ServerAction, + statics_icon, +) +from openpype_modules.ftrack.lib.avalon_sync import create_chunks + + +class TransferHierarchicalValues(ServerAction): + """Transfer values across hierarhcical attributes. + + Aalso gives ability to convert types meanwhile. That is limited to + conversions between numbers and strings + - int <-> float + - in, float -> string + """ + + identifier = "transfer.hierarchical.values" + label = "OpenPype Admin" + variant = "- Transfer values between 2 custom attributes" + description = ( + "Move values from a hierarchical attribute to" + " second hierarchical attribute." + ) + icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") + + all_project_entities_query = ( + "select id, name, parent_id, link" + " from TypedContext where project_id is \"{}\"" + ) + cust_attr_query = ( + "select value, entity_id from CustomAttributeValue" + " where entity_id in ({}) and configuration_id is \"{}\"" + ) + settings_key = "transfer_values_of_hierarchical_attributes" + + def discover(self, session, entities, event): + """Show anywhere.""" + + return self.valid_roles(session, entities, event) + + def _selection_interface(self, session, event_values=None): + title = "Transfer hierarchical values" + + attr_confs = session.query( + ( + "select id, key from CustomAttributeConfiguration" + " where is_hierarchical is true" + ) + ).all() + attr_items = [] + for attr_conf in attr_confs: + attr_items.append({ + "value": attr_conf["id"], + "label": attr_conf["key"] + }) + + if len(attr_items) < 2: + return { + "title": title, + "items": [{ + "type": "label", + "value": ( + "Didn't found custom attributes" + " that can be transfered." 
+ ) + }] + } + + attr_items = sorted(attr_items, key=lambda item: item["label"]) + items = [] + item_splitter = {"type": "label", "value": "---"} + items.append({ + "type": "label", + "value": ( + "Please select source and destination" + " Custom attribute
" + ) + }) + items.append({ + "type": "label", + "value": ( + "WARNING: This will take affect for all projects!" + ) + }) + if event_values: + items.append({ + "type": "label", + "value": ( + "Note: Please select 2 different custom attributes." + ) + }) + + items.append(item_splitter) + + src_item = { + "type": "enumerator", + "label": "Source", + "name": "src_attr_id", + "data": copy.deepcopy(attr_items) + } + dst_item = { + "type": "enumerator", + "label": "Destination", + "name": "dst_attr_id", + "data": copy.deepcopy(attr_items) + } + delete_item = { + "type": "boolean", + "name": "delete_dst_attr_first", + "label": "Delete first", + "value": False + } + if event_values: + src_item["value"] = event_values["src_attr_id"] + dst_item["value"] = event_values["dst_attr_id"] + delete_item["value"] = event_values["delete_dst_attr_first"] + + items.append(src_item) + items.append(dst_item) + items.append(item_splitter) + items.append({ + "type": "label", + "value": ( + "WARNING: All values from destination" + " Custom Attribute will be removed if this is enabled." + ) + }) + items.append(delete_item) + + return { + "title": title, + "items": items + } + + def interface(self, session, entities, event): + if event["data"].get("values", {}): + return None + + return self._selection_interface(session) + + def launch(self, session, entities, event): + values = event["data"].get("values", {}) + if not values: + return None + src_attr_id = values["src_attr_id"] + dst_attr_id = values["dst_attr_id"] + delete_dst_values = values["delete_dst_attr_first"] + + if not src_attr_id or not dst_attr_id: + self.log.info("Attributes were not filled. Nothing to do.") + return { + "success": True, + "message": "Nothing to do" + } + + if src_attr_id == dst_attr_id: + self.log.info(( + "Same attributes were selected {}, {}." + " Showing interface again." 
+ ).format(src_attr_id, dst_attr_id)) + return self._selection_interface(session, values) + + # Query custom attrbutes + src_conf = session.query(( + "select id from CustomAttributeConfiguration where id is {}" + ).format(src_attr_id)).one() + dst_conf = session.query(( + "select id from CustomAttributeConfiguration where id is {}" + ).format(dst_attr_id)).one() + src_type_name = src_conf["type"]["name"] + dst_type_name = dst_conf["type"]["name"] + # Limit conversion to + # - same type -> same type (there is no need to do conversion) + # - number -> number (int to float and back) + # - number -> str (any number can be converted to str) + src_type = None + dst_type = None + if src_type_name == "number" or src_type_name != dst_type_name: + src_type = self._get_attr_type(dst_conf) + dst_type = self._get_attr_type(dst_conf) + valid = False + # Can convert numbers + if src_type in (int, float) and dst_type in (int, float): + valid = True + # Can convert numbers to string + elif dst_type is str: + valid = True + + if not valid: + self.log.info(( + "Don't know how to properly convert" + " custom attribute types {} > {}" + ).format(src_type_name, dst_type_name)) + return { + "message": ( + "Don't know how to properly convert" + " custom attribute types {} > {}" + ).format(src_type_name, dst_type_name), + "success": False + } + + # Query source values + src_attr_values = session.query( + ( + "select value, entity_id" + " from CustomAttributeValue" + " where configuration_id is {}" + ).format(src_attr_id) + ).all() + + self.log.debug("Queried source values.") + failed_entity_ids = [] + if dst_type is not None: + self.log.debug("Converting source values to desctination type") + value_by_id = {} + for attr_value in src_attr_values: + entity_id = attr_value["entity_id"] + value = attr_value["value"] + if value is not None: + try: + if dst_type is not None: + value = dst_type(value) + value_by_id[entity_id] = value + except Exception: + failed_entity_ids.append(entity_id) + + if failed_entity_ids: + self.log.info( + "Couldn't convert some values to destination attribute" + ) + return { + "success": False, + "message": ( + "Couldn't convert some values to destination attribute" + ) + } + + # Delete destination custom attributes first + if delete_dst_values: + self.log.info("Deleting destination custom attribute values first") + self._delete_custom_attribute_values(session, dst_attr_id) + + self.log.info("Applying source values on destination custom attribute") + self._apply_values(session, value_by_id, dst_attr_id) + return True + + def _delete_custom_attribute_values(self, session, dst_attr_id): + dst_attr_values = session.query( + ( + "select configuration_id, entity_id" + " from CustomAttributeValue" + " where configuration_id is {}" + ).format(dst_attr_id) + ).all() + delete_operations = [] + for attr_value in dst_attr_values: + entity_id = attr_value["entity_id"] + configuration_id = attr_value["configuration_id"] + entity_key = collections.OrderedDict(( + ("configuration_id", configuration_id), + ("entity_id", entity_id) + )) + delete_operations.append( + ftrack_api.operation.DeleteEntityOperation( + "CustomAttributeValue", + entity_key + ) + ) + + if not delete_operations: + return + + for chunk in create_chunks(delete_operations, 500): + for operation in chunk: + session.recorded_operations.push(operation) + session.commit() + + def _apply_values(self, session, value_by_id, dst_attr_id): + dst_attr_values = session.query( + ( + "select configuration_id, entity_id" + " from CustomAttributeValue" + 
" where configuration_id is {}" + ).format(dst_attr_id) + ).all() + + dst_entity_ids_with_value = { + item["entity_id"] + for item in dst_attr_values + } + operations = [] + for entity_id, value in value_by_id.items(): + entity_key = collections.OrderedDict(( + ("configuration_id", dst_attr_id), + ("entity_id", entity_id) + )) + if entity_id in dst_entity_ids_with_value: + operations.append( + ftrack_api.operation.UpdateEntityOperation( + "CustomAttributeValue", + entity_key, + "value", + ftrack_api.symbol.NOT_SET, + value + ) + ) + else: + operations.append( + ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + entity_key, + {"value": value} + ) + ) + + if not operations: + return + + for chunk in create_chunks(operations, 500): + for operation in chunk: + session.recorded_operations.push(operation) + session.commit() + + def _get_attr_type(self, conf_def): + type_name = conf_def["type"]["name"] + if type_name == "text": + return str + + if type_name == "number": + config = json.loads(conf_def["config"]) + if config["isdecimal"]: + return float + return int + return None + + +def register(session): + '''Register plugin. Called when used as an plugin.''' + + TransferHierarchicalValues(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py index 0914933de4..dc76920a57 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py @@ -1,10 +1,11 @@ import collections import datetime +import copy import ftrack_api from openpype_modules.ftrack.lib import ( BaseEvent, - query_custom_attributes + query_custom_attributes, ) @@ -124,10 +125,15 @@ class PushFrameValuesToTaskEvent(BaseEvent): # Separate value changes and task parent changes _entities_info = [] + added_entities = [] + added_entity_ids = set() task_parent_changes = [] for entity_info in entities_info: if entity_info["entity_type"].lower() == "task": task_parent_changes.append(entity_info) + elif entity_info.get("action") == "add": + added_entities.append(entity_info) + added_entity_ids.add(entity_info["entityId"]) else: _entities_info.append(entity_info) entities_info = _entities_info @@ -136,6 +142,13 @@ class PushFrameValuesToTaskEvent(BaseEvent): interesting_data, changed_keys_by_object_id = self.filter_changes( session, event, entities_info, interest_attributes ) + self.interesting_data_for_added( + session, + added_entities, + interest_attributes, + interesting_data, + changed_keys_by_object_id + ) if not interesting_data and not task_parent_changes: return @@ -151,9 +164,13 @@ class PushFrameValuesToTaskEvent(BaseEvent): # - it is a complex way how to find out if interesting_data: self.process_attribute_changes( - session, object_types_by_name, - interesting_data, changed_keys_by_object_id, - interest_entity_types, interest_attributes + session, + object_types_by_name, + interesting_data, + changed_keys_by_object_id, + interest_entity_types, + interest_attributes, + added_entity_ids ) if task_parent_changes: @@ -163,8 +180,12 @@ class PushFrameValuesToTaskEvent(BaseEvent): ) def process_task_parent_change( - self, session, object_types_by_name, task_parent_changes, - interest_entity_types, interest_attributes + self, + session, + object_types_by_name, + task_parent_changes, + interest_entity_types, + interest_attributes ): """Push custom attribute values 
if task parent has changed. @@ -176,6 +197,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): real hierarchical value and non hierarchical custom attribute value should be set to hierarchical value. """ + # Store task ids which were created or moved under parent with entity # type defined in settings (interest_entity_types). task_ids = set() @@ -380,33 +402,49 @@ class PushFrameValuesToTaskEvent(BaseEvent): uncommited_changes = False for idx, item in enumerate(changes): new_value = item["new_value"] + old_value = item["old_value"] attr_id = item["attr_id"] entity_id = item["entity_id"] attr_key = item["attr_key"] - entity_key = collections.OrderedDict() - entity_key["configuration_id"] = attr_id - entity_key["entity_id"] = entity_id + entity_key = collections.OrderedDict(( + ("configuration_id", attr_id), + ("entity_id", entity_id) + )) self._cached_changes.append({ "attr_key": attr_key, "entity_id": entity_id, "value": new_value, "time": datetime.datetime.now() }) + old_value_is_set = ( + old_value is not ftrack_api.symbol.NOT_SET + and old_value is not None + ) if new_value is None: + if not old_value_is_set: + continue op = ftrack_api.operation.DeleteEntityOperation( "CustomAttributeValue", entity_key ) - else: + + elif old_value_is_set: op = ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", + "CustomAttributeValue", entity_key, "value", - ftrack_api.symbol.NOT_SET, + old_value, new_value ) + else: + op = ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + entity_key, + {"value": new_value} + ) + session.recorded_operations.push(op) self.log.info(( "Changing Custom Attribute \"{}\" to value" @@ -432,9 +470,14 @@ class PushFrameValuesToTaskEvent(BaseEvent): self.log.warning("Changing of values failed.", exc_info=True) def process_attribute_changes( - self, session, object_types_by_name, - interesting_data, changed_keys_by_object_id, - interest_entity_types, interest_attributes + self, + session, + object_types_by_name, + interesting_data, + changed_keys_by_object_id, + interest_entity_types, + interest_attributes, + added_entity_ids ): # Prepare task object id task_object_id = object_types_by_name["task"]["id"] @@ -522,15 +565,26 @@ class PushFrameValuesToTaskEvent(BaseEvent): parent_id_by_task_id[task_id] = task_entity["parent_id"] self.finalize_attribute_changes( - session, interesting_data, - changed_keys, attrs_by_obj_id, hier_attrs, - task_entity_ids, parent_id_by_task_id + session, + interesting_data, + changed_keys, + attrs_by_obj_id, + hier_attrs, + task_entity_ids, + parent_id_by_task_id, + added_entity_ids ) def finalize_attribute_changes( - self, session, interesting_data, - changed_keys, attrs_by_obj_id, hier_attrs, - task_entity_ids, parent_id_by_task_id + self, + session, + interesting_data, + changed_keys, + attrs_by_obj_id, + hier_attrs, + task_entity_ids, + parent_id_by_task_id, + added_entity_ids ): attr_id_to_key = {} for attr_confs in attrs_by_obj_id.values(): @@ -550,7 +604,11 @@ class PushFrameValuesToTaskEvent(BaseEvent): attr_ids = set(attr_id_to_key.keys()) current_values_by_id = self.get_current_values( - session, attr_ids, entity_ids, task_entity_ids, hier_attrs + session, + attr_ids, + entity_ids, + task_entity_ids, + hier_attrs ) changes = [] @@ -560,14 +618,25 @@ class PushFrameValuesToTaskEvent(BaseEvent): parent_id = entity_id values = interesting_data[parent_id] + added_entity = entity_id in added_entity_ids for attr_id, old_value in current_values.items(): + if added_entity and attr_id in hier_attrs: + 
continue + attr_key = attr_id_to_key.get(attr_id) if not attr_key: continue # Convert new value from string new_value = values.get(attr_key) - if new_value is not None and old_value is not None: + new_value_is_valid = ( + old_value is not ftrack_api.symbol.NOT_SET + and new_value is not None + ) + if added_entity and not new_value_is_valid: + continue + + if new_value is not None and new_value_is_valid: try: new_value = type(old_value)(new_value) except Exception: @@ -581,6 +650,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): changes.append({ "new_value": new_value, "attr_id": attr_id, + "old_value": old_value, "entity_id": entity_id, "attr_key": attr_key }) @@ -599,6 +669,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): interesting_data = {} changed_keys_by_object_id = {} + for entity_info in entities_info: # Care only about changes if specific keys entity_changes = {} @@ -644,16 +715,123 @@ class PushFrameValuesToTaskEvent(BaseEvent): return interesting_data, changed_keys_by_object_id + def interesting_data_for_added( + self, + session, + added_entities, + interest_attributes, + interesting_data, + changed_keys_by_object_id + ): + if not added_entities or not interest_attributes: + return + + object_type_ids = set() + entity_ids = set() + all_entity_ids = set() + object_id_by_entity_id = {} + project_id = None + entity_ids_by_parent_id = collections.defaultdict(set) + for entity_info in added_entities: + object_id = entity_info["objectTypeId"] + entity_id = entity_info["entityId"] + object_type_ids.add(object_id) + entity_ids.add(entity_id) + object_id_by_entity_id[entity_id] = object_id + + for item in entity_info["parents"]: + entity_id = item["entityId"] + all_entity_ids.add(entity_id) + parent_id = item["parentId"] + if not parent_id: + project_id = entity_id + else: + entity_ids_by_parent_id[parent_id].add(entity_id) + + hier_attrs = self.get_hierarchical_configurations( + session, interest_attributes + ) + if not hier_attrs: + return + + hier_attrs_key_by_id = { + attr_conf["id"]: attr_conf["key"] + for attr_conf in hier_attrs + } + default_values_by_key = { + attr_conf["key"]: attr_conf["default"] + for attr_conf in hier_attrs + } + + values = query_custom_attributes( + session, list(hier_attrs_key_by_id.keys()), all_entity_ids, True + ) + values_per_entity_id = {} + for entity_id in all_entity_ids: + values_per_entity_id[entity_id] = {} + for attr_name in interest_attributes: + values_per_entity_id[entity_id][attr_name] = None + + for item in values: + entity_id = item["entity_id"] + key = hier_attrs_key_by_id[item["configuration_id"]] + values_per_entity_id[entity_id][key] = item["value"] + + fill_queue = collections.deque() + fill_queue.append((project_id, default_values_by_key)) + while fill_queue: + item = fill_queue.popleft() + entity_id, values_by_key = item + entity_values = values_per_entity_id[entity_id] + new_values_by_key = copy.deepcopy(values_by_key) + for key, value in values_by_key.items(): + current_value = entity_values[key] + if current_value is None: + entity_values[key] = value + else: + new_values_by_key[key] = current_value + + for child_id in entity_ids_by_parent_id[entity_id]: + fill_queue.append((child_id, new_values_by_key)) + + for entity_id in entity_ids: + entity_changes = {} + for key, value in values_per_entity_id[entity_id].items(): + if value is not None: + entity_changes[key] = value + + if not entity_changes: + continue + + interesting_data[entity_id] = entity_changes + object_id = object_id_by_entity_id[entity_id] + if object_id not in 
changed_keys_by_object_id: + changed_keys_by_object_id[object_id] = set() + changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) + def get_current_values( - self, session, attr_ids, entity_ids, task_entity_ids, hier_attrs + self, + session, + attr_ids, + entity_ids, + task_entity_ids, + hier_attrs ): current_values_by_id = {} if not attr_ids or not entity_ids: return current_values_by_id + for entity_id in entity_ids: + current_values_by_id[entity_id] = {} + for attr_id in attr_ids: + current_values_by_id[entity_id][attr_id] = ( + ftrack_api.symbol.NOT_SET + ) + values = query_custom_attributes( session, attr_ids, entity_ids, True ) + for item in values: entity_id = item["entity_id"] attr_id = item["configuration_id"] @@ -699,6 +877,18 @@ class PushFrameValuesToTaskEvent(BaseEvent): output[obj_id][attr["key"]] = attr["id"] return output, hiearchical + def get_hierarchical_configurations(self, session, interest_attributes): + hier_attr_query = ( + "select id, key, object_type_id, is_hierarchical, default" + " from CustomAttributeConfiguration" + " where key in ({}) and is_hierarchical is true" + ) + if not interest_attributes: + return [] + return list(session.query(hier_attr_query.format( + self.join_query_keys(interest_attributes), + )).all()) + def register(session): PushFrameValuesToTaskEvent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index b5f199b3e4..e549de7ed0 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -12,6 +12,13 @@ from pymongo import UpdateOne import arrow import ftrack_api +from openpype.client import ( + get_project, + get_assets, + get_archived_assets, + get_asset_ids_with_subsets +) +from openpype.client.operations import CURRENT_ASSET_DOC_SCHEMA from openpype.pipeline import AvalonMongoDB, schema from openpype_modules.ftrack.lib import ( @@ -29,7 +36,6 @@ from openpype_modules.ftrack.lib.avalon_sync import ( convert_to_fps, InvalidFpsValue ) -from openpype.lib import CURRENT_DOC_SCHEMAS class SyncToAvalonEvent(BaseEvent): @@ -149,12 +155,11 @@ class SyncToAvalonEvent(BaseEvent): @property def avalon_entities(self): if self._avalon_ents is None: + project_name = self.cur_project["full_name"] self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = ( - self.cur_project["full_name"] - ) - avalon_project = self.dbcon.find_one({"type": "project"}) - avalon_entities = list(self.dbcon.find({"type": "asset"})) + self.dbcon.Session["AVALON_PROJECT"] = project_name + avalon_project = get_project(project_name) + avalon_entities = list(get_assets(project_name)) self._avalon_ents = (avalon_project, avalon_entities) return self._avalon_ents @@ -284,28 +289,21 @@ class SyncToAvalonEvent(BaseEvent): self._avalon_ents_by_ftrack_id[ftrack_id] = doc @property - def avalon_subsets_by_parents(self): - if self._avalon_subsets_by_parents is None: - self._avalon_subsets_by_parents = collections.defaultdict(list) - self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = ( - self.cur_project["full_name"] + def avalon_asset_ids_with_subsets(self): + if self._avalon_asset_ids_with_subsets is None: + project_name = self.cur_project["full_name"] + self._avalon_asset_ids_with_subsets = get_asset_ids_with_subsets( + project_name ) - for subset in self.dbcon.find({"type": "subset"}): - 
self._avalon_subsets_by_parents[subset["parent"]].append( - subset - ) - return self._avalon_subsets_by_parents + + return self._avalon_asset_ids_with_subsets @property def avalon_archived_by_id(self): if self._avalon_archived_by_id is None: self._avalon_archived_by_id = {} - self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = ( - self.cur_project["full_name"] - ) - for asset in self.dbcon.find({"type": "archived_asset"}): + project_name = self.cur_project["full_name"] + for asset in get_archived_assets(project_name): self._avalon_archived_by_id[asset["_id"]] = asset return self._avalon_archived_by_id @@ -327,7 +325,7 @@ class SyncToAvalonEvent(BaseEvent): avalon_project, avalon_entities = self.avalon_entities self._changeability_by_mongo_id[avalon_project["_id"]] = False self._bubble_changeability( - list(self.avalon_subsets_by_parents.keys()) + list(self.avalon_asset_ids_with_subsets) ) return self._changeability_by_mongo_id @@ -449,14 +447,9 @@ class SyncToAvalonEvent(BaseEvent): if not entity: # if entity is not found then it is subset without parent if entity_id in unchangeable_ids: - _subset_ids = [ - str(sub["_id"]) for sub in - self.avalon_subsets_by_parents[entity_id] - ] - joined_subset_ids = "| ".join(_subset_ids) self.log.warning(( - "Parent <{}> for subsets <{}> does not exist" - ).format(str(entity_id), joined_subset_ids)) + "Parent <{}> with subsets does not exist" + ).format(str(entity_id))) else: self.log.warning(( "In avalon are entities without valid parents that" @@ -483,7 +476,7 @@ class SyncToAvalonEvent(BaseEvent): self._avalon_ents_by_parent_id = None self._avalon_ents_by_ftrack_id = None self._avalon_ents_by_name = None - self._avalon_subsets_by_parents = None + self._avalon_asset_ids_with_subsets = None self._changeability_by_mongo_id = None self._avalon_archived_by_id = None self._avalon_archived_by_name = None @@ -704,13 +697,22 @@ class SyncToAvalonEvent(BaseEvent): continue auto_sync = changes[CUST_ATTR_AUTO_SYNC]["new"] - if auto_sync == "1": + turned_on = auto_sync == "1" + ft_project = self.cur_project + username = self._get_username(session, event) + message = ( + "Auto sync was turned {} for project \"{}\" by \"{}\"." + ).format( + "on" if turned_on else "off", + ft_project["full_name"], + username + ) + if turned_on: + message += " Triggering syncToAvalon action." + self.log.debug(message) + + if turned_on: # Trigger sync to avalon action if auto sync was turned on - ft_project = self.cur_project - self.log.debug(( - "Auto sync was turned on for project <{}>." - " Triggering syncToAvalon action." - ).format(ft_project["full_name"])) selection = [{ "entityId": ft_project["id"], "entityType": "show" @@ -858,6 +860,26 @@ class SyncToAvalonEvent(BaseEvent): self.report() return True + def _get_username(self, session, event): + username = "Unknown" + event_source = event.get("source") + if not event_source: + return username + user_info = event_source.get("user") + if not user_info: + return username + user_id = user_info.get("id") + if not user_id: + return username + + user_entity = session.query( + "User where id is {}".format(user_id) + ).first() + if user_entity: + username = user_entity["username"] or username + return username + + def process_removed(self): """ Handles removed entities (not removed tasks - handle separately). 
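Several handlers in this changeset now publish OpenPype topics ("openpype.project.created", "openpype.project.prepared", "openpype.project.structure.created") on the ftrack event hub. For reference, the following is a minimal sketch of how a separate listener could consume one of those topics with plain ftrack_api; the standalone session setup and the callback name are illustrative only and are not part of this change.

import ftrack_api

# Connect the event hub so published events are received.
session = ftrack_api.Session(auto_connect_event_hub=True)


def on_project_created(event):
    # The publishers in this changeset store the project name in event data.
    project_name = event["data"].get("project_name")
    print("Project created in ftrack: {}".format(project_name))


session.event_hub.subscribe(
    "topic=openpype.project.created", on_project_created
)
# Block and process incoming events.
session.event_hub.wait()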
@@ -1214,7 +1236,7 @@ class SyncToAvalonEvent(BaseEvent): "_id": mongo_id, "name": name, "type": "asset", - "schema": CURRENT_DOC_SCHEMAS["asset"], + "schema": CURRENT_ASSET_DOC_SCHEMA, "parent": proj["_id"], "data": { "ftrackId": ftrack_ent["id"], diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py index 593fc5e596..c4e48b92f0 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py +++ b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py @@ -1,13 +1,11 @@ import re import subprocess +from openpype.client import get_asset_by_id, get_asset_by_name +from openpype.settings import get_project_settings +from openpype.pipeline import Anatomy from openpype_modules.ftrack.lib import BaseEvent from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY -from openpype.pipeline import AvalonMongoDB - -from bson.objectid import ObjectId - -from openpype.api import Anatomy, get_project_settings class UserAssigmentEvent(BaseEvent): @@ -36,8 +34,6 @@ class UserAssigmentEvent(BaseEvent): 3) path to publish files of task user was (de)assigned to """ - db_con = AvalonMongoDB() - def error(self, *err): for e in err: self.log.error(e) @@ -101,26 +97,16 @@ class UserAssigmentEvent(BaseEvent): :rtype: dict """ parent = task['parent'] - self.db_con.install() - self.db_con.Session['AVALON_PROJECT'] = task['project']['full_name'] - + project_name = task["project"]["full_name"] avalon_entity = None parent_id = parent['custom_attributes'].get(CUST_ATTR_ID_KEY) if parent_id: - parent_id = ObjectId(parent_id) - avalon_entity = self.db_con.find_one({ - '_id': parent_id, - 'type': 'asset' - }) + avalon_entity = get_asset_by_id(project_name, parent_id) if not avalon_entity: - avalon_entity = self.db_con.find_one({ - 'type': 'asset', - 'name': parent['name'] - }) + avalon_entity = get_asset_by_name(project_name, parent["name"]) if not avalon_entity: - self.db_con.uninstall() msg = 'Entity "{}" not found in avalon database'.format( parent['name'] ) @@ -129,7 +115,6 @@ class UserAssigmentEvent(BaseEvent): 'success': False, 'message': msg } - self.db_con.uninstall() return avalon_entity def _get_hierarchy(self, asset): @@ -147,7 +132,7 @@ class UserAssigmentEvent(BaseEvent): """ Get data to fill template from task - .. seealso:: :mod:`openpype.api.Anatomy` + .. 
seealso:: :mod:`openpype.pipeline.Anatomy` :param task: Task entity :type task: dict diff --git a/openpype/modules/ftrack/event_handlers_user/action_applications.py b/openpype/modules/ftrack/event_handlers_user/action_applications.py index b25bc1b5cb..102f04c956 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_applications.py +++ b/openpype/modules/ftrack/event_handlers_user/action_applications.py @@ -1,5 +1,6 @@ import os +from openpype.client import get_project from openpype_modules.ftrack.lib import BaseAction from openpype.lib.applications import ( ApplicationManager, @@ -7,7 +8,6 @@ from openpype.lib.applications import ( ApplictionExecutableNotFound, CUSTOM_LAUNCH_APP_GROUPS ) -from openpype.pipeline import AvalonMongoDB class AppplicationsAction(BaseAction): @@ -25,7 +25,6 @@ class AppplicationsAction(BaseAction): super(AppplicationsAction, self).__init__(*args, **kwargs) self.application_manager = ApplicationManager() - self.dbcon = AvalonMongoDB() @property def discover_identifier(self): @@ -110,12 +109,7 @@ class AppplicationsAction(BaseAction): if avalon_project_doc is None: ft_project = self.get_project_from_entity(entity) project_name = ft_project["full_name"] - if not self.dbcon.is_installed(): - self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = project_name - avalon_project_doc = self.dbcon.find_one({ - "type": "project" - }) or False + avalon_project_doc = get_project(project_name) or False event["data"]["avalon_project_doc"] = avalon_project_doc if not avalon_project_doc: diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py index 88dc8213bd..c19cfd1502 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py @@ -18,7 +18,7 @@ from openpype_modules.ftrack.lib import ( tool_definitions_from_app_manager ) -from openpype.api import get_system_settings +from openpype.settings import get_system_settings from openpype.lib import ApplicationManager """ @@ -140,9 +140,9 @@ class CustomAttributes(BaseAction): identifier = 'create.update.attributes' #: Action label. label = "OpenPype Admin" - variant = '- Create/Update Avalon Attributes' + variant = '- Create/Update Custom Attributes' #: Action description. 
- description = 'Creates Avalon/Mongo ID for double check' + description = 'Creates required custom attributes in ftrack' icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") settings_key = "create_update_attributes" diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py index 81f38e0c39..9806f83773 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py @@ -1,7 +1,7 @@ import os import collections import copy -from openpype.api import Anatomy +from openpype.pipeline import Anatomy from openpype_modules.ftrack.lib import BaseAction, statics_icon diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py index ebea8872f9..7c896570b1 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py @@ -1,7 +1,10 @@ import re +from openpype.pipeline.project_folders import ( + get_project_basic_paths, + create_project_folders, +) from openpype_modules.ftrack.lib import BaseAction, statics_icon -from openpype.api import get_project_basic_paths, create_project_folders class CreateProjectFolders(BaseAction): @@ -81,9 +84,14 @@ class CreateProjectFolders(BaseAction): } # Invoking OpenPype API to create the project folders - create_project_folders(basic_paths, project_name) + create_project_folders(project_name, basic_paths) self.create_ftrack_entities(basic_paths, project_entity) + self.trigger_event( + "openpype.project.structure.created", + {"project_name": project_name} + ) + except Exception as exc: self.log.warning("Creating of structure crashed.", exc_info=True) session.rollback() diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py index ee5c3d0d97..03d029b0c1 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py @@ -4,6 +4,7 @@ from datetime import datetime from bson.objectid import ObjectId +from openpype.client import get_assets, get_subsets from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks @@ -91,10 +92,8 @@ class DeleteAssetSubset(BaseAction): continue ftrack_id = entity.get("entityId") - if not ftrack_id: - continue - - ftrack_ids.append(ftrack_id) + if ftrack_id: + ftrack_ids.append(ftrack_id) if project_in_selection: msg = "It is not possible to use this action on project entity." 
@@ -120,48 +119,51 @@ class DeleteAssetSubset(BaseAction): "message": "Invalid selection for this action (Bug)" } - if entities[0].entity_type.lower() == "project": - project = entities[0] - else: - project = entities[0]["project"] - + project = self.get_project_from_entity(entities[0], session) project_name = project["full_name"] self.dbcon.Session["AVALON_PROJECT"] = project_name - selected_av_entities = list(self.dbcon.find({ - "type": "asset", - "data.ftrackId": {"$in": ftrack_ids} - })) + asset_docs = list(get_assets( + project_name, + fields=["_id", "name", "data.ftrackId", "data.parents"] + )) + selected_av_entities = [] + found_ftrack_ids = set() + asset_docs_by_name = collections.defaultdict(list) + for asset_doc in asset_docs: + ftrack_id = asset_doc["data"].get("ftrackId") + if ftrack_id: + found_ftrack_ids.add(ftrack_id) + if ftrack_id in entity_mapping: + selected_av_entities.append(asset_doc) + + asset_name = asset_doc["name"] + asset_docs_by_name[asset_name].append(asset_doc) + found_without_ftrack_id = {} - if len(selected_av_entities) != len(ftrack_ids): - found_ftrack_ids = [ - ent["data"]["ftrackId"] for ent in selected_av_entities - ] - for ftrack_id, entity in entity_mapping.items(): - if ftrack_id in found_ftrack_ids: + for ftrack_id, entity in entity_mapping.items(): + if ftrack_id in found_ftrack_ids: + continue + + av_ents_by_name = asset_docs_by_name[entity["name"]] + if not av_ents_by_name: + continue + + ent_path_items = [ent["name"] for ent in entity["link"]] + end_index = len(ent_path_items) - 1 + parents = ent_path_items[1:end_index:] + # TODO we should say to user that + # few of them are missing in avalon + for av_ent in av_ents_by_name: + if av_ent["data"]["parents"] != parents: continue - av_ents_by_name = list(self.dbcon.find({ - "type": "asset", - "name": entity["name"] - })) - if not av_ents_by_name: - continue - - ent_path_items = [ent["name"] for ent in entity["link"]] - parents = ent_path_items[1:len(ent_path_items)-1:] - # TODO we should say to user that - # few of them are missing in avalon - for av_ent in av_ents_by_name: - if av_ent["data"]["parents"] != parents: - continue - - # TODO we should say to user that found entity - # with same name does not match same ftrack id? - if "ftrackId" not in av_ent["data"]: - selected_av_entities.append(av_ent) - found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id - break + # TODO we should say to user that found entity + # with same name does not match same ftrack id? 
+ if "ftrackId" not in av_ent["data"]: + selected_av_entities.append(av_ent) + found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id + break if not selected_av_entities: return { @@ -206,10 +208,7 @@ class DeleteAssetSubset(BaseAction): items.append(id_item) asset_ids = [ent["_id"] for ent in selected_av_entities] - subsets_for_selection = self.dbcon.find({ - "type": "subset", - "parent": {"$in": asset_ids} - }) + subsets_for_selection = get_subsets(project_name, asset_ids=asset_ids) asset_ending = "" if len(selected_av_entities) > 1: @@ -459,13 +458,9 @@ class DeleteAssetSubset(BaseAction): if len(assets_to_delete) > 0: map_av_ftrack_id = spec_data["without_ftrack_id"] # Prepare data when deleting whole avalon asset - avalon_assets = self.dbcon.find( - {"type": "asset"}, - { - "_id": 1, - "data.visualParent": 1, - "data.ftrackId": 1 - } + avalon_assets = get_assets( + project_name, + fields=["_id", "data.visualParent", "data.ftrackId"] ) avalon_assets_by_parent = collections.defaultdict(list) for asset in avalon_assets: diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py index a0bf6622e9..c543dc8834 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py @@ -5,10 +5,18 @@ import uuid import clique from pymongo import UpdateOne - -from openpype.api import Anatomy -from openpype.lib import StringTemplate, TemplateUnsolved -from openpype.pipeline import AvalonMongoDB +from openpype.client import ( + get_assets, + get_subsets, + get_versions, + get_representations +) +from openpype.lib import ( + StringTemplate, + TemplateUnsolved, + format_file_size, +) +from openpype.pipeline import AvalonMongoDB, Anatomy from openpype_modules.ftrack.lib import BaseAction, statics_icon @@ -130,13 +138,6 @@ class DeleteOldVersions(BaseAction): "title": self.inteface_title } - def sizeof_fmt(self, num, suffix='B'): - for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: - if abs(num) < 1024.0: - return "%3.1f%s%s" % (num, unit, suffix) - num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) - def launch(self, session, entities, event): values = event["data"].get("values") if not values: @@ -198,10 +199,9 @@ class DeleteOldVersions(BaseAction): self.log.debug("Project is set to {}".format(project_name)) # Get Assets from avalon database - assets = list(self.dbcon.find({ - "type": "asset", - "name": {"$in": avalon_asset_names} - })) + assets = list( + get_assets(project_name, asset_names=avalon_asset_names) + ) asset_id_to_name_map = { asset["_id"]: asset["name"] for asset in assets } @@ -210,10 +210,9 @@ class DeleteOldVersions(BaseAction): self.log.debug("Collected assets ({})".format(len(asset_ids))) # Get Subsets - subsets = list(self.dbcon.find({ - "type": "subset", - "parent": {"$in": asset_ids} - })) + subsets = list( + get_subsets(project_name, asset_ids=asset_ids) + ) subsets_by_id = {} subset_ids = [] for subset in subsets: @@ -230,10 +229,9 @@ class DeleteOldVersions(BaseAction): self.log.debug("Collected subsets ({})".format(len(subset_ids))) # Get Versions - versions = list(self.dbcon.find({ - "type": "version", - "parent": {"$in": subset_ids} - })) + versions = list( + get_versions(project_name, subset_ids=subset_ids) + ) versions_by_parent = collections.defaultdict(list) for ent in versions: @@ -295,10 +293,9 @@ class DeleteOldVersions(BaseAction): "message": msg } - 
repres = list(self.dbcon.find({ - "type": "representation", - "parent": {"$in": version_ids} - })) + repres = list( + get_representations(project_name, version_ids=version_ids) + ) self.log.debug( "Collected representations to remove ({})".format(len(repres)) @@ -359,7 +356,7 @@ class DeleteOldVersions(BaseAction): dir_paths, file_paths_by_dir, delete=False ) - msg = "Total size of files: " + self.sizeof_fmt(size) + msg = "Total size of files: {}".format(format_file_size(size)) self.log.warning(msg) @@ -430,7 +427,7 @@ class DeleteOldVersions(BaseAction): "message": msg } - msg = "Total size of files deleted: " + self.sizeof_fmt(size) + msg = "Total size of files deleted: {}".format(format_file_size(size)) self.log.warning(msg) diff --git a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py index 9ef2a1668e..a400c8f5f0 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delivery.py @@ -3,23 +3,30 @@ import copy import json import collections -from bson.objectid import ObjectId - -from openpype.api import Anatomy, config +from openpype.client import ( + get_project, + get_assets, + get_subsets, + get_versions, + get_representations +) from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY -from openpype.lib.delivery import ( - path_from_representation, +from openpype_modules.ftrack.lib.custom_attributes import ( + query_custom_attributes +) +from openpype.lib.dateutils import get_datetime_data +from openpype.pipeline import Anatomy +from openpype.pipeline.load import get_representation_path_with_anatomy +from openpype.pipeline.delivery import ( get_format_dict, check_destination_path, - process_single_file, - process_sequence + deliver_single_file, + deliver_sequence, ) -from openpype.pipeline import AvalonMongoDB class Delivery(BaseAction): - identifier = "delivery.action" label = "Delivery" description = "Deliver data to client" @@ -27,15 +34,10 @@ class Delivery(BaseAction): icon = statics_icon("ftrack", "action_icons", "Delivery.svg") settings_key = "delivery_action" - def __init__(self, *args, **kwargs): - self.db_con = AvalonMongoDB() - - super(Delivery, self).__init__(*args, **kwargs) - def discover(self, session, entities, event): is_valid = False for entity in entities: - if entity.entity_type.lower() == "assetversion": + if entity.entity_type.lower() in ("assetversion", "reviewsession"): is_valid = True break @@ -54,9 +56,7 @@ class Delivery(BaseAction): project_entity = self.get_project_from_entity(entities[0]) project_name = project_entity["full_name"] - self.db_con.install() - self.db_con.Session["AVALON_PROJECT"] = project_name - project_doc = self.db_con.find_one({"type": "project"}) + project_doc = get_project(project_name, fields=["name"]) if not project_doc: return { "success": False, @@ -65,8 +65,7 @@ class Delivery(BaseAction): ).format(project_name) } - repre_names = self._get_repre_names(entities) - self.db_con.uninstall() + repre_names = self._get_repre_names(project_name, session, entities) items.append({ "type": "hidden", @@ -195,51 +194,122 @@ class Delivery(BaseAction): "title": title } - def _get_repre_names(self, entities): - version_ids = self._get_interest_version_ids(entities) - repre_docs = self.db_con.find({ - "type": "representation", - "parent": {"$in": version_ids} - }) - return list(sorted(repre_docs.distinct("name"))) + 
def _get_repre_names(self, project_name, session, entities): + version_ids = self._get_interest_version_ids( + project_name, session, entities + ) + if not version_ids: + return [] + repre_docs = get_representations( + project_name, + version_ids=version_ids, + fields=["name"] + ) + repre_names = {repre_doc["name"] for repre_doc in repre_docs} + return list(sorted(repre_names)) - def _get_interest_version_ids(self, entities): - parent_ent_by_id = {} + def _get_interest_version_ids(self, project_name, session, entities): + # Extract AssetVersion entities + asset_versions = self._extract_asset_versions(session, entities) + # Prepare Asset ids + asset_ids = { + asset_version["asset_id"] + for asset_version in asset_versions + } + # Query Asset entities + assets = session.query(( + "select id, name, context_id from Asset where id in ({})" + ).format(self.join_query_keys(asset_ids))).all() + assets_by_id = { + asset["id"]: asset + for asset in assets + } + parent_ids = set() subset_names = set() version_nums = set() - for entity in entities: - asset = entity["asset"] - parent = asset["parent"] - parent_ent_by_id[parent["id"]] = parent + for asset_version in asset_versions: + asset_id = asset_version["asset_id"] + asset = assets_by_id[asset_id] - subset_name = asset["name"] - subset_names.add(subset_name) + parent_ids.add(asset["context_id"]) + subset_names.add(asset["name"]) + version_nums.add(asset_version["version"]) - version = entity["version"] - version_nums.add(version) - - asset_docs_by_ftrack_id = self._get_asset_docs(parent_ent_by_id) + asset_docs_by_ftrack_id = self._get_asset_docs( + project_name, session, parent_ids + ) subset_docs = self._get_subset_docs( - asset_docs_by_ftrack_id, subset_names, entities + project_name, + asset_docs_by_ftrack_id, + subset_names, + asset_versions, + assets_by_id ) version_docs = self._get_version_docs( - asset_docs_by_ftrack_id, subset_docs, version_nums, entities + project_name, + asset_docs_by_ftrack_id, + subset_docs, + version_nums, + asset_versions, + assets_by_id ) return [version_doc["_id"] for version_doc in version_docs] + def _extract_asset_versions(self, session, entities): + asset_version_ids = set() + review_session_ids = set() + for entity in entities: + entity_type_low = entity.entity_type.lower() + if entity_type_low == "assetversion": + asset_version_ids.add(entity["id"]) + elif entity_type_low == "reviewsession": + review_session_ids.add(entity["id"]) + + for version_id in self._get_asset_version_ids_from_review_sessions( + session, review_session_ids + ): + asset_version_ids.add(version_id) + + asset_versions = session.query(( + "select id, version, asset_id from AssetVersion where id in ({})" + ).format(self.join_query_keys(asset_version_ids))).all() + + return asset_versions + + def _get_asset_version_ids_from_review_sessions( + self, session, review_session_ids + ): + if not review_session_ids: + return set() + review_session_objects = session.query(( + "select version_id from ReviewSessionObject" + " where review_session_id in ({})" + ).format(self.join_query_keys(review_session_ids))).all() + + return { + review_session_object["version_id"] + for review_session_object in review_session_objects + } + def _get_version_docs( - self, asset_docs_by_ftrack_id, subset_docs, version_nums, entities + self, + project_name, + asset_docs_by_ftrack_id, + subset_docs, + version_nums, + asset_versions, + assets_by_id ): subset_docs_by_id = { subset_doc["_id"]: subset_doc for subset_doc in subset_docs } - version_docs = 
list(self.db_con.find({ - "type": "version", - "parent": {"$in": list(subset_docs_by_id.keys())}, - "name": {"$in": list(version_nums)} - })) + version_docs = list(get_versions( + project_name, + subset_ids=subset_docs_by_id.keys(), + versions=version_nums + )) version_docs_by_parent_id = collections.defaultdict(dict) for version_doc in version_docs: subset_doc = subset_docs_by_id[version_doc["parent"]] @@ -255,11 +325,13 @@ class Delivery(BaseAction): ) filtered_versions = [] - for entity in entities: - asset = entity["asset"] - - parent = asset["parent"] - asset_doc = asset_docs_by_ftrack_id[parent["id"]] + for asset_version in asset_versions: + asset_id = asset_version["asset_id"] + asset = assets_by_id[asset_id] + parent_id = asset["context_id"] + asset_doc = asset_docs_by_ftrack_id.get(parent_id) + if not asset_doc: + continue subsets_by_name = version_docs_by_parent_id.get(asset_doc["_id"]) if not subsets_by_name: @@ -270,24 +342,29 @@ class Delivery(BaseAction): if not version_docs_by_version: continue - version = entity["version"] + version = asset_version["version"] version_doc = version_docs_by_version.get(version) if version_doc: filtered_versions.append(version_doc) return filtered_versions def _get_subset_docs( - self, asset_docs_by_ftrack_id, subset_names, entities + self, + project_name, + asset_docs_by_ftrack_id, + subset_names, + asset_versions, + assets_by_id ): - asset_doc_ids = list() - for asset_doc in asset_docs_by_ftrack_id.values(): - asset_doc_ids.append(asset_doc["_id"]) - - subset_docs = list(self.db_con.find({ - "type": "subset", - "parent": {"$in": asset_doc_ids}, - "name": {"$in": list(subset_names)} - })) + asset_doc_ids = [ + asset_doc["_id"] + for asset_doc in asset_docs_by_ftrack_id.values() + ] + subset_docs = list(get_subsets( + project_name, + asset_ids=asset_doc_ids, + subset_names=subset_names + )) subset_docs_by_parent_id = collections.defaultdict(dict) for subset_doc in subset_docs: asset_id = subset_doc["parent"] @@ -295,11 +372,14 @@ class Delivery(BaseAction): subset_docs_by_parent_id[asset_id][subset_name] = subset_doc filtered_subsets = [] - for entity in entities: - asset = entity["asset"] + for asset_version in asset_versions: + asset_id = asset_version["asset_id"] + asset = assets_by_id[asset_id] - parent = asset["parent"] - asset_doc = asset_docs_by_ftrack_id[parent["id"]] + parent_id = asset["context_id"] + asset_doc = asset_docs_by_ftrack_id.get(parent_id) + if not asset_doc: + continue subsets_by_name = subset_docs_by_parent_id.get(asset_doc["_id"]) if not subsets_by_name: @@ -311,58 +391,58 @@ class Delivery(BaseAction): filtered_subsets.append(subset_doc) return filtered_subsets - def _get_asset_docs(self, parent_ent_by_id): - asset_docs = list(self.db_con.find({ - "type": "asset", - "data.ftrackId": {"$in": list(parent_ent_by_id.keys())} - })) - asset_docs_by_ftrack_id = { - asset_doc["data"]["ftrackId"]: asset_doc - for asset_doc in asset_docs - } + def _get_asset_docs(self, project_name, session, parent_ids): + asset_docs = list(get_assets( + project_name, fields=["_id", "name", "data.ftrackId"] + )) - entities_by_mongo_id = {} - entities_by_names = {} - for ftrack_id, entity in parent_ent_by_id.items(): - if ftrack_id not in asset_docs_by_ftrack_id: - parent_mongo_id = entity["custom_attributes"].get( - CUST_ATTR_ID_KEY - ) - if parent_mongo_id: - entities_by_mongo_id[ObjectId(parent_mongo_id)] = entity - else: - entities_by_names[entity["name"]] = entity + asset_docs_by_id = {} + asset_docs_by_name = {} + asset_docs_by_ftrack_id = 
{} + for asset_doc in asset_docs: + asset_id = str(asset_doc["_id"]) + asset_name = asset_doc["name"] + ftrack_id = asset_doc["data"].get("ftrackId") - expressions = [] - if entities_by_mongo_id: - expression = { - "type": "asset", - "_id": {"$in": list(entities_by_mongo_id.keys())} + asset_docs_by_id[asset_id] = asset_doc + asset_docs_by_name[asset_name] = asset_doc + if ftrack_id: + asset_docs_by_ftrack_id[ftrack_id] = asset_doc + + attr_def = session.query(( + "select id from CustomAttributeConfiguration where key is \"{}\"" + ).format(CUST_ATTR_ID_KEY)).first() + if attr_def is None: + return asset_docs_by_ftrack_id + + avalon_mongo_id_values = query_custom_attributes( + session, [attr_def["id"]], parent_ids, True + ) + missing_ids = set(parent_ids) + for item in avalon_mongo_id_values: + if not item["value"]: + continue + asset_id = item["value"] + entity_id = item["entity_id"] + asset_doc = asset_docs_by_id.get(asset_id) + if asset_doc: + asset_docs_by_ftrack_id[entity_id] = asset_doc + missing_ids.remove(entity_id) + + entity_ids_by_name = {} + if missing_ids: + not_found_entities = session.query(( + "select id, name from TypedContext where id in ({})" + ).format(self.join_query_keys(missing_ids))).all() + entity_ids_by_name = { + entity["name"]: entity["id"] + for entity in not_found_entities } - expressions.append(expression) - if entities_by_names: - expression = { - "type": "asset", - "name": {"$in": list(entities_by_names.keys())} - } - expressions.append(expression) - - if expressions: - if len(expressions) == 1: - filter = expressions[0] - else: - filter = {"$or": expressions} - - asset_docs = self.db_con.find(filter) - for asset_doc in asset_docs: - if asset_doc["_id"] in entities_by_mongo_id: - entity = entities_by_mongo_id[asset_doc["_id"]] - asset_docs_by_ftrack_id[entity["id"]] = asset_doc - - elif asset_doc["name"] in entities_by_names: - entity = entities_by_names[asset_doc["name"]] - asset_docs_by_ftrack_id[entity["id"]] = asset_doc + for asset_name, entity_id in entity_ids_by_name.items(): + asset_doc = asset_docs_by_name.get(asset_name) + if asset_doc: + asset_docs_by_ftrack_id[entity_id] = asset_doc return asset_docs_by_ftrack_id @@ -396,7 +476,6 @@ class Delivery(BaseAction): session.commit() try: - self.db_con.install() report = self.real_launch(session, entities, event) except Exception as exc: @@ -422,7 +501,6 @@ class Delivery(BaseAction): else: job["status"] = "failed" session.commit() - self.db_con.uninstall() if not report["success"]: self.show_interface( @@ -464,21 +542,20 @@ class Delivery(BaseAction): if not os.path.exists(location_path): os.makedirs(location_path) - self.db_con.Session["AVALON_PROJECT"] = project_name - self.log.debug("Collecting representations to process.") - version_ids = self._get_interest_version_ids(entities) - repres_to_deliver = list(self.db_con.find({ - "type": "representation", - "parent": {"$in": version_ids}, - "name": {"$in": repre_names} - })) - + version_ids = self._get_interest_version_ids( + project_name, session, entities + ) + repres_to_deliver = list(get_representations( + project_name, + representation_names=repre_names, + version_ids=version_ids + )) anatomy = Anatomy(project_name) format_dict = get_format_dict(anatomy, location_path) - datetime_data = config.get_datetime_data() + datetime_data = get_datetime_data() for repre in repres_to_deliver: source_path = repre.get("data", {}).get("path") debug_msg = "Processing representation {}".format(repre["_id"]) @@ -503,7 +580,7 @@ class Delivery(BaseAction): if frame: 
repre["context"]["frame"] = len(str(frame)) * "#" - repre_path = path_from_representation(repre, anatomy) + repre_path = get_representation_path_with_anatomy(repre, anatomy) # TODO add backup solution where root of path from component # is replaced with root args = ( @@ -517,9 +594,9 @@ class Delivery(BaseAction): self.log ) if not frame: - process_single_file(*args) + deliver_single_file(*args) else: - process_sequence(*args) + deliver_sequence(*args) return self.report(report_items) diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py index c7237a1150..fb1cdf340e 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -7,14 +7,15 @@ import datetime import ftrack_api -from openpype.api import get_project_settings -from openpype.lib import ( - get_workfile_template_key, - get_workdir_data, - Anatomy, - StringTemplate, +from openpype.client import ( + get_project, + get_assets, ) -from openpype.pipeline import AvalonMongoDB +from openpype.settings import get_project_settings, get_system_settings +from openpype.lib import StringTemplate +from openpype.pipeline import Anatomy +from openpype.pipeline.template_data import get_template_data +from openpype.pipeline.workfile import get_workfile_template_key from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks @@ -248,10 +249,8 @@ class FillWorkfileAttributeAction(BaseAction): # Find matchin asset documents and map them by ftrack task entities # - result stored to 'asset_docs_with_task_entities' is list with # tuple `(asset document, [task entitis, ...])` - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name # Quety all asset documents - asset_docs = list(dbcon.find({"type": "asset"})) + asset_docs = list(get_assets(project_name)) job_entity["data"] = json.dumps({ "description": "(1/3) Asset documents queried." 
}) @@ -276,16 +275,21 @@ class FillWorkfileAttributeAction(BaseAction): # Keep placeholders in the template unfilled host_name = "{app}" extension = "{ext}" - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) project_settings = get_project_settings(project_name) + system_settings = get_system_settings() anatomy = Anatomy(project_name) templates_by_key = {} operations = [] for asset_doc, task_entities in asset_docs_with_task_entities: for task_entity in task_entities: - workfile_data = get_workdir_data( - project_doc, asset_doc, task_entity["name"], host_name + workfile_data = get_template_data( + project_doc, + asset_doc, + task_entity["name"], + host_name, + system_settings ) # Use version 1 for each workfile workfile_data["version"] = 1 @@ -293,7 +297,10 @@ class FillWorkfileAttributeAction(BaseAction): task_type = workfile_data["task"]["type"] template_key = get_workfile_template_key( - task_type, host_name, project_settings=project_settings + task_type, + host_name, + project_name, + project_settings=project_settings ) if template_key in templates_by_key: template = templates_by_key[template_key] diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py index 0b14e7aa2b..e825198180 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py @@ -1,9 +1,8 @@ import json +import copy -from openpype.api import ProjectSettings -from openpype.lib import create_project -from openpype.pipeline import AvalonMongoDB -from openpype.settings import SaveWarningExc +from openpype.client import get_project, create_project +from openpype.settings import ProjectSettings, SaveWarningExc from openpype_modules.ftrack.lib import ( BaseAction, @@ -389,12 +388,8 @@ class PrepareProjectLocal(BaseAction): project_name = project_entity["full_name"] # Try to find project document - dbcon = AvalonMongoDB() - dbcon.install() - dbcon.Session["AVALON_PROJECT"] = project_name - project_doc = dbcon.find_one({ - "type": "project" - }) + project_doc = get_project(project_name) + # Create project if is not available # - creation is required to be able set project anatomy and attributes if not project_doc: @@ -402,9 +397,11 @@ class PrepareProjectLocal(BaseAction): self.log.info("Creating project \"{} [{}]\"".format( project_name, project_code )) - create_project(project_name, project_code, dbcon=dbcon) - - dbcon.uninstall() + create_project(project_name, project_code) + self.trigger_event( + "openpype.project.created", + {"project_name": project_name} + ) project_settings = ProjectSettings(project_name) project_anatomy_settings = project_settings["project_anatomy"] @@ -439,6 +436,10 @@ class PrepareProjectLocal(BaseAction): self.process_identifier() ) self.trigger_action(trigger_identifier, event) + + event_data = copy.deepcopy(in_data) + event_data["project_name"] = project_name + self.trigger_event("openpype.project.prepared", event_data) return True diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py index 040ca75582..d05f0c47f6 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_rv.py +++ b/openpype/modules/ftrack/event_handlers_user/action_rv.py @@ -5,9 +5,16 @@ import json import ftrack_api +from openpype.client import ( + get_asset_by_name, + get_subset_by_name, + get_version_by_name, + 
get_representation_by_name +) from openpype.pipeline import ( get_representation_path, - legacy_io, + AvalonMongoDB, + Anatomy, ) from openpype_modules.ftrack.lib import BaseAction, statics_icon @@ -255,9 +262,10 @@ class RVAction(BaseAction): "Component", list(event["data"]["values"].values())[0] )["version"]["asset"]["parent"]["link"][0] project = session.get(link["type"], link["id"]) - os.environ["AVALON_PROJECT"] = project["name"] - legacy_io.Session["AVALON_PROJECT"] = project["name"] - legacy_io.install() + project_name = project["full_name"] + dbcon = AvalonMongoDB() + dbcon.Session["AVALON_PROJECT"] = project_name + anatomy = Anatomy(project_name) location = ftrack_api.Session().pick_location() @@ -281,37 +289,38 @@ class RVAction(BaseAction): if online_source: continue - asset = legacy_io.find_one({"type": "asset", "name": parent_name}) - subset = legacy_io.find_one( - { - "type": "subset", - "name": component["version"]["asset"]["name"], - "parent": asset["_id"] - } + subset_name = component["version"]["asset"]["name"] + version_name = component["version"]["version"] + representation_name = component["file_type"][1:] + + asset_doc = get_asset_by_name( + project_name, parent_name, fields=["_id"] ) - version = legacy_io.find_one( - { - "type": "version", - "name": component["version"]["version"], - "parent": subset["_id"] - } + subset_doc = get_subset_by_name( + project_name, + subset_name=subset_name, + asset_id=asset_doc["_id"] ) - representation = legacy_io.find_one( - { - "type": "representation", - "parent": version["_id"], - "name": component["file_type"][1:] - } + version_doc = get_version_by_name( + project_name, + version=version_name, + subset_id=subset_doc["_id"] ) - if representation is None: - representation = legacy_io.find_one( - { - "type": "representation", - "parent": version["_id"], - "name": "preview" - } + repre_doc = get_representation_by_name( + project_name, + version_id=version_doc["_id"], + representation_name=representation_name + ) + if not repre_doc: + repre_doc = get_representation_by_name( + project_name, + version_id=version_doc["_id"], + representation_name="preview" ) - paths.append(get_representation_path(representation)) + + paths.append(get_representation_path( + repre_doc, root=anatomy.roots, dbcon=dbcon + )) return paths diff --git a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py index 62fdfa2bdd..8748f426bd 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py @@ -5,9 +5,16 @@ import requests from bson.objectid import ObjectId +from openpype.client import ( + get_project, + get_asset_by_id, + get_assets, + get_subset_by_name, + get_version_by_name, + get_representations +) from openpype_modules.ftrack.lib import BaseAction, statics_icon -from openpype.api import Anatomy -from openpype.pipeline import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB, Anatomy from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY @@ -385,7 +392,7 @@ class StoreThumbnailsToAvalon(BaseAction): db_con.Session["AVALON_PROJECT"] = project_name - avalon_project = db_con.find_one({"type": "project"}) + avalon_project = get_project(project_name) output["project"] = avalon_project if not avalon_project: @@ -399,19 +406,17 @@ class StoreThumbnailsToAvalon(BaseAction): asset_mongo_id = 
parent["custom_attributes"].get(CUST_ATTR_ID_KEY) if asset_mongo_id: try: - asset_mongo_id = ObjectId(asset_mongo_id) - asset_ent = db_con.find_one({ - "type": "asset", - "_id": asset_mongo_id - }) + asset_ent = get_asset_by_id(project_name, asset_mongo_id) except Exception: pass if not asset_ent: - asset_ent = db_con.find_one({ - "type": "asset", - "data.ftrackId": parent["id"] - }) + asset_docs = get_assets(project_name, asset_names=[parent["name"]]) + for asset_doc in asset_docs: + ftrack_id = asset_doc.get("data", {}).get("ftrackId") + if ftrack_id == parent["id"]: + asset_ent = asset_doc + break output["asset"] = asset_ent @@ -422,13 +427,11 @@ class StoreThumbnailsToAvalon(BaseAction): ) return output - asset_mongo_id = asset_ent["_id"] - - subset_ent = db_con.find_one({ - "type": "subset", - "parent": asset_mongo_id, - "name": subset_name - }) + subset_ent = get_subset_by_name( + project_name, + subset_name=subset_name, + asset_id=asset_ent["_id"] + ) output["subset"] = subset_ent @@ -439,11 +442,11 @@ class StoreThumbnailsToAvalon(BaseAction): ).format(subset_name, ent_path) return output - version_ent = db_con.find_one({ - "type": "version", - "name": version, - "parent": subset_ent["_id"] - }) + version_ent = get_version_by_name( + project_name, + version, + subset_ent["_id"] + ) output["version"] = version_ent @@ -454,10 +457,10 @@ class StoreThumbnailsToAvalon(BaseAction): ).format(version, subset_name, ent_path) return output - repre_ents = list(db_con.find({ - "type": "representation", - "parent": version_ent["_id"] - })) + repre_ents = list(get_representations( + project_name, + version_ids=[version_ent["_id"]] + )) output["representations"] = repre_ents return output diff --git a/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py index cd2f371f38..e52a061471 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py @@ -1,7 +1,8 @@ import time import sys import json -import traceback + +import ftrack_api from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory @@ -184,6 +185,13 @@ class SyncToAvalonLocal(BaseAction): "* Total time: {}".format(time_7 - time_start) ) + if self.entities_factory.project_created: + event = ftrack_api.event.base.Event( + topic="openpype.project.created", + data={"project_name": project_name} + ) + self.session.event_hub.publish(event) + report = self.entities_factory.report() if report and report.get("items"): default_title = "Synchronization report ({}):".format( diff --git a/openpype/modules/ftrack/ftrack_module.py b/openpype/modules/ftrack/ftrack_module.py index 5c38df2e03..6f14f8428d 100644 --- a/openpype/modules/ftrack/ftrack_module.py +++ b/openpype/modules/ftrack/ftrack_module.py @@ -5,23 +5,23 @@ import platform import click -from openpype.modules import OpenPypeModule -from openpype_interfaces import ( +from openpype.modules import ( + OpenPypeModule, ITrayModule, IPluginPaths, - ILaunchHookPaths, ISettingsChangeListener ) from openpype.settings import SaveWarningExc +from openpype.lib import Logger FTRACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) +_URL_NOT_SET = object() class FtrackModule( OpenPypeModule, ITrayModule, IPluginPaths, - ILaunchHookPaths, ISettingsChangeListener ): name = "ftrack" @@ -30,17 +30,8 @@ class FtrackModule( ftrack_settings = 
settings[self.name] self.enabled = ftrack_settings["enabled"] - # Add http schema - ftrack_url = ftrack_settings["ftrack_server"].strip("/ ") - if ftrack_url: - if "http" not in ftrack_url: - ftrack_url = "https://" + ftrack_url - - # Check if "ftrack.app" is part os url - if "ftrackapp.com" not in ftrack_url: - ftrack_url = ftrack_url + ".ftrackapp.com" - - self.ftrack_url = ftrack_url + self._settings_ftrack_url = ftrack_settings["ftrack_server"] + self._ftrack_url = _URL_NOT_SET current_dir = os.path.dirname(os.path.abspath(__file__)) low_platform = platform.system().lower() @@ -72,6 +63,16 @@ class FtrackModule( self.timers_manager_connector = None self._timers_manager_module = None + def get_ftrack_url(self): + if self._ftrack_url is _URL_NOT_SET: + self._ftrack_url = resolve_ftrack_url( + self._settings_ftrack_url, + logger=self.log + ) + return self._ftrack_url + + ftrack_url = property(get_ftrack_url) + def get_global_environments(self): """Ftrack's global environments.""" return { @@ -85,9 +86,44 @@ class FtrackModule( } def get_launch_hook_paths(self): - """Implementation of `ILaunchHookPaths`.""" + """Implementation for applications launch hooks.""" + return os.path.join(FTRACK_MODULE_DIR, "launch_hooks") + def modify_application_launch_arguments(self, application, env): + if not application.use_python_2: + return + + self.log.info("Adding Ftrack Python 2 packages to PYTHONPATH.") + + # Prepare vendor dir path + python_2_vendor = os.path.join(FTRACK_MODULE_DIR, "python2_vendor") + + # Add Python 2 modules + python_paths = [ + # `python-ftrack-api` + os.path.join(python_2_vendor, "ftrack-python-api", "source"), + # `arrow` + os.path.join(python_2_vendor, "arrow"), + # `builtins` from `python-future` + # - `python-future` is strict Python 2 module that cause crashes + # of Python 3 scripts executed through OpenPype + # (burnin script etc.) 
+ os.path.join(python_2_vendor, "builtins"), + # `backports.functools_lru_cache` + os.path.join( + python_2_vendor, "backports.functools_lru_cache" + ) + ] + + # Load PYTHONPATH from current launch context + python_path = env.get("PYTHONPATH") + if python_path: + python_paths.append(python_path) + + # Set new PYTHONPATH to launch context environments + env["PYTHONPATH"] = os.pathsep.join(python_paths) + def connect_with_modules(self, enabled_modules): for module in enabled_modules: if not hasattr(module, "get_ftrack_event_handler_paths"): @@ -159,7 +195,7 @@ class FtrackModule( app_definitions_from_app_manager, tool_definitions_from_app_manager ) - from openpype.api import ApplicationManager + from openpype.lib import ApplicationManager query_keys = [ "id", "key", @@ -446,6 +482,51 @@ class FtrackModule( click_group.add_command(cli_main) +def _check_ftrack_url(url): + import requests + + try: + result = requests.get(url, allow_redirects=False) + except requests.exceptions.RequestException: + return False + + if (result.status_code != 200 or "FTRACK_VERSION" not in result.headers): + return False + return True + + +def resolve_ftrack_url(url, logger=None): + """Checks if Ftrack server is responding.""" + + if logger is None: + logger = Logger.get_logger(__name__) + + url = url.strip("/ ") + if not url: + logger.error("Ftrack URL is not set!") + return None + + if not url.startswith("http"): + url = "https://" + url + + ftrack_url = None + if not url.endswith("ftrackapp.com"): + ftrackapp_url = url + ".ftrackapp.com" + if _check_ftrack_url(ftrackapp_url): + ftrack_url = ftrackapp_url + + if not ftrack_url and _check_ftrack_url(url): + ftrack_url = url + + if ftrack_url: + logger.debug("Ftrack server \"{}\" is accessible.".format(ftrack_url)) + + else: + logger.error("Ftrack server \"{}\" is not accessible!".format(url)) + + return ftrack_url + + @click.group(FtrackModule.name, help="Ftrack module related commands.") def cli_main(): pass diff --git a/openpype/modules/ftrack/ftrack_server/__init__.py b/openpype/modules/ftrack/ftrack_server/__init__.py index 9e3920b500..8e5f7c4c51 100644 --- a/openpype/modules/ftrack/ftrack_server/__init__.py +++ b/openpype/modules/ftrack/ftrack_server/__init__.py @@ -1,8 +1,6 @@ from .ftrack_server import FtrackServer -from .lib import check_ftrack_url __all__ = ( "FtrackServer", - "check_ftrack_url" ) diff --git a/openpype/modules/ftrack/ftrack_server/event_server_cli.py b/openpype/modules/ftrack/ftrack_server/event_server_cli.py index 90ce757242..20c5ab24a8 100644 --- a/openpype/modules/ftrack/ftrack_server/event_server_cli.py +++ b/openpype/modules/ftrack/ftrack_server/event_server_cli.py @@ -1,11 +1,9 @@ import os -import sys import signal import datetime import subprocess import socket import json -import platform import getpass import atexit import time @@ -13,16 +11,20 @@ import uuid import ftrack_api import pymongo +from openpype.client.mongo import ( + OpenPypeMongoConnection, + validate_mongo_connection, +) from openpype.lib import ( get_openpype_execute_args, - OpenPypeMongoConnection, get_openpype_version, get_build_version, - validate_mongo_connection ) -from openpype_modules.ftrack import FTRACK_MODULE_DIR +from openpype_modules.ftrack import ( + FTRACK_MODULE_DIR, + resolve_ftrack_url, +) from openpype_modules.ftrack.lib import credentials -from openpype_modules.ftrack.ftrack_server.lib import check_ftrack_url from openpype_modules.ftrack.ftrack_server import socket_thread @@ -114,7 +116,7 @@ def legacy_server(ftrack_url): while True: if not 
ftrack_accessible: - ftrack_accessible = check_ftrack_url(ftrack_url) + ftrack_accessible = resolve_ftrack_url(ftrack_url) # Run threads only if Ftrack is accessible if not ftrack_accessible and not printed_ftrack_error: @@ -257,7 +259,7 @@ def main_loop(ftrack_url): while True: # Check if accessible Ftrack and Mongo url if not ftrack_accessible: - ftrack_accessible = check_ftrack_url(ftrack_url) + ftrack_accessible = resolve_ftrack_url(ftrack_url) if not mongo_accessible: mongo_accessible = check_mongo_url(mongo_uri) @@ -441,7 +443,7 @@ def run_event_server( os.environ["CLOCKIFY_API_KEY"] = clockify_api_key # Check url regex and accessibility - ftrack_url = check_ftrack_url(ftrack_url) + ftrack_url = resolve_ftrack_url(ftrack_url) if not ftrack_url: print('Exiting! < Please enter Ftrack server url >') return 1 diff --git a/openpype/modules/ftrack/ftrack_server/ftrack_server.py b/openpype/modules/ftrack/ftrack_server/ftrack_server.py index 8944591b71..c75b8f7172 100644 --- a/openpype/modules/ftrack/ftrack_server/ftrack_server.py +++ b/openpype/modules/ftrack/ftrack_server/ftrack_server.py @@ -7,12 +7,10 @@ import traceback import ftrack_api from openpype.lib import ( - PypeLogger, + Logger, modules_from_path ) -log = PypeLogger.get_logger(__name__) - """ # Required - Needed for connection to Ftrack FTRACK_SERVER # Ftrack server e.g. "https://myFtrack.ftrackapp.com" @@ -43,10 +41,13 @@ class FtrackServer: server.run_server() .. """ + # set Ftrack logging to Warning only - OPTIONAL ftrack_log = logging.getLogger("ftrack_api") ftrack_log.setLevel(logging.WARNING) + self.log = Logger.get_logger(__name__) + self.stopped = True self.is_running = False @@ -72,7 +73,7 @@ class FtrackServer: # Get all modules with functions modules, crashed = modules_from_path(path) for filepath, exc_info in crashed: - log.warning("Filepath load crashed {}.\n{}".format( + self.log.warning("Filepath load crashed {}.\n{}".format( filepath, traceback.format_exception(*exc_info) )) @@ -87,7 +88,7 @@ class FtrackServer: break if not register_function: - log.warning( + self.log.warning( "\"{}\" - Missing register method".format(filepath) ) continue @@ -97,7 +98,7 @@ class FtrackServer: ) if not register_functions: - log.warning(( + self.log.warning(( "There are no events with `register` function" " in registered paths: \"{}\"" ).format("| ".join(paths))) @@ -106,7 +107,7 @@ class FtrackServer: try: register_func(self.session) except Exception: - log.warning( + self.log.warning( "\"{}\" - register was not successful".format(filepath), exc_info=True ) @@ -141,7 +142,7 @@ class FtrackServer: self.session = session if load_files: if not self.handler_paths: - log.warning(( + self.log.warning(( "Paths to event handlers are not set." " Ftrack server won't launch." )) @@ -151,8 +152,8 @@ class FtrackServer: self.set_files(self.handler_paths) msg = "Registration of event handlers has finished!" 
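A minimal usage sketch of the resolve_ftrack_url helper that replaces check_ftrack_url in the hunks above, assuming the import path that event_server_cli.py uses in this PR:

    from openpype_modules.ftrack import resolve_ftrack_url

    # "studio" is normalized to "https://studio"; the "https://studio.ftrackapp.com"
    # variant is probed first, then the plain URL. None is returned (and an error
    # logged) when neither host answers with 200 and an FTRACK_VERSION header.
    url = resolve_ftrack_url("studio")

    # A URL already ending in "ftrackapp.com" is probed directly.
    url = resolve_ftrack_url("https://studio.ftrackapp.com")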
- log.info(len(msg) * "*") - log.info(msg) + self.log.info(len(msg) * "*") + self.log.info(msg) # keep event_hub on session running self.session.event_hub.wait() diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py index 5c6d6352d2..c8143f739c 100644 --- a/openpype/modules/ftrack/ftrack_server/lib.py +++ b/openpype/modules/ftrack/ftrack_server/lib.py @@ -7,6 +7,7 @@ import threading import datetime import time import queue +import collections import appdirs import pymongo @@ -24,46 +25,13 @@ except ImportError: from ftrack_api._weakref import WeakMethod from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info -from openpype.lib import OpenPypeMongoConnection -from openpype.api import Logger +from openpype.client import OpenPypeMongoConnection +from openpype.lib import Logger TOPIC_STATUS_SERVER = "openpype.event.server.status" TOPIC_STATUS_SERVER_RESULT = "openpype.event.server.status.result" -def check_ftrack_url(url, log_errors=True, logger=None): - """Checks if Ftrack server is responding""" - if logger is None: - logger = Logger.get_logger(__name__) - - if not url: - logger.error("Ftrack URL is not set!") - return None - - url = url.strip('/ ') - - if 'http' not in url: - if url.endswith('ftrackapp.com'): - url = 'https://' + url - else: - url = 'https://{0}.ftrackapp.com'.format(url) - try: - result = requests.get(url, allow_redirects=False) - except requests.exceptions.RequestException: - if log_errors: - logger.error("Entered Ftrack URL is not accesible!") - return False - - if (result.status_code != 200 or 'FTRACK_VERSION' not in result.headers): - if log_errors: - logger.error("Entered Ftrack URL is not accesible!") - return False - - logger.debug("Ftrack server {} is accessible.".format(url)) - - return url - - class SocketBaseEventHub(ftrack_api.event.hub.EventHub): hearbeat_msg = b"hearbeat" @@ -309,7 +277,20 @@ class CustomEventHubSession(ftrack_api.session.Session): # Currently pending operations. self.recorded_operations = ftrack_api.operation.Operations() - self.record_operations = True + + # OpenPype change - In new API are operations properties + new_api = hasattr(self.__class__, "record_operations") + + if new_api: + self._record_operations = collections.defaultdict( + lambda: True + ) + self._auto_populate = collections.defaultdict( + lambda: auto_populate + ) + else: + self.record_operations = True + self.auto_populate = auto_populate self.cache_key_maker = cache_key_maker if self.cache_key_maker is None: @@ -328,6 +309,9 @@ class CustomEventHubSession(ftrack_api.session.Session): if cache is not None: self.cache.caches.append(cache) + if new_api: + self.merge_lock = threading.RLock() + self._managed_request = None self._request = requests.Session() self._request.auth = ftrack_api.session.SessionAuthentication( @@ -335,8 +319,6 @@ class CustomEventHubSession(ftrack_api.session.Session): ) self.request_timeout = timeout - self.auto_populate = auto_populate - # Fetch server information and in doing so also check credentials. 
self._server_information = self._fetch_server_information() diff --git a/openpype/modules/ftrack/ftrack_server/socket_thread.py b/openpype/modules/ftrack/ftrack_server/socket_thread.py index f49ca5557e..3ef55f8daa 100644 --- a/openpype/modules/ftrack/ftrack_server/socket_thread.py +++ b/openpype/modules/ftrack/ftrack_server/socket_thread.py @@ -5,8 +5,8 @@ import socket import threading import traceback import subprocess -from openpype.api import Logger -from openpype.lib import get_openpype_execute_args + +from openpype.lib import get_openpype_execute_args, Logger class SocketThread(threading.Thread): @@ -16,7 +16,7 @@ class SocketThread(threading.Thread): def __init__(self, name, port, filepath, additional_args=[]): super(SocketThread, self).__init__() - self.log = Logger().get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) self.setName(name) self.name = name self.port = port diff --git a/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py b/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py index d5a95fad91..86ecffd5b8 100644 --- a/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py +++ b/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py @@ -1,7 +1,7 @@ import os import ftrack_api -from openpype.api import get_project_settings +from openpype.settings import get_project_settings from openpype.lib import PostLaunchHook diff --git a/openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py b/openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py deleted file mode 100644 index 0dd894bebf..0000000000 --- a/openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -from openpype.lib import PreLaunchHook -from openpype_modules.ftrack import FTRACK_MODULE_DIR - - -class PrePython2Support(PreLaunchHook): - """Add python ftrack api module for Python 2 to PYTHONPATH. - - Path to vendor modules is added to the beggining of PYTHONPATH. - """ - - def execute(self): - if not self.application.use_python_2: - return - - self.log.info("Adding Ftrack Python 2 packages to PYTHONPATH.") - - # Prepare vendor dir path - python_2_vendor = os.path.join(FTRACK_MODULE_DIR, "python2_vendor") - - # Add Python 2 modules - python_paths = [ - # `python-ftrack-api` - os.path.join(python_2_vendor, "ftrack-python-api", "source"), - # `arrow` - os.path.join(python_2_vendor, "arrow"), - # `builtins` from `python-future` - # - `python-future` is strict Python 2 module that cause crashes - # of Python 3 scripts executed through OpenPype (burnin script etc.) 
- os.path.join(python_2_vendor, "builtins"), - # `backports.functools_lru_cache` - os.path.join( - python_2_vendor, "backports.functools_lru_cache" - ) - ] - - # Load PYTHONPATH from current launch context - python_path = self.launch_context.env.get("PYTHONPATH") - if python_path: - python_paths.append(python_path) - - # Set new PYTHONPATH to launch context environments - self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths) diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py index 124787e467..935d1e85c9 100644 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/ftrack/lib/avalon_sync.py @@ -6,11 +6,21 @@ import numbers import six -from openpype.api import ( - Logger, - get_anatomy_settings +from openpype.client import ( + get_project, + get_assets, + get_archived_assets, + get_subsets, + get_versions, + get_representations ) -from openpype.lib import ApplicationManager +from openpype.client.operations import ( + CURRENT_ASSET_DOC_SCHEMA, + CURRENT_PROJECT_SCHEMA, + CURRENT_PROJECT_CONFIG_SCHEMA, +) +from openpype.settings import get_anatomy_settings +from openpype.lib import ApplicationManager, Logger from openpype.pipeline import AvalonMongoDB, schema from .constants import CUST_ATTR_ID_KEY, FPS_KEYS @@ -24,14 +34,6 @@ import ftrack_api log = Logger.get_logger(__name__) -# Current schemas for avalon types -CURRENT_DOC_SCHEMAS = { - "project": "openpype:project-3.0", - "asset": "openpype:asset-3.0", - "config": "openpype:config-2.0" -} - - class InvalidFpsValue(Exception): pass @@ -143,14 +145,17 @@ def create_chunks(iterable, chunk_size=None): list: Chunked items. """ chunks = [] - if not iterable: - return chunks tupled_iterable = tuple(iterable) + if not tupled_iterable: + return chunks iterable_size = len(tupled_iterable) if chunk_size is None: chunk_size = 200 + if chunk_size < 1: + chunk_size = 1 + for idx in range(0, iterable_size, chunk_size): chunks.append(tupled_iterable[idx:idx + chunk_size]) return chunks @@ -432,6 +437,7 @@ class SyncEntitiesFactory: } self.create_list = [] + self.project_created = False self.unarchive_list = [] self.updates = collections.defaultdict(dict) @@ -573,6 +579,10 @@ class SyncEntitiesFactory: self.ft_project_id = ft_project_id self.entities_dict = entities_dict + @property + def project_name(self): + return self.entities_dict[self.ft_project_id]["name"] + @property def avalon_ents_by_id(self): """ @@ -657,9 +667,9 @@ class SyncEntitiesFactory: (list) of assets """ if self._avalon_archived_ents is None: - self._avalon_archived_ents = [ - ent for ent in self.dbcon.find({"type": "archived_asset"}) - ] + self._avalon_archived_ents = list( + get_archived_assets(self.project_name) + ) return self._avalon_archived_ents @property @@ -727,7 +737,7 @@ class SyncEntitiesFactory: """ if self._subsets_by_parent_id is None: self._subsets_by_parent_id = collections.defaultdict(list) - for subset in self.dbcon.find({"type": "subset"}): + for subset in get_subsets(self.project_name): self._subsets_by_parent_id[str(subset["parent"])].append( subset ) @@ -1418,8 +1428,8 @@ class SyncEntitiesFactory: # Avalon entities self.dbcon.install() self.dbcon.Session["AVALON_PROJECT"] = ft_project_name - avalon_project = self.dbcon.find_one({"type": "project"}) - avalon_entities = self.dbcon.find({"type": "asset"}) + avalon_project = get_project(ft_project_name) + avalon_entities = get_assets(ft_project_name) self.avalon_project = avalon_project self.avalon_entities = avalon_entities @@ -2047,7 
+2057,7 @@ class SyncEntitiesFactory: item["_id"] = new_id item["parent"] = self.avalon_project_id - item["schema"] = CURRENT_DOC_SCHEMAS["asset"] + item["schema"] = CURRENT_ASSET_DOC_SCHEMA item["data"]["visualParent"] = avalon_parent new_id_str = str(new_id) @@ -2182,8 +2192,8 @@ class SyncEntitiesFactory: project_item["_id"] = new_id project_item["parent"] = None - project_item["schema"] = CURRENT_DOC_SCHEMAS["project"] - project_item["config"]["schema"] = CURRENT_DOC_SCHEMAS["config"] + project_item["schema"] = CURRENT_PROJECT_SCHEMA + project_item["config"]["schema"] = CURRENT_PROJECT_CONFIG_SCHEMA self.ftrack_avalon_mapper[self.ft_project_id] = new_id self.avalon_ftrack_mapper[new_id] = self.ft_project_id @@ -2199,6 +2209,7 @@ class SyncEntitiesFactory: self._avalon_ents_by_name[project_item["name"]] = str(new_id) self.create_list.append(project_item) + self.project_created = True # store mongo id to ftrack entity entity = self.entities_dict[self.ft_project_id]["entity"] @@ -2255,46 +2266,37 @@ class SyncEntitiesFactory: self._delete_subsets_without_asset(subsets_to_remove) def _delete_subsets_without_asset(self, not_existing_parents): - subset_ids = [] - version_ids = [] repre_ids = [] to_delete = [] + subset_ids = [] for parent_id in not_existing_parents: subsets = self.subsets_by_parent_id.get(parent_id) if not subsets: continue for subset in subsets: - if subset.get("type") != "subset": - continue - subset_ids.append(subset["_id"]) + if subset.get("type") == "subset": + subset_ids.append(subset["_id"]) - db_subsets = self.dbcon.find({ - "_id": {"$in": subset_ids}, - "type": "subset" - }) - if not db_subsets: - return - - db_versions = self.dbcon.find({ - "parent": {"$in": subset_ids}, - "type": "version" - }) - if db_versions: - version_ids = [ver["_id"] for ver in db_versions] - - db_repres = self.dbcon.find({ - "parent": {"$in": version_ids}, - "type": "representation" - }) - if db_repres: - repre_ids = [repre["_id"] for repre in db_repres] + db_versions = get_versions( + self.project_name, + subset_ids=subset_ids, + fields=["_id"] + ) + version_ids = [ver["_id"] for ver in db_versions] + db_repres = get_representations( + self.project_name, + version_ids=version_ids, + fields=["_id"] + ) + repre_ids = [repre["_id"] for repre in db_repres] to_delete.extend(subset_ids) to_delete.extend(version_ids) to_delete.extend(repre_ids) - self.dbcon.delete_many({"_id": {"$in": to_delete}}) + if to_delete: + self.dbcon.delete_many({"_id": {"$in": to_delete}}) # Probably deprecated def _check_changeability(self, parent_id=None): @@ -2776,8 +2778,7 @@ class SyncEntitiesFactory: def report(self): items = [] - project_name = self.entities_dict[self.ft_project_id]["name"] - title = "Synchronization report ({}):".format(project_name) + title = "Synchronization report ({}):".format(self.project_name) keys = ["error", "warning", "info"] for key in keys: diff --git a/openpype/modules/ftrack/lib/credentials.py b/openpype/modules/ftrack/lib/credentials.py index 4e29e66382..2eb64254d1 100644 --- a/openpype/modules/ftrack/lib/credentials.py +++ b/openpype/modules/ftrack/lib/credentials.py @@ -92,14 +92,18 @@ def check_credentials(username, api_key, ftrack_server=None): if not ftrack_server or not username or not api_key: return False + user_exists = False try: session = ftrack_api.Session( server_url=ftrack_server, api_key=api_key, api_user=username ) + # Validated that the username actually exists + user = session.query("User where username is \"{}\"".format(username)) + user_exists = user is not None 
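The user lookup added to check_credentials above goes through session.query(), which ftrack_api evaluates lazily; a sketch that resolves the entity explicitly (mirroring the .first() call that collect_username.py adopts later in this diff) would look like:

    user = session.query(
        'User where username is "{}"'.format(username)
    ).first()
    user_exists = user is not None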
session.close() except Exception: - return False - return True + pass + return user_exists diff --git a/openpype/modules/ftrack/lib/ftrack_base_handler.py b/openpype/modules/ftrack/lib/ftrack_base_handler.py index 2130abc20c..c0b03f8a41 100644 --- a/openpype/modules/ftrack/lib/ftrack_base_handler.py +++ b/openpype/modules/ftrack/lib/ftrack_base_handler.py @@ -6,7 +6,7 @@ import uuid import datetime import traceback import time -from openpype.api import Logger +from openpype.lib import Logger from openpype.settings import get_project_settings import ftrack_api @@ -52,7 +52,7 @@ class BaseHandler(object): def __init__(self, session): '''Expects a ftrack_api.Session instance''' - self.log = Logger().get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) if not( isinstance(session, ftrack_api.session.Session) or isinstance(session, ftrack_server.lib.SocketSession) @@ -535,7 +535,7 @@ class BaseHandler(object): ) def trigger_event( - self, topic, event_data={}, session=None, source=None, + self, topic, event_data=None, session=None, source=None, event=None, on_error="ignore" ): if session is None: @@ -543,6 +543,9 @@ class BaseHandler(object): if not source and event: source = event.get("source") + + if event_data is None: + event_data = {} # Create and trigger event event = ftrack_api.event.base.Event( topic=topic, diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py index 14da188150..e13b7e65cd 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py @@ -105,11 +105,17 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): context.data["ftrackEntity"] = asset_entity context.data["ftrackTask"] = task_entity - self.per_instance_process(context, asset_name, task_name) + self.per_instance_process(context, asset_entity, task_entity) def per_instance_process( - self, context, context_asset_name, context_task_name + self, context, context_asset_entity, context_task_entity ): + context_task_name = None + context_asset_name = None + if context_asset_entity: + context_asset_name = context_asset_entity["name"] + if context_task_entity: + context_task_name = context_task_entity["name"] instance_by_asset_and_task = {} for instance in context: self.log.debug( @@ -120,6 +126,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): if not instance_asset_name and not instance_task_name: self.log.debug("Instance does not have set context keys.") + instance.data["ftrackEntity"] = context_asset_entity + instance.data["ftrackTask"] = context_task_entity continue elif instance_asset_name and instance_task_name: @@ -131,6 +139,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): "Instance's context is same as in publish context." " Asset: {} | Task: {}" ).format(context_asset_name, context_task_name)) + instance.data["ftrackEntity"] = context_asset_entity + instance.data["ftrackTask"] = context_task_entity continue asset_name = instance_asset_name task_name = instance_task_name @@ -141,6 +151,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): "Instance's context task is same as in publish" " context. 
Task: {}" ).format(context_task_name)) + instance.data["ftrackEntity"] = context_asset_entity + instance.data["ftrackTask"] = context_task_entity continue asset_name = context_asset_name @@ -152,6 +164,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): "Instance's context asset is same as in publish" " context. Asset: {}" ).format(context_asset_name)) + instance.data["ftrackEntity"] = context_asset_entity + instance.data["ftrackTask"] = context_task_entity continue # Do not use context's task name diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py index 5758068f86..576a7d36c4 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py @@ -8,7 +8,7 @@ Provides: import pyblish.api from openpype.pipeline import legacy_io -from openpype.lib.plugin_tools import filter_profiles +from openpype.lib import filter_profiles class CollectFtrackFamily(pyblish.api.InstancePlugin): diff --git a/openpype/modules/ftrack/plugins/publish/collect_username.py b/openpype/modules/ftrack/plugins/publish/collect_username.py index a9b746ea51..798f3960a8 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_username.py +++ b/openpype/modules/ftrack/plugins/publish/collect_username.py @@ -1,5 +1,8 @@ """Loads publishing context from json and continues in publish process. +Should run before 'CollectAnatomyContextData' so the user on context is +changed before it's stored to context anatomy data or instance anatomy data. + Requires: anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11) @@ -13,7 +16,7 @@ import os import pyblish.api -class CollectUsername(pyblish.api.ContextPlugin): +class CollectUsernameForWebpublish(pyblish.api.ContextPlugin): """ Translates user email to Ftrack username. 
@@ -32,10 +35,8 @@ class CollectUsername(pyblish.api.ContextPlugin): hosts = ["webpublisher", "photoshop"] targets = ["remotepublish", "filespublish", "tvpaint_worker"] - _context = None - def process(self, context): - self.log.info("CollectUsername") + self.log.info("{}".format(self.__class__.__name__)) os.environ["FTRACK_API_USER"] = os.environ["FTRACK_BOT_API_USER"] os.environ["FTRACK_API_KEY"] = os.environ["FTRACK_BOT_API_KEY"] @@ -54,12 +55,14 @@ class CollectUsername(pyblish.api.ContextPlugin): return session = ftrack_api.Session(auto_connect_event_hub=False) - user = session.query("User where email like '{}'".format(user_email)) + user = session.query( + "User where email like '{}'".format(user_email) + ).first() if not user: raise ValueError( "Couldn't find user with {} email".format(user_email)) - user = user[0] + username = user.get("username") self.log.debug("Resolved ftrack username:: {}".format(username)) os.environ["FTRACK_API_USER"] = username @@ -67,5 +70,4 @@ class CollectUsername(pyblish.api.ContextPlugin): burnin_name = username if '@' in burnin_name: burnin_name = burnin_name[:burnin_name.index('@')] - os.environ["WEBPUBLISH_OPENPYPE_USERNAME"] = burnin_name context.data["user"] = burnin_name diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py index 64af8cb208..159e60024d 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py @@ -12,6 +12,8 @@ Provides: import os import sys +import collections + import six import pyblish.api import clique @@ -20,13 +22,11 @@ import clique class IntegrateFtrackApi(pyblish.api.InstancePlugin): """ Commit components to server. 
""" - order = pyblish.api.IntegratorOrder+0.499 + order = pyblish.api.IntegratorOrder + 0.499 label = "Integrate Ftrack Api" families = ["ftrack"] def process(self, instance): - session = instance.context.data["ftrackSession"] - context = instance.context component_list = instance.data.get("ftrackComponentsList") if not component_list: self.log.info( @@ -35,8 +35,8 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): ) return - session = instance.context.data["ftrackSession"] context = instance.context + session = context.data["ftrackSession"] parent_entity = None default_asset_name = None @@ -84,9 +84,11 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): asset_types_by_short = self._ensure_asset_types_exists( session, component_list ) + self._fill_component_locations(session, component_list) asset_versions_data_by_id = {} used_asset_versions = [] + # Iterate over components and publish for data in component_list: self.log.debug("data: {}".format(data)) @@ -116,9 +118,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): asset_version_status_ids_by_name ) - # Component - self.create_component(session, asset_version_entity, data) - # Store asset version and components items that were version_id = asset_version_entity["id"] if version_id not in asset_versions_data_by_id: @@ -135,6 +134,8 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): if asset_version_entity not in used_asset_versions: used_asset_versions.append(asset_version_entity) + self._create_components(session, asset_versions_data_by_id) + instance.data["ftrackIntegratedAssetVersionsData"] = ( asset_versions_data_by_id ) @@ -193,6 +194,70 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): session._configure_locations() six.reraise(tp, value, tb) + def _fill_component_locations(self, session, component_list): + components_by_location_name = collections.defaultdict(list) + components_by_location_id = collections.defaultdict(list) + for component_item in component_list: + # Location entity can be prefilled + # - this is not recommended as connection to ftrack server may + # be lost and in that case the entity is not valid when gets + # to this plugin + location = component_item.get("component_location") + if location is not None: + continue + + # Collect location id + location_id = component_item.get("component_location_id") + if location_id: + components_by_location_id[location_id].append( + component_item + ) + continue + + location_name = component_item.get("component_location_name") + if location_name: + components_by_location_name[location_name].append( + component_item + ) + continue + + # Skip if there is nothing to do + if not components_by_location_name and not components_by_location_id: + return + + # Query locations + query_filters = [] + if components_by_location_id: + joined_location_ids = ",".join([ + '"{}"'.format(location_id) + for location_id in components_by_location_id + ]) + query_filters.append("id in ({})".format(joined_location_ids)) + + if components_by_location_name: + joined_location_names = ",".join([ + '"{}"'.format(location_name) + for location_name in components_by_location_name + ]) + query_filters.append("name in ({})".format(joined_location_names)) + + locations = session.query( + "select id, name from Location where {}".format( + " or ".join(query_filters) + ) + ).all() + # Fill locations in components + for location in locations: + location_id = location["id"] + location_name = location["name"] + if location_id in components_by_location_id: + for component in 
components_by_location_id[location_id]: + component["component_location"] = location + + if location_name in components_by_location_name: + for component in components_by_location_name[location_name]: + component["component_location"] = location + def _ensure_asset_types_exists(self, session, component_list): """Make sure that all AssetType entities exists for integration. @@ -559,3 +624,40 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): session.rollback() session._configure_locations() six.reraise(tp, value, tb) + + def _create_components(self, session, asset_versions_data_by_id): + for item in asset_versions_data_by_id.values(): + asset_version_entity = item["asset_version"] + component_items = item["component_items"] + + component_entities = session.query( + ( + "select id, name from Component where version_id is \"{}\"" + ).format(asset_version_entity["id"]) + ).all() + + existing_component_names = { + component["name"] + for component in component_entities + } + + contain_review = "ftrackreview-mp4" in existing_component_names + thumbnail_component_item = None + for component_item in component_items: + component_data = component_item.get("component_data") or {} + component_name = component_data.get("name") + if component_name == "ftrackreview-mp4": + contain_review = True + elif component_name == "ftrackreview-image": + thumbnail_component_item = component_item + + if contain_review and thumbnail_component_item: + thumbnail_component_item["component_data"]["name"] = ( + "thumbnail" + ) + + # Component + for component_item in component_items: + self.create_component( + session, asset_version_entity, component_item + ) diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py index 047fd8462c..8cb2336391 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py @@ -13,7 +13,10 @@ class IntegrateFtrackComponentOverwrite(pyblish.api.InstancePlugin): active = False def process(self, instance): - component_list = instance.data['ftrackComponentsList'] + component_list = instance.data.get('ftrackComponentsList') + if not component_list: + self.log.info("No component to overwrite...") + return for cl in component_list: cl['component_overwrite'] = True diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py index c6a3d47f66..e7c265988e 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py @@ -6,9 +6,11 @@ Requires: """ import sys +import json import six import pyblish.api +from openpype.lib import StringTemplate class IntegrateFtrackDescription(pyblish.api.InstancePlugin): @@ -25,6 +27,10 @@ class IntegrateFtrackDescription(pyblish.api.InstancePlugin): description_template = "{comment}" def process(self, instance): + if not self.description_template: + self.log.info("Skipping. 
Description template is not set.") + return + # Check if there are any integrated AssetVersion entities asset_versions_key = "ftrackIntegratedAssetVersionsData" asset_versions_data_by_id = instance.data.get(asset_versions_key) @@ -38,39 +44,62 @@ class IntegrateFtrackDescription(pyblish.api.InstancePlugin): else: self.log.debug("Comment is set to `{}`".format(comment)) - session = instance.context.data["ftrackSession"] - intent = instance.context.data.get("intent") - intent_label = None - if intent and isinstance(intent, dict): - intent_val = intent.get("value") - intent_label = intent.get("label") - else: - intent_val = intent + if intent and "{intent}" in self.description_template: + value = intent.get("value") + if value: + intent = intent.get("label") or value - if not intent_label: - intent_label = intent_val or "" + if not intent and not comment: + self.log.info("Skipping. Intent and comment are empty.") + return # if intent label is set then format comment # - it is possible that intent_label is equal to "" (empty string) - if intent_label: - self.log.debug( - "Intent label is set to `{}`.".format(intent_label) - ) - + if intent: + self.log.debug("Intent is set to `{}`.".format(intent)) else: self.log.debug("Intent is not set.") + # If we would like to use more "optional" possibilities we would have + # come up with some expressions in templates or speicifc templates + # for all 3 possible combinations when comment and intent are + # set or not (when both are not set then description does not + # make sense). + fill_data = {} + if comment: + fill_data["comment"] = comment + if intent: + fill_data["intent"] = intent + + description = StringTemplate.format_template( + self.description_template, fill_data + ) + if not description.solved: + self.log.warning(( + "Couldn't solve template \"{}\" with data {}" + ).format( + self.description_template, json.dumps(fill_data, indent=4) + )) + return + + if not description: + self.log.debug(( + "Skipping. Result of template is empty string." + " Template \"{}\" Fill data: {}" + ).format( + self.description_template, json.dumps(fill_data, indent=4) + )) + return + + session = instance.context.data["ftrackSession"] for asset_version_data in asset_versions_data_by_id.values(): asset_version = asset_version_data["asset_version"] # Backwards compatibility for older settings using # attribute 'note_with_intent_template' - comment = self.description_template.format(**{ - "intent": intent_label, - "comment": comment - }) - asset_version["comment"] = comment + + asset_version["comment"] = description try: session.commit() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_farm_status.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_farm_status.py new file mode 100644 index 0000000000..ab5738c33f --- /dev/null +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_farm_status.py @@ -0,0 +1,150 @@ +import pyblish.api +from openpype.lib import filter_profiles + + +class IntegrateFtrackFarmStatus(pyblish.api.ContextPlugin): + """Change task status when should be published on farm. + + Instance which has set "farm" key in data to 'True' is considered as will + be rendered on farm thus it's status should be changed. 
+ """ + + order = pyblish.api.IntegratorOrder + 0.48 + label = "Integrate Ftrack Farm Status" + + farm_status_profiles = [] + + def process(self, context): + # Quick end + if not self.farm_status_profiles: + project_name = context.data["projectName"] + self.log.info(( + "Status profiles are not filled for project \"{}\". Skipping" + ).format(project_name)) + return + + filtered_instances = self.filter_instances(context) + instances_with_status_names = self.get_instances_with_statuse_names( + context, filtered_instances + ) + if instances_with_status_names: + self.fill_statuses(context, instances_with_status_names) + + def filter_instances(self, context): + filtered_instances = [] + for instance in context: + # Skip disabled instances + if instance.data.get("publish") is False: + continue + subset_name = instance.data["subset"] + msg_start = "Skipping instance {}.".format(subset_name) + if not instance.data.get("farm"): + self.log.debug( + "{} Won't be rendered on farm.".format(msg_start) + ) + continue + + task_entity = instance.data.get("ftrackTask") + if not task_entity: + self.log.debug( + "{} Does not have filled task".format(msg_start) + ) + continue + + filtered_instances.append(instance) + return filtered_instances + + def get_instances_with_statuse_names(self, context, instances): + instances_with_status_names = [] + for instance in instances: + family = instance.data["family"] + subset_name = instance.data["subset"] + task_entity = instance.data["ftrackTask"] + host_name = context.data["hostName"] + task_name = task_entity["name"] + task_type = task_entity["type"]["name"] + status_profile = filter_profiles( + self.farm_status_profiles, + { + "hosts": host_name, + "task_types": task_type, + "task_names": task_name, + "families": family, + "subsets": subset_name, + }, + logger=self.log + ) + if not status_profile: + # There already is log in 'filter_profiles' + continue + + status_name = status_profile["status_name"] + if status_name: + instances_with_status_names.append((instance, status_name)) + return instances_with_status_names + + def fill_statuses(self, context, instances_with_status_names): + # Prepare available task statuses on the project + project_name = context.data["projectName"] + session = context.data["ftrackSession"] + project_entity = session.query(( + "select project_schema from Project where full_name is \"{}\"" + ).format(project_name)).one() + project_schema = project_entity["project_schema"] + + task_type_ids = set() + for item in instances_with_status_names: + instance, _ = item + task_entity = instance.data["ftrackTask"] + task_type_ids.add(task_entity["type"]["id"]) + + task_statuses_by_type_id = { + task_type_id: project_schema.get_statuses("Task", task_type_id) + for task_type_id in task_type_ids + } + + # Keep track if anything has changed + skipped_status_names = set() + status_changed = False + for item in instances_with_status_names: + instance, status_name = item + task_entity = instance.data["ftrackTask"] + task_statuses = task_statuses_by_type_id[task_entity["type"]["id"]] + status_name_low = status_name.lower() + + status_id = None + status_name = None + # Skip if status name was already tried to be found + for status in task_statuses: + if status["name"].lower() == status_name_low: + status_id = status["id"] + status_name = status["name"] + break + + if status_id is None: + if status_name_low not in skipped_status_names: + skipped_status_names.add(status_name_low) + joined_status_names = ", ".join({ + '"{}"'.format(status["name"]) + for status in 
task_statuses + }) + self.log.warning(( + "Status \"{}\" is not available on project \"{}\"." + " Available statuses are {}" + ).format(status_name, project_name, joined_status_names)) + continue + + # Change task status id + if status_id != task_entity["status_id"]: + task_entity["status_id"] = status_id + status_changed = True + path = "/".join([ + item["name"] + for item in task_entity["link"] + ]) + self.log.debug("Set status \"{}\" to \"{}\"".format( + status_name, path + )) + + if status_changed: + session.commit() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py index 0dd7b1c6e4..2d06e2ab02 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py @@ -3,8 +3,13 @@ import json import copy import pyblish.api -from openpype.lib import get_ffprobe_streams +from openpype.lib.openpype_version import get_openpype_version +from openpype.lib.transcoding import ( + get_ffprobe_streams, + convert_ffprobe_fps_to_float, +) from openpype.lib.profiles_filtering import filter_profiles +from openpype.lib.transcoding import VIDEO_EXTENSIONS class IntegrateFtrackInstance(pyblish.api.InstancePlugin): @@ -17,10 +22,21 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): label = "Integrate Ftrack Component" families = ["ftrack"] + metadata_keys_to_label = { + "openpype_version": "OpenPype version", + "frame_start": "Frame start", + "frame_end": "Frame end", + "duration": "Duration", + "width": "Resolution width", + "height": "Resolution height", + "fps": "FPS", + "codec": "Codec" + } + family_mapping = { "camera": "cam", "look": "look", - "mayaascii": "scene", + "mayaAscii": "scene", "model": "geo", "rig": "rig", "setdress": "setdress", @@ -39,7 +55,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): "reference": "reference" } keep_first_subset_name_for_review = True - asset_versions_status_profiles = {} + asset_versions_status_profiles = [] + additional_metadata_keys = [] def process(self, instance): self.log.debug("instance {}".format(instance)) @@ -58,11 +75,15 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): version_number = int(instance_version) family = instance.data["family"] - family_low = instance.data["family"].lower() + # Perform case-insensitive family mapping + family_low = family.lower() asset_type = instance.data.get("ftrackFamily") - if not asset_type and family_low in self.family_mapping: - asset_type = self.family_mapping[family_low] + if not asset_type: + for map_family, map_value in self.family_mapping.items(): + if map_family.lower() == family_low: + asset_type = map_value + break if not asset_type: asset_type = "upload" @@ -70,20 +91,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): self.log.debug( "Family: {}\nMapping: {}".format(family_low, self.family_mapping) ) - - # Ignore this instance if neither "ftrackFamily" or a family mapping is - # found. 
- if not asset_type: - self.log.info(( - "Family \"{}\" does not match any asset type mapping" - ).format(family)) - return - - # Prepare FPS - instance_fps = instance.data.get("fps") - if instance_fps is None: - instance_fps = instance.context.data["fps"] - status_name = self._get_asset_version_status_name(instance) # Base of component item data @@ -106,15 +113,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # These must be changed for each component "component_data": None, "component_path": None, - "component_location": None + "component_location": None, + "component_location_name": None, + "additional_data": {} } - ft_session = instance.context.data["ftrackSession"] - # Filter types of representations review_representations = [] thumbnail_representations = [] other_representations = [] + has_movie_review = False for repre in instance_repres: self.log.debug("Representation {}".format(repre)) repre_tags = repre.get("tags") or [] @@ -123,17 +131,15 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): elif "ftrackreview" in repre_tags: review_representations.append(repre) + if self._is_repre_video(repre): + has_movie_review = True else: other_representations.append(repre) # Prepare ftrack locations - unmanaged_location = ft_session.query( - "Location where name is \"ftrack.unmanaged\"" - ).one() - ftrack_server_location = ft_session.query( - "Location where name is \"ftrack.server\"" - ).one() + unmanaged_location_name = "ftrack.unmanaged" + ftrack_server_location_name = "ftrack.server" # Components data component_list = [] @@ -144,69 +150,53 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # TODO what if there is multiple thumbnails? first_thumbnail_component = None first_thumbnail_component_repre = None - for repre in thumbnail_representations: - published_path = repre.get("published_path") - if not published_path: - comp_files = repre["files"] - if isinstance(comp_files, (tuple, list, set)): - filename = comp_files[0] - else: - filename = comp_files - published_path = os.path.join( - repre["stagingDir"], filename - ) - if not os.path.exists(published_path): + if not review_representations or has_movie_review: + for repre in thumbnail_representations: + repre_path = self._get_repre_path(instance, repre, False) + if not repre_path: + self.log.warning( + "Published path is not set and source was removed." 
+ ) continue - repre["published_path"] = published_path - # Create copy of base comp item and append it - thumbnail_item = copy.deepcopy(base_component_item) - thumbnail_item["component_path"] = repre["published_path"] - thumbnail_item["component_data"] = { - "name": "thumbnail" - } - thumbnail_item["thumbnail"] = True - # Create copy of item before setting location - src_components_to_add.append(copy.deepcopy(thumbnail_item)) - # Create copy of first thumbnail - if first_thumbnail_component is None: - first_thumbnail_component_repre = repre - first_thumbnail_component = thumbnail_item - # Set location - thumbnail_item["component_location"] = ftrack_server_location - # Add item to component list - component_list.append(thumbnail_item) + # Create copy of base comp item and append it + thumbnail_item = copy.deepcopy(base_component_item) + thumbnail_item["component_path"] = repre_path + thumbnail_item["component_data"] = { + "name": "thumbnail" + } + thumbnail_item["thumbnail"] = True + + # Create copy of item before setting location + if "delete" not in repre.get("tags", []): + src_components_to_add.append(copy.deepcopy(thumbnail_item)) + # Create copy of first thumbnail + if first_thumbnail_component is None: + first_thumbnail_component_repre = repre + first_thumbnail_component = thumbnail_item + # Set location + thumbnail_item["component_location_name"] = ( + ftrack_server_location_name + ) + + # Add item to component list + component_list.append(thumbnail_item) if first_thumbnail_component is not None: - width = first_thumbnail_component_repre.get("width") - height = first_thumbnail_component_repre.get("height") - if not width or not height: - component_path = first_thumbnail_component["component_path"] - streams = [] - try: - streams = get_ffprobe_streams(component_path) - except Exception: - self.log.debug(( - "Failed to retrieve information about intput {}" - ).format(component_path)) + metadata = self._prepare_image_component_metadata( + first_thumbnail_component_repre, + first_thumbnail_component["component_path"] + ) - for stream in streams: - if "width" in stream and "height" in stream: - width = stream["width"] - height = stream["height"] - break - - if width and height: + if metadata: component_data = first_thumbnail_component["component_data"] - component_data["name"] = "ftrackreview-image" - component_data["metadata"] = { - "ftr_meta": json.dumps({ - "width": width, - "height": height, - "format": "image" - }) - } + component_data["metadata"] = metadata + + if review_representations: + component_data["name"] = "thumbnail" + else: + component_data["name"] = "ftrackreview-image" # Create review components # Change asset name of each new component for review @@ -215,6 +205,18 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): extended_asset_name = "" multiple_reviewable = len(review_representations) > 1 for repre in review_representations: + if not self._is_repre_video(repre) and has_movie_review: + self.log.debug("Movie repre has priority " + "from {}".format(repre)) + continue + + repre_path = self._get_repre_path(instance, repre, False) + if not repre_path: + self.log.warning( + "Published path is not set and source was removed." 
+ ) + continue + # Create copy of base comp item and append it review_item = copy.deepcopy(base_component_item) @@ -253,34 +255,26 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): first_thumbnail_component[ "asset_data"]["name"] = extended_asset_name - frame_start = repre.get("frameStartFtrack") - frame_end = repre.get("frameEndFtrack") - if frame_start is None or frame_end is None: - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - - # Frame end of uploaded video file should be duration in frames - # - frame start is always 0 - # - frame end is duration in frames - duration = frame_end - frame_start + 1 - - fps = repre.get("fps") - if fps is None: - fps = instance_fps - # Change location - review_item["component_path"] = repre["published_path"] + review_item["component_path"] = repre_path # Change component data + + if self._is_repre_video(repre): + component_name = "ftrackreview-mp4" + metadata = self._prepare_video_component_metadata( + instance, repre, repre_path, True + ) + else: + component_name = "ftrackreview-image" + metadata = self._prepare_image_component_metadata( + repre, repre_path + ) + review_item["thumbnail"] = True + review_item["component_data"] = { # Default component name is "main". - "name": "ftrackreview-mp4", - "metadata": { - "ftr_meta": json.dumps({ - "frameIn": 0, - "frameOut": int(duration), - "frameRate": float(fps) - }) - } + "name": component_name, + "metadata": metadata } if is_first_review_repre: @@ -290,10 +284,13 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): not_first_components.append(review_item) # Create copy of item before setting location - src_components_to_add.append(copy.deepcopy(review_item)) + if "delete" not in repre.get("tags", []): + src_components_to_add.append(copy.deepcopy(review_item)) # Set location - review_item["component_location"] = ftrack_server_location + review_item["component_location_name"] = ( + ftrack_server_location_name + ) # Add item to component list component_list.append(review_item) @@ -305,8 +302,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): first_thumbnail_component ) new_thumbnail_component["asset_data"]["name"] = asset_name - new_thumbnail_component["component_location"] = ( - ftrack_server_location + new_thumbnail_component["component_location_name"] = ( + ftrack_server_location_name ) component_list.append(new_thumbnail_component) @@ -315,16 +312,19 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Make sure thumbnail is disabled copy_src_item["thumbnail"] = False # Set location - copy_src_item["component_location"] = unmanaged_location + copy_src_item["component_location_name"] = unmanaged_location_name # Modify name of component to have suffix "_src" component_data = copy_src_item["component_data"] component_name = component_data["name"] component_data["name"] = component_name + "_src" + component_data["metadata"] = self._prepare_component_metadata( + instance, repre, copy_src_item["component_path"], False + ) component_list.append(copy_src_item) # Add others representations as component for repre in other_representations: - published_path = repre.get("published_path") + published_path = self._get_repre_path(instance, repre, True) if not published_path: continue # Create copy of base comp item and append it @@ -337,10 +337,14 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ): other_item["asset_data"]["name"] = extended_asset_name - other_item["component_data"] = { - "name": repre["name"] + component_data 
= { + "name": repre["name"], + "metadata": self._prepare_component_metadata( + instance, repre, published_path, False + ) } - other_item["component_location"] = unmanaged_location + other_item["component_data"] = component_data + other_item["component_location_name"] = unmanaged_location_name other_item["component_path"] = published_path component_list.append(other_item) @@ -357,6 +361,54 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): )) instance.data["ftrackComponentsList"] = component_list + def _collect_additional_metadata(self, streams): + pass + + def _get_repre_path(self, instance, repre, only_published): + """Get representation path that can be used for integration. + + When 'only_published' is set to true the validation of path is not + relevant. In that case we just need what is set in 'published_path' + as "reference". The reference is not used to get or upload the file but + for reference where the file was published. + + Args: + instance (pyblish.Instance): Processed instance object. Used + for source of staging dir if representation does not have + filled it. + repre (dict): Representation on instance which could be and + could not be integrated with main integrator. + only_published (bool): Care only about published paths and + ignore if filepath is not existing anymore. + + Returns: + str: Path to representation file. + None: Path is not filled or does not exists. + """ + + published_path = repre.get("published_path") + if published_path: + published_path = os.path.normpath(published_path) + if os.path.exists(published_path): + return published_path + + if only_published: + return published_path + + comp_files = repre["files"] + if isinstance(comp_files, (tuple, list, set)): + filename = comp_files[0] + else: + filename = comp_files + + staging_dir = repre.get("stagingDir") + if not staging_dir: + staging_dir = instance.data["stagingDir"] + src_path = os.path.normpath(os.path.join(staging_dir, filename)) + if os.path.exists(src_path): + return src_path + return None + def _get_asset_version_status_name(self, instance): if not self.asset_versions_status_profiles: return None @@ -377,3 +429,182 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): return None return matching_profile["status"] or None + + def _prepare_component_metadata( + self, instance, repre, component_path, is_review=None + ): + if self._is_repre_video(repre): + return self._prepare_video_component_metadata(instance, repre, + component_path, + is_review) + else: + return self._prepare_image_component_metadata(repre, + component_path) + + def _prepare_video_component_metadata( + self, instance, repre, component_path, is_review=None + ): + metadata = {} + if "openpype_version" in self.additional_metadata_keys: + label = self.metadata_keys_to_label["openpype_version"] + metadata[label] = get_openpype_version() + + extension = os.path.splitext(component_path)[-1] + streams = [] + try: + streams = get_ffprobe_streams(component_path) + except Exception: + self.log.debug( + "Failed to retrieve information about " + "input {}".format(component_path)) + + # Find video streams + video_streams = [ + stream + for stream in streams + if stream["codec_type"] == "video" + ] + # Skip if there are not video streams + # - exr is special case which can have issues with reading through + # ffmpegh but we want to set fps for it + if not video_streams and extension not in [".exr"]: + return metadata + + stream_width = None + stream_height = None + stream_fps = None + frame_out = None + codec_label = None + 
for video_stream in video_streams: + codec_label = video_stream.get("codec_long_name") + if not codec_label: + codec_label = video_stream.get("codec") + + if codec_label: + pix_fmt = video_stream.get("pix_fmt") + if pix_fmt: + codec_label += " ({})".format(pix_fmt) + + tmp_width = video_stream.get("width") + tmp_height = video_stream.get("height") + if tmp_width and tmp_height: + stream_width = tmp_width + stream_height = tmp_height + + input_framerate = video_stream.get("r_frame_rate") + stream_duration = video_stream.get("duration") + if input_framerate is None or stream_duration is None: + continue + try: + stream_fps = convert_ffprobe_fps_to_float( + input_framerate + ) + except ValueError: + self.log.warning( + "Could not convert ffprobe " + "fps to float \"{}\"".format(input_framerate)) + continue + + stream_width = tmp_width + stream_height = tmp_height + + self.log.debug("FPS from stream is {} and duration is {}".format( + input_framerate, stream_duration + )) + frame_out = float(stream_duration) * stream_fps + break + + # Prepare FPS + instance_fps = instance.data.get("fps") + if instance_fps is None: + instance_fps = instance.context.data["fps"] + + repre_fps = repre.get("fps") + if repre_fps is not None: + repre_fps = float(repre_fps) + + fps = stream_fps or repre_fps or instance_fps + + # Prepare frame ranges + frame_start = repre.get("frameStartFtrack") + frame_end = repre.get("frameEndFtrack") + if frame_start is None or frame_end is None: + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + duration = (frame_end - frame_start) + 1 + + for key, value in [ + ("fps", fps), + ("frame_start", frame_start), + ("frame_end", frame_end), + ("duration", duration), + ("width", stream_width), + ("height", stream_height), + ("fps", fps), + ("codec", codec_label) + ]: + if not value or key not in self.additional_metadata_keys: + continue + label = self.metadata_keys_to_label[key] + metadata[label] = value + + if not is_review: + ftr_meta = {} + if fps: + ftr_meta["frameRate"] = fps + + if stream_width and stream_height: + ftr_meta["width"] = int(stream_width) + ftr_meta["height"] = int(stream_height) + metadata["ftr_meta"] = json.dumps(ftr_meta) + return metadata + + # Frame end of uploaded video file should be duration in frames + # - frame start is always 0 + # - frame end is duration in frames + if not frame_out: + frame_out = duration + + # Ftrack documentation says that it is required to have + # 'width' and 'height' in review component. But with those values + # review video does not play. 
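+        # A minimal sketch of the metadata payload assembled below, with
+        # illustrative values only (real values come from ffprobe / the repre):
+        #   {"frameIn": 0, "frameOut": 100, "frameRate": 25.0}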
+ metadata["ftr_meta"] = json.dumps({ + "frameIn": 0, + "frameOut": frame_out, + "frameRate": float(fps) + }) + return metadata + + def _prepare_image_component_metadata(self, repre, component_path): + width = repre.get("width") + height = repre.get("height") + if not width or not height: + streams = [] + try: + streams = get_ffprobe_streams(component_path) + except Exception: + self.log.debug( + "Failed to retrieve information " + "about input {}".format(component_path)) + + for stream in streams: + if "width" in stream and "height" in stream: + width = stream["width"] + height = stream["height"] + break + + metadata = {} + if width and height: + metadata = { + "ftr_meta": json.dumps({ + "width": width, + "height": height, + "format": "image" + }) + } + + return metadata + + def _is_repre_video(self, repre): + repre_ext = ".{}".format(repre["ext"]) + return repre_ext in VIDEO_EXTENSIONS diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py index 952b21546d..ac3fa874e0 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py @@ -9,9 +9,11 @@ Requires: """ import sys +import copy import six import pyblish.api +from openpype.lib import StringTemplate class IntegrateFtrackNote(pyblish.api.InstancePlugin): @@ -53,14 +55,10 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): intent = instance.context.data.get("intent") intent_label = None - if intent and isinstance(intent, dict): - intent_val = intent.get("value") - intent_label = intent.get("label") - else: - intent_val = intent - - if not intent_label: - intent_label = intent_val or "" + if intent: + value = intent["value"] + if value: + intent_label = intent["label"] or value # if intent label is set then format comment # - it is possible that intent_label is equal to "" (empty string) @@ -96,6 +94,14 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): labels.append(label) + base_format_data = { + "host_name": host_name, + "app_name": app_name, + "app_label": app_label, + "source": instance.data.get("source", '') + } + if comment: + base_format_data["comment"] = comment for asset_version_data in asset_versions_data_by_id.values(): asset_version = asset_version_data["asset_version"] component_items = asset_version_data["component_items"] @@ -109,22 +115,31 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): template = self.note_template if template is None: template = self.note_with_intent_template - format_data = { - "intent": intent_label, - "comment": comment, - "host_name": host_name, - "app_name": app_name, - "app_label": app_label, - "published_paths": "
".join(sorted(published_paths)), - } - comment = template.format(**format_data) - if not comment: + format_data = copy.deepcopy(base_format_data) + format_data["published_paths"] = "
".join( + sorted(published_paths) + ) + if intent: + if "{intent}" in template: + format_data["intent"] = intent_label + else: + format_data["intent"] = intent + + note_text = StringTemplate.format_template(template, format_data) + if not note_text.solved: + self.log.warning(( + "Note template require more keys then can be provided." + "\nTemplate: {}\nData: {}" + ).format(template, format_data)) + continue + + if not note_text: self.log.info(( "Note for AssetVersion {} would be empty. Skipping." "\nTemplate: {}\nData: {}" ).format(asset_version["id"], template, format_data)) continue - asset_version.create_note(comment, author=user, labels=labels) + asset_version.create_note(note_text, author=user, labels=labels) try: session.commit() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py index cf90c11b65..fa7a89050c 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py @@ -1,9 +1,13 @@ import sys import collections import six +from copy import deepcopy + import pyblish.api -from openpype.pipeline import legacy_io +from openpype.client import get_asset_by_id +from openpype.lib import filter_profiles + # Copy of constant `openpype_modules.ftrack.lib.avalon_sync.CUST_ATTR_AUTO_SYNC` CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" @@ -64,29 +68,34 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): order = pyblish.api.IntegratorOrder - 0.04 label = 'Integrate Hierarchy To Ftrack' families = ["shot"] - hosts = ["hiero", "resolve", "standalonepublisher", "flame"] + hosts = [ + "hiero", + "resolve", + "standalonepublisher", + "flame", + "traypublisher" + ] optional = False + create_task_status_profiles = [] def process(self, context): self.context = context if "hierarchyContext" not in self.context.data: return - hierarchy_context = self.context.data["hierarchyContext"] + hierarchy_context = self._get_active_assets(context) + self.log.debug("__ hierarchy_context: {}".format(hierarchy_context)) - self.session = self.context.data["ftrackSession"] + session = self.context.data["ftrackSession"] project_name = self.context.data["projectEntity"]["name"] query = 'Project where full_name is "{}"'.format(project_name) - project = self.session.query(query).one() - auto_sync_state = project[ - "custom_attributes"][CUST_ATTR_AUTO_SYNC] + project = session.query(query).one() + auto_sync_state = project["custom_attributes"][CUST_ATTR_AUTO_SYNC] - if not legacy_io.Session: - legacy_io.install() - - self.ft_project = None - - input_data = hierarchy_context + self.session = session + self.ft_project = project + self.task_types = self.get_all_task_types(project) + self.task_statuses = self.get_task_statuses(project) # disable termporarily ftrack project's autosyncing if auto_sync_state: @@ -94,14 +103,14 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): try: # import ftrack hierarchy - self.import_to_ftrack(input_data) + self.import_to_ftrack(project_name, hierarchy_context) except Exception: raise finally: if auto_sync_state: self.auto_sync_on(project) - def import_to_ftrack(self, input_data, parent=None): + def import_to_ftrack(self, project_name, input_data, parent=None): # Prequery hiearchical custom attributes hier_custom_attributes = get_pype_attr(self.session)[1] hier_attr_by_key = { @@ -118,10 +127,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): 
self.log.debug(entity_type) if entity_type.lower() == 'project': - query = 'Project where full_name is "{}"'.format(entity_name) - entity = self.session.query(query).one() - self.ft_project = entity - self.task_types = self.get_all_task_types(entity) + entity = self.ft_project elif self.ft_project is None or parent is None: raise AssertionError( @@ -150,8 +156,14 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): # CUSTOM ATTRIBUTES custom_attributes = entity_data.get('custom_attributes', []) instances = [ - i for i in self.context if i.data['asset'] in entity['name'] + instance + for instance in self.context + if instance.data.get("asset") == entity["name"] ] + + for instance in instances: + instance.data["ftrackEntity"] = entity + for key in custom_attributes: hier_attr = hier_attr_by_key.get(key) # Use simple method if key is not hierarchical @@ -181,9 +193,6 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): ) ) - for instance in instances: - instance.data['ftrackEntity'] = entity - try: self.session.commit() except Exception: @@ -193,13 +202,22 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): six.reraise(tp, value, tb) # TASKS + instances_by_task_name = collections.defaultdict(list) + for instance in instances: + task_name = instance.data.get("task") + if task_name: + instances_by_task_name[task_name].append(instance) + tasks = entity_data.get('tasks', []) existing_tasks = [] tasks_to_create = [] for child in entity['children']: - if child.entity_type.lower() == 'task': - existing_tasks.append(child['name'].lower()) - # existing_tasks.append(child['type']['name']) + if child.entity_type.lower() == "task": + task_name_low = child["name"].lower() + existing_tasks.append(task_name_low) + + for instance in instances_by_task_name[task_name_low]: + instance["ftrackTask"] = child for task_name in tasks: task_type = tasks[task_name]["type"] @@ -209,21 +227,17 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): tasks_to_create.append((task_name, task_type)) for task_name, task_type in tasks_to_create: - self.create_task( + task_entity = self.create_task( name=task_name, task_type=task_type, parent=entity ) - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) + + for instance in instances_by_task_name[task_name.lower()]: + instance.data["ftrackTask"] = task_entity # Incoming links. - self.create_links(entity_data, entity) + self.create_links(project_name, entity_data, entity) try: self.session.commit() except Exception: @@ -256,9 +270,9 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): # Import children. if 'childs' in entity_data: self.import_to_ftrack( - entity_data['childs'], entity) + project_name, entity_data['childs'], entity) - def create_links(self, entity_data, entity): + def create_links(self, project_name, entity_data, entity): # Clear existing links. for link in entity.get("incoming_links", []): self.session.delete(link) @@ -271,9 +285,15 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): six.reraise(tp, value, tb) # Create new links. 
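+        # Each entry in "inputs" is an OpenPype asset id; the loop below
+        # resolves it to the linked ftrack AssetBuild through the asset
+        # document's "ftrackId" before the link is created.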
- for input in entity_data.get("inputs", []): - input_id = legacy_io.find_one({"_id": input})["data"]["ftrackId"] - assetbuild = self.session.get("AssetBuild", input_id) + for asset_id in entity_data.get("inputs", []): + asset_doc = get_asset_by_id(project_name, asset_id) + ftrack_id = None + if asset_doc: + ftrack_id = asset_doc["data"].get("ftrackId") + if not ftrack_id: + continue + + assetbuild = self.session.get("AssetBuild", ftrack_id) self.log.debug( "Creating link from {0} to {1}".format( assetbuild["name"], entity["name"] @@ -294,7 +314,37 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): return tasks + def get_task_statuses(self, project_entity): + project_schema = project_entity["project_schema"] + task_workflow_statuses = project_schema["_task_workflow"]["statuses"] + return { + status["id"]: status + for status in task_workflow_statuses + } + def create_task(self, name, task_type, parent): + filter_data = { + "task_names": name, + "task_types": task_type + } + profile = filter_profiles( + self.create_task_status_profiles, + filter_data + ) + status_id = None + if profile: + status_name = profile["status_name"] + status_name_low = status_name.lower() + for _status_id, status in self.task_statuses.items(): + if status["name"].lower() == status_name_low: + status_id = _status_id + break + + if status_id is None: + self.log.warning( + "Task status \"{}\" was not found".format(status_name) + ) + task = self.session.create('Task', { 'name': name, 'parent': parent @@ -303,6 +353,8 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): self.log.info(task_type) self.log.info(self.task_types) task['type'] = self.task_types[task_type] + if status_id is not None: + task["status_id"] = status_id try: self.session.commit() @@ -355,3 +407,41 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): self.session.rollback() self.session._configure_locations() six.reraise(tp, value, tb) + + def _get_active_assets(self, context): + """ Returns only asset dictionary. 
+        Usually the last part of a deep dictionary which
+        does not have any children.
+        """
+        def get_pure_hierarchy_data(input_dict):
+            input_dict_copy = deepcopy(input_dict)
+            for key in input_dict.keys():
+                self.log.debug("__ key: {}".format(key))
+                # check if child key is available
+                if input_dict[key].get("childs"):
+                    # loop deeper
+                    input_dict_copy[
+                        key]["childs"] = get_pure_hierarchy_data(
+                            input_dict[key]["childs"])
+                elif key not in active_assets:
+                    input_dict_copy.pop(key, None)
+            return input_dict_copy
+
+        hierarchy_context = context.data["hierarchyContext"]
+
+        active_assets = []
+        # filter only the active publishing instances
+        for instance in context:
+            if instance.data.get("publish") is False:
+                continue
+
+            if not instance.data.get("asset"):
+                continue
+
+            active_assets.append(instance.data["asset"])
+
+        # remove duplicates from the list
+        active_assets = list(set(active_assets))
+        self.log.debug("__ active_assets: {}".format(active_assets))
+
+        return get_pure_hierarchy_data(hierarchy_context)
diff --git a/openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py b/openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py
index dc80bf4eb3..489f291c0f 100644
--- a/openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py
+++ b/openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py
@@ -1,5 +1,5 @@
 import pyblish.api
-import openpype.api
+from openpype.pipeline.publish import ValidateContentsOrder


 class ValidateFtrackAttributes(pyblish.api.InstancePlugin):
@@ -34,7 +34,7 @@ class ValidateFtrackAttributes(pyblish.api.InstancePlugin):
     """

     label = "Validate Custom Ftrack Attributes"
-    order = openpype.api.ValidateContentsOrder
+    order = ValidateContentsOrder
     families = ["ftrack"]
     optional = True
     # Ignore standalone host, because it does not have an Ftrack entity
diff --git a/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py b/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py
index 1a5da44432..78f9d135b7 100644
--- a/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py
+++ b/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py
@@ -13,10 +13,9 @@ import functools
 import itertools
 import distutils.version
 import hashlib
-import tempfile
+import appdirs
 import threading
 import atexit
-import warnings

 import requests
 import requests.auth
@@ -241,7 +240,7 @@ class Session(object):
         )

         self._auto_connect_event_hub_thread = None
-        if auto_connect_event_hub in (None, True):
+        if auto_connect_event_hub is True:
             # Connect to event hub in background thread so as not to block main
             # session usage waiting for event hub connection.
             self._auto_connect_event_hub_thread = threading.Thread(
@@ -252,9 +251,7 @@

         # To help with migration from auto_connect_event_hub default changing
         # from True to False.
-        self._event_hub._deprecation_warning_auto_connect = (
-            auto_connect_event_hub is None
-        )
+        self._event_hub._deprecation_warning_auto_connect = False

         # Register to auto-close session on exit.
         atexit.register(WeakMethod(self.close))
@@ -271,8 +268,9 @@
         # rebuilding types)?
if schema_cache_path is not False: if schema_cache_path is None: + schema_cache_path = appdirs.user_cache_dir() schema_cache_path = os.environ.get( - 'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir() + 'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path ) schema_cache_path = os.path.join( diff --git a/openpype/modules/ftrack/scripts/sub_event_processor.py b/openpype/modules/ftrack/scripts/sub_event_processor.py index d1e2e3aaeb..a5ce0511b8 100644 --- a/openpype/modules/ftrack/scripts/sub_event_processor.py +++ b/openpype/modules/ftrack/scripts/sub_event_processor.py @@ -4,6 +4,8 @@ import signal import socket import datetime +import ftrack_api + from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer from openpype_modules.ftrack.ftrack_server.lib import ( SocketSession, @@ -12,17 +14,12 @@ from openpype_modules.ftrack.ftrack_server.lib import ( ) from openpype.modules import ModulesManager -from openpype.api import Logger from openpype.lib import ( + Logger, get_openpype_version, get_build_version ) - -import ftrack_api - -log = Logger().get_logger("Event processor") - subprocess_started = datetime.datetime.now() @@ -68,6 +65,8 @@ def register(session): def main(args): + log = Logger.get_logger("Event processor") + port = int(args[-1]) # Create a TCP/IP socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) diff --git a/openpype/modules/ftrack/scripts/sub_event_status.py b/openpype/modules/ftrack/scripts/sub_event_status.py index 3163642e3f..eb3f63c04b 100644 --- a/openpype/modules/ftrack/scripts/sub_event_status.py +++ b/openpype/modules/ftrack/scripts/sub_event_status.py @@ -7,6 +7,8 @@ import signal import socket import datetime +import appdirs + import ftrack_api from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer from openpype_modules.ftrack.ftrack_server.lib import ( @@ -15,8 +17,8 @@ from openpype_modules.ftrack.ftrack_server.lib import ( TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT ) -from openpype.api import Logger from openpype.lib import ( + Logger, is_current_version_studio_latest, is_running_from_build, get_expected_version, @@ -253,6 +255,15 @@ class StatusFactory: ) }) + items.append({ + "type": "label", + "value": ( + "Local versions dir: {}
Version repository path: {}" + ).format( + appdirs.user_data_dir("openpype", "pypeclub"), + os.environ.get("OPENPYPE_PATH") + ) + }) items.append({"type": "label", "value": "---"}) return items diff --git a/openpype/modules/ftrack/scripts/sub_event_storer.py b/openpype/modules/ftrack/scripts/sub_event_storer.py index 946ecbff79..a7e77951af 100644 --- a/openpype/modules/ftrack/scripts/sub_event_storer.py +++ b/openpype/modules/ftrack/scripts/sub_event_storer.py @@ -6,6 +6,8 @@ import socket import pymongo import ftrack_api + +from openpype.client import OpenPypeMongoConnection from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer from openpype_modules.ftrack.ftrack_server.lib import ( SocketSession, @@ -15,11 +17,10 @@ from openpype_modules.ftrack.ftrack_server.lib import ( ) from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info from openpype.lib import ( - OpenPypeMongoConnection, + Logger, get_openpype_version, get_build_version ) -from openpype.api import Logger log = Logger.get_logger("Event storer") subprocess_started = datetime.datetime.now() diff --git a/openpype/modules/ftrack/scripts/sub_legacy_server.py b/openpype/modules/ftrack/scripts/sub_legacy_server.py index e3a623c376..1f0fc1b369 100644 --- a/openpype/modules/ftrack/scripts/sub_legacy_server.py +++ b/openpype/modules/ftrack/scripts/sub_legacy_server.py @@ -5,11 +5,11 @@ import signal import threading import ftrack_api -from openpype.api import Logger +from openpype.lib import Logger from openpype.modules import ModulesManager from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer -log = Logger().get_logger("Event Server Legacy") +log = Logger.get_logger("Event Server Legacy") class TimerChecker(threading.Thread): diff --git a/openpype/modules/ftrack/scripts/sub_user_server.py b/openpype/modules/ftrack/scripts/sub_user_server.py index a3701a0950..930a2d51e2 100644 --- a/openpype/modules/ftrack/scripts/sub_user_server.py +++ b/openpype/modules/ftrack/scripts/sub_user_server.py @@ -2,6 +2,7 @@ import sys import signal import socket +from openpype.lib import Logger from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer from openpype_modules.ftrack.ftrack_server.lib import ( SocketSession, @@ -9,9 +10,7 @@ from openpype_modules.ftrack.ftrack_server.lib import ( ) from openpype.modules import ModulesManager -from openpype.api import Logger - -log = Logger().get_logger("FtrackUserServer") +log = Logger.get_logger("FtrackUserServer") def main(args): diff --git a/openpype/modules/ftrack/tray/ftrack_tray.py b/openpype/modules/ftrack/tray/ftrack_tray.py index c6201a94f6..e3c6e30ead 100644 --- a/openpype/modules/ftrack/tray/ftrack_tray.py +++ b/openpype/modules/ftrack/tray/ftrack_tray.py @@ -2,24 +2,22 @@ import os import time import datetime import threading + from Qt import QtCore, QtWidgets, QtGui import ftrack_api -from ..ftrack_server.lib import check_ftrack_url -from ..ftrack_server import socket_thread -from ..lib import credentials -from ..ftrack_module import FTRACK_MODULE_DIR +from openpype import resources +from openpype.lib import Logger +from openpype_modules.ftrack import resolve_ftrack_url, FTRACK_MODULE_DIR +from openpype_modules.ftrack.ftrack_server import socket_thread +from openpype_modules.ftrack.lib import credentials from . 
import login_dialog -from openpype.api import Logger, resources - - -log = Logger().get_logger("FtrackModule") - class FtrackTrayWrapper: def __init__(self, module): self.module = module + self.log = Logger.get_logger(self.__class__.__name__) self.thread_action_server = None self.thread_socket_server = None @@ -48,6 +46,9 @@ class FtrackTrayWrapper: self.widget_login.activateWindow() self.widget_login.raise_() + def show_ftrack_browser(self): + QtGui.QDesktopServices.openUrl(self.module.ftrack_url) + def validate(self): validation = False cred = credentials.get_credentials() @@ -57,19 +58,19 @@ class FtrackTrayWrapper: if validation: self.widget_login.set_credentials(ft_user, ft_api_key) self.module.set_credentials_to_env(ft_user, ft_api_key) - log.info("Connected to Ftrack successfully") + self.log.info("Connected to Ftrack successfully") self.on_login_change() return validation if not validation and ft_user and ft_api_key: - log.warning( + self.log.warning( "Current Ftrack credentials are not valid. {}: {} - {}".format( str(os.environ.get("FTRACK_SERVER")), ft_user, ft_api_key ) ) - log.info("Please sign in to Ftrack") + self.log.info("Please sign in to Ftrack") self.bool_logged = False self.show_login_widget() self.set_menu_visibility() @@ -99,7 +100,7 @@ class FtrackTrayWrapper: self.action_credentials.setIcon(self.icon_not_logged) self.action_credentials.setToolTip("Logged out") - log.info("Logged out of Ftrack") + self.log.info("Logged out of Ftrack") self.bool_logged = False self.set_menu_visibility() @@ -121,10 +122,6 @@ class FtrackTrayWrapper: ftrack_url = self.module.ftrack_url os.environ["FTRACK_SERVER"] = ftrack_url - parent_file_path = os.path.dirname( - os.path.dirname(os.path.realpath(__file__)) - ) - min_fail_seconds = 5 max_fail_count = 3 wait_time_after_max_fail = 10 @@ -149,17 +146,19 @@ class FtrackTrayWrapper: # Main loop while True: if not self.bool_action_server_running: - log.debug("Action server was pushed to stop.") + self.log.debug("Action server was pushed to stop.") break # Check if accessible Ftrack and Mongo url if not ftrack_accessible: - ftrack_accessible = check_ftrack_url(ftrack_url) + ftrack_accessible = resolve_ftrack_url(ftrack_url) # Run threads only if Ftrack is accessible if not ftrack_accessible: if not printed_ftrack_error: - log.warning("Can't access Ftrack {}".format(ftrack_url)) + self.log.warning( + "Can't access Ftrack {}".format(ftrack_url) + ) if self.thread_socket_server is not None: self.thread_socket_server.stop() @@ -186,7 +185,7 @@ class FtrackTrayWrapper: self.set_menu_visibility() elif failed_count == max_fail_count: - log.warning(( + self.log.warning(( "Action server failed {} times." 
" I'll try to run again {}s later" ).format( @@ -238,10 +237,10 @@ class FtrackTrayWrapper: self.thread_action_server.join() self.thread_action_server = None - log.info("Ftrack action server was forced to stop") + self.log.info("Ftrack action server was forced to stop") except Exception: - log.warning( + self.log.warning( "Error has happened during Killing action server", exc_info=True ) @@ -284,6 +283,13 @@ class FtrackTrayWrapper: tray_server_menu.addAction(self.action_server_stop) self.tray_server_menu = tray_server_menu + + # Ftrack Browser + browser_open = QtWidgets.QAction("Open Ftrack...", tray_menu) + browser_open.triggered.connect(self.show_ftrack_browser) + tray_menu.addAction(browser_open) + self.browser_open = browser_open + self.bool_logged = False self.set_menu_visibility() @@ -331,7 +337,7 @@ class FtrackTrayWrapper: self.thread_timer = None except Exception as e: - log.error("During Killing Timer event server: {0}".format(e)) + self.log.error("During Killing Timer event server: {0}".format(e)) def changed_user(self): self.stop_action_server() diff --git a/openpype/modules/interfaces.py b/openpype/modules/interfaces.py index 13cbea690b..f92ec6bf2d 100644 --- a/openpype/modules/interfaces.py +++ b/openpype/modules/interfaces.py @@ -1,8 +1,33 @@ -from abc import abstractmethod +from abc import ABCMeta, abstractmethod, abstractproperty + +import six from openpype import resources -from openpype.modules import OpenPypeInterface + +class _OpenPypeInterfaceMeta(ABCMeta): + """OpenPypeInterface meta class to print proper string.""" + + def __str__(self): + return "<'OpenPypeInterface.{}'>".format(self.__name__) + + def __repr__(self): + return str(self) + + +@six.add_metaclass(_OpenPypeInterfaceMeta) +class OpenPypeInterface: + """Base class of Interface that can be used as Mixin with abstract parts. + + This is way how OpenPype module or addon can tell OpenPype that contain + implementation for specific functionality. + + Child classes of OpenPypeInterface may be used as mixin in different + OpenPype modules which means they have to have implemented methods defined + in the interface. By default interface does not have any abstract parts. + """ + + pass class IPluginPaths(OpenPypeInterface): @@ -14,21 +39,75 @@ class IPluginPaths(OpenPypeInterface): "publish": ["path/to/publish_plugins"] } """ - # TODO validation of an output + @abstractmethod def get_plugin_paths(self): pass + def get_creator_plugin_paths(self, host_name): + """Retreive creator plugin paths. + + Give addons ability to add creator plugin paths based on host name. + + NOTES: + - Default implementation uses 'get_plugin_paths' and always return + all creator plugins. + - Host name may help to organize plugins by host, but each creator + alsomay have host filtering. + + Args: + host_name (str): For which host are the plugins meant. + """ + + paths = self.get_plugin_paths() + if not paths or "create" not in paths: + return [] + + create_paths = paths["create"] + if not create_paths: + return [] + + if not isinstance(create_paths, (list, tuple, set)): + create_paths = [create_paths] + return create_paths + class ILaunchHookPaths(OpenPypeInterface): """Module has launch hook paths to return. + Modules does not have to inherit from this interface (changed 8.11.2022). + Module just have to have implemented 'get_launch_hook_paths' to be able use + the advantage. + Expected result is list of paths. ["path/to/launch_hooks_dir"] + + Deprecated: + This interface is not needed since OpenPype 3.14.*. 
Addon just have to + implement 'get_launch_hook_paths' which can expect Application object + or nothing as argument. + + Interface class will be removed after 3.16.*. """ @abstractmethod - def get_launch_hook_paths(self): + def get_launch_hook_paths(self, app): + """Paths to directory with application launch hooks. + + Method can be also defined without arguments. + ```python + def get_launch_hook_paths(self): + return [] + ``` + + Args: + app (Application): Application object which can be used for + filtering of which launch hook paths are returned. + + Returns: + Iterable[str]: Path to directories where launch hooks can be found. + """ + pass @@ -39,6 +118,7 @@ class ITrayModule(OpenPypeInterface): The module still must be usable if is not used in tray even if would do nothing. """ + tray_initialized = False _tray_manager = None @@ -51,16 +131,19 @@ class ITrayModule(OpenPypeInterface): This is where GUIs should be loaded or tray specific parts should be prepared. """ + pass @abstractmethod def tray_menu(self, tray_menu): """Add module's action to tray menu.""" + pass @abstractmethod def tray_start(self): """Start procedure in Pype tray.""" + pass @abstractmethod @@ -69,6 +152,7 @@ class ITrayModule(OpenPypeInterface): This is place where all threads should be shut. """ + pass def execute_in_main_thread(self, callback): @@ -77,6 +161,7 @@ class ITrayModule(OpenPypeInterface): Some callbacks need to be processed on main thread (menu actions must be added on main thread or they won't get triggered etc.) """ + if not self.tray_initialized: # TODO Called without initialized tray, still main thread needed try: @@ -101,6 +186,7 @@ class ITrayModule(OpenPypeInterface): msecs (int): Duration of message visibility in miliseconds. Default is 10000 msecs, may differ by Qt version. """ + if self._tray_manager: self._tray_manager.show_tray_message(title, message, icon, msecs) @@ -253,16 +339,19 @@ class ITrayService(ITrayModule): def set_service_running_icon(self): """Change icon of an QAction to green circle.""" + if self.menu_action: self.menu_action.setIcon(self.get_icon_running()) def set_service_failed_icon(self): """Change icon of an QAction to red circle.""" + if self.menu_action: self.menu_action.setIcon(self.get_icon_failed()) def set_service_idle_icon(self): """Change icon of an QAction to orange circle.""" + if self.menu_action: self.menu_action.setIcon(self.get_icon_idle()) @@ -276,6 +365,7 @@ class ISettingsChangeListener(OpenPypeInterface): "publish": ["path/to/publish_plugins"] } """ + @abstractmethod def on_system_settings_save( self, old_value, new_value, changes, new_value_metadata @@ -293,3 +383,24 @@ class ISettingsChangeListener(OpenPypeInterface): self, old_value, new_value, changes, project_name, new_value_metadata ): pass + + +class IHostAddon(OpenPypeInterface): + """Addon which also contain a host implementation.""" + + @abstractproperty + def host_name(self): + """Name of host which module represents.""" + + pass + + def get_workfile_extensions(self): + """Define workfile extensions for host. + + Not all hosts support workfiles thus this is optional implementation. + + Returns: + List[str]: Extensions used for workfiles with dot. 
+ """ + + return [] diff --git a/openpype/modules/job_queue/module.py b/openpype/modules/job_queue/module.py index f1d7251e85..7075fcea14 100644 --- a/openpype/modules/job_queue/module.py +++ b/openpype/modules/job_queue/module.py @@ -43,7 +43,7 @@ import platform import click from openpype.modules import OpenPypeModule -from openpype.api import get_system_settings +from openpype.settings import get_system_settings class JobQueueModule(OpenPypeModule): diff --git a/openpype/modules/kitsu/__init__.py b/openpype/modules/kitsu/__init__.py new file mode 100644 index 0000000000..9220cb1762 --- /dev/null +++ b/openpype/modules/kitsu/__init__.py @@ -0,0 +1,9 @@ +""" Addon class definition and Settings definition must be imported here. + +If addon class or settings definition won't be here their definition won't +be found by OpenPype discovery. +""" + +from .kitsu_module import KitsuModule + +__all__ = ("KitsuModule",) diff --git a/openpype/modules/kitsu/actions/launcher_show_in_kitsu.py b/openpype/modules/kitsu/actions/launcher_show_in_kitsu.py new file mode 100644 index 0000000000..c95079e042 --- /dev/null +++ b/openpype/modules/kitsu/actions/launcher_show_in_kitsu.py @@ -0,0 +1,125 @@ +import webbrowser + +from openpype.pipeline import LauncherAction +from openpype.modules import ModulesManager +from openpype.client import get_project, get_asset_by_name + + +class ShowInKitsu(LauncherAction): + name = "showinkitsu" + label = "Show in Kitsu" + icon = "external-link-square" + color = "#e0e1e1" + order = 10 + + @staticmethod + def get_kitsu_module(): + return ModulesManager().modules_by_name.get("kitsu") + + def is_compatible(self, session): + if not session.get("AVALON_PROJECT"): + return False + + return True + + def process(self, session, **kwargs): + + # Context inputs + project_name = session["AVALON_PROJECT"] + asset_name = session.get("AVALON_ASSET", None) + task_name = session.get("AVALON_TASK", None) + + project = get_project(project_name=project_name, + fields=["data.zou_id"]) + if not project: + raise RuntimeError(f"Project {project_name} not found.") + + project_zou_id = project["data"].get("zou_id") + if not project_zou_id: + raise RuntimeError(f"Project {project_name} has no " + f"connected kitsu id.") + + asset_zou_name = None + asset_zou_id = None + asset_zou_type = 'Assets' + task_zou_id = None + zou_sub_type = ['AssetType', 'Sequence'] + if asset_name: + asset_zou_name = asset_name + asset_fields = ["data.zou.id", "data.zou.type"] + if task_name: + asset_fields.append(f"data.tasks.{task_name}.zou.id") + + asset = get_asset_by_name(project_name, + asset_name=asset_name, + fields=asset_fields) + + asset_zou_data = asset["data"].get("zou") + + if asset_zou_data: + asset_zou_type = asset_zou_data["type"] + if asset_zou_type not in zou_sub_type: + asset_zou_id = asset_zou_data["id"] + else: + asset_zou_type = asset_name + + if task_name: + task_data = asset["data"]["tasks"][task_name] + task_zou_data = task_data.get("zou", {}) + if not task_zou_data: + self.log.debug(f"No zou task data for task: {task_name}") + task_zou_id = task_zou_data["id"] + + # Define URL + url = self.get_url(project_id=project_zou_id, + asset_name=asset_zou_name, + asset_id=asset_zou_id, + asset_type=asset_zou_type, + task_id=task_zou_id) + + # Open URL in webbrowser + self.log.info(f"Opening URL: {url}") + webbrowser.open(url, + # Try in new tab + new=2) + + def get_url(self, + project_id, + asset_name=None, + asset_id=None, + asset_type=None, + task_id=None): + + shots_url = {'Shots', 'Sequence', 'Shot'} + 
sub_type = {'AssetType', 'Sequence'} + kitsu_module = self.get_kitsu_module() + + # Get kitsu url with /api stripped + kitsu_url = kitsu_module.server_url + if kitsu_url.endswith("/api"): + kitsu_url = kitsu_url[:-len("/api")] + + sub_url = f"/productions/{project_id}" + asset_type_url = "Shots" if asset_type in shots_url else "Assets" + + if task_id: + # Go to task page + # /productions/{project-id}/{asset_type}/tasks/{task_id} + sub_url += f"/{asset_type_url}/tasks/{task_id}" + + elif asset_id: + # Go to asset or shot page + # /productions/{project-id}/assets/{entity_id} + # /productions/{project-id}/shots/{entity_id} + sub_url += f"/{asset_type_url}/{asset_id}" + + else: + # Go to project page + # Project page must end with a view + # /productions/{project-id}/assets/ + # Add search method if is a sub_type + sub_url += f"/{asset_type_url}" + if asset_type in sub_type: + sub_url += f'?search={asset_name}' + + return f"{kitsu_url}{sub_url}" diff --git a/openpype/modules/kitsu/kitsu_module.py b/openpype/modules/kitsu/kitsu_module.py new file mode 100644 index 0000000000..b91373af20 --- /dev/null +++ b/openpype/modules/kitsu/kitsu_module.py @@ -0,0 +1,142 @@ +"""Kitsu module.""" + +import click +import os + +from openpype.modules import ( + OpenPypeModule, + IPluginPaths, + ITrayAction, +) + + +class KitsuModule(OpenPypeModule, IPluginPaths, ITrayAction): + """Kitsu module class.""" + + label = "Kitsu Connect" + name = "kitsu" + + def initialize(self, settings): + """Initialization of module.""" + module_settings = settings[self.name] + + # Enabled by settings + self.enabled = module_settings.get("enabled", False) + + # Add API URL schema + kitsu_url = module_settings["server"].strip() + if kitsu_url: + # Ensure web url + if not kitsu_url.startswith("http"): + kitsu_url = "https://" + kitsu_url + + # Check for "/api" url validity + if not kitsu_url.endswith("api"): + kitsu_url = "{}{}api".format( + kitsu_url, "" if kitsu_url.endswith("/") else "/" + ) + + self.server_url = kitsu_url + + # UI which must not be created at this time + self._dialog = None + + def tray_init(self): + """Tray init.""" + + self._create_dialog() + + def tray_start(self): + """Tray start.""" + from .utils.credentials import ( + load_credentials, + validate_credentials, + set_credentials_envs, + ) + + login, password = load_credentials() + + # Check credentials, ask them if needed + if validate_credentials(login, password): + set_credentials_envs(login, password) + else: + self.show_dialog() + + def get_global_environments(self): + """Kitsu's global environments.""" + return {"KITSU_SERVER": self.server_url} + + def _create_dialog(self): + # Don't recreate dialog if already exists + if self._dialog is not None: + return + + from .kitsu_widgets import KitsuPasswordDialog + + self._dialog = KitsuPasswordDialog() + + def show_dialog(self): + """Show dialog to log-in.""" + + # Make sure dialog is created + self._create_dialog() + + # Show dialog + self._dialog.open() + + def on_action_trigger(self): + """Implementation of abstract method for `ITrayAction`.""" + self.show_dialog() + + def get_plugin_paths(self): + """Implementation of abstract method for `IPluginPaths`.""" + current_dir = os.path.dirname(os.path.abspath(__file__)) + + return { + "publish": [os.path.join(current_dir, "plugins", "publish")], + "actions": [os.path.join(current_dir, "actions")] + } + + def cli(self, click_group): + click_group.add_command(cli_main) + + +@click.group(KitsuModule.name, help="Kitsu dynamic cli commands.") +def cli_main(): + pass 
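+# Hypothetical invocation of the commands defined below through the OpenPype
+# CLI (exact executable name and command spelling may differ per deployment):
+#   openpype_console module kitsu sync-service --login me@studio.tld --password ***
+#   openpype_console module kitsu push-to-zou --login me@studio.tld --password ***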
+ + +@cli_main.command() +@click.option("--login", envvar="KITSU_LOGIN", help="Kitsu login") +@click.option( + "--password", envvar="KITSU_PWD", help="Password for kitsu username" +) +def push_to_zou(login, password): + """Synchronize Zou database (Kitsu backend) with openpype database. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + """ + from .utils.update_zou_with_op import sync_zou + + sync_zou(login, password) + + +@cli_main.command() +@click.option("-l", "--login", envvar="KITSU_LOGIN", help="Kitsu login") +@click.option( + "-p", "--password", envvar="KITSU_PWD", help="Password for kitsu username" +) +def sync_service(login, password): + """Synchronize openpype database from Zou sever database. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + """ + from .utils.update_op_with_zou import sync_all_projects + from .utils.sync_service import start_listeners + + sync_all_projects(login, password) + start_listeners(login, password) diff --git a/openpype/modules/kitsu/kitsu_widgets.py b/openpype/modules/kitsu/kitsu_widgets.py new file mode 100644 index 0000000000..65baed9665 --- /dev/null +++ b/openpype/modules/kitsu/kitsu_widgets.py @@ -0,0 +1,188 @@ +from Qt import QtWidgets, QtCore, QtGui + +from openpype import style +from openpype.modules.kitsu.utils.credentials import ( + clear_credentials, + load_credentials, + save_credentials, + set_credentials_envs, + validate_credentials, +) +from openpype.resources import get_resource +from openpype.settings.lib import ( + get_system_settings, +) + +from openpype.widgets.password_dialog import PressHoverButton + + +class KitsuPasswordDialog(QtWidgets.QDialog): + """Kitsu login dialog.""" + + finished = QtCore.Signal(bool) + + def __init__(self, parent=None): + super(KitsuPasswordDialog, self).__init__(parent) + + self.setWindowTitle("Kitsu Credentials") + self.resize(300, 120) + + system_settings = get_system_settings() + user_login, user_pwd = load_credentials() + remembered = bool(user_login or user_pwd) + + self._final_result = None + self._connectable = bool( + system_settings["modules"].get("kitsu", {}).get("server") + ) + + # Server label + server_message = ( + system_settings["modules"]["kitsu"]["server"] + if self._connectable + else "no server url set in Studio Settings..." 
+ ) + server_label = QtWidgets.QLabel( + f"Server: {server_message}", + self, + ) + + # Login input + login_widget = QtWidgets.QWidget(self) + + login_label = QtWidgets.QLabel("Login:", login_widget) + + login_input = QtWidgets.QLineEdit( + login_widget, + text=user_login if remembered else None, + ) + login_input.setPlaceholderText("Your Kitsu account login...") + + login_layout = QtWidgets.QHBoxLayout(login_widget) + login_layout.setContentsMargins(0, 0, 0, 0) + login_layout.addWidget(login_label) + login_layout.addWidget(login_input) + + # Password input + password_widget = QtWidgets.QWidget(self) + + password_label = QtWidgets.QLabel("Password:", password_widget) + + password_input = QtWidgets.QLineEdit( + password_widget, + text=user_pwd if remembered else None, + ) + password_input.setPlaceholderText("Your password...") + password_input.setEchoMode(QtWidgets.QLineEdit.Password) + + show_password_icon_path = get_resource("icons", "eye.png") + show_password_icon = QtGui.QIcon(show_password_icon_path) + show_password_btn = PressHoverButton(password_widget) + show_password_btn.setObjectName("PasswordBtn") + show_password_btn.setIcon(show_password_icon) + show_password_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + + password_layout = QtWidgets.QHBoxLayout(password_widget) + password_layout.setContentsMargins(0, 0, 0, 0) + password_layout.addWidget(password_label) + password_layout.addWidget(password_input) + password_layout.addWidget(show_password_btn) + + # Message label + message_label = QtWidgets.QLabel("", self) + + # Buttons + buttons_widget = QtWidgets.QWidget(self) + + remember_checkbox = QtWidgets.QCheckBox("Remember", buttons_widget) + remember_checkbox.setObjectName("RememberCheckbox") + remember_checkbox.setChecked(remembered) + + ok_btn = QtWidgets.QPushButton("Ok", buttons_widget) + cancel_btn = QtWidgets.QPushButton("Cancel", buttons_widget) + + buttons_layout = QtWidgets.QHBoxLayout(buttons_widget) + buttons_layout.setContentsMargins(0, 0, 0, 0) + buttons_layout.addWidget(remember_checkbox) + buttons_layout.addStretch(1) + buttons_layout.addWidget(ok_btn) + buttons_layout.addWidget(cancel_btn) + + # Main layout + layout = QtWidgets.QVBoxLayout(self) + layout.addSpacing(5) + layout.addWidget(server_label, 0) + layout.addSpacing(5) + layout.addWidget(login_widget, 0) + layout.addWidget(password_widget, 0) + layout.addWidget(message_label, 0) + layout.addStretch(1) + layout.addWidget(buttons_widget, 0) + + ok_btn.clicked.connect(self._on_ok_click) + cancel_btn.clicked.connect(self._on_cancel_click) + show_password_btn.change_state.connect(self._on_show_password) + + self.login_input = login_input + self.password_input = password_input + self.remember_checkbox = remember_checkbox + self.message_label = message_label + + self.setStyleSheet(style.load_stylesheet()) + + def result(self): + return self._final_result + + def keyPressEvent(self, event): + if event.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter): + self._on_ok_click() + return event.accept() + super(KitsuPasswordDialog, self).keyPressEvent(event) + + def closeEvent(self, event): + super(KitsuPasswordDialog, self).closeEvent(event) + self.finished.emit(self.result()) + + def _on_ok_click(self): + # Check if is connectable + if not self._connectable: + self.message_label.setText( + "Please set server url in Studio Settings!" 
+ ) + return + + # Collect values + login_value = self.login_input.text() + pwd_value = self.password_input.text() + remember = self.remember_checkbox.isChecked() + + # Authenticate + if validate_credentials(login_value, pwd_value): + set_credentials_envs(login_value, pwd_value) + else: + self.message_label.setText("Authentication failed...") + return + + # Remember password cases + if remember: + save_credentials(login_value, pwd_value) + else: + # Clear local settings + clear_credentials() + + # Clear input fields + self.login_input.clear() + self.password_input.clear() + + self._final_result = True + self.close() + + def _on_show_password(self, show_password): + if show_password: + echo_mode = QtWidgets.QLineEdit.Normal + else: + echo_mode = QtWidgets.QLineEdit.Password + self.password_input.setEchoMode(echo_mode) + + def _on_cancel_click(self): + self.close() diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_credential.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_credential.py new file mode 100644 index 0000000000..b7f6f67a40 --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_credential.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +import os + +import gazu +import pyblish.api + + +class CollectKitsuSession(pyblish.api.ContextPlugin): # rename log in + """Collect Kitsu session using user credentials""" + + order = pyblish.api.CollectorOrder + label = "Kitsu user session" + # families = ["kitsu"] + + def process(self, context): + + gazu.client.set_host(os.environ["KITSU_SERVER"]) + gazu.log_in(os.environ["KITSU_LOGIN"], os.environ["KITSU_PWD"]) diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py new file mode 100644 index 0000000000..c9e78b59eb --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +import os + +import gazu +import pyblish.api + + +class CollectKitsuEntities(pyblish.api.ContextPlugin): + """Collect Kitsu entities according to the current context""" + + order = pyblish.api.CollectorOrder + 0.499 + label = "Kitsu entities" + + def process(self, context): + + asset_data = context.data["assetEntity"]["data"] + zou_asset_data = asset_data.get("zou") + if not zou_asset_data: + raise AssertionError("Zou asset data not found in OpenPype!") + self.log.debug("Collected zou asset data: {}".format(zou_asset_data)) + + zou_task_data = asset_data["tasks"][os.environ["AVALON_TASK"]].get( + "zou" + ) + if not zou_task_data: + self.log.warning("Zou task data not found in OpenPype!") + self.log.debug("Collected zou task data: {}".format(zou_task_data)) + + kitsu_project = gazu.project.get_project(zou_asset_data["project_id"]) + if not kitsu_project: + raise AssertionError("Project not found in kitsu!") + context.data["kitsu_project"] = kitsu_project + self.log.debug("Collect kitsu project: {}".format(kitsu_project)) + + entity_type = zou_asset_data["type"] + if entity_type == "Shot": + kitsu_entity = gazu.shot.get_shot(zou_asset_data["id"]) + else: + kitsu_entity = gazu.asset.get_asset(zou_asset_data["id"]) + + if not kitsu_entity: + raise AssertionError("{} not found in kitsu!".format(entity_type)) + + context.data["kitsu_entity"] = kitsu_entity + self.log.debug( + "Collect kitsu {}: {}".format(entity_type, kitsu_entity) + ) + + if zou_task_data: + kitsu_task = gazu.task.get_task(zou_task_data["id"]) + if not kitsu_task: + raise AssertionError("Task not found in kitsu!") + 
context.data["kitsu_task"] = kitsu_task + self.log.debug("Collect kitsu task: {}".format(kitsu_task)) + + else: + kitsu_task_type = gazu.task.get_task_type_by_name( + os.environ["AVALON_TASK"] + ) + if not kitsu_task_type: + raise AssertionError( + "Task type {} not found in Kitsu!".format( + os.environ["AVALON_TASK"] + ) + ) + + kitsu_task = gazu.task.get_task_by_name( + kitsu_entity, kitsu_task_type + ) + if not kitsu_task: + raise AssertionError("Task not found in kitsu!") + context.data["kitsu_task"] = kitsu_task + self.log.debug("Collect kitsu task: {}".format(kitsu_task)) diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_username.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_username.py new file mode 100644 index 0000000000..896050f7e2 --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_username.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +import os +import re + +import pyblish.api + + +class CollectKitsuUsername(pyblish.api.ContextPlugin): + """Collect Kitsu username from the kitsu login""" + + order = pyblish.api.CollectorOrder + 0.499 + label = "Kitsu username" + + def process(self, context): + kitsu_login = os.environ.get('KITSU_LOGIN') + + if not kitsu_login: + return + + kitsu_username = kitsu_login.split("@")[0].replace('.', ' ') + new_username = re.sub('[^a-zA-Z]', ' ', kitsu_username).title() + + for instance in context: + # Don't override customData if it already exists + if 'customData' not in instance.data: + instance.data['customData'] = {} + + instance.data['customData']["kitsuUsername"] = new_username diff --git a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py new file mode 100644 index 0000000000..ea98e0b7cc --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +import gazu +import pyblish.api + + +class IntegrateKitsuNote(pyblish.api.ContextPlugin): + """Integrate Kitsu Note""" + + order = pyblish.api.IntegratorOrder + label = "Kitsu Note and Status" + # families = ["kitsu"] + set_status_note = False + note_status_shortname = "wfa" + + def process(self, context): + + # Get comment text body + publish_comment = context.data.get("comment") + if not publish_comment: + self.log.info("Comment is not set.") + + self.log.debug("Comment is `{}`".format(publish_comment)) + + # Get note status, by default uses the task status for the note + # if it is not specified in the configuration + note_status = context.data["kitsu_task"]["task_status_id"] + if self.set_status_note: + kitsu_status = gazu.task.get_task_status_by_short_name( + self.note_status_shortname + ) + if kitsu_status: + note_status = kitsu_status + self.log.info("Note Kitsu status: {}".format(note_status)) + else: + self.log.info( + "Cannot find {} status. 
The status will not be " + "changed!".format(self.note_status_shortname) + ) + + # Add comment to kitsu task + self.log.debug( + "Add new note in taks id {}".format( + context.data["kitsu_task"]["id"] + ) + ) + kitsu_comment = gazu.task.add_comment( + context.data["kitsu_task"], note_status, comment=publish_comment + ) + + context.data["kitsu_comment"] = kitsu_comment diff --git a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py new file mode 100644 index 0000000000..bf80095225 --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +import gazu +import pyblish.api + + +class IntegrateKitsuReview(pyblish.api.InstancePlugin): + """Integrate Kitsu Review""" + + order = pyblish.api.IntegratorOrder + 0.01 + label = "Kitsu Review" + # families = ["kitsu"] + optional = True + + def process(self, instance): + + context = instance.context + task = context.data["kitsu_task"] + comment = context.data.get("kitsu_comment") + + # Check comment has been created + if not comment: + self.log.debug( + "Comment not created, review not pushed to preview." + ) + return + + # Add review representations as preview of comment + for representation in instance.data.get("representations", []): + # Skip if not tagged as review + if "review" not in representation.get("tags", []): + continue + + review_path = representation.get("published_path") + + self.log.debug("Found review at: {}".format(review_path)) + + gazu.task.add_preview( + task, comment, review_path, normalize_movie=True + ) + self.log.info("Review upload on comment") diff --git a/openpype/modules/kitsu/plugins/publish/other_kitsu_log_out.py b/openpype/modules/kitsu/plugins/publish/other_kitsu_log_out.py new file mode 100644 index 0000000000..c4a5b390e0 --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/other_kitsu_log_out.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +import gazu +import pyblish.api + + +class KitsuLogOut(pyblish.api.ContextPlugin): + """ + Log out from Kitsu API + """ + + order = pyblish.api.IntegratorOrder + 10 + label = "Kitsu Log Out" + + def process(self, context): + gazu.log_out() diff --git a/openpype/modules/kitsu/utils/__init__.py b/openpype/modules/kitsu/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/modules/kitsu/utils/credentials.py b/openpype/modules/kitsu/utils/credentials.py new file mode 100644 index 0000000000..adcfb07cd5 --- /dev/null +++ b/openpype/modules/kitsu/utils/credentials.py @@ -0,0 +1,107 @@ +"""Kitsu credentials functions.""" + +import os +from typing import Tuple +import gazu + +from openpype.lib.local_settings import OpenPypeSecureRegistry +from openpype.lib import emit_event + + +def validate_credentials( + login: str, password: str, kitsu_url: str = None +) -> bool: + """Validate credentials by trying to connect to Kitsu host URL. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + kitsu_url (str, optional): Kitsu host URL. Defaults to None. + + Returns: + bool: Are credentials valid? 
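Note: on success this also sets the gazu host and emits a "kitsu.user.logged" event.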
+ """ + if kitsu_url is None: + kitsu_url = os.environ.get("KITSU_SERVER") + + # Connect to server + validate_host(kitsu_url) + + # Authenticate + try: + gazu.log_in(login, password) + except gazu.exception.AuthFailedException: + return False + + emit_event("kitsu.user.logged", data={"username": login}, source="kitsu") + + return True + + +def validate_host(kitsu_url: str) -> bool: + """Validate credentials by trying to connect to Kitsu host URL. + + Args: + kitsu_url (str, optional): Kitsu host URL. + + Returns: + bool: Is host valid? + """ + # Connect to server + gazu.set_host(kitsu_url) + + # Test host + if gazu.client.host_is_valid(): + return True + else: + raise gazu.exception.HostException(f"Host '{kitsu_url}' is invalid.") + + +def clear_credentials(): + """Clear credentials in Secure Registry.""" + # Get user registry + user_registry = OpenPypeSecureRegistry("kitsu_user") + + # Set local settings + user_registry.delete_item("login") + user_registry.delete_item("password") + + +def save_credentials(login: str, password: str): + """Save credentials in Secure Registry. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + """ + # Get user registry + user_registry = OpenPypeSecureRegistry("kitsu_user") + + # Set local settings + user_registry.set_item("login", login) + user_registry.set_item("password", password) + + +def load_credentials() -> Tuple[str, str]: + """Load registered credentials. + + Returns: + Tuple[str, str]: (Login, Password) + """ + # Get user registry + user_registry = OpenPypeSecureRegistry("kitsu_user") + + return user_registry.get_item("login", None), user_registry.get_item( + "password", None + ) + + +def set_credentials_envs(login: str, password: str): + """Set environment variables with Kitsu login and password. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + """ + os.environ["KITSU_LOGIN"] = login + os.environ["KITSU_PWD"] = password diff --git a/openpype/modules/kitsu/utils/sync_service.py b/openpype/modules/kitsu/utils/sync_service.py new file mode 100644 index 0000000000..441b95a7ec --- /dev/null +++ b/openpype/modules/kitsu/utils/sync_service.py @@ -0,0 +1,403 @@ +import os + +import gazu + +from openpype.client import ( + get_project, + get_assets, + get_asset_by_name +) +from openpype.pipeline import AvalonMongoDB +from .credentials import validate_credentials +from .update_op_with_zou import ( + create_op_asset, + set_op_project, + get_kitsu_project_name, + write_project_to_op, + update_op_assets, +) + + +class Listener: + """Host Kitsu listener.""" + + def __init__(self, login, password): + """Create client and add listeners to events without starting it. + + Run `listener.start()` to actually start the service. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + + Raises: + AuthFailedException: Wrong user login and/or password + """ + self.dbcon = AvalonMongoDB() + self.dbcon.install() + + gazu.client.set_host(os.environ["KITSU_SERVER"]) + + # Authenticate + if not validate_credentials(login, password): + raise gazu.exception.AuthFailedException( + f"Kitsu authentication failed for login: '{login}'..." 
+ ) + + gazu.set_event_host( + os.environ["KITSU_SERVER"].replace("api", "socket.io") + ) + self.event_client = gazu.events.init() + + gazu.events.add_listener( + self.event_client, "project:new", self._new_project + ) + gazu.events.add_listener( + self.event_client, "project:update", self._update_project + ) + gazu.events.add_listener( + self.event_client, "project:delete", self._delete_project + ) + + gazu.events.add_listener( + self.event_client, "asset:new", self._new_asset + ) + gazu.events.add_listener( + self.event_client, "asset:update", self._update_asset + ) + gazu.events.add_listener( + self.event_client, "asset:delete", self._delete_asset + ) + + gazu.events.add_listener( + self.event_client, "episode:new", self._new_episode + ) + gazu.events.add_listener( + self.event_client, "episode:update", self._update_episode + ) + gazu.events.add_listener( + self.event_client, "episode:delete", self._delete_episode + ) + + gazu.events.add_listener( + self.event_client, "sequence:new", self._new_sequence + ) + gazu.events.add_listener( + self.event_client, "sequence:update", self._update_sequence + ) + gazu.events.add_listener( + self.event_client, "sequence:delete", self._delete_sequence + ) + + gazu.events.add_listener(self.event_client, "shot:new", self._new_shot) + gazu.events.add_listener( + self.event_client, "shot:update", self._update_shot + ) + gazu.events.add_listener( + self.event_client, "shot:delete", self._delete_shot + ) + + gazu.events.add_listener(self.event_client, "task:new", self._new_task) + gazu.events.add_listener( + self.event_client, "task:update", self._update_task + ) + gazu.events.add_listener( + self.event_client, "task:delete", self._delete_task + ) + + def start(self): + gazu.events.run_client(self.event_client) + + # == Project == + def _new_project(self, data): + """Create new project into OP DB.""" + + # Use update process to avoid duplicating code + self._update_project(data) + + def _update_project(self, data): + """Update project into OP DB.""" + # Get project entity + project = gazu.project.get_project(data["project_id"]) + project_name = project["name"] + + update_project = write_project_to_op(project, self.dbcon) + + # Write into DB + if update_project: + self.dbcon.Session["AVALON_PROJECT"] = project_name + self.dbcon.bulk_write([update_project]) + + def _delete_project(self, data): + """Delete project.""" + + project_name = get_kitsu_project_name(data["project_id"]) + + # Delete project collection + self.dbcon.database[project_name].drop() + + # == Asset == + + def _new_asset(self, data): + """Create new asset into OP DB.""" + # Get project entity + set_op_project(self.dbcon, data["project_id"]) + + # Get gazu entity + asset = gazu.asset.get_asset(data["asset_id"]) + + # Insert doc in DB + self.dbcon.insert_one(create_op_asset(asset)) + + # Update + self._update_asset(data) + + def _update_asset(self, data): + """Update asset into OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) + + # Get gazu entity + asset = gazu.asset.get_asset(data["asset_id"]) + + # Find asset doc + # Query all assets of the local project + zou_ids_and_asset_docs = { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou", {}).get("id") + } + zou_ids_and_asset_docs[asset["project_id"]] = project_doc + + # Update + update_op_result = update_op_assets( + self.dbcon, project_doc, [asset], zou_ids_and_asset_docs + ) + if 
update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + + def _delete_asset(self, data): + """Delete asset of OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + + # Delete + self.dbcon.delete_one( + {"type": "asset", "data.zou.id": data["asset_id"]} + ) + + # == Episode == + def _new_episode(self, data): + """Create new episode into OP DB.""" + # Get project entity + set_op_project(self.dbcon, data["project_id"]) + + # Get gazu entity + episode = gazu.shot.get_episode(data["episode_id"]) + + # Insert doc in DB + self.dbcon.insert_one(create_op_asset(episode)) + + # Update + self._update_episode(data) + + def _update_episode(self, data): + """Update episode into OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) + + # Get gazu entity + episode = gazu.shot.get_episode(data["episode_id"]) + + # Find asset doc + # Query all assets of the local project + zou_ids_and_asset_docs = { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou", {}).get("id") + } + zou_ids_and_asset_docs[episode["project_id"]] = project_doc + + # Update + update_op_result = update_op_assets( + self.dbcon, project_doc, [episode], zou_ids_and_asset_docs + ) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + + def _delete_episode(self, data): + """Delete shot of OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + print("delete episode") # TODO check bugfix + + # Delete + self.dbcon.delete_one( + {"type": "asset", "data.zou.id": data["episode_id"]} + ) + + # == Sequence == + def _new_sequence(self, data): + """Create new sequnce into OP DB.""" + # Get project entity + set_op_project(self.dbcon, data["project_id"]) + + # Get gazu entity + sequence = gazu.shot.get_sequence(data["sequence_id"]) + + # Insert doc in DB + self.dbcon.insert_one(create_op_asset(sequence)) + + # Update + self._update_sequence(data) + + def _update_sequence(self, data): + """Update sequence into OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) + + # Get gazu entity + sequence = gazu.shot.get_sequence(data["sequence_id"]) + + # Find asset doc + # Query all assets of the local project + zou_ids_and_asset_docs = { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou", {}).get("id") + } + zou_ids_and_asset_docs[sequence["project_id"]] = project_doc + + # Update + update_op_result = update_op_assets( + self.dbcon, project_doc, [sequence], zou_ids_and_asset_docs + ) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + + def _delete_sequence(self, data): + """Delete sequence of OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + print("delete sequence") # TODO check bugfix + + # Delete + self.dbcon.delete_one( + {"type": "asset", "data.zou.id": data["sequence_id"]} + ) + + # == Shot == + def _new_shot(self, data): + """Create new shot into OP DB.""" + # Get project entity + set_op_project(self.dbcon, data["project_id"]) + + # Get gazu entity + shot = gazu.shot.get_shot(data["shot_id"]) + + # Insert doc in DB + self.dbcon.insert_one(create_op_asset(shot)) + + # Update + 
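# Reuse the update handler so new and updated shots follow the same code path +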
self._update_shot(data) + + def _update_shot(self, data): + """Update shot into OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) + + # Get gazu entity + shot = gazu.shot.get_shot(data["shot_id"]) + + # Find asset doc + # Query all assets of the local project + zou_ids_and_asset_docs = { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou", {}).get("id") + } + zou_ids_and_asset_docs[shot["project_id"]] = project_doc + + # Update + update_op_result = update_op_assets( + self.dbcon, project_doc, [shot], zou_ids_and_asset_docs + ) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + + def _delete_shot(self, data): + """Delete shot of OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + + # Delete + self.dbcon.delete_one( + {"type": "asset", "data.zou.id": data["shot_id"]} + ) + + # == Task == + def _new_task(self, data): + """Create new task into OP DB.""" + # Get project entity + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + + # Get gazu entity + task = gazu.task.get_task(data["task_id"]) + + # Find asset doc + parent_name = task["entity"]["name"] + + asset_doc = get_asset_by_name(project_name, parent_name) + + # Update asset tasks with new one + asset_tasks = asset_doc["data"].get("tasks") + task_type_name = task["task_type"]["name"] + asset_tasks[task_type_name] = {"type": task_type_name, "zou": task} + self.dbcon.update_one( + {"_id": asset_doc["_id"]}, {"$set": {"data.tasks": asset_tasks}} + ) + + def _update_task(self, data): + """Update task into OP DB.""" + # TODO is it necessary? + pass + + def _delete_task(self, data): + """Delete task of OP DB.""" + + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + # Find asset doc + asset_docs = list(get_assets(project_name)) + for doc in asset_docs: + # Match task + for name, task in doc["data"]["tasks"].items(): + if task.get("zou") and data["task_id"] == task["zou"]["id"]: + # Pop task + asset_tasks = doc["data"].get("tasks", {}) + asset_tasks.pop(name) + + # Delete task in DB + self.dbcon.update_one( + {"_id": doc["_id"]}, + {"$set": {"data.tasks": asset_tasks}}, + ) + return + + +def start_listeners(login: str, password: str): + """Start listeners to keep OpenPype up-to-date with Kitsu. 
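Connects to the Kitsu event server and mirrors project, asset, episode, sequence, shot and task events into the OpenPype database; the call blocks while the listener runs.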
+ + Args: + login (str): Kitsu user login + password (str): Kitsu user password + """ + + # Connect to server + listener = Listener(login, password) + listener.start() diff --git a/openpype/modules/kitsu/utils/update_op_with_zou.py b/openpype/modules/kitsu/utils/update_op_with_zou.py new file mode 100644 index 0000000000..2d14b38bc4 --- /dev/null +++ b/openpype/modules/kitsu/utils/update_op_with_zou.py @@ -0,0 +1,445 @@ +"""Functions to update OpenPype data using Kitsu DB (a.k.a Zou).""" +from copy import deepcopy +import re +from typing import Dict, List + +from pymongo import DeleteOne, UpdateOne +import gazu +from gazu.task import ( + all_tasks_for_asset, + all_tasks_for_shot, +) + +from openpype.client import ( + get_project, + get_assets, + get_asset_by_id, + get_asset_by_name, + create_project, +) +from openpype.pipeline import AvalonMongoDB +from openpype.settings import get_project_settings +from openpype.modules.kitsu.utils.credentials import validate_credentials + +from openpype.lib import Logger + +log = Logger.get_logger(__name__) + +# Accepted namin pattern for OP +naming_pattern = re.compile("^[a-zA-Z0-9_.]*$") + + +def create_op_asset(gazu_entity: dict) -> dict: + """Create OP asset dict from gazu entity. + + :param gazu_entity: + """ + return { + "name": gazu_entity["name"], + "type": "asset", + "schema": "openpype:asset-3.0", + "data": {"zou": gazu_entity, "tasks": {}}, + } + + +def get_kitsu_project_name(project_id: str) -> str: + """Get project name based on project id in kitsu. + + Args: + project_id (str): UUID of project in Kitsu. + + Returns: + str: Name of Kitsu project. + """ + + project = gazu.project.get_project(project_id) + return project["name"] + + +def set_op_project(dbcon: AvalonMongoDB, project_id: str): + """Set project context. + + Args: + dbcon (AvalonMongoDB): Connection to DB + project_id (str): Project zou ID + """ + + dbcon.Session["AVALON_PROJECT"] = get_kitsu_project_name(project_id) + + +def update_op_assets( + dbcon: AvalonMongoDB, + project_doc: dict, + entities_list: List[dict], + asset_doc_ids: Dict[str, dict], +) -> List[Dict[str, dict]]: + """Update OpenPype assets. + Set 'data' and 'parent' fields. + + Args: + dbcon (AvalonMongoDB): Connection to DB + entities_list (List[dict]): List of zou entities to update + asset_doc_ids (Dict[str, dict]): Dicts of [{zou_id: asset_doc}, ...] 
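project_doc (dict): OpenPype project document the entities belong to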
+ + Returns: + List[Dict[str, dict]]: List of (doc_id, update_dict) tuples + """ + project_name = project_doc["name"] + project_module_settings = get_project_settings(project_name)["kitsu"] + + assets_with_update = [] + for item in entities_list: + # Check asset exists + item_doc = asset_doc_ids.get(item["id"]) + if not item_doc: # Create asset + op_asset = create_op_asset(item) + insert_result = dbcon.insert_one(op_asset) + item_doc = get_asset_by_id(project_name, insert_result.inserted_id) + + # Update asset + item_data = deepcopy(item_doc["data"]) + item_data.update(item.get("data") or {}) + item_data["zou"] = item + + # == Asset settings == + # Frame in, fallback to project's value or default value (1001) + # TODO: get default from settings/project_anatomy/attributes.json + try: + frame_in = int( + item_data.pop( + "frame_in", project_doc["data"].get("frameStart") + ) + ) + except (TypeError, ValueError): + frame_in = 1001 + item_data["frameStart"] = frame_in + # Frames duration, fallback on 0 + try: + # NOTE nb_frames is stored directly in item + # because of zou's legacy design + frames_duration = int(item.get("nb_frames", 0)) + except (TypeError, ValueError): + frames_duration = 0 + # Frame out, fallback on frame_in + duration or project's value or 1001 + frame_out = item_data.pop("frame_out", None) + if not frame_out: + frame_out = frame_in + frames_duration + try: + frame_out = int(frame_out) + except (TypeError, ValueError): + frame_out = 1001 + item_data["frameEnd"] = frame_out + # Fps, fallback to project's value or default value (25.0) + try: + fps = float(item_data.get("fps", project_doc["data"].get("fps"))) + except (TypeError, ValueError): + fps = 25.0 + item_data["fps"] = fps + + # Tasks + tasks_list = [] + item_type = item["type"] + if item_type == "Asset": + tasks_list = all_tasks_for_asset(item) + elif item_type == "Shot": + tasks_list = all_tasks_for_shot(item) + item_data["tasks"] = { + t["task_type_name"]: {"type": t["task_type_name"], "zou": t} + for t in tasks_list + } + + # Get zou parent id for correct hierarchy + # Use parent substitutes if existing + substitute_parent_item = ( + item_data["parent_substitutes"][0] + if item_data.get("parent_substitutes") + else None + ) + if substitute_parent_item: + parent_zou_id = substitute_parent_item["parent_id"] + else: + parent_zou_id = ( + # For Asset, put under asset type directory + item.get("entity_type_id") + if item_type == "Asset" + else None + # Else, fallback on usual hierarchy + or item.get("parent_id") + or item.get("episode_id") + or item.get("source_id") + ) + + # Substitute item type for general classification (assets or shots) + if item_type in ["Asset", "AssetType"]: + entity_root_asset_name = "Assets" + elif item_type in ["Episode", "Sequence", "Shot"]: + entity_root_asset_name = "Shots" + + # Root parent folder if exist + visual_parent_doc_id = ( + asset_doc_ids[parent_zou_id]["_id"] if parent_zou_id else None + ) + if visual_parent_doc_id is None: + # Find root folder doc ("Assets" or "Shots") + root_folder_doc = get_asset_by_name( + project_name, + asset_name=entity_root_asset_name, + fields=["_id", "data.root_of"], + ) + + if root_folder_doc: + visual_parent_doc_id = root_folder_doc["_id"] + + # Visual parent for hierarchy + item_data["visualParent"] = visual_parent_doc_id + + # Add parents for hierarchy + item_data["parents"] = [] + ancestor_id = parent_zou_id + while ancestor_id is not None: + parent_doc = asset_doc_ids[ancestor_id] + item_data["parents"].insert(0, parent_doc["name"]) + + # Get 
parent entity + parent_entity = parent_doc["data"]["zou"] + ancestor_id = parent_entity.get("parent_id") + + # Build OpenPype compatible name + if item_type in ["Shot", "Sequence"] and parent_zou_id is not None: + # Name with parents hierarchy "({episode}_){sequence}_{shot}" + # to avoid duplicate name issue + item_name = f"{item_data['parents'][-1]}_{item['name']}" + + # Update doc name + asset_doc_ids[item["id"]]["name"] = item_name + else: + item_name = item["name"] + + # Set root folders parents + item_data["parents"] = [entity_root_asset_name] + item_data["parents"] + + # Update 'data' different in zou DB + updated_data = { + k: v for k, v in item_data.items() if item_doc["data"].get(k) != v + } + if updated_data or not item_doc.get("parent"): + assets_with_update.append( + ( + item_doc["_id"], + { + "$set": { + "name": item_name, + "data": item_data, + "parent": project_doc["_id"], + } + }, + ) + ) + return assets_with_update + + +def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne: + """Write gazu project to OP database. + Create project if doesn't exist. + + Args: + project (dict): Gazu project + dbcon (AvalonMongoDB): DB to create project in + + Returns: + UpdateOne: Update instance for the project + """ + project_name = project["name"] + project_doc = get_project(project_name) + if not project_doc: + log.info(f"Creating project '{project_name}'") + project_doc = create_project(project_name, project_name) + + # Project data and tasks + project_data = project_doc["data"] or {} + + # Build project code and update Kitsu + project_code = project.get("code") + if not project_code: + project_code = project["name"].replace(" ", "_").lower() + project["code"] = project_code + + # Update Zou + gazu.project.update_project(project) + + # Update data + project_data.update( + { + "code": project_code, + "fps": float(project["fps"]), + "zou_id": project["id"], + } + ) + + match_res = re.match(r"(\d+)x(\d+)", project["resolution"]) + if match_res: + project_data["resolutionWidth"] = int(match_res.group(1)) + project_data["resolutionHeight"] = int(match_res.group(2)) + else: + log.warning( + f"'{project['resolution']}' does not match the expected" + " format for the resolution, for example: 1920x1080" + ) + + return UpdateOne( + {"_id": project_doc["_id"]}, + { + "$set": { + "config.tasks": { + t["name"]: {"short_name": t.get("short_name", t["name"])} + for t in gazu.task.all_task_types_for_project(project) + or gazu.task.all_task_types() + }, + "data": project_data, + } + }, + ) + + +def sync_all_projects(login: str, password: str, ignore_projects: list = None): + """Update all OP projects in DB with Zou data. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + ignore_projects (list): List of unsynced project names + Raises: + gazu.exception.AuthFailedException: Wrong user login and/or password + """ + + # Authenticate + if not validate_credentials(login, password): + raise gazu.exception.AuthFailedException( + f"Kitsu authentication failed for login: '{login}'..." + ) + + # Iterate projects + dbcon = AvalonMongoDB() + dbcon.install() + all_projects = gazu.project.all_open_projects() + for project in all_projects: + if ignore_projects and project["name"] in ignore_projects: + continue + sync_project_from_kitsu(dbcon, project) + + +def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): + """Update OP project in DB with Zou data. + + `root_of` is meant to sort entities by type for a better readability in + the data tree. 
It puts all shot like (Shot and Episode and Sequence) and + asset entities under two different root folders or hierarchy, defined in + settings. + + Args: + dbcon (AvalonMongoDB): MongoDB connection + project (dict): Project dict got using gazu. + """ + bulk_writes = [] + + # Get project from zou + if not project: + project = gazu.project.get_project_by_name(project["name"]) + + log.info(f"Synchronizing {project['name']}...") + + # Get all assets from zou + all_assets = gazu.asset.all_assets_for_project(project) + all_asset_types = gazu.asset.all_asset_types_for_project(project) + all_episodes = gazu.shot.all_episodes_for_project(project) + all_seqs = gazu.shot.all_sequences_for_project(project) + all_shots = gazu.shot.all_shots_for_project(project) + all_entities = [ + item + for item in all_assets + + all_asset_types + + all_episodes + + all_seqs + + all_shots + if naming_pattern.match(item["name"]) + ] + + # Sync project. Create if doesn't exist + bulk_writes.append(write_project_to_op(project, dbcon)) + + # Try to find project document + project_name = project["name"] + dbcon.Session["AVALON_PROJECT"] = project_name + project_doc = get_project(project_name) + + # Query all assets of the local project + zou_ids_and_asset_docs = { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou", {}).get("id") + } + zou_ids_and_asset_docs[project["id"]] = project_doc + + # Create entities root folders + to_insert = [ + { + "name": r, + "type": "asset", + "schema": "openpype:asset-3.0", + "data": { + "root_of": r, + "tasks": {}, + }, + } + for r in ["Assets", "Shots"] + if not get_asset_by_name( + project_name, r, fields=["_id", "data.root_of"] + ) + ] + + # Create + to_insert.extend( + [ + create_op_asset(item) + for item in all_entities + if item["id"] not in zou_ids_and_asset_docs.keys() + ] + ) + if to_insert: + # Insert doc in DB + dbcon.insert_many(to_insert) + + # Update existing docs + zou_ids_and_asset_docs.update( + { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou") + } + ) + + # Update + bulk_writes.extend( + [ + UpdateOne({"_id": id}, update) + for id, update in update_op_assets( + dbcon, project_doc, all_entities, zou_ids_and_asset_docs + ) + ] + ) + + # Delete + diff_assets = set(zou_ids_and_asset_docs.keys()) - { + e["id"] for e in all_entities + [project] + } + if diff_assets: + bulk_writes.extend( + [ + DeleteOne(zou_ids_and_asset_docs[asset_id]) + for asset_id in diff_assets + ] + ) + + # Write into DB + if bulk_writes: + dbcon.bulk_write(bulk_writes) diff --git a/openpype/modules/kitsu/utils/update_zou_with_op.py b/openpype/modules/kitsu/utils/update_zou_with_op.py new file mode 100644 index 0000000000..39baf31b93 --- /dev/null +++ b/openpype/modules/kitsu/utils/update_zou_with_op.py @@ -0,0 +1,265 @@ +"""Functions to update Kitsu DB (a.k.a Zou) using OpenPype Data.""" + +import re +from typing import List + +import gazu +from pymongo import UpdateOne + +from openpype.client import ( + get_projects, + get_project, + get_assets, +) +from openpype.pipeline import AvalonMongoDB +from openpype.settings import get_project_settings +from openpype.modules.kitsu.utils.credentials import validate_credentials + + +def sync_zou(login: str, password: str): + """Synchronize Zou database (Kitsu backend) with openpype database. 
+ This is an utility function to help updating zou data with OP's, it may not + handle correctly all cases, a human intervention might + be required after all. + Will work better if OP DB has been previously synchronized from zou/kitsu. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + + Raises: + gazu.exception.AuthFailedException: Wrong user login and/or password + """ + + # Authenticate + if not validate_credentials(login, password): + raise gazu.exception.AuthFailedException( + f"Kitsu authentication failed for login: '{login}'..." + ) + + # Iterate projects + dbcon = AvalonMongoDB() + dbcon.install() + + op_projects = list(get_projects()) + for project_doc in op_projects: + sync_zou_from_op_project(project_doc["name"], dbcon, project_doc) + + +def sync_zou_from_op_project( + project_name: str, dbcon: AvalonMongoDB, project_doc: dict = None +) -> List[UpdateOne]: + """Update OP project in DB with Zou data. + + Args: + project_name (str): Name of project to sync + dbcon (AvalonMongoDB): MongoDB connection + project_doc (str, optional): Project doc to sync + """ + # Get project doc if not provided + if not project_doc: + project_doc = get_project(project_name) + + # Get all entities from zou + print(f"Synchronizing {project_name}...") + zou_project = gazu.project.get_project_by_name(project_name) + + # Create project + if zou_project is None: + raise RuntimeError( + f"Project '{project_name}' doesn't exist in Zou database, " + "please create it in Kitsu and add OpenPype user to it before " + "running synchronization." + ) + + # Update project settings and data + if project_doc["data"]: + zou_project.update( + { + "code": project_doc["data"]["code"], + "fps": project_doc["data"]["fps"], + "resolution": f"{project_doc['data']['resolutionWidth']}" + f"x{project_doc['data']['resolutionHeight']}", + } + ) + gazu.project.update_project_data(zou_project, data=project_doc["data"]) + gazu.project.update_project(zou_project) + + asset_types = gazu.asset.all_asset_types() + all_assets = gazu.asset.all_assets_for_project(zou_project) + all_episodes = gazu.shot.all_episodes_for_project(zou_project) + all_seqs = gazu.shot.all_sequences_for_project(zou_project) + all_shots = gazu.shot.all_shots_for_project(zou_project) + all_entities_ids = { + e["id"] for e in all_episodes + all_seqs + all_shots + all_assets + } + + # Query all assets of the local project + project_module_settings = get_project_settings(project_name)["kitsu"] + dbcon.Session["AVALON_PROJECT"] = project_name + asset_docs = { + asset_doc["_id"]: asset_doc + for asset_doc in get_assets(project_name) + } + + # Create new assets + new_assets_docs = [ + doc + for doc in asset_docs.values() + if doc["data"].get("zou", {}).get("id") not in all_entities_ids + ] + naming_pattern = project_module_settings["entities_naming_pattern"] + regex_ep = re.compile( + r"(.*{}.*)|(.*{}.*)|(.*{}.*)".format( + naming_pattern["shot"].replace("#", ""), + naming_pattern["sequence"].replace("#", ""), + naming_pattern["episode"].replace("#", ""), + ), + re.IGNORECASE, + ) + bulk_writes = [] + for doc in new_assets_docs: + visual_parent_id = doc["data"]["visualParent"] + parent_substitutes = [] + + # Match asset type by it's name + match = regex_ep.match(doc["name"]) + if not match: # Asset + new_entity = gazu.asset.new_asset( + zou_project, asset_types[0], doc["name"] + ) + # Match case in shot -" - " {{ {loggerName} }}: [" + " {{ {logger_name} }}: [" " {message}" " ]" ) @@ -299,7 +300,7 @@ class OutputWidget(QtWidgets.QWidget): 
elif level == "warning": line_f = ( "*** WRN:" - " >>> {{ {loggerName} }}: [" + " >>> {{ {logger_name} }}: [" " {message}" " ]" ) @@ -307,16 +308,25 @@ class OutputWidget(QtWidgets.QWidget): line_f = ( "!!! ERR:" " {timestamp}" - " >>> {{ {loggerName} }}: [" + " >>> {{ {logger_name} }}: [" " {message}" " ]" ) + logger_name = log["loggerName"] + timestamp = "" + if not show_timecode: + timestamp = log["timestamp"] + message = log["message"] exc = log.get("exception") if exc: - log["message"] = exc["message"] + message = exc["message"] - line = line_f.format(**log) + line = line_f.format( + message=html.escape(message), + logger_name=logger_name, + timestamp=timestamp + ) if show_timecode: timestamp = log["timestamp"] diff --git a/openpype/modules/muster/muster.py b/openpype/modules/muster/muster.py index 6e26ad2d7b..8d395d16e8 100644 --- a/openpype/modules/muster/muster.py +++ b/openpype/modules/muster/muster.py @@ -2,8 +2,7 @@ import os import json import appdirs import requests -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayModule +from openpype.modules import OpenPypeModule, ITrayModule class MusterModule(OpenPypeModule, ITrayModule): diff --git a/openpype/modules/project_manager_action.py b/openpype/modules/project_manager_action.py index 251964a059..5f74dd9ee5 100644 --- a/openpype/modules/project_manager_action.py +++ b/openpype/modules/project_manager_action.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import OpenPypeModule, ITrayAction class ProjectManagerAction(OpenPypeModule, ITrayAction): diff --git a/openpype/modules/python_console_interpreter/module.py b/openpype/modules/python_console_interpreter/module.py index 8c4a2fba73..cb99c05e37 100644 --- a/openpype/modules/python_console_interpreter/module.py +++ b/openpype/modules/python_console_interpreter/module.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import OpenPypeModule, ITrayAction class PythonInterpreterAction(OpenPypeModule, ITrayAction): diff --git a/openpype/modules/royalrender/api.py b/openpype/modules/royalrender/api.py index ed9e71f240..de1dba8724 100644 --- a/openpype/modules/royalrender/api.py +++ b/openpype/modules/royalrender/api.py @@ -5,13 +5,10 @@ import os from openpype.settings import get_project_settings from openpype.lib.local_settings import OpenPypeSettingsRegistry -from openpype.lib import PypeLogger, run_subprocess +from openpype.lib import Logger, run_subprocess from .rr_job import RRJob, SubmitFile, SubmitterParameter -log = PypeLogger.get_logger("RoyalRender") - - class Api: _settings = None @@ -19,6 +16,7 @@ class Api: RR_SUBMIT_API = 2 def __init__(self, settings, project=None): + self.log = Logger.get_logger("RoyalRender") self._settings = settings self._initialize_rr(project) @@ -137,7 +135,7 @@ class Api: rr_console += ".exe" args = [rr_console, file] - run_subprocess(" ".join(args), logger=log) + run_subprocess(" ".join(args), logger=self.log) def _submit_using_api(self, file): # type: (SubmitFile) -> None @@ -159,11 +157,11 @@ class Api: rr_server = tcp.getRRServer() if len(rr_server) == 0: - log.info("Got RR IP address {}".format(rr_server)) + self.log.info("Got RR IP address {}".format(rr_server)) # TODO: Port is hardcoded in RR? 
If not, move it to Settings if not tcp.setServer(rr_server, 7773): - log.error( + self.log.error( "Can not set RR server: {}".format(tcp.errorMessage())) raise RoyalRenderException(tcp.errorMessage()) diff --git a/openpype/modules/royalrender/royal_render_module.py b/openpype/modules/royalrender/royal_render_module.py index 4f72860ad6..10d74d01d1 100644 --- a/openpype/modules/royalrender/royal_render_module.py +++ b/openpype/modules/royalrender/royal_render_module.py @@ -2,8 +2,7 @@ """Module providing support for Royal Render.""" import os import openpype.modules -from openpype.modules import OpenPypeModule -from openpype_interfaces import IPluginPaths +from openpype.modules import OpenPypeModule, IPluginPaths class RoyalRenderModule(OpenPypeModule, IPluginPaths): diff --git a/openpype/modules/settings_action.py b/openpype/modules/settings_action.py index 2b4b51e3ad..1902caff1d 100644 --- a/openpype/modules/settings_action.py +++ b/openpype/modules/settings_action.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import OpenPypeModule, ITrayAction class SettingsAction(OpenPypeModule, ITrayAction): @@ -23,6 +22,11 @@ class SettingsAction(OpenPypeModule, ITrayAction): """Initialization in tray implementation of ITrayAction.""" self.create_settings_window() + def tray_exit(self): + # Close settings UI to remove settings lock + if self.settings_window: + self.settings_window.close() + def on_action_trigger(self): """Implementation for action trigger of ITrayAction.""" self.show_settings_window() diff --git a/openpype/modules/shotgrid/README.md b/openpype/modules/shotgrid/README.md new file mode 100644 index 0000000000..cbee0e9bf4 --- /dev/null +++ b/openpype/modules/shotgrid/README.md @@ -0,0 +1,19 @@ +## Shotgrid Module + +### Pre-requisites + +Install and launch a [shotgrid leecher](https://github.com/Ellipsanime/shotgrid-leecher) server + +### Quickstart + +The goal of this tutorial is to synchronize an already existing shotgrid project with OpenPype. 
+ +- Activate the shotgrid module in the **system settings** and enter the shotgrid leecher server API URL + +- Create a new OpenPype project with the **project manager** + +- Enter the shotgrid authentication details (URL, script name, API key) and the shotgrid project ID related to this OpenPype project in the **project settings** + +- Use the batch interface (Tray > shotgrid > Launch batch), select your project and click "batch" + +- You can now access your shotgrid entities within the **avalon launcher** and publish information to shotgrid with **pyblish** diff --git a/openpype/modules/shotgrid/__init__.py b/openpype/modules/shotgrid/__init__.py new file mode 100644 index 0000000000..f1337a9492 --- /dev/null +++ b/openpype/modules/shotgrid/__init__.py @@ -0,0 +1,5 @@ +from .shotgrid_module import ( + ShotgridModule, +) + +__all__ = ("ShotgridModule",) diff --git a/openpype/modules/shotgrid/lib/__init__.py b/openpype/modules/shotgrid/lib/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/modules/shotgrid/lib/const.py b/openpype/modules/shotgrid/lib/const.py new file mode 100644 index 0000000000..2a34800fac --- /dev/null +++ b/openpype/modules/shotgrid/lib/const.py @@ -0,0 +1 @@ +MODULE_NAME = "shotgrid" diff --git a/openpype/modules/shotgrid/lib/credentials.py b/openpype/modules/shotgrid/lib/credentials.py new file mode 100644 index 0000000000..337c4f6ecb --- /dev/null +++ b/openpype/modules/shotgrid/lib/credentials.py @@ -0,0 +1,125 @@ + +from urllib.parse import urlparse + +import shotgun_api3 +from shotgun_api3.shotgun import AuthenticationFault + +from openpype.lib import OpenPypeSecureRegistry, OpenPypeSettingsRegistry +from openpype.modules.shotgrid.lib.record import Credentials + + +def _get_shotgrid_secure_key(hostname, key): + """Secure item key for entered hostname.""" + return f"shotgrid/{hostname}/{key}" + + +def _get_secure_value_and_registry( + hostname, + name, +): + key = _get_shotgrid_secure_key(hostname, name) + registry = OpenPypeSecureRegistry(key) + return registry.get_item(name, None), registry + + +def get_shotgrid_hostname(shotgrid_url): + + if not shotgrid_url: + raise Exception("Shotgrid url cannot be a null") + valid_shotgrid_url = ( + f"//{shotgrid_url}" if "//" not in shotgrid_url else shotgrid_url + ) + return urlparse(valid_shotgrid_url).hostname + + +# Credentials storing function (using keyring) + + +def get_credentials(shotgrid_url): + hostname = get_shotgrid_hostname(shotgrid_url) + if not hostname: + return None + login_value, _ = _get_secure_value_and_registry( + hostname, + Credentials.login_key_prefix(), + ) + password_value, _ = _get_secure_value_and_registry( + hostname, + Credentials.password_key_prefix(), + ) + return Credentials(login_value, password_value) + + +def save_credentials(login, password, shotgrid_url): + hostname = get_shotgrid_hostname(shotgrid_url) + _, login_registry = _get_secure_value_and_registry( + hostname, + Credentials.login_key_prefix(), + ) + _, password_registry = _get_secure_value_and_registry( + hostname, + Credentials.password_key_prefix(), + ) + clear_credentials(shotgrid_url) + login_registry.set_item(Credentials.login_key_prefix(), login) + password_registry.set_item(Credentials.password_key_prefix(), password) + + +def clear_credentials(shotgrid_url): + hostname = get_shotgrid_hostname(shotgrid_url) + login_value, login_registry = _get_secure_value_and_registry( + hostname, + Credentials.login_key_prefix(), + ) + password_value, password_registry =
_get_secure_value_and_registry( + hostname, + Credentials.password_key_prefix(), + ) + + if login_value is not None: + login_registry.delete_item(Credentials.login_key_prefix()) + + if password_value is not None: + password_registry.delete_item(Credentials.password_key_prefix()) + + +# Login storing function (using json) + + +def get_local_login(): + reg = OpenPypeSettingsRegistry() + try: + return str(reg.get_item("shotgrid_login")) + except Exception: + return None + + +def save_local_login(login): + reg = OpenPypeSettingsRegistry() + reg.set_item("shotgrid_login", login) + + +def clear_local_login(): + reg = OpenPypeSettingsRegistry() + reg.delete_item("shotgrid_login") + + +def check_credentials( + login, + password, + shotgrid_url, +): + + if not shotgrid_url or not login or not password: + return False + try: + session = shotgun_api3.Shotgun( + shotgrid_url, + login=login, + password=password, + ) + session.preferences_read() + session.close() + except AuthenticationFault: + return False + return True diff --git a/openpype/modules/shotgrid/lib/record.py b/openpype/modules/shotgrid/lib/record.py new file mode 100644 index 0000000000..f62f4855d5 --- /dev/null +++ b/openpype/modules/shotgrid/lib/record.py @@ -0,0 +1,20 @@ + +class Credentials: + login = None + password = None + + def __init__(self, login, password) -> None: + super().__init__() + self.login = login + self.password = password + + def is_empty(self): + return not (self.login and self.password) + + @staticmethod + def login_key_prefix(): + return "login" + + @staticmethod + def password_key_prefix(): + return "password" diff --git a/openpype/modules/shotgrid/lib/settings.py b/openpype/modules/shotgrid/lib/settings.py new file mode 100644 index 0000000000..5b0b728f55 --- /dev/null +++ b/openpype/modules/shotgrid/lib/settings.py @@ -0,0 +1,18 @@ +from openpype.settings import get_system_settings, get_project_settings +from openpype.modules.shotgrid.lib.const import MODULE_NAME + + +def get_shotgrid_project_settings(project): + return get_project_settings(project).get(MODULE_NAME, {}) + + +def get_shotgrid_settings(): + return get_system_settings().get("modules", {}).get(MODULE_NAME, {}) + + +def get_shotgrid_servers(): + return get_shotgrid_settings().get("shotgrid_settings", {}) + + +def get_leecher_backend_url(): + return get_shotgrid_settings().get("leecher_backend_url") diff --git a/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py new file mode 100644 index 0000000000..0b03ac2e5d --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py @@ -0,0 +1,100 @@ +import os + +import pyblish.api +from openpype.lib.mongo import OpenPypeMongoConnection + + +class CollectShotgridEntities(pyblish.api.ContextPlugin): + """Collect shotgrid entities according to the current context""" + + order = pyblish.api.CollectorOrder + 0.499 + label = "Shotgrid entities" + + def process(self, context): + + avalon_project = context.data.get("projectEntity") + avalon_asset = context.data.get("assetEntity") + avalon_task_name = os.getenv("AVALON_TASK") + + self.log.info(avalon_project) + self.log.info(avalon_asset) + + sg_project = _get_shotgrid_project(context) + sg_task = _get_shotgrid_task( + avalon_project, + avalon_asset, + avalon_task_name + ) + sg_entity = _get_shotgrid_entity(avalon_project, avalon_asset) + + if sg_project: + context.data["shotgridProject"] = sg_project + self.log.info( + "Collected correspondig 
shotgrid project : {}".format( + sg_project + ) + ) + + if sg_task: + context.data["shotgridTask"] = sg_task + self.log.info( + "Collected correspondig shotgrid task : {}".format(sg_task) + ) + + if sg_entity: + context.data["shotgridEntity"] = sg_entity + self.log.info( + "Collected correspondig shotgrid entity : {}".format(sg_entity) + ) + + def _find_existing_version(self, code, context): + + filters = [ + ["project", "is", context.data.get("shotgridProject")], + ["sg_task", "is", context.data.get("shotgridTask")], + ["entity", "is", context.data.get("shotgridEntity")], + ["code", "is", code], + ] + + sg = context.data.get("shotgridSession") + return sg.find_one("Version", filters, []) + + +def _get_shotgrid_collection(project): + client = OpenPypeMongoConnection.get_mongo_client() + return client.get_database("shotgrid_openpype").get_collection(project) + + +def _get_shotgrid_project(context): + shotgrid_project_id = context.data["project_settings"].get( + "shotgrid_project_id") + if shotgrid_project_id: + return {"type": "Project", "id": shotgrid_project_id} + return {} + + +def _get_shotgrid_task(avalon_project, avalon_asset, avalon_task): + sg_col = _get_shotgrid_collection(avalon_project["name"]) + shotgrid_task_hierarchy_row = sg_col.find_one( + { + "type": "Task", + "_id": {"$regex": "^" + avalon_task + "_[0-9]*"}, + "parent": {"$regex": ".*," + avalon_asset["name"] + ","}, + } + ) + if shotgrid_task_hierarchy_row: + return {"type": "Task", "id": shotgrid_task_hierarchy_row["src_id"]} + return {} + + +def _get_shotgrid_entity(avalon_project, avalon_asset): + sg_col = _get_shotgrid_collection(avalon_project["name"]) + shotgrid_entity_hierarchy_row = sg_col.find_one( + {"_id": avalon_asset["name"]} + ) + if shotgrid_entity_hierarchy_row: + return { + "type": shotgrid_entity_hierarchy_row["type"], + "id": shotgrid_entity_hierarchy_row["src_id"], + } + return {} diff --git a/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py new file mode 100644 index 0000000000..9d5d2271bf --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py @@ -0,0 +1,123 @@ +import os + +import pyblish.api +import shotgun_api3 +from shotgun_api3.shotgun import AuthenticationFault + +from openpype.lib import OpenPypeSettingsRegistry +from openpype.modules.shotgrid.lib.settings import ( + get_shotgrid_servers, + get_shotgrid_project_settings, +) + + +class CollectShotgridSession(pyblish.api.ContextPlugin): + """Collect shotgrid session using user credentials""" + + order = pyblish.api.CollectorOrder + label = "Shotgrid user session" + + def process(self, context): + + certificate_path = os.getenv("SHOTGUN_API_CACERTS") + if certificate_path is None or not os.path.exists(certificate_path): + self.log.info( + "SHOTGUN_API_CACERTS does not contains a valid \ + path: {}".format( + certificate_path + ) + ) + certificate_path = get_shotgrid_certificate() + self.log.info("Get Certificate from shotgrid_api") + + if not os.path.exists(certificate_path): + self.log.error( + "Could not find certificate in shotgun_api3: \ + {}".format( + certificate_path + ) + ) + return + + set_shotgrid_certificate(certificate_path) + self.log.info("Set Certificate: {}".format(certificate_path)) + + avalon_project = os.getenv("AVALON_PROJECT") + + shotgrid_settings = get_shotgrid_project_settings(avalon_project) + self.log.info("shotgrid settings: {}".format(shotgrid_settings)) + shotgrid_servers_settings = 
get_shotgrid_servers() + self.log.info( + "shotgrid_servers_settings: {}".format(shotgrid_servers_settings) + ) + + shotgrid_server = shotgrid_settings.get("shotgrid_server", "") + if not shotgrid_server: + self.log.error( + "No Shotgrid server found, please choose a credential" + "in script name and script key in OpenPype settings" + ) + + shotgrid_server_setting = shotgrid_servers_settings.get( + shotgrid_server, {} + ) + shotgrid_url = shotgrid_server_setting.get("shotgrid_url", "") + + shotgrid_script_name = shotgrid_server_setting.get( + "shotgrid_script_name", "" + ) + shotgrid_script_key = shotgrid_server_setting.get( + "shotgrid_script_key", "" + ) + if not shotgrid_script_name and not shotgrid_script_key: + self.log.error( + "No Shotgrid api credential found, please enter " + "script name and script key in OpenPype settings" + ) + + login = get_login() or os.getenv("OPENPYPE_SG_USER") + + if not login: + self.log.error( + "No Shotgrid login found, please " + "login to shotgrid withing openpype Tray" + ) + + session = shotgun_api3.Shotgun( + base_url=shotgrid_url, + script_name=shotgrid_script_name, + api_key=shotgrid_script_key, + sudo_as_login=login, + ) + + try: + session.preferences_read() + except AuthenticationFault: + raise ValueError( + "Could not connect to shotgrid {} with user {}".format( + shotgrid_url, login + ) + ) + + self.log.info( + "Logged to shotgrid {} with user {}".format(shotgrid_url, login) + ) + context.data["shotgridSession"] = session + context.data["shotgridUser"] = login + + +def get_shotgrid_certificate(): + shotgun_api_path = os.path.dirname(shotgun_api3.__file__) + return os.path.join(shotgun_api_path, "lib", "certifi", "cacert.pem") + + +def set_shotgrid_certificate(certificate): + os.environ["SHOTGUN_API_CACERTS"] = certificate + + +def get_login(): + reg = OpenPypeSettingsRegistry() + try: + return str(reg.get_item("shotgrid_login")) + except Exception: + return None diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py new file mode 100644 index 0000000000..cfd2d10fd9 --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py @@ -0,0 +1,77 @@ +import os +import pyblish.api + + +class IntegrateShotgridPublish(pyblish.api.InstancePlugin): + """ + Create published Files from representations and add it to version. If + representation is tagged add shotgrid review, it will add it in + path to movie for a movie file or path to frame for an image sequence. 
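Note that representations carrying any tags are skipped and are not registered as published files.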
+ """ + + order = pyblish.api.IntegratorOrder + 0.499 + label = "Shotgrid Published Files" + + def process(self, instance): + + context = instance.context + + self.sg = context.data.get("shotgridSession") + + shotgrid_version = instance.data.get("shotgridVersion") + + for representation in instance.data.get("representations", []): + + local_path = representation.get("published_path") + code = os.path.basename(local_path) + + if representation.get("tags", []): + continue + + published_file = self._find_existing_publish( + code, context, shotgrid_version + ) + + published_file_data = { + "project": context.data.get("shotgridProject"), + "code": code, + "entity": context.data.get("shotgridEntity"), + "task": context.data.get("shotgridTask"), + "version": shotgrid_version, + "path": {"local_path": local_path}, + } + if not published_file: + published_file = self._create_published(published_file_data) + self.log.info( + "Create Shotgrid PublishedFile: {}".format(published_file) + ) + else: + self.sg.update( + published_file["type"], + published_file["id"], + published_file_data, + ) + self.log.info( + "Update Shotgrid PublishedFile: {}".format(published_file) + ) + + if instance.data["family"] == "image": + self.sg.upload_thumbnail( + published_file["type"], published_file["id"], local_path + ) + instance.data["shotgridPublishedFile"] = published_file + + def _find_existing_publish(self, code, context, shotgrid_version): + + filters = [ + ["project", "is", context.data.get("shotgridProject")], + ["task", "is", context.data.get("shotgridTask")], + ["entity", "is", context.data.get("shotgridEntity")], + ["version", "is", shotgrid_version], + ["code", "is", code], + ] + return self.sg.find_one("PublishedFile", filters, []) + + def _create_published(self, published_file_data): + + return self.sg.create("PublishedFile", published_file_data) diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py new file mode 100644 index 0000000000..a1b7140e22 --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py @@ -0,0 +1,92 @@ +import os +import pyblish.api + + +class IntegrateShotgridVersion(pyblish.api.InstancePlugin): + """Integrate Shotgrid Version""" + + order = pyblish.api.IntegratorOrder + 0.497 + label = "Shotgrid Version" + + sg = None + + def process(self, instance): + + context = instance.context + self.sg = context.data.get("shotgridSession") + + # TODO: Use path template solver to build version code from settings + anatomy = instance.data.get("anatomyData", {}) + code = "_".join( + [ + anatomy["project"]["code"], + anatomy["parent"], + anatomy["asset"], + anatomy["task"]["name"], + "v{:03}".format(int(anatomy["version"])), + ] + ) + + version = self._find_existing_version(code, context) + + if not version: + version = self._create_version(code, context) + self.log.info("Create Shotgrid version: {}".format(version)) + else: + self.log.info("Use existing Shotgrid version: {}".format(version)) + + data_to_update = {} + status = context.data.get("intent", {}).get("value") + if status: + data_to_update["sg_status_list"] = status + + for representation in instance.data.get("representations", []): + local_path = representation.get("published_path") + code = os.path.basename(local_path) + + if "shotgridreview" in representation.get("tags", []): + + if representation["ext"] in ["mov", "avi"]: + self.log.info( + "Upload review: {} for version shotgrid {}".format( + 
local_path, version.get("id") + ) + ) + self.sg.upload( + "Version", + version.get("id"), + local_path, + field_name="sg_uploaded_movie", + ) + + data_to_update["sg_path_to_movie"] = local_path + + elif representation["ext"] in ["jpg", "png", "exr", "tga"]: + path_to_frame = local_path.replace("0000", "#") + data_to_update["sg_path_to_frames"] = path_to_frame + + self.log.info("Update Shotgrid version with {}".format(data_to_update)) + self.sg.update("Version", version["id"], data_to_update) + + instance.data["shotgridVersion"] = version + + def _find_existing_version(self, code, context): + + filters = [ + ["project", "is", context.data.get("shotgridProject")], + ["sg_task", "is", context.data.get("shotgridTask")], + ["entity", "is", context.data.get("shotgridEntity")], + ["code", "is", code], + ] + return self.sg.find_one("Version", filters, []) + + def _create_version(self, code, context): + + version_data = { + "project": context.data.get("shotgridProject"), + "sg_task": context.data.get("shotgridTask"), + "entity": context.data.get("shotgridEntity"), + "code": code, + } + + return self.sg.create("Version", version_data) diff --git a/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py b/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py new file mode 100644 index 0000000000..48b320e15e --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py @@ -0,0 +1,38 @@ +import pyblish.api +from openpype.pipeline.publish import ValidateContentsOrder + + +class ValidateShotgridUser(pyblish.api.ContextPlugin): + """ + Check if user is valid and have access to the project. + """ + + label = "Validate Shotgrid User" + order = ValidateContentsOrder + + def process(self, context): + sg = context.data.get("shotgridSession") + + login = context.data.get("shotgridUser") + self.log.info("Login shotgrid set in OpenPype is {}".format(login)) + project = context.data.get("shotgridProject") + self.log.info("Current shotgun project is {}".format(project)) + + if not (login and sg and project): + raise KeyError() + + user = sg.find_one("HumanUser", [["login", "is", login]], ["projects"]) + + self.log.info(user) + self.log.info(login) + user_projects_id = [p["id"] for p in user.get("projects", [])] + if not project.get("id") in user_projects_id: + raise PermissionError( + "Login {} don't have access to the project {}".format( + login, project + ) + ) + + self.log.info( + "Login {} have access to the project {}".format(login, project) + ) diff --git a/openpype/modules/shotgrid/server/README.md b/openpype/modules/shotgrid/server/README.md new file mode 100644 index 0000000000..15e056ff3e --- /dev/null +++ b/openpype/modules/shotgrid/server/README.md @@ -0,0 +1,5 @@ + +### Shotgrid server + +Please refer to the external project that covers Openpype/Shotgrid communication: + - https://github.com/Ellipsanime/shotgrid-leecher diff --git a/openpype/modules/shotgrid/shotgrid_module.py b/openpype/modules/shotgrid/shotgrid_module.py new file mode 100644 index 0000000000..d26647d06a --- /dev/null +++ b/openpype/modules/shotgrid/shotgrid_module.py @@ -0,0 +1,54 @@ +import os + +from openpype.modules import ( + OpenPypeModule, + ITrayModule, + IPluginPaths, +) + +SHOTGRID_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class ShotgridModule(OpenPypeModule, ITrayModule, IPluginPaths): + leecher_manager_url = None + name = "shotgrid" + enabled = False + project_id = None + tray_wrapper = None + + def initialize(self, modules_settings): + 
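# Read the module's system settings: enabled state and the Leecher manager URL +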
shotgrid_settings = modules_settings.get(self.name, dict()) + self.enabled = shotgrid_settings.get("enabled", False) + self.leecher_manager_url = shotgrid_settings.get( + "leecher_manager_url", "" + ) + + def connect_with_modules(self, enabled_modules): + pass + + def get_global_environments(self): + return {"PROJECT_ID": self.project_id} + + def get_plugin_paths(self): + return { + "publish": [ + os.path.join(SHOTGRID_MODULE_DIR, "plugins", "publish") + ] + } + + def get_launch_hook_paths(self): + return os.path.join(SHOTGRID_MODULE_DIR, "hooks") + + def tray_init(self): + from .tray.shotgrid_tray import ShotgridTrayWrapper + + self.tray_wrapper = ShotgridTrayWrapper(self) + + def tray_start(self): + return self.tray_wrapper.validate() + + def tray_exit(self, *args, **kwargs): + return self.tray_wrapper + + def tray_menu(self, tray_menu): + return self.tray_wrapper.tray_menu(tray_menu) diff --git a/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py b/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py new file mode 100644 index 0000000000..1f78cf77c9 --- /dev/null +++ b/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py @@ -0,0 +1,34 @@ +import pytest +from assertpy import assert_that + +import openpype.modules.shotgrid.lib.credentials as sut + + +def test_missing_shotgrid_url(): + with pytest.raises(Exception) as ex: + # arrange + url = "" + # act + sut.get_shotgrid_hostname(url) + # assert + assert_that(ex).is_equal_to("Shotgrid url cannot be a null") + + +def test_full_shotgrid_url(): + # arrange + url = "https://shotgrid.com/myinstance" + # act + actual = sut.get_shotgrid_hostname(url) + # assert + assert_that(actual).is_not_empty() + assert_that(actual).is_equal_to("shotgrid.com") + + +def test_incomplete_shotgrid_url(): + # arrange + url = "shotgrid.com/myinstance" + # act + actual = sut.get_shotgrid_hostname(url) + # assert + assert_that(actual).is_not_empty() + assert_that(actual).is_equal_to("shotgrid.com") diff --git a/openpype/modules/shotgrid/tray/credential_dialog.py b/openpype/modules/shotgrid/tray/credential_dialog.py new file mode 100644 index 0000000000..9d841d98be --- /dev/null +++ b/openpype/modules/shotgrid/tray/credential_dialog.py @@ -0,0 +1,201 @@ +import os +from Qt import QtCore, QtWidgets, QtGui + +from openpype import style +from openpype import resources +from openpype.modules.shotgrid.lib import settings, credentials + + +class CredentialsDialog(QtWidgets.QDialog): + SIZE_W = 450 + SIZE_H = 200 + + _module = None + _is_logged = False + url_label = None + login_label = None + password_label = None + url_input = None + login_input = None + password_input = None + input_layout = None + login_button = None + buttons_layout = None + main_widget = None + + login_changed = QtCore.Signal() + + def __init__(self, module, parent=None): + super(CredentialsDialog, self).__init__(parent) + + self._module = module + self._is_logged = False + + self.setWindowTitle("OpenPype - Shotgrid Login") + + icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) + self.setWindowIcon(icon) + + self.setWindowFlags( + QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowMinimizeButtonHint + ) + self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) + self.setMaximumSize(QtCore.QSize(self.SIZE_W + 100, self.SIZE_H + 100)) + self.setStyleSheet(style.load_stylesheet()) + + self.ui_init() + + def ui_init(self): + self.url_label = QtWidgets.QLabel("Shotgrid server:") + self.login_label = QtWidgets.QLabel("Login:") + self.password_label = 
QtWidgets.QLabel("Password:") + + self.url_input = QtWidgets.QComboBox() + # self.url_input.setReadOnly(True) + + self.login_input = QtWidgets.QLineEdit() + self.login_input.setPlaceholderText("login") + + self.password_input = QtWidgets.QLineEdit() + self.password_input.setPlaceholderText("password") + self.password_input.setEchoMode(QtWidgets.QLineEdit.Password) + + self.error_label = QtWidgets.QLabel("") + self.error_label.setStyleSheet("color: red;") + self.error_label.setWordWrap(True) + self.error_label.hide() + + self.input_layout = QtWidgets.QFormLayout() + self.input_layout.setContentsMargins(10, 15, 10, 5) + + self.input_layout.addRow(self.url_label, self.url_input) + self.input_layout.addRow(self.login_label, self.login_input) + self.input_layout.addRow(self.password_label, self.password_input) + self.input_layout.addRow(self.error_label) + + self.login_button = QtWidgets.QPushButton("Login") + self.login_button.setToolTip("Log in shotgrid instance") + self.login_button.clicked.connect(self._on_shotgrid_login_clicked) + + self.logout_button = QtWidgets.QPushButton("Logout") + self.logout_button.setToolTip("Log out shotgrid instance") + self.logout_button.clicked.connect(self._on_shotgrid_logout_clicked) + + self.buttons_layout = QtWidgets.QHBoxLayout() + self.buttons_layout.addWidget(self.logout_button) + self.buttons_layout.addWidget(self.login_button) + + self.main_widget = QtWidgets.QVBoxLayout(self) + self.main_widget.addLayout(self.input_layout) + self.main_widget.addLayout(self.buttons_layout) + self.setLayout(self.main_widget) + + def show(self, *args, **kwargs): + super(CredentialsDialog, self).show(*args, **kwargs) + self._fill_shotgrid_url() + self._fill_shotgrid_login() + + def _fill_shotgrid_url(self): + servers = settings.get_shotgrid_servers() + + if servers: + for _, v in servers.items(): + self.url_input.addItem("{}".format(v.get('shotgrid_url'))) + self._valid_input(self.url_input) + self.login_button.show() + self.logout_button.show() + enabled = True + else: + self.set_error("Ask your admin to add shotgrid server in settings") + self._invalid_input(self.url_input) + self.login_button.hide() + self.logout_button.hide() + enabled = False + + self.login_input.setEnabled(enabled) + self.password_input.setEnabled(enabled) + + def _fill_shotgrid_login(self): + login = credentials.get_local_login() + + if login: + self.login_input.setText(login) + + def _clear_shotgrid_login(self): + self.login_input.setText("") + self.password_input.setText("") + + def _on_shotgrid_login_clicked(self): + login = self.login_input.text().strip() + password = self.password_input.text().strip() + missing = [] + + if login == "": + missing.append("login") + self._invalid_input(self.login_input) + + if password == "": + missing.append("password") + self._invalid_input(self.password_input) + + url = self.url_input.currentText() + if url == "": + missing.append("url") + self._invalid_input(self.url_input) + + if len(missing) > 0: + self.set_error("You didn't enter {}".format(" and ".join(missing))) + return + + # if credentials.check_credentials( + # login=login, + # password=password, + # shotgrid_url=url, + # ): + credentials.save_local_login( + login=login + ) + os.environ['OPENPYPE_SG_USER'] = login + self._on_login() + + self.set_error("CANT LOGIN") + + def _on_shotgrid_logout_clicked(self): + credentials.clear_local_login() + del os.environ['OPENPYPE_SG_USER'] + self._clear_shotgrid_login() + self._on_logout() + + def set_error(self, msg): + self.error_label.setText(msg) + 
self.error_label.show() + + def _on_login(self): + self._is_logged = True + self.login_changed.emit() + self._close_widget() + + def _on_logout(self): + self._is_logged = False + self.login_changed.emit() + + def _close_widget(self): + self.hide() + + def _valid_input(self, input_widget): + input_widget.setStyleSheet("") + + def _invalid_input(self, input_widget): + input_widget.setStyleSheet("border: 1px solid red;") + + def login_with_credentials( + self, url, login, password + ): + verification = credentials.check_credentials(url, login, password) + if verification: + credentials.save_credentials(login, password, False) + self._module.set_credentials_to_env(login, password) + self.set_credentials(login, password) + self.login_changed.emit() + return verification diff --git a/openpype/modules/shotgrid/tray/shotgrid_tray.py b/openpype/modules/shotgrid/tray/shotgrid_tray.py new file mode 100644 index 0000000000..4038d77b03 --- /dev/null +++ b/openpype/modules/shotgrid/tray/shotgrid_tray.py @@ -0,0 +1,75 @@ +import os +import webbrowser + +from Qt import QtWidgets + +from openpype.modules.shotgrid.lib import credentials +from openpype.modules.shotgrid.tray.credential_dialog import ( + CredentialsDialog, +) + + +class ShotgridTrayWrapper: + module = None + credentials_dialog = None + logged_user_label = None + + def __init__(self, module): + self.module = module + self.credentials_dialog = CredentialsDialog(module) + self.credentials_dialog.login_changed.connect(self.set_login_label) + self.logged_user_label = QtWidgets.QAction("") + self.logged_user_label.setDisabled(True) + self.set_login_label() + + def show_batch_dialog(self): + if self.module.leecher_manager_url: + webbrowser.open(self.module.leecher_manager_url) + + def show_connect_dialog(self): + self.show_credential_dialog() + + def show_credential_dialog(self): + self.credentials_dialog.show() + self.credentials_dialog.activateWindow() + self.credentials_dialog.raise_() + + def set_login_label(self): + login = credentials.get_local_login() + if login: + self.logged_user_label.setText("{}".format(login)) + else: + self.logged_user_label.setText( + "No User logged in {0}".format(login) + ) + + def tray_menu(self, tray_menu): + # Add login to user menu + menu = QtWidgets.QMenu("Shotgrid", tray_menu) + show_connect_action = QtWidgets.QAction("Connect to Shotgrid", menu) + show_connect_action.triggered.connect(self.show_connect_dialog) + menu.addAction(self.logged_user_label) + menu.addSeparator() + menu.addAction(show_connect_action) + tray_menu.addMenu(menu) + + # Add manager to Admin menu + for m in tray_menu.findChildren(QtWidgets.QMenu): + if m.title() == "Admin": + shotgrid_manager_action = QtWidgets.QAction( + "Shotgrid manager", menu + ) + shotgrid_manager_action.triggered.connect( + self.show_batch_dialog + ) + m.addAction(shotgrid_manager_action) + + def validate(self): + login = credentials.get_local_login() + + if not login: + self.show_credential_dialog() + else: + os.environ["OPENPYPE_SG_USER"] = login + + return True diff --git a/openpype/modules/slack/plugins/publish/integrate_slack_api.py b/openpype/modules/slack/plugins/publish/integrate_slack_api.py index 10bde7d4c0..643e55915b 100644 --- a/openpype/modules/slack/plugins/publish/integrate_slack_api.py +++ b/openpype/modules/slack/plugins/publish/integrate_slack_api.py @@ -4,8 +4,8 @@ import pyblish.api import copy from datetime import datetime +from openpype.client import OpenPypeMongoConnection from openpype.lib.plugin_tools import prepare_template_data -from 
openpype.lib import OpenPypeMongoConnection class IntegrateSlackAPI(pyblish.api.InstancePlugin): @@ -95,13 +95,15 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): Reviews might be large, so allow only adding link to message instead of uploading only. """ + fill_data = copy.deepcopy(instance.context.data["anatomyData"]) + username = fill_data.get("user") fill_pairs = [ ("asset", instance.data.get("asset", fill_data.get("asset"))), ("subset", instance.data.get("subset", fill_data.get("subset"))), - ("username", instance.data.get("username", - fill_data.get("username"))), + ("user", username), + ("username", username), ("app", instance.data.get("app", fill_data.get("app"))), ("family", instance.data.get("family", fill_data.get("family"))), ("version", str(instance.data.get("version", @@ -110,13 +112,19 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): if review_path: fill_pairs.append(("review_filepath", review_path)) - task_data = instance.data.get("task") - if not task_data: - task_data = fill_data.get("task") - for key, value in task_data.items(): - fill_key = "task[{}]".format(key) - fill_pairs.append((fill_key, value)) - fill_pairs.append(("task", task_data["name"])) + task_data = fill_data.get("task") + if task_data: + if ( + "{task}" in message_templ + or "{Task}" in message_templ + or "{TASK}" in message_templ + ): + fill_pairs.append(("task", task_data["name"])) + + else: + for key, value in task_data.items(): + fill_key = "task[{}]".format(key) + fill_pairs.append((fill_key, value)) self.log.debug("fill_pairs ::{}".format(fill_pairs)) multiple_case_variants = prepare_template_data(fill_pairs) diff --git a/openpype/modules/slack/slack_module.py b/openpype/modules/slack/slack_module.py index 9b2976d766..797ae19f4a 100644 --- a/openpype/modules/slack/slack_module.py +++ b/openpype/modules/slack/slack_module.py @@ -1,14 +1,10 @@ import os -from openpype.modules import OpenPypeModule -from openpype_interfaces import ( - IPluginPaths, - ILaunchHookPaths -) +from openpype.modules import OpenPypeModule, IPluginPaths SLACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) -class SlackIntegrationModule(OpenPypeModule, IPluginPaths, ILaunchHookPaths): +class SlackIntegrationModule(OpenPypeModule, IPluginPaths): """Allows sending notification to Slack channels during publishing.""" name = "slack" @@ -18,7 +14,8 @@ class SlackIntegrationModule(OpenPypeModule, IPluginPaths, ILaunchHookPaths): self.enabled = slack_settings["enabled"] def get_launch_hook_paths(self): - """Implementation of `ILaunchHookPaths`.""" + """Implementation for applications launch hooks.""" + return os.path.join(SLACK_MODULE_DIR, "launch_hooks") def get_plugin_paths(self): diff --git a/openpype/modules/standalonepublish_action.py b/openpype/modules/standalonepublish_action.py deleted file mode 100644 index ba53ce9b9e..0000000000 --- a/openpype/modules/standalonepublish_action.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import platform -import subprocess -from openpype.lib import get_openpype_execute_args -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction - - -class StandAlonePublishAction(OpenPypeModule, ITrayAction): - label = "Publish" - name = "standalonepublish_tool" - - def initialize(self, modules_settings): - import openpype - self.enabled = modules_settings[self.name]["enabled"] - self.publish_paths = [ - os.path.join( - openpype.PACKAGE_DIR, - "hosts", - "standalonepublisher", - "plugins", - "publish" - ) - ] - - def tray_init(self): - return - - def 
on_action_trigger(self): - self.run_standalone_publisher() - - def connect_with_modules(self, enabled_modules): - """Collect publish paths from other modules.""" - publish_paths = self.manager.collect_plugin_paths()["publish"] - self.publish_paths.extend(publish_paths) - - def run_standalone_publisher(self): - args = get_openpype_execute_args("standalonepublisher") - kwargs = {} - if platform.system().lower() == "darwin": - new_args = ["open", "-na", args.pop(0), "--args"] - new_args.extend(args) - args = new_args - - detached_process = getattr(subprocess, "DETACHED_PROCESS", None) - if detached_process is not None: - kwargs["creationflags"] = detached_process - - subprocess.Popen(args, **kwargs) diff --git a/openpype/modules/sync_server/providers/abstract_provider.py b/openpype/modules/sync_server/providers/abstract_provider.py index 688a17f14f..e11a8ba71e 100644 --- a/openpype/modules/sync_server/providers/abstract_provider.py +++ b/openpype/modules/sync_server/providers/abstract_provider.py @@ -1,8 +1,8 @@ import abc import six -from openpype.api import Logger +from openpype.lib import Logger -log = Logger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") @six.add_metaclass(abc.ABCMeta) @@ -10,6 +10,8 @@ class AbstractProvider: CODE = '' LABEL = '' + _log = None + def __init__(self, project_name, site_name, tree=None, presets=None): self.presets = None self.active = False @@ -19,6 +21,12 @@ class AbstractProvider: super(AbstractProvider, self).__init__() + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + @abc.abstractmethod def is_active(self): """ @@ -62,7 +70,7 @@ class AbstractProvider: @abc.abstractmethod def upload_file(self, source_path, path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Copy file from 'source_path' to 'target_path' on provider. 
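# The hunks above rename the provider transfer API from 'collection' to
# 'project_name'. A minimal sketch of a provider written against the renamed
# interface -- 'ExampleProvider' is hypothetical and only illustrates the new
# signatures; the real providers below (dropbox, gdrive, local_drive, sftp)
# also report progress back through server.update_db(project_name=...) and
# honour pausing, and AbstractProvider may declare further abstract methods
# that are left out of this sketch.
import os
import shutil

from .abstract_provider import AbstractProvider


class ExampleProvider(AbstractProvider):
    CODE = "example"
    LABEL = "Example"

    def is_active(self):
        return True

    def upload_file(self, source_path, path,
                    server, project_name, file, representation, site,
                    overwrite=False):
        # 'project_name' replaces the old 'collection' argument; it is the
        # value handed back to the sync server when recording progress.
        if os.path.exists(path) and not overwrite:
            raise ValueError("File {} exists, set overwrite".format(path))
        shutil.copyfile(source_path, path)
        return os.path.basename(path)

    def download_file(self, source_path, local_path,
                      server, project_name, file, representation, site,
                      overwrite=False):
        # Same contract in the opposite direction.
        return self.upload_file(source_path, local_path,
                                server, project_name, file,
                                representation, site, overwrite)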
@@ -75,7 +83,7 @@ class AbstractProvider: arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): name of project_name file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -87,7 +95,7 @@ class AbstractProvider: @abc.abstractmethod def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Download file from provider into local system @@ -99,7 +107,7 @@ class AbstractProvider: arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -199,11 +207,11 @@ class AbstractProvider: path = anatomy.fill_root(path) except KeyError: msg = "Error in resolving local root from anatomy" - log.error(msg) + self.log.error(msg) raise ValueError(msg) except IndexError: msg = "Path {} contains unfillable placeholder" - log.error(msg) + self.log.error(msg) raise ValueError(msg) return path diff --git a/openpype/modules/sync_server/providers/dropbox.py b/openpype/modules/sync_server/providers/dropbox.py index dfc42fed75..e026ae7ef6 100644 --- a/openpype/modules/sync_server/providers/dropbox.py +++ b/openpype/modules/sync_server/providers/dropbox.py @@ -2,12 +2,9 @@ import os import dropbox -from openpype.api import Logger from .abstract_provider import AbstractProvider from ..utils import EditableScopes -log = Logger().get_logger("SyncServer") - class DropboxHandler(AbstractProvider): CODE = 'dropbox' @@ -20,26 +17,26 @@ class DropboxHandler(AbstractProvider): self.dbx = None if not self.presets: - log.info( + self.log.info( "Sync Server: There are no presets for {}.".format(site_name) ) return if not self.presets["enabled"]: - log.debug("Sync Server: Site {} not enabled for {}.". + self.log.debug("Sync Server: Site {} not enabled for {}.". format(site_name, project_name)) return token = self.presets.get("token", "") if not token: msg = "Sync Server: No access token for dropbox provider" - log.info(msg) + self.log.info(msg) return team_folder_name = self.presets.get("team_folder_name", "") if not team_folder_name: msg = "Sync Server: No team folder name for dropbox provider" - log.info(msg) + self.log.info(msg) return acting_as_member = self.presets.get("acting_as_member", "") @@ -47,7 +44,7 @@ class DropboxHandler(AbstractProvider): msg = ( "Sync Server: No acting member for dropbox provider" ) - log.info(msg) + self.log.info(msg) return try: @@ -55,7 +52,7 @@ class DropboxHandler(AbstractProvider): token, acting_as_member, team_folder_name ) except Exception as e: - log.info("Could not establish dropbox object: {}".format(e)) + self.log.info("Could not establish dropbox object: {}".format(e)) return super(AbstractProvider, self).__init__() @@ -224,7 +221,7 @@ class DropboxHandler(AbstractProvider): return False def upload_file(self, source_path, path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Copy file from 'source_path' to 'target_path' on provider. 
@@ -237,7 +234,7 @@ class DropboxHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -290,7 +287,7 @@ class DropboxHandler(AbstractProvider): cursor.offset = f.tell() server.update_db( - collection=collection, + project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -301,7 +298,7 @@ class DropboxHandler(AbstractProvider): return path def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Download file from provider into local system @@ -313,7 +310,7 @@ class DropboxHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -337,7 +334,7 @@ class DropboxHandler(AbstractProvider): self.dbx.files_download_to_file(local_path, source_path) server.update_db( - collection=collection, + project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -448,7 +445,7 @@ class DropboxHandler(AbstractProvider): path = anatomy.fill_root(path) except KeyError: msg = "Error in resolving local root from anatomy" - log.error(msg) + self.log.error(msg) raise ValueError(msg) return path diff --git a/openpype/modules/sync_server/providers/gdrive.py b/openpype/modules/sync_server/providers/gdrive.py index aa7329b104..9a3ce89cf5 100644 --- a/openpype/modules/sync_server/providers/gdrive.py +++ b/openpype/modules/sync_server/providers/gdrive.py @@ -5,12 +5,12 @@ import sys import six import platform -from openpype.api import Logger -from openpype.api import get_system_settings +from openpype.lib import Logger +from openpype.settings import get_system_settings from .abstract_provider import AbstractProvider from ..utils import time_function, ResumableError -log = Logger().get_logger("SyncServer") +log = Logger.get_logger("GDriveHandler") try: from googleapiclient.discovery import build @@ -69,13 +69,17 @@ class GDriveHandler(AbstractProvider): self.presets = presets if not self.presets: - log.info("Sync Server: There are no presets for {}.". - format(site_name)) + self.log.info( + "Sync Server: There are no presets for {}.".format(site_name) + ) return if not self.presets["enabled"]: - log.debug("Sync Server: Site {} not enabled for {}.". 
- format(site_name, project_name)) + self.log.debug( + "Sync Server: Site {} not enabled for {}.".format( + site_name, project_name + ) + ) return current_platform = platform.system().lower() @@ -85,20 +89,22 @@ class GDriveHandler(AbstractProvider): if not cred_path: msg = "Sync Server: Please, fill the credentials for gdrive "\ "provider for platform '{}' !".format(current_platform) - log.info(msg) + self.log.info(msg) return try: cred_path = cred_path.format(**os.environ) except KeyError as e: - log.info("Sync Server: The key(s) {} does not exist in the " - "environment variables".format(" ".join(e.args))) + self.log.info(( + "Sync Server: The key(s) {} does not exist in the " + "environment variables" + ).format(" ".join(e.args))) return if not os.path.exists(cred_path): msg = "Sync Server: No credentials for gdrive provider " + \ "for '{}' on path '{}'!".format(site_name, cred_path) - log.info(msg) + self.log.info(msg) return self.service = None @@ -251,7 +257,7 @@ class GDriveHandler(AbstractProvider): return folder_id def upload_file(self, source_path, path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Uploads single file from 'source_path' to destination 'path'. @@ -264,7 +270,7 @@ class GDriveHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -318,22 +324,22 @@ class GDriveHandler(AbstractProvider): fields='id') media.stream() - log.debug("Start Upload! {}".format(source_path)) + self.log.debug("Start Upload! {}".format(source_path)) last_tick = status = response = None status_val = 0 while response is None: if server.is_representation_paused(representation['_id'], check_parents=True, - project_name=collection): + project_name=project_name): raise ValueError("Paused during process, please redo.") if status: status_val = float(status.progress()) if not last_tick or \ time.time() - last_tick >= server.LOG_PROGRESS_SEC: last_tick = time.time() - log.debug("Uploaded %d%%." % + self.log.debug("Uploaded %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -350,15 +356,16 @@ class GDriveHandler(AbstractProvider): if 'has not granted' in ex._get_reason().strip(): raise PermissionError(ex._get_reason().strip()) - log.warning("Forbidden received, hit quota. " - "Injecting 60s delay.") + self.log.warning( + "Forbidden received, hit quota. Injecting 60s delay." + ) time.sleep(60) return False raise return response['id'] def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Downloads single file from 'source_path' (remote) to 'local_path'. 
@@ -372,7 +379,7 @@ class GDriveHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -410,16 +417,16 @@ class GDriveHandler(AbstractProvider): while response is None: if server.is_representation_paused(representation['_id'], check_parents=True, - project_name=collection): + project_name=project_name): raise ValueError("Paused during process, please redo.") if status: status_val = float(status.progress()) if not last_tick or \ time.time() - last_tick >= server.LOG_PROGRESS_SEC: last_tick = time.time() - log.debug("Downloaded %d%%." % + self.log.debug("Downloaded %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -629,9 +636,9 @@ class GDriveHandler(AbstractProvider): ["gdrive"] ) except KeyError: - log.info(("Sync Server: There are no presets for Gdrive " + - "provider."). - format(str(provider_presets))) + log.info(( + "Sync Server: There are no presets for Gdrive provider." + ).format(str(provider_presets))) return return provider_presets @@ -704,7 +711,7 @@ class GDriveHandler(AbstractProvider): roots[self.MY_DRIVE_STR] = self.service.files() \ .get(fileId='root').execute() except errors.HttpError: - log.warning("HttpError in sync loop, " + self.log.warning("HttpError in sync loop, " "trying next loop", exc_info=True) raise ResumableError @@ -727,7 +734,7 @@ class GDriveHandler(AbstractProvider): Returns: (dictionary) path as a key, folder id as a value """ - log.debug("build_tree len {}".format(len(folders))) + self.log.debug("build_tree len {}".format(len(folders))) if not self.root: # build only when necessary, could be expensive self.root = self._prepare_root_info() @@ -779,9 +786,9 @@ class GDriveHandler(AbstractProvider): loop_cnt += 1 if len(no_parents_yet) > 0: - log.debug("Some folders path are not resolved {}". + self.log.debug("Some folders path are not resolved {}". 
format(no_parents_yet)) - log.debug("Remove deleted folders from trash.") + self.log.debug("Remove deleted folders from trash.") return tree diff --git a/openpype/modules/sync_server/providers/local_drive.py b/openpype/modules/sync_server/providers/local_drive.py index 68f604b39c..8f55dc529b 100644 --- a/openpype/modules/sync_server/providers/local_drive.py +++ b/openpype/modules/sync_server/providers/local_drive.py @@ -4,10 +4,11 @@ import shutil import threading import time -from openpype.api import Logger, Anatomy +from openpype.lib import Logger +from openpype.pipeline import Anatomy from .abstract_provider import AbstractProvider -log = Logger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class LocalDriveHandler(AbstractProvider): @@ -81,7 +82,7 @@ class LocalDriveHandler(AbstractProvider): return editable def upload_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False, direction="Upload"): """ Copies file from 'source_path' to 'target_path' @@ -94,7 +95,7 @@ class LocalDriveHandler(AbstractProvider): thread = threading.Thread(target=self._copy, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, direction) else: if os.path.exists(target_path): @@ -104,13 +105,14 @@ class LocalDriveHandler(AbstractProvider): return os.path.basename(target_path) def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Download a file form 'source_path' to 'local_path' """ return self.upload_file(source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, + representation, site, overwrite, direction="Download") def delete_file(self, path): @@ -187,7 +189,7 @@ class LocalDriveHandler(AbstractProvider): except shutil.SameFileError: print("same files, skipping") - def _mark_progress(self, collection, file, representation, server, site, + def _mark_progress(self, project_name, file, representation, server, site, source_path, target_path, direction): """ Updates progress field in DB by values 0-1. @@ -203,7 +205,7 @@ class LocalDriveHandler(AbstractProvider): status_val = target_file_size / source_file_size last_tick = time.time() log.debug(direction + "ed %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/providers/sftp.py b/openpype/modules/sync_server/providers/sftp.py index 49b87b14ec..40f11cb9dd 100644 --- a/openpype/modules/sync_server/providers/sftp.py +++ b/openpype/modules/sync_server/providers/sftp.py @@ -4,10 +4,10 @@ import time import threading import platform -from openpype.api import Logger -from openpype.api import get_system_settings +from openpype.lib import Logger +from openpype.settings import get_system_settings from .abstract_provider import AbstractProvider -log = Logger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer-SFTPHandler") pysftp = None try: @@ -43,8 +43,9 @@ class SFTPHandler(AbstractProvider): self.presets = presets if not self.presets: - log.warning("Sync Server: There are no presets for {}.". 
- format(site_name)) + self.log.warning( + "Sync Server: There are no presets for {}.".format(site_name) + ) return # store to instance for reconnect @@ -222,7 +223,7 @@ class SFTPHandler(AbstractProvider): return os.path.basename(path) def upload_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Uploads single file from 'source_path' to destination 'path'. @@ -235,7 +236,7 @@ class SFTPHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -256,7 +257,7 @@ class SFTPHandler(AbstractProvider): thread = threading.Thread(target=self._upload, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, "upload") return os.path.basename(target_path) @@ -267,7 +268,7 @@ class SFTPHandler(AbstractProvider): conn.put(source_path, target_path) def download_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Downloads single file from 'source_path' (remote) to 'target_path'. @@ -281,7 +282,7 @@ class SFTPHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -302,7 +303,7 @@ class SFTPHandler(AbstractProvider): thread = threading.Thread(target=self._download, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, "download") return os.path.basename(target_path) @@ -423,9 +424,9 @@ class SFTPHandler(AbstractProvider): return pysftp.Connection(**conn_params) except (paramiko.ssh_exception.SSHException, pysftp.exceptions.ConnectionException): - log.warning("Couldn't connect", exc_info=True) + self.log.warning("Couldn't connect", exc_info=True) - def _mark_progress(self, collection, file, representation, server, site, + def _mark_progress(self, project_name, file, representation, server, site, source_path, target_path, direction): """ Updates progress field in DB by values 0-1. @@ -445,8 +446,8 @@ class SFTPHandler(AbstractProvider): time.time() - last_tick >= server.LOG_PROGRESS_SEC: status_val = target_file_size / source_file_size last_tick = time.time() - log.debug(direction + "ed %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + self.log.debug(direction + "ed %d%%." 
% int(status_val * 100)) + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/resources/disabled.png b/openpype/modules/sync_server/resources/disabled.png new file mode 100644 index 0000000000..e036d7ef6a Binary files /dev/null and b/openpype/modules/sync_server/resources/disabled.png differ diff --git a/openpype/modules/sync_server/rest_api.py b/openpype/modules/sync_server/rest_api.py new file mode 100644 index 0000000000..a7d9dd80b7 --- /dev/null +++ b/openpype/modules/sync_server/rest_api.py @@ -0,0 +1,37 @@ +from aiohttp.web_response import Response +from openpype.lib import Logger + + +class SyncServerModuleRestApi: + """ + REST API endpoint used for calling from hosts when context change + happens in Workfile app. + """ + + def __init__(self, user_module, server_manager): + self._log = None + self.module = user_module + self.server_manager = server_manager + + self.prefix = "/sync_server" + + self.register() + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + def register(self): + self.server_manager.add_route( + "POST", + self.prefix + "/reset_timer", + self.reset_timer, + ) + + async def reset_timer(self, _request): + """Force timer to run immediately.""" + self.module.reset_timer() + + return Response(status=200) diff --git a/openpype/modules/sync_server/sync_server.py b/openpype/modules/sync_server/sync_server.py index 22eed01ef3..d0a40a60ff 100644 --- a/openpype/modules/sync_server/sync_server.py +++ b/openpype/modules/sync_server/sync_server.py @@ -6,15 +6,12 @@ import concurrent.futures from concurrent.futures._base import CancelledError from .providers import lib -from openpype.lib import PypeLogger +from openpype.lib import Logger from .utils import SyncStatus, ResumableError -log = PypeLogger().get_logger("SyncServer") - - -async def upload(module, collection, file, representation, provider_name, +async def upload(module, project_name, file, representation, provider_name, remote_site_name, tree=None, preset=None): """ Upload single 'file' of a 'representation' to 'provider'. @@ -31,7 +28,7 @@ async def upload(module, collection, file, representation, provider_name, Args: module(SyncServerModule): object to run SyncServerModule API - collection (str): source collection + project_name (str): source db file (dictionary): of file from representation in Mongo representation (dictionary): of representation provider_name (string): gdrive, gdc etc. 
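# rest_api.py above registers POST /sync_server/reset_timer so hosts (for
# example on a context change in the Workfile app) can make the sync loop run
# immediately instead of waiting out the delay. A small usage sketch, assuming
# OPENPYPE_WEBSERVER_URL is set by the running tray; the module's own
# _reset_timer_with_rest_api further below performs the same call:
import os

import requests

webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
if webserver_url:
    requests.post("{}/sync_server/reset_timer".format(webserver_url))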
@@ -47,15 +44,16 @@ async def upload(module, collection, file, representation, provider_name, # thread can do that at a time, upload/download to prepared # structure should be run in parallel remote_handler = lib.factory.get_provider(provider_name, - collection, + project_name, remote_site_name, tree=tree, presets=preset) file_path = file.get("path", "") try: - local_file_path, remote_file_path = resolve_paths(module, - file_path, collection, remote_site_name, remote_handler + local_file_path, remote_file_path = resolve_paths( + module, file_path, project_name, + remote_site_name, remote_handler ) except Exception as exp: print(exp) @@ -74,27 +72,28 @@ async def upload(module, collection, file, representation, provider_name, local_file_path, remote_file_path, module, - collection, + project_name, file, representation, remote_site_name, True ) - module.handle_alternate_site(collection, representation, remote_site_name, + module.handle_alternate_site(project_name, representation, + remote_site_name, file["_id"], file_id) return file_id -async def download(module, collection, file, representation, provider_name, +async def download(module, project_name, file, representation, provider_name, remote_site_name, tree=None, preset=None): """ Downloads file to local folder denoted in representation.Context. Args: module(SyncServerModule): object to run SyncServerModule API - collection (str): source collection + project_name (str): source file (dictionary) : info about processed file representation (dictionary): repr that 'file' belongs to provider_name (string): 'gdrive' etc @@ -108,20 +107,20 @@ async def download(module, collection, file, representation, provider_name, """ with module.lock: remote_handler = lib.factory.get_provider(provider_name, - collection, + project_name, remote_site_name, tree=tree, presets=preset) file_path = file.get("path", "") local_file_path, remote_file_path = resolve_paths( - module, file_path, collection, remote_site_name, remote_handler + module, file_path, project_name, remote_site_name, remote_handler ) local_folder = os.path.dirname(local_file_path) os.makedirs(local_folder, exist_ok=True) - local_site = module.get_active_site(collection) + local_site = module.get_active_site(project_name) loop = asyncio.get_running_loop() file_id = await loop.run_in_executor(None, @@ -129,20 +128,20 @@ async def download(module, collection, file, representation, provider_name, remote_file_path, local_file_path, module, - collection, + project_name, file, representation, local_site, True ) - module.handle_alternate_site(collection, representation, local_site, + module.handle_alternate_site(project_name, representation, local_site, file["_id"], file_id) return file_id -def resolve_paths(module, file_path, collection, +def resolve_paths(module, file_path, project_name, remote_site_name=None, remote_handler=None): """ Returns tuple of local and remote file paths with {root} @@ -153,7 +152,7 @@ def resolve_paths(module, file_path, collection, Args: module(SyncServerModule): object to run SyncServerModule API file_path(string): path with {root} - collection(string): project name + project_name(string): project name remote_site_name(string): remote site remote_handler(AbstractProvider): implementation Returns: @@ -164,7 +163,7 @@ def resolve_paths(module, file_path, collection, remote_file_path = remote_handler.resolve_path(file_path) local_handler = lib.factory.get_provider( - 'local_drive', collection, module.get_active_site(collection)) + 'local_drive', project_name, 
module.get_active_site(project_name)) local_file_path = local_handler.resolve_path(file_path) return local_file_path, remote_file_path @@ -236,6 +235,8 @@ class SyncServerThread(threading.Thread): Stopped when tray is closed. """ def __init__(self, module): + self.log = Logger.get_logger(self.__class__.__name__) + super(SyncServerThread, self).__init__() self.module = module self.loop = None @@ -247,17 +248,17 @@ class SyncServerThread(threading.Thread): self.is_running = True try: - log.info("Starting Sync Server") + self.log.info("Starting Sync Server") self.loop = asyncio.new_event_loop() # create new loop for thread asyncio.set_event_loop(self.loop) self.loop.set_default_executor(self.executor) asyncio.ensure_future(self.check_shutdown(), loop=self.loop) asyncio.ensure_future(self.sync_loop(), loop=self.loop) - log.info("Sync Server Started") + self.log.info("Sync Server Started") self.loop.run_forever() except Exception: - log.warning( + self.log.warning( "Sync Server service has failed", exc_info=True ) finally: @@ -269,8 +270,8 @@ class SyncServerThread(threading.Thread): - gets list of collections in DB - gets list of active remote providers (has configuration, credentials) - - for each collection it looks for representations that should - be synced + - for each project_name it looks for representations that + should be synced - synchronize found collections - update representations - fills error messages for exceptions - waits X seconds and repeat @@ -280,20 +281,19 @@ class SyncServerThread(threading.Thread): while self.is_running and not self.module.is_paused(): try: import time - start_time = None + start_time = time.time() self.module.set_sync_project_settings() # clean cache - for collection, preset in self.module.sync_project_settings.\ - items(): - if collection not in self.module.get_enabled_projects(): - continue + project_name = None + enabled_projects = self.module.get_enabled_projects() + for project_name in enabled_projects: + preset = self.module.sync_project_settings[project_name] - start_time = time.time() - local_site, remote_site = self._working_sites(collection) + local_site, remote_site = self._working_sites(project_name) if not all([local_site, remote_site]): continue sync_repres = self.module.get_sync_representations( - collection, + project_name, local_site, remote_site ) @@ -311,7 +311,7 @@ class SyncServerThread(threading.Thread): remote_provider = \ self.module.get_provider_for_site(site=remote_site) handler = lib.factory.get_provider(remote_provider, - collection, + project_name, remote_site, presets=site_preset) limit = lib.factory.get_provider_batch_limit( @@ -342,7 +342,7 @@ class SyncServerThread(threading.Thread): limit -= 1 task = asyncio.create_task( upload(self.module, - collection, + project_name, file, sync, remote_provider, @@ -354,7 +354,7 @@ class SyncServerThread(threading.Thread): files_processed_info.append((file, sync, remote_site, - collection + project_name )) processed_file_path.add(file_path) if status == SyncStatus.DO_DOWNLOAD: @@ -362,7 +362,7 @@ class SyncServerThread(threading.Thread): limit -= 1 task = asyncio.create_task( download(self.module, - collection, + project_name, file, sync, remote_provider, @@ -374,23 +374,24 @@ class SyncServerThread(threading.Thread): files_processed_info.append((file, sync, local_site, - collection + project_name )) processed_file_path.add(file_path) - log.debug("Sync tasks count {}". 
- format(len(task_files_to_process))) + self.log.debug("Sync tasks count {}".format( + len(task_files_to_process) + )) files_created = await asyncio.gather( *task_files_to_process, return_exceptions=True) for file_id, info in zip(files_created, files_processed_info): - file, representation, site, collection = info + file, representation, site, project_name = info error = None if isinstance(file_id, BaseException): error = str(file_id) file_id = None - self.module.update_db(collection, + self.module.update_db(project_name, file_id, file, representation, @@ -398,28 +399,31 @@ class SyncServerThread(threading.Thread): error) duration = time.time() - start_time - log.debug("One loop took {:.2f}s".format(duration)) + self.log.debug("One loop took {:.2f}s".format(duration)) - delay = self.module.get_loop_delay(collection) - log.debug("Waiting for {} seconds to new loop".format(delay)) + delay = self.module.get_loop_delay(project_name) + self.log.debug( + "Waiting for {} seconds to new loop".format(delay) + ) self.timer = asyncio.create_task(self.run_timer(delay)) await asyncio.gather(self.timer) except ConnectionResetError: - log.warning("ConnectionResetError in sync loop, " - "trying next loop", - exc_info=True) + self.log.warning( + "ConnectionResetError in sync loop, trying next loop", + exc_info=True) except CancelledError: # just stopping server pass except ResumableError: - log.warning("ResumableError in sync loop, " - "trying next loop", - exc_info=True) + self.log.warning( + "ResumableError in sync loop, trying next loop", + exc_info=True) except Exception: self.stop() - log.warning("Unhandled except. in sync loop, stopping server", - exc_info=True) + self.log.warning( + "Unhandled except. in sync loop, stopping server", + exc_info=True) def stop(self): """Sets is_running flag to false, 'check_shutdown' shuts server down""" @@ -432,16 +436,17 @@ class SyncServerThread(threading.Thread): while self.is_running: if self.module.long_running_tasks: task = self.module.long_running_tasks.pop() - log.info("starting long running") + self.log.info("starting long running") await self.loop.run_in_executor(None, task["func"]) - log.info("finished long running") + self.log.info("finished long running") self.module.projects_processed.remove(task["project_name"]) await asyncio.sleep(0.5) tasks = [task for task in asyncio.all_tasks() if task is not asyncio.current_task()] list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks results = await asyncio.gather(*tasks, return_exceptions=True) - log.debug(f'Finished awaiting cancelled tasks, results: {results}...') + self.log.debug( + f'Finished awaiting cancelled tasks, results: {results}...') await self.loop.shutdown_asyncgens() # to really make sure everything else has time to stop self.executor.shutdown(wait=True) @@ -454,29 +459,32 @@ class SyncServerThread(threading.Thread): def reset_timer(self): """Called when waiting for next loop should be skipped""" - log.debug("Resetting timer") + self.log.debug("Resetting timer") if self.timer: self.timer.cancel() self.timer = None - def _working_sites(self, collection): - if self.module.is_project_paused(collection): - log.debug("Both sites same, skipping") + def _working_sites(self, project_name): + if self.module.is_project_paused(project_name): + self.log.debug("Both sites same, skipping") return None, None - local_site = self.module.get_active_site(collection) - remote_site = self.module.get_remote_site(collection) + local_site = self.module.get_active_site(project_name) + remote_site = 
self.module.get_remote_site(project_name) if local_site == remote_site: - log.debug("{}-{} sites same, skipping".format(local_site, - remote_site)) + self.log.debug("{}-{} sites same, skipping".format( + local_site, remote_site)) return None, None - configured_sites = _get_configured_sites(self.module, collection) + configured_sites = _get_configured_sites(self.module, project_name) if not all([local_site in configured_sites, remote_site in configured_sites]): - log.debug("Some of the sites {} - {} is not ".format(local_site, - remote_site) + - "working properly") + self.log.debug( + "Some of the sites {} - {} is not working properly".format( + local_site, remote_site + ) + ) + return None, None return local_site, remote_site diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py index 45ff8bc4d1..653ee50541 100644 --- a/openpype/modules/sync_server/sync_server_module.py +++ b/openpype/modules/sync_server/sync_server_module.py @@ -1,22 +1,28 @@ import os -from bson.objectid import ObjectId +import sys +import time from datetime import datetime import threading import platform import copy +import signal from collections import deque, defaultdict +import click +from bson.objectid import ObjectId -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayModule -from openpype.api import ( - Anatomy, +from openpype.client import ( + get_projects, + get_representations, + get_representation_by_id, +) +from openpype.modules import OpenPypeModule, ITrayModule +from openpype.settings import ( get_project_settings, get_system_settings, - get_local_site_id ) -from openpype.lib import PypeLogger -from openpype.pipeline import AvalonMongoDB +from openpype.lib import Logger, get_local_site_id +from openpype.pipeline import AvalonMongoDB, Anatomy from openpype.settings.lib import ( get_default_anatomy_settings, get_anatomy_settings @@ -27,8 +33,7 @@ from .providers import lib from .utils import time_function, SyncStatus, SiteAlreadyPresentError - -log = PypeLogger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class SyncServerModule(OpenPypeModule, ITrayModule): @@ -130,48 +135,55 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.projects_processed = set() """ Start of Public API """ - def add_site(self, collection, representation_id, site_name=None, - force=False): + def add_site(self, project_name, representation_id, site_name=None, + force=False, priority=None, reset_timer=False): """ Adds new site to representation to be synced. - 'collection' must have synchronization enabled (globally or + 'project_name' must have synchronization enabled (globally or project only) - Used as a API endpoint from outside applications (Loader etc). + Used as an API endpoint from outside applications (Loader etc). Use 'force' to reset existing site. Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site force (bool): reset site if exists + priority (int): set priority + reset_timer (bool): if delay timer should be reset, eg. 
user mark + some representation to be synced manually Throws: SiteAlreadyPresentError - if adding already existing site and not 'force' ValueError - other errors (repre not found, misconfiguration) """ - if not self.get_sync_project_setting(collection): + if not self.get_sync_project_setting(project_name): raise ValueError("Project not configured") if not site_name: site_name = self.DEFAULT_SITE - self.reset_site_on_representation(collection, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, - force=force) + force=force, + priority=priority) - def remove_site(self, collection, representation_id, site_name, + if reset_timer: + self.reset_timer() + + def remove_site(self, project_name, representation_id, site_name, remove_local_files=False): """ Removes 'site_name' for particular 'representation_id' on - 'collection' + 'project_name' Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site remove_local_files (bool): remove only files for 'local_id' @@ -180,15 +192,15 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: throws ValueError if any issue """ - if not self.get_sync_project_setting(collection): + if not self.get_sync_project_setting(project_name): raise ValueError("Project not configured") - self.reset_site_on_representation(collection, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, remove=True) if remove_local_files: - self._remove_local_file(collection, representation_id, site_name) + self._remove_local_file(project_name, representation_id, site_name) def compute_resource_sync_sites(self, project_name): """Get available resource sync sites state for publish process. @@ -335,9 +347,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return alt_site_pairs - def clear_project(self, collection, site_name): + def clear_project(self, project_name, site_name): """ - Clear 'collection' of 'site_name' and its local files + Clear 'project_name' of 'site_name' and its local files Works only on real local sites, not on 'studio' """ @@ -346,16 +358,17 @@ class SyncServerModule(OpenPypeModule, ITrayModule): "files.sites.name": site_name } + # TODO currently not possible to replace with get_representations representations = list( - self.connection.database[collection].find(query)) + self.connection.database[project_name].find(query)) if not representations: self.log.debug("No repre found") return for repre in representations: - self.remove_site(collection, repre.get("_id"), site_name, True) + self.remove_site(project_name, repre.get("_id"), site_name, True) - def create_validate_project_task(self, collection, site_name): + def create_validate_project_task(self, project_name, site_name): """Adds metadata about project files validation on a queue. 
This process will loop through all representation and check if @@ -372,33 +385,28 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ task = { "type": "validate", - "project_name": collection, - "func": lambda: self.validate_project(collection, site_name, + "project_name": project_name, + "func": lambda: self.validate_project(project_name, site_name, reset_missing=True) } - self.projects_processed.add(collection) + self.projects_processed.add(project_name) self.long_running_tasks.append(task) - def validate_project(self, collection, site_name, reset_missing=False): - """Validate 'collection' of 'site_name' and its local files + def validate_project(self, project_name, site_name, reset_missing=False): + """Validate 'project_name' of 'site_name' and its local files If file present and not marked with a 'site_name' in DB, DB is updated with site name and file modified date. Args: - collection (string): project name + project_name (string): project name site_name (string): active site name reset_missing (bool): if True reset site in DB if missing physically """ - self.log.debug("Validation of {} for {} started".format(collection, + self.log.debug("Validation of {} for {} started".format(project_name, site_name)) - query = { - "type": "representation" - } - - representations = list( - self.connection.database[collection].find(query)) + representations = list(get_representations(project_name)) if not representations: self.log.debug("No repre found") return @@ -418,7 +426,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): continue file_path = repre_file.get("path", "") - local_file_path = self.get_local_file_path(collection, + local_file_path = self.get_local_file_path(project_name, site_name, file_path) @@ -430,14 +438,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): "Adding site {} for {}".format(site_name, repre_id)) - query = { - "_id": repre_id - } created_dt = datetime.fromtimestamp( os.path.getmtime(local_file_path)) elem = {"name": site_name, "created_dt": created_dt} - self._add_site(collection, query, repre, elem, + self._add_site(project_name, repre, elem, site_name=site_name, file_id=repre_file["_id"], force=True) @@ -447,51 +452,52 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.log.debug("Resetting site {} for {}". format(site_name, repre_id)) self.reset_site_on_representation( - collection, repre_id, site_name=site_name, + project_name, repre_id, site_name=site_name, file_id=repre_file["_id"]) sites_reset += 1 if sites_added % 100 == 0: self.log.debug("Sites added {}".format(sites_added)) - self.log.debug("Validation of {} for {} ended".format(collection, + self.log.debug("Validation of {} for {} ended".format(project_name, site_name)) self.log.info("Sites added {}, sites reset {}".format(sites_added, reset_missing)) - def pause_representation(self, collection, representation_id, site_name): + def pause_representation(self, project_name, representation_id, site_name): """ Sets 'representation_id' as paused, eg. no syncing should be happening on it. Args: - collection (string): project name + project_name (string): project name representation_id (string): MongoDB objectId value site_name (string): 'gdrive', 'studio' etc. 
""" - log.info("Pausing SyncServer for {}".format(representation_id)) + self.log.info("Pausing SyncServer for {}".format(representation_id)) self._paused_representations.add(representation_id) - self.reset_site_on_representation(collection, representation_id, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, pause=True) - def unpause_representation(self, collection, representation_id, site_name): + def unpause_representation(self, project_name, + representation_id, site_name): """ Sets 'representation_id' as unpaused. Does not fail or warn if repre wasn't paused. Args: - collection (string): project name + project_name (string): project name representation_id (string): MongoDB objectId value site_name (string): 'gdrive', 'studio' etc. """ - log.info("Unpausing SyncServer for {}".format(representation_id)) + self.log.info("Unpausing SyncServer for {}".format(representation_id)) try: self._paused_representations.remove(representation_id) except KeyError: pass # self.paused_representations is not persistent - self.reset_site_on_representation(collection, representation_id, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, pause=False) def is_representation_paused(self, representation_id, @@ -522,9 +528,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): happening on all representation inside. Args: - project_name (string): collection name + project_name (string): project_name name """ - log.info("Pausing SyncServer for {}".format(project_name)) + self.log.info("Pausing SyncServer for {}".format(project_name)) self._paused_projects.add(project_name) def unpause_project(self, project_name): @@ -534,9 +540,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Does not fail or warn if project wasn't paused. Args: - project_name (string): collection name + project_name (string): """ - log.info("Unpausing SyncServer for {}".format(project_name)) + self.log.info("Unpausing SyncServer for {}".format(project_name)) try: self._paused_projects.remove(project_name) except KeyError: @@ -547,7 +553,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns if 'project_name' is paused or not. Args: - project_name (string): collection name + project_name (string): check_parents (bool): check if server itself is not paused Returns: @@ -564,14 +570,14 @@ class SyncServerModule(OpenPypeModule, ITrayModule): It won't check anything, not uploading/downloading... """ - log.info("Pausing SyncServer") + self.log.info("Pausing SyncServer") self._paused = True def unpause_server(self): """ Unpause server """ - log.info("Unpausing SyncServer") + self.log.info("Unpausing SyncServer") self._paused = False def is_paused(self): @@ -882,7 +888,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): # val = val[platform.system().lower()] # except KeyError: # st = "{}'s field value {} should be".format(key, val) # noqa: E501 - # log.error(st + " multiplatform dict") + # self.log.error(st + " multiplatform dict") # # item["namespace"] = item["namespace"].replace('{site}', # site_name) @@ -912,23 +918,94 @@ class SyncServerModule(OpenPypeModule, ITrayModule): In case of user's involvement (reset site), start that right away. 
""" - self.sync_server_thread.reset_timer() + + if not self.enabled: + return + + if self.sync_server_thread is None: + self._reset_timer_with_rest_api() + else: + self.sync_server_thread.reset_timer() + + def is_representation_on_site( + self, project_name, representation_id, site_name + ): + """Checks if 'representation_id' has all files avail. on 'site_name'""" + representation = get_representation_by_id(project_name, + representation_id, + fields=["_id", "files"]) + if not representation: + return False + + on_site = False + for file_info in representation.get("files", []): + for site in file_info.get("sites", []): + if site["name"] != site_name: + continue + + if (site.get("progress") or site.get("error") or + not site.get("created_dt")): + return False + on_site = True + + return on_site + + def _reset_timer_with_rest_api(self): + # POST to webserver sites to add to representations + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") + if not webserver_url: + self.log.warning("Couldn't find webserver url") + return + + rest_api_url = "{}/sync_server/reset_timer".format( + webserver_url + ) + + try: + import requests + except Exception: + self.log.warning( + "Couldn't add sites to representations " + "('requests' is not available)" + ) + return + + requests.post(rest_api_url) def get_enabled_projects(self): """Returns list of projects which have SyncServer enabled.""" enabled_projects = [] if self.enabled: - for project in self.connection.projects(projection={"name": 1}): + for project in get_projects(fields=["name"]): project_name = project["name"] - project_settings = self.get_sync_project_setting(project_name) - if project_settings and project_settings.get("enabled"): + if self.is_project_enabled(project_name): enabled_projects.append(project_name) return enabled_projects - def handle_alternate_site(self, collection, representation, processed_site, - file_id, synced_file_id): + def is_project_enabled(self, project_name, single=False): + """Checks if 'project_name' is enabled for syncing. + 'get_sync_project_setting' is potentially expensive operation (pulls + settings for all projects if cached version is not available), using + project_settings for specific project should be faster. + Args: + project_name (str) + single (bool): use 'get_project_settings' method + """ + if self.enabled: + if single: + project_settings = get_project_settings(project_name) + project_settings = \ + self._parse_sync_settings_from_settings(project_settings) + else: + project_settings = self.get_sync_project_setting(project_name) + if project_settings and project_settings.get("enabled"): + return True + return False + + def handle_alternate_site(self, project_name, representation, + processed_site, file_id, synced_file_id): """ For special use cases where one site vendors another. @@ -941,7 +1018,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): same location >> file is accesible on 'sftp' site right away. 
Args: - collection (str): name of project + project_name (str): name of project representation (dict) processed_site (str): real site_name of published/uploaded file file_id (ObjectId): DB id of file handled @@ -965,26 +1042,112 @@ class SyncServerModule(OpenPypeModule, ITrayModule): alternate_sites = set(alternate_sites) for alt_site in alternate_sites: - query = { - "_id": representation["_id"] - } elem = {"name": alt_site, "created_dt": datetime.now(), "id": synced_file_id} self.log.debug("Adding alternate {} to {}".format( alt_site, representation["_id"])) - self._add_site(collection, query, + self._add_site(project_name, representation, elem, alt_site, file_id=file_id, force=True) + def get_repre_info_for_versions(self, project_name, version_ids, + active_site, remote_site): + """Returns representation documents for versions and sites combi + + Args: + project_name (str) + version_ids (list): of version[_id] + active_site (string): 'local', 'studio' etc + remote_site (string): dtto + Returns: + + """ + self.connection.Session["AVALON_PROJECT"] = project_name + query = [ + {"$match": {"parent": {"$in": version_ids}, + "type": "representation", + "files.sites.name": {"$exists": 1}}}, + {"$unwind": "$files"}, + {'$addFields': { + 'order_local': { + '$filter': { + 'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', active_site]} + } + } + }}, + {'$addFields': { + 'order_remote': { + '$filter': { + 'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', remote_site]} + } + } + }}, + {'$addFields': { + 'progress_local': {"$arrayElemAt": [{ + '$cond': [ + {'$size': "$order_local.progress"}, + "$order_local.progress", + # if exists created_dt count is as available + {'$cond': [ + {'$size': "$order_local.created_dt"}, + [1], + [0] + ]} + ]}, + 0 + ]} + }}, + {'$addFields': { + 'progress_remote': {"$arrayElemAt": [{ + '$cond': [ + {'$size': "$order_remote.progress"}, + "$order_remote.progress", + # if exists created_dt count is as available + {'$cond': [ + {'$size': "$order_remote.created_dt"}, + [1], + [0] + ]} + ]}, + 0 + ]} + }}, + {'$group': { # first group by repre + '_id': '$_id', + 'parent': {'$first': '$parent'}, + 'avail_ratio_local': { + '$first': { + '$divide': [{'$sum': "$progress_local"}, {'$sum': 1}] + } + }, + 'avail_ratio_remote': { + '$first': { + '$divide': [{'$sum': "$progress_remote"}, {'$sum': 1}] + } + } + }}, + {'$group': { # second group by parent, eg version_id + '_id': '$parent', + 'repre_count': {'$sum': 1}, # total representations + # fully available representation for site + 'avail_repre_local': {'$sum': "$avail_ratio_local"}, + 'avail_repre_remote': {'$sum': "$avail_ratio_remote"}, + }}, + ] + # docs = list(self.connection.aggregate(query)) + return self.connection.aggregate(query) + """ End of Public API """ - def get_local_file_path(self, collection, site_name, file_path): + def get_local_file_path(self, project_name, site_name, file_path): """ Externalized for app """ - handler = LocalDriveHandler(collection, site_name) + handler = LocalDriveHandler(project_name, site_name) local_file_path = handler.resolve_path(file_path) return local_file_path @@ -1020,21 +1183,13 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ self.server_init() - from .tray.app import SyncServerWindow - self.widget = SyncServerWindow(self) - def server_init(self): """Actual initialization of Sync Server.""" # import only in tray or Python3, because of Python2 hosts - from .sync_server import SyncServerThread - if not self.enabled: return - 
enabled_projects = self.get_enabled_projects() - if not enabled_projects: - self.enabled = False - return + from .sync_server import SyncServerThread self.lock = threading.Lock() @@ -1054,10 +1209,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.server_start() def server_start(self): - if self.sync_project_settings and self.enabled: + if self.enabled: self.sync_server_thread.start() else: - log.info("No presets or active providers. " + + self.log.info("No presets or active providers. " + "Synchronization not possible.") def tray_exit(self): @@ -1075,12 +1230,12 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if not self.is_running: return try: - log.info("Stopping sync server server") + self.log.info("Stopping sync server server") self.sync_server_thread.is_running = False self.sync_server_thread.stop() - log.info("Sync server stopped") + self.log.info("Sync server stopped") except Exception: - log.warning( + self.log.warning( "Error has happened during Killing sync server", exc_info=True ) @@ -1151,10 +1306,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): def _prepare_sync_project_settings(self, exclude_locals): sync_project_settings = {} system_sites = self.get_all_site_configs() - project_docs = self.connection.projects( - projection={"name": 1}, - only_active=True - ) + project_docs = get_projects(fields=["name"]) for project_doc in project_docs: project_name = project_doc["name"] sites = copy.deepcopy(system_sites) # get all configured sites @@ -1168,7 +1320,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): sync_project_settings[project_name] = proj_settings if not sync_project_settings: - log.info("No enabled and configured projects for sync.") + self.log.info("No enabled and configured projects for sync.") return sync_project_settings def get_sync_project_setting(self, project_name, exclude_locals=False, @@ -1279,7 +1431,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return sites.get(site, 'N/A') @time_function - def get_sync_representations(self, collection, active_site, remote_site): + def get_sync_representations(self, project_name, active_site, remote_site): """ Get representations that should be synced, these could be recognised by presence of document in 'files.sites', where key is @@ -1290,8 +1442,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): better performance. Goal is to get as few representations as possible. 
Args: - collection (string): name of collection (in most cases matches - project name + project_name (string): active_site (string): identifier of current active site (could be 'local_0' when working from home, 'studio' when working in the studio (default) @@ -1300,10 +1451,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: (list) of dictionaries """ - log.debug("Check representations for : {}".format(collection)) - self.connection.Session["AVALON_PROJECT"] = collection + self.log.debug("Check representations for : {}".format(project_name)) + self.connection.Session["AVALON_PROJECT"] = project_name # retry_cnt - number of attempts to sync specific file before giving up - retries_arr = self._get_retries_arr(collection) + retries_arr = self._get_retries_arr(project_name) match = { "type": "representation", "$or": [ @@ -1379,9 +1530,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): }}, {"$sort": {'priority': -1, '_id': 1}}, ] - log.debug("active_site:{} - remote_site:{}".format(active_site, - remote_site)) - log.debug("query: {}".format(aggr)) + self.log.debug("active_site:{} - remote_site:{}".format( + active_site, remote_site + )) + self.log.debug("query: {}".format(aggr)) representations = self.connection.aggregate(aggr) return representations @@ -1416,7 +1568,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if get_local_site_id() not in (local_site, remote_site): # don't do upload/download for studio sites - log.debug("No local site {} - {}".format(local_site, remote_site)) + self.log.debug( + "No local site {} - {}".format(local_site, remote_site) + ) return SyncStatus.DO_NOTHING _, remote_rec = self._get_site_rec(sites, remote_site) or {} @@ -1440,21 +1594,21 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return SyncStatus.DO_NOTHING - def update_db(self, collection, new_file_id, file, representation, + def update_db(self, project_name, new_file_id, file, representation, site, error=None, progress=None, priority=None): """ Update 'provider' portion of records in DB with success (file_id) or error (exception) Args: - collection (string): name of project - force to db connection as + project_name (string): name of project - force to db connection as each file might come from different collection - new_file_id (string): + new_file_id (string): only present if file synced successfully file (dictionary): info about processed file (pulled from DB) representation (dictionary): parent repr of file (from DB) site (string): label ('gdrive', 'S3') error (string): exception message - progress (float): 0-1 of progress of upload/download + progress (float): 0-0.99 of progress of upload/download priority (int): 0-100 set priority Returns: @@ -1490,7 +1644,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if file_id: arr_filter.append({'f._id': ObjectId(file_id)}) - self.connection.database[collection].update_one( + self.connection.database[project_name].update_one( query, update, upsert=True, @@ -1507,11 +1661,16 @@ class SyncServerModule(OpenPypeModule, ITrayModule): error_str = '' source_file = file.get("path", "") - log.debug("File for {} - {source_file} process {status} {error_str}". 
- format(representation_id, - status=status, - source_file=source_file, - error_str=error_str)) + self.log.debug( + ( + "File for {} - {source_file} process {status} {error_str}" + ).format( + representation_id, + status=status, + source_file=source_file, + error_str=error_str + ) + ) def _get_file_info(self, files, _id): """ @@ -1553,9 +1712,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return -1, None - def reset_site_on_representation(self, collection, representation_id, + def reset_site_on_representation(self, project_name, representation_id, side=None, file_id=None, site_name=None, - remove=False, pause=None, force=False): + remove=False, pause=None, force=False, + priority=None): """ Reset information about synchronization for particular 'file_id' and provider. @@ -1570,7 +1730,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Should be used when repre should be synced to new site. Args: - collection (string): name of project (eg. collection) in DB + project_name (string): name of project (eg. collection) in DB representation_id(string): _id of representation file_id (string): file _id in representation side (string): local or remote side @@ -1578,26 +1738,25 @@ class SyncServerModule(OpenPypeModule, ITrayModule): remove (bool): if True remove site altogether pause (bool or None): if True - pause, False - unpause force (bool): hard reset - currently only for add_site + priority (int): set priority Raises: SiteAlreadyPresentError - if adding already existing site and not 'force' ValueError - other errors (repre not found, misconfiguration) """ - query = { - "_id": ObjectId(representation_id) - } - - representation = self.connection.database[collection].find_one(query) + representation = get_representation_by_id(project_name, + representation_id) if not representation: raise ValueError("Representation {} not found in {}". 
- format(representation_id, collection)) + format(representation_id, project_name)) + if side and site_name: raise ValueError("Misconfiguration, only one of side and " + "site_name arguments should be passed.") - local_site = self.get_active_site(collection) - remote_site = self.get_remote_site(collection) + local_site = self.get_active_site(project_name) + remote_site = self.get_remote_site(project_name) if side: if side == 'local': @@ -1607,38 +1766,48 @@ class SyncServerModule(OpenPypeModule, ITrayModule): elem = {"name": site_name} + # Add priority + if priority: + elem["priority"] = priority + if file_id: # reset site for particular file - self._reset_site_for_file(collection, query, + self._reset_site_for_file(project_name, representation_id, elem, file_id, site_name) elif side: # reset site for whole representation - self._reset_site(collection, query, elem, site_name) + self._reset_site(project_name, representation_id, elem, site_name) elif remove: # remove site for whole representation - self._remove_site(collection, query, representation, site_name) + self._remove_site(project_name, + representation, site_name) elif pause is not None: - self._pause_unpause_site(collection, query, + self._pause_unpause_site(project_name, representation, site_name, pause) else: # add new site to all files for representation - self._add_site(collection, query, representation, elem, site_name, + self._add_site(project_name, representation, elem, site_name, force=force) - def _update_site(self, collection, query, update, arr_filter): + def _update_site(self, project_name, representation_id, + update, arr_filter): """ Auxiliary method to call update_one function on DB Used for refactoring ugly reset_provider_for_file """ - self.connection.database[collection].update_one( + query = { + "_id": ObjectId(representation_id) + } + + self.connection.database[project_name].update_one( query, update, upsert=True, array_filters=arr_filter ) - def _reset_site_for_file(self, collection, query, + def _reset_site_for_file(self, project_name, representation_id, elem, file_id, site_name): """ Resets 'site_name' for 'file_id' on representation in 'query' on - 'collection' + 'project_name' """ update = { "$set": {"files.$[f].sites.$[s]": elem} @@ -1651,9 +1820,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'f._id': file_id} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, update, arr_filter) - def _reset_site(self, collection, query, elem, site_name): + def _reset_site(self, project_name, representation_id, elem, site_name): """ Resets 'site_name' for all files of representation in 'query' """ @@ -1665,9 +1834,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'s.name': site_name} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, update, arr_filter) - def _remove_site(self, collection, query, representation, site_name): + def _remove_site(self, project_name, representation, site_name): """ Removes 'site_name' for 'representation' in 'query' @@ -1681,7 +1850,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): break if not found: msg = "Site {} not found".format(site_name) - log.info(msg) + self.log.info(msg) raise ValueError(msg) update = { @@ -1689,10 +1858,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): } arr_filter = [] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation["_id"], + update, arr_filter) 
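As an illustration of the update pattern these helpers rely on, here is a minimal standalone sketch of the filtered positional update that _update_site() issues, written with plain pymongo. It assumes a local MongoDB server, an "avalon" database with one collection per project, and placeholder ObjectIds; the connection URL and names are hypothetical, the real module goes through self.connection.database[project_name].

from bson.objectid import ObjectId
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # assumed local server
repre_col = client["avalon"]["my_project"]         # hypothetical project collection

repre_id = ObjectId("0123456789ab0123456789ab")    # placeholder ids
file_id = ObjectId("0123456789ab0123456789ac")

# Re-queue site "gdrive" for a single file of a representation. The filtered
# positional operators $[f] / $[s] only touch the matching array elements,
# mirroring _reset_site_for_file() above; replacing the site dict with just
# {"name": ...} drops 'created_dt'/'progress' so the sync loop can pick the
# file up again.
repre_col.update_one(
    {"_id": repre_id},
    {"$set": {"files.$[f].sites.$[s]": {"name": "gdrive"}}},
    upsert=True,
    array_filters=[{"s.name": "gdrive"}, {"f._id": file_id}],
)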
- def _pause_unpause_site(self, collection, query, - representation, site_name, pause): + def _pause_unpause_site(self, project_name, representation, + site_name, pause): """ Pauses/unpauses all files for 'representation' based on 'pause' @@ -1707,7 +1877,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): break if not found: msg = "Site {} not found".format(site_name) - log.info(msg) + self.log.info(msg) raise ValueError(msg) if pause: @@ -1724,12 +1894,13 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'s.name': site_name} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation["_id"], + update, arr_filter) - def _add_site(self, collection, query, representation, elem, site_name, + def _add_site(self, project_name, representation, elem, site_name, force=False, file_id=None): """ - Adds 'site_name' to 'representation' on 'collection' + Adds 'site_name' to 'representation' on 'project_name' Args: representation (dict) @@ -1737,10 +1908,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Use 'force' to remove existing or raises ValueError """ + representation_id = representation["_id"] reset_existing = False files = representation.get("files", []) if not files: - log.debug("No files for {}".format(representation["_id"])) + self.log.debug("No files for {}".format(representation_id)) return for repre_file in files: @@ -1750,13 +1922,14 @@ class SyncServerModule(OpenPypeModule, ITrayModule): for site in repre_file.get("sites"): if site["name"] == site_name: if force or site.get("error"): - self._reset_site_for_file(collection, query, + self._reset_site_for_file(project_name, + representation_id, elem, repre_file["_id"], site_name) reset_existing = True else: msg = "Site {} already present".format(site_name) - log.info(msg) + self.log.info(msg) raise SiteAlreadyPresentError(msg) if reset_existing: @@ -1776,14 +1949,15 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'f._id': file_id} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, + update, arr_filter) - def _remove_local_file(self, collection, representation_id, site_name): + def _remove_local_file(self, project_name, representation_id, site_name): """ Removes all local files for 'site_name' of 'representation_id' Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site @@ -1799,21 +1973,17 @@ class SyncServerModule(OpenPypeModule, ITrayModule): provider_name = self.get_provider_for_site(site=site_name) if provider_name == 'local_drive': - query = { - "_id": ObjectId(representation_id) - } - - representation = list( - self.connection.database[collection].find(query)) + representation = get_representation_by_id(project_name, + representation_id, + fields=["files"]) if not representation: self.log.debug("No repre {} found".format( representation_id)) return - representation = representation.pop() local_file_path = '' for file in representation.get("files"): - local_file_path = self.get_local_file_path(collection, + local_file_path = self.get_local_file_path(project_name, site_name, file.get("path", "") ) @@ -1845,6 +2015,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: (int): in seconds """ + if not project_name: + return 60 + ld = self.sync_project_settings[project_name]["config"]["loop_delay"] return int(ld) @@ -1856,16 
+2029,19 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
             self.widget = SyncServerWindow(self)
             no_errors = True
         except ValueError:
-            log.info("No system setting for sync. Not syncing.", exc_info=True)
+            self.log.info(
+                "No system setting for sync. Not syncing.", exc_info=True
+            )
         except KeyError:
-            log.info((
+            self.log.info((
                 "There are not set presets for SyncServer OR "
                 "Credentials provided are invalid, "
                 "no syncing possible").
                 format(str(self.sync_project_settings)),
                 exc_info=True)
         except:
-            log.error("Uncaught exception durin start of SyncServer",
-                      exc_info=True)
+            self.log.error(
+                "Uncaught exception during start of SyncServer",
+                exc_info=True)
         self.enabled = no_errors
         self.widget.show()
@@ -1974,3 +2150,55 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
             settings ('presets')
         """
         return presets[project_name]['sites'][site_name]['root']
+
+    def cli(self, click_group):
+        click_group.add_command(cli_main)
+
+    # Webserver module implementation
+    def webserver_initialization(self, server_manager):
+        """Add routes for syncs."""
+        if self.tray_initialized:
+            from .rest_api import SyncServerModuleRestApi
+            self.rest_api_obj = SyncServerModuleRestApi(
+                self, server_manager
+            )
+
+
+@click.group(SyncServerModule.name, help="SyncServer module related commands.")
+def cli_main():
+    pass
+
+
+@cli_main.command()
+@click.option(
+    "-a",
+    "--active_site",
+    required=True,
+    help="Name of active site")
+def syncservice(active_site):
+    """Launch sync server under entered site.
+
+    This should ideally be used by a system service (such as systemd or
+    upstart on Linux, or a Windows service).
+    """
+
+    from openpype.modules import ModulesManager
+
+    os.environ["OPENPYPE_LOCAL_ID"] = active_site
+
+    def signal_handler(sig, frame):
+        print("You pressed Ctrl+C. 
Process ended.") + sync_server_module.server_exit() + sys.exit(0) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + manager = ModulesManager() + sync_server_module = manager.modules_by_name["sync_server"] + + sync_server_module.server_init() + sync_server_module.server_start() + + while True: + time.sleep(1.0) diff --git a/openpype/modules/sync_server/tray/app.py b/openpype/modules/sync_server/tray/app.py index fc8558bdbc..9b9768327e 100644 --- a/openpype/modules/sync_server/tray/app.py +++ b/openpype/modules/sync_server/tray/app.py @@ -2,7 +2,6 @@ from Qt import QtWidgets, QtCore, QtGui from openpype.tools.settings import style -from openpype.lib import PypeLogger from openpype import resources from .widgets import ( @@ -10,8 +9,6 @@ from .widgets import ( SyncRepresentationSummaryWidget ) -log = PypeLogger().get_logger("SyncServer") - class SyncServerWindow(QtWidgets.QDialog): """ @@ -46,6 +43,14 @@ class SyncServerWindow(QtWidgets.QDialog): left_column_layout.addWidget(self.pause_btn) + checkbox = QtWidgets.QCheckBox("Show only enabled", self) + checkbox.setStyleSheet("QCheckBox{spacing: 5px;" + "padding:5px 5px 5px 5px;}") + checkbox.setChecked(True) + self.show_only_enabled_chk = checkbox + + left_column_layout.addWidget(self.show_only_enabled_chk) + repres = SyncRepresentationSummaryWidget( sync_server, project=self.projects.current_project, @@ -86,15 +91,27 @@ class SyncServerWindow(QtWidgets.QDialog): repres.message_generated.connect(self._update_message) self.projects.message_generated.connect(self._update_message) + self.show_only_enabled_chk.stateChanged.connect( + self._on_enabled_change + ) + self.representationWidget = repres + def showEvent(self, event): + self.representationWidget.set_project(self.projects.current_project) + self.projects.refresh() + self._set_running(True) + super().showEvent(event) + + def closeEvent(self, event): + self._set_running(False) + super().closeEvent(event) + def _on_project_change(self): if self.projects.current_project is None: return - self.representationWidget.table_view.model().set_project( - self.projects.current_project - ) + self.representationWidget.set_project(self.projects.current_project) project_name = self.projects.current_project if not self.sync_server.get_sync_project_setting(project_name): @@ -103,16 +120,12 @@ class SyncServerWindow(QtWidgets.QDialog): self.projects.refresh() return - def showEvent(self, event): - self.representationWidget.model.set_project( - self.projects.current_project) + def _on_enabled_change(self): + """Called when enabled projects only checkbox is toggled.""" + self.projects.show_only_enabled = \ + self.show_only_enabled_chk.isChecked() self.projects.refresh() - self._set_running(True) - super().showEvent(event) - - def closeEvent(self, event): - self._set_running(False) - super().closeEvent(event) + self.representationWidget.set_project(None) def _set_running(self, running): self.representationWidget.model.is_running = running diff --git a/openpype/modules/sync_server/tray/delegates.py b/openpype/modules/sync_server/tray/delegates.py index 5ab809a816..988eb40d28 100644 --- a/openpype/modules/sync_server/tray/delegates.py +++ b/openpype/modules/sync_server/tray/delegates.py @@ -1,8 +1,7 @@ import os from Qt import QtCore, QtWidgets, QtGui -from openpype.lib import PypeLogger -from . 
import lib +from openpype.lib import Logger from openpype.tools.utils.constants import ( LOCAL_PROVIDER_ROLE, @@ -16,7 +15,7 @@ from openpype.tools.utils.constants import ( EDIT_ICON_ROLE ) -log = PypeLogger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class PriorityDelegate(QtWidgets.QStyledItemDelegate): diff --git a/openpype/modules/sync_server/tray/lib.py b/openpype/modules/sync_server/tray/lib.py index 87344be634..ff93815639 100644 --- a/openpype/modules/sync_server/tray/lib.py +++ b/openpype/modules/sync_server/tray/lib.py @@ -2,11 +2,6 @@ import attr import abc import six -from openpype.lib import PypeLogger - - -log = PypeLogger().get_logger("SyncServer") - STATUS = { 0: 'In Progress', 1: 'Queued', diff --git a/openpype/modules/sync_server/tray/models.py b/openpype/modules/sync_server/tray/models.py index 7241cc3472..d63d046508 100644 --- a/openpype/modules/sync_server/tray/models.py +++ b/openpype/modules/sync_server/tray/models.py @@ -1,6 +1,7 @@ import os import attr from bson.objectid import ObjectId +import datetime from Qt import QtCore from Qt.QtCore import Qt @@ -8,8 +9,8 @@ import qtawesome from openpype.tools.utils.delegates import pretty_timestamp -from openpype.lib import PypeLogger -from openpype.api import get_local_site_id +from openpype.lib import Logger, get_local_site_id +from openpype.client import get_representation_by_id from . import lib @@ -31,7 +32,7 @@ from openpype.tools.utils.constants import ( ) -log = PypeLogger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class _SyncRepresentationModel(QtCore.QAbstractTableModel): @@ -52,7 +53,8 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): All queries should go through this (because of collection). """ - return self.sync_server.connection.database[self.project] + if self.project: + return self.sync_server.connection.database[self.project] @property def project(self): @@ -150,6 +152,9 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): @property def can_edit(self): """Returns true if some site is user local site, eg. could edit""" + if not self.project: + return False + return get_local_site_id() in (self.active_site, self.remote_site) def get_column(self, index): @@ -190,7 +195,7 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): actually queried (scrolled a couple of times to list more than single page of records) """ - if self.is_editing or not self.is_running: + if self.is_editing or not self.is_running or not self.project: return self.refresh_started.emit() self.beginResetModel() @@ -232,6 +237,9 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): more records in DB than loaded. 
""" log.debug("fetchMore") + if not self.dbcon: + return + items_to_fetch = min(self._total_records - self._rec_loaded, self.PAGE_SIZE) self.query = self.get_query(self._rec_loaded) @@ -286,9 +294,10 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): # replace('False', 'false').\ # replace('True', 'true').replace('None', 'null')) - representations = self.dbcon.aggregate(pipeline=self.query, - allowDiskUse=True) - self.refresh(representations) + if self.dbcon: + representations = self.dbcon.aggregate(pipeline=self.query, + allowDiskUse=True) + self.refresh(representations) def set_word_filter(self, word_filter): """ @@ -378,9 +387,9 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): project (str): name of project """ self._project = project - self.sync_server.set_sync_project_settings() # project might have been deactivated in the meantime if not self.sync_server.get_sync_project_setting(project): + self._data = {} return self.active_site = self.sync_server.get_active_site(self.project) @@ -405,6 +414,23 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): return index return None + def _convert_date(self, date_value, current_date): + """Converts 'date_value' to string. + + Value of date_value might contain date in the future, used for nicely + sort queued items next to last downloaded. + """ + try: + converted_date = None + # ignore date in the future - for sorting only + if date_value and date_value < current_date: + converted_date = date_value.strftime("%Y%m%dT%H%M%SZ") + except (AttributeError, TypeError): + # ignore unparseable values + pass + + return converted_date + class SyncRepresentationSummaryModel(_SyncRepresentationModel): """ @@ -414,7 +440,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): full text filtering. Allows pagination, most of heavy lifting is being done on DB side. - Single model matches to single collection. When project is changed, + Single model matches to single project. When project is changed, model is reset and refreshed. 
Args: @@ -509,25 +535,23 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): self._word_filter = None - if not self._project or self._project == lib.DUMMY_PROJECT: - return - self.sync_server = sync_server # TODO think about admin mode + self.sort_criteria = self.DEFAULT_SORT + + self.timer = QtCore.QTimer() + if not self._project or self._project == lib.DUMMY_PROJECT: + self.active_site = sync_server.DEFAULT_SITE + self.remote_site = sync_server.DEFAULT_SITE + return + # this is for regular user, always only single local and single remote self.active_site = self.sync_server.get_active_site(self.project) self.remote_site = self.sync_server.get_remote_site(self.project) - self.sort_criteria = self.DEFAULT_SORT - self.query = self.get_query() self.default_query = list(self.get_query()) - representations = self.dbcon.aggregate(pipeline=self.query, - allowDiskUse=True) - self.refresh(representations) - - self.timer = QtCore.QTimer() self.timer.timeout.connect(self.tick) self.timer.start(self.REFRESH_SEC) @@ -554,7 +578,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): remote_provider = lib.translate_provider_for_icon(self.sync_server, self.project, remote_site) - + current_date = datetime.datetime.now() for repre in result.get("paginatedResults"): files = repre.get("files", []) if isinstance(files, dict): # aggregate returns dictionary @@ -564,14 +588,10 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): if not files: continue - local_updated = remote_updated = None - if repre.get('updated_dt_local'): - local_updated = \ - repre.get('updated_dt_local').strftime("%Y%m%dT%H%M%SZ") - - if repre.get('updated_dt_remote'): - remote_updated = \ - repre.get('updated_dt_remote').strftime("%Y%m%dT%H%M%SZ") + local_updated = self._convert_date(repre.get('updated_dt_local'), + current_date) + remote_updated = self._convert_date(repre.get('updated_dt_remote'), + current_date) avg_progress_remote = lib.convert_progress( repre.get('avg_progress_remote', '0')) @@ -639,6 +659,8 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): if limit == 0: limit = SyncRepresentationSummaryModel.PAGE_SIZE + # replace null with value in the future for better sorting + dummy_max_date = datetime.datetime(2099, 1, 1) aggr = [ {"$match": self.get_match_part()}, {'$unwind': '$files'}, @@ -681,7 +703,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): {'$cond': [ {'$size': "$order_remote.last_failed_dt"}, "$order_remote.last_failed_dt", - [] + [dummy_max_date] ]} ]}}, 'updated_dt_local': {'$first': { @@ -690,7 +712,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): {'$cond': [ {'$size': "$order_local.last_failed_dt"}, "$order_local.last_failed_dt", - [] + [dummy_max_date] ]} ]}}, 'files_size': {'$ifNull': ["$files.size", 0]}, @@ -897,11 +919,10 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): repre_id = self.data(index, Qt.UserRole) - representation = list(self.dbcon.find({"type": "representation", - "_id": repre_id})) + representation = get_representation_by_id(self.project, repre_id) if representation: self.sync_server.update_db(self.project, None, None, - representation.pop(), + representation, get_local_site_id(), priority=value) self.is_editing = False @@ -1003,9 +1024,6 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): self.sort_criteria = self.DEFAULT_SORT self.query = self.get_query() - representations = self.dbcon.aggregate(pipeline=self.query, - allowDiskUse=True) - 
self.refresh(representations) self.timer = QtCore.QTimer() self.timer.timeout.connect(self.tick) @@ -1036,6 +1054,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): self.project, remote_site) + current_date = datetime.datetime.now() for repre in result.get("paginatedResults"): # log.info("!!! repre:: {}".format(repre)) files = repre.get("files", []) @@ -1043,16 +1062,12 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): files = [files] for file in files: - local_updated = remote_updated = None - if repre.get('updated_dt_local'): - local_updated = \ - repre.get('updated_dt_local').strftime( - "%Y%m%dT%H%M%SZ") - - if repre.get('updated_dt_remote'): - remote_updated = \ - repre.get('updated_dt_remote').strftime( - "%Y%m%dT%H%M%SZ") + local_updated = self._convert_date( + repre.get('updated_dt_local'), + current_date) + remote_updated = self._convert_date( + repre.get('updated_dt_remote'), + current_date) remote_progress = lib.convert_progress( repre.get('progress_remote', '0')) @@ -1101,6 +1116,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): if limit == 0: limit = SyncRepresentationSummaryModel.PAGE_SIZE + dummy_max_date = datetime.datetime(2099, 1, 1) aggr = [ {"$match": self.get_match_part()}, {"$unwind": "$files"}, @@ -1144,7 +1160,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): '$cond': [ {'$size': "$order_remote.last_failed_dt"}, "$order_remote.last_failed_dt", - [] + [dummy_max_date] ] } ] @@ -1157,7 +1173,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): '$cond': [ {'$size': "$order_local.last_failed_dt"}, "$order_local.last_failed_dt", - [] + [dummy_max_date] ] } ] @@ -1340,11 +1356,10 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): file_id = self.data(index, Qt.UserRole) updated_file = None - # conversion from cursor to list - representations = list(self.dbcon.find({"type": "representation", - "_id": self._id})) + representation = get_representation_by_id(self.project, self._id) + if not representation: + return - representation = representations.pop() for repre_file in representation["files"]: if repre_file["_id"] == file_id: updated_file = repre_file diff --git a/openpype/modules/sync_server/tray/widgets.py b/openpype/modules/sync_server/tray/widgets.py index 6aae9562cf..c40aa98f24 100644 --- a/openpype/modules/sync_server/tray/widgets.py +++ b/openpype/modules/sync_server/tray/widgets.py @@ -9,8 +9,7 @@ import qtawesome from openpype.tools.settings import style -from openpype.api import get_local_site_id -from openpype.lib import PypeLogger +from openpype.lib import Logger, get_local_site_id from openpype.tools.utils.delegates import pretty_timestamp @@ -36,7 +35,7 @@ from openpype.tools.utils.constants import ( TRIES_ROLE ) -log = PypeLogger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class SyncProjectListWidget(QtWidgets.QWidget): @@ -47,6 +46,7 @@ class SyncProjectListWidget(QtWidgets.QWidget): message_generated = QtCore.Signal(str) refresh_msec = 10000 + show_only_enabled = True def __init__(self, sync_server, parent): super(SyncProjectListWidget, self).__init__(parent) @@ -122,11 +122,15 @@ class SyncProjectListWidget(QtWidgets.QWidget): self._model_reset = False selected_item = None - for project_name in self.sync_server.sync_project_settings.\ - keys(): + sync_settings = self.sync_server.sync_project_settings + for project_name in sync_settings.keys(): if self.sync_server.is_paused() or \ self.sync_server.is_project_paused(project_name): icon = 
self._get_icon("paused") + elif not sync_settings[project_name]["enabled"]: + if self.show_only_enabled: + continue + icon = self._get_icon("disabled") else: icon = self._get_icon("synced") @@ -139,12 +143,12 @@ class SyncProjectListWidget(QtWidgets.QWidget): if self.current_project == project_name: selected_item = item + if model.item(0) is None: + return + if selected_item: selected_index = model.indexFromItem(selected_item) - if len(self.sync_server.sync_project_settings.keys()) == 0: - model.appendRow(QtGui.QStandardItem(lib.DUMMY_PROJECT)) - if not self.current_project: self.current_project = model.item(0).data(QtCore.Qt.DisplayRole) @@ -248,6 +252,9 @@ class _SyncRepresentationWidget(QtWidgets.QWidget): active_changed = QtCore.Signal() # active index changed message_generated = QtCore.Signal(str) + def set_project(self, project): + self.model.set_project(project) + def _selection_changed(self, _new_selected, _all_selected): idxs = self.selection_model.selectedRows() self._selected_ids = set() @@ -581,7 +588,6 @@ class SyncRepresentationSummaryWidget(_SyncRepresentationWidget): super(SyncRepresentationSummaryWidget, self).__init__(parent) self.sync_server = sync_server - self._selected_ids = set() # keep last selected _id txt_filter = QtWidgets.QLineEdit() @@ -625,7 +631,6 @@ class SyncRepresentationSummaryWidget(_SyncRepresentationWidget): column = table_view.model().get_header_index("priority") priority_delegate = delegates.PriorityDelegate(self) table_view.setItemDelegateForColumn(column, priority_delegate) - layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.addLayout(top_bar_layout) @@ -633,21 +638,16 @@ class SyncRepresentationSummaryWidget(_SyncRepresentationWidget): self.table_view = table_view self.model = model - horizontal_header = HorizontalHeader(self) - table_view.setHorizontalHeader(horizontal_header) table_view.setSortingEnabled(True) - for column_name, width in self.default_widths: idx = model.get_header_index(column_name) table_view.setColumnWidth(idx, width) - table_view.doubleClicked.connect(self._double_clicked) self.txt_filter.textChanged.connect(lambda: model.set_word_filter( self.txt_filter.text())) table_view.customContextMenuRequested.connect(self._on_context_menu) - model.refresh_started.connect(self._save_scrollbar) model.refresh_finished.connect(self._set_scrollbar) model.modelReset.connect(self._set_selection) @@ -963,7 +963,6 @@ class HorizontalHeader(QtWidgets.QHeaderView): super(HorizontalHeader, self).__init__(QtCore.Qt.Horizontal, parent) self._parent = parent self.checked_values = {} - self.setModel(self._parent.model) self.setSectionsClickable(True) diff --git a/openpype/modules/sync_server/utils.py b/openpype/modules/sync_server/utils.py index 03f362202f..4caa01e9d7 100644 --- a/openpype/modules/sync_server/utils.py +++ b/openpype/modules/sync_server/utils.py @@ -1,6 +1,8 @@ import time -from openpype.api import Logger -log = Logger().get_logger("SyncServer") + +from openpype.lib import Logger + +log = Logger.get_logger("SyncServer") class ResumableError(Exception): diff --git a/openpype/modules/timers_manager/idle_threads.py b/openpype/modules/timers_manager/idle_threads.py index 9ec27e659b..7242761143 100644 --- a/openpype/modules/timers_manager/idle_threads.py +++ b/openpype/modules/timers_manager/idle_threads.py @@ -2,7 +2,7 @@ import time from Qt import QtCore from pynput import mouse, keyboard -from openpype.lib import PypeLogger +from openpype.lib import Logger class IdleItem: @@ -31,7 +31,7 @@ class 
IdleManager(QtCore.QThread): def __init__(self): super(IdleManager, self).__init__() - self.log = PypeLogger.get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) self.signal_reset_timer.connect(self._reset_time) self.idle_item = IdleItem() diff --git a/openpype/modules/timers_manager/plugins/publish/start_timer.py b/openpype/modules/timers_manager/plugins/publish/start_timer.py new file mode 100644 index 0000000000..6408327ca1 --- /dev/null +++ b/openpype/modules/timers_manager/plugins/publish/start_timer.py @@ -0,0 +1,39 @@ +""" +Requires: + context -> system_settings + context -> openPypeModules +""" + +import pyblish.api + +from openpype.pipeline import legacy_io + + +class StartTimer(pyblish.api.ContextPlugin): + label = "Start Timer" + order = pyblish.api.IntegratorOrder + 1 + hosts = ["*"] + + def process(self, context): + timers_manager = context.data["openPypeModules"]["timers_manager"] + if not timers_manager.enabled: + self.log.debug("TimersManager is disabled") + return + + modules_settings = context.data["system_settings"]["modules"] + if not modules_settings["timers_manager"]["disregard_publishing"]: + self.log.debug("Publish is not affecting running timers.") + return + + project_name = legacy_io.active_project() + asset_name = legacy_io.Session.get("AVALON_ASSET") + task_name = legacy_io.Session.get("AVALON_TASK") + if not project_name or not asset_name or not task_name: + self.log.info(( + "Current context does not contain all" + " required information to start a timer." + )) + return + timers_manager.start_timer_with_webserver( + project_name, asset_name, task_name, self.log + ) diff --git a/openpype/modules/timers_manager/plugins/publish/stop_timer.py b/openpype/modules/timers_manager/plugins/publish/stop_timer.py new file mode 100644 index 0000000000..a8674ff2ca --- /dev/null +++ b/openpype/modules/timers_manager/plugins/publish/stop_timer.py @@ -0,0 +1,27 @@ +""" +Requires: + context -> system_settings + context -> openPypeModules +""" + + +import pyblish.api + + +class StopTimer(pyblish.api.ContextPlugin): + label = "Stop Timer" + order = pyblish.api.ExtractorOrder - 0.49 + hosts = ["*"] + + def process(self, context): + timers_manager = context.data["openPypeModules"]["timers_manager"] + if not timers_manager.enabled: + self.log.debug("TimersManager is disabled") + return + + modules_settings = context.data["system_settings"]["modules"] + if not modules_settings["timers_manager"]["disregard_publishing"]: + self.log.debug("Publish is not affecting running timers.") + return + + timers_manager.stop_timer_with_webserver(self.log) diff --git a/openpype/modules/timers_manager/rest_api.py b/openpype/modules/timers_manager/rest_api.py index f16cb316c3..979db9075b 100644 --- a/openpype/modules/timers_manager/rest_api.py +++ b/openpype/modules/timers_manager/rest_api.py @@ -1,9 +1,7 @@ import json from aiohttp.web_response import Response -from openpype.api import Logger - -log = Logger().get_logger("Event processor") +from openpype.lib import Logger class TimersManagerModuleRestApi: @@ -12,6 +10,7 @@ class TimersManagerModuleRestApi: happens in Workfile app. 
""" def __init__(self, user_module, server_manager): + self._log = None self.module = user_module self.server_manager = server_manager @@ -19,6 +18,12 @@ class TimersManagerModuleRestApi: self.register() + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + def register(self): self.server_manager.add_route( "POST", @@ -47,7 +52,7 @@ class TimersManagerModuleRestApi: "Payload must contain fields 'project_name," " 'asset_name' and 'task_name'" ) - log.error(msg) + self.log.error(msg) return Response(status=400, message=msg) self.module.stop_timers() @@ -73,7 +78,7 @@ class TimersManagerModuleRestApi: "Payload must contain fields 'project_name, 'asset_name'," " 'task_name'" ) - log.warning(message) + self.log.warning(message) return Response(text=message, status=404) time = self.module.get_task_time(project_name, asset_name, task_name) diff --git a/openpype/modules/timers_manager/timers_manager.py b/openpype/modules/timers_manager/timers_manager.py index 3f77a2b7dc..0ba68285a4 100644 --- a/openpype/modules/timers_manager/timers_manager.py +++ b/openpype/modules/timers_manager/timers_manager.py @@ -2,15 +2,18 @@ import os import platform -from openpype.modules import OpenPypeModule -from openpype_interfaces import ( +from openpype.client import get_asset_by_name +from openpype.modules import ( + OpenPypeModule, ITrayService, - ILaunchHookPaths + IPluginPaths ) -from openpype.pipeline import AvalonMongoDB +from openpype.lib.events import register_event_callback from .exceptions import InvalidContextError +TIMER_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) + class ExampleTimersManagerConnector: """Timers manager can handle timers of multiple modules/addons. @@ -32,6 +35,7 @@ class ExampleTimersManagerConnector: } ``` """ + # Not needed at all def __init__(self, module): # Store timer manager module to be able call it's methods when needed @@ -71,7 +75,11 @@ class ExampleTimersManagerConnector: self._timers_manager_module.timer_stopped(self._module.id) -class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): +class TimersManager( + OpenPypeModule, + ITrayService, + IPluginPaths +): """ Handles about Timers. Should be able to start/stop all timers at once. 
@@ -175,11 +183,18 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): ) def get_launch_hook_paths(self): - """Implementation of `ILaunchHookPaths`.""" - return os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "launch_hooks" - ) + """Implementation for applications launch hooks.""" + + return [ + os.path.join(TIMER_MODULE_DIR, "launch_hooks") + ] + + def get_plugin_paths(self): + """Implementation of `IPluginPaths`.""" + + return { + "publish": [os.path.join(TIMER_MODULE_DIR, "plugins", "publish")] + } @staticmethod def get_timer_data_for_context( @@ -196,22 +211,13 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): " Project: \"{}\" Asset: \"{}\" Task: \"{}\"" ).format(str(project_name), str(asset_name), str(task_name))) - dbconn = AvalonMongoDB() - dbconn.install() - dbconn.Session["AVALON_PROJECT"] = project_name - - asset_doc = dbconn.find_one( - { - "type": "asset", - "name": asset_name - }, - { - "data.tasks": True, - "data.parents": True - } + asset_doc = get_asset_by_name( + project_name, + asset_name, + fields=["_id", "name", "data.tasks", "data.parents"] ) + if not asset_doc: - dbconn.uninstall() raise InvalidContextError(( "Asset \"{}\" not found in project \"{}\"" ).format(asset_name, project_name)) @@ -219,7 +225,6 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): asset_data = asset_doc.get("data") or {} asset_tasks = asset_data.get("tasks") or {} if task_name not in asset_tasks: - dbconn.uninstall() raise InvalidContextError(( "Task \"{}\" not found on asset \"{}\" in project \"{}\"" ).format(task_name, asset_name, project_name)) @@ -237,9 +242,10 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): hierarchy_items = asset_data.get("parents") or [] hierarchy_items.append(asset_name) - dbconn.uninstall() return { "project_name": project_name, + "asset_id": str(asset_doc["_id"]), + "asset_name": asset_doc["name"], "task_name": task_name, "task_type": task_type, "hierarchy": hierarchy_items @@ -396,6 +402,7 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): logger (logging.Logger): Logger object. Using 'print' if not passed. """ + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") if not webserver_url: msg = "Couldn't find webserver url" @@ -422,3 +429,50 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): } return requests.post(rest_api_url, json=data) + + @staticmethod + def stop_timer_with_webserver(logger=None): + """Prepared method for calling stop timers on REST api. + + Args: + logger (logging.Logger): Logger used for logging messages. 
+        """
+
+        webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
+        if not webserver_url:
+            msg = "Couldn't find webserver url"
+            if logger is not None:
+                logger.warning(msg)
+            else:
+                print(msg)
+            return
+
+        rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
+        try:
+            import requests
+        except Exception:
+            msg = "Couldn't stop timer ('requests' is not available)"
+            if logger is not None:
+                logger.warning(msg)
+            else:
+                print(msg)
+            return
+
+        return requests.post(rest_api_url)
+
+    def on_host_install(self, host, host_name, project_name):
+        self.log.debug("Installing task changed callback")
+        register_event_callback("taskChanged", self._on_host_task_change)
+
+    def _on_host_task_change(self, event):
+        project_name = event["project_name"]
+        asset_name = event["asset_name"]
+        task_name = event["task_name"]
+        self.log.debug((
+            "Sending message that timer should change to"
+            " Project: {} Asset: {} Task: {}"
+        ).format(project_name, asset_name, task_name))
+
+        self.start_timer_with_webserver(
+            project_name, asset_name, task_name, self.log
+        )
diff --git a/openpype/modules/webserver/cors_middleware.py b/openpype/modules/webserver/cors_middleware.py
new file mode 100644
index 0000000000..f1cd7b04b3
--- /dev/null
+++ b/openpype/modules/webserver/cors_middleware.py
@@ -0,0 +1,283 @@
+r"""
+===============
+CORS Middleware
+===============
+.. versionadded:: 0.2.0
+Dealing with CORS headers for aiohttp applications.
+**IMPORTANT:** There is a `aiohttp-cors
+`_ library, which handles CORS
+headers by attaching additional handlers to aiohttp application for
+OPTIONS (preflight) requests. At the same time, this CORS middleware mimics the
+logic of `django-cors-headers `_,
+where all handling is done in the middleware without any additional handlers. This
+approach allows aiohttp application to respond with CORS headers for OPTIONS or
+wildcard handlers, which is not possible with ``aiohttp-cors`` due to
+https://github.com/aio-libs/aiohttp-cors/issues/241 issue.
+For detailed information about CORS (Cross Origin Resource Sharing) please
+visit:
+- `Wikipedia `_
+- Or `MDN `_
+Configuration
+=============
+**IMPORTANT:** By default, CORS middleware does not allow any origins to access
+content from your aiohttp application. This means you need to carefully check
+possible options and provide custom values for your needs.
+Usage
+=====
+.. 
code-block:: python + import re + from aiohttp import web + from aiohttp_middlewares import cors_middleware + from aiohttp_middlewares.cors import DEFAULT_ALLOW_HEADERS + # Unsecure configuration to allow all CORS requests + app = web.Application( + middlewares=[cors_middleware(allow_all=True)] + ) + # Allow CORS requests from URL http://localhost:3000 + app = web.Application( + middlewares=[ + cors_middleware(origins=["http://localhost:3000"]) + ] + ) + # Allow CORS requests from all localhost urls + app = web.Application( + middlewares=[ + cors_middleware( + origins=[re.compile(r"^https?\:\/\/localhost")] + ) + ] + ) + # Allow CORS requests from https://frontend.myapp.com as well + # as allow credentials + CORS_ALLOW_ORIGINS = ["https://frontend.myapp.com"] + app = web.Application( + middlewares=[ + cors_middleware( + origins=CORS_ALLOW_ORIGINS, + allow_credentials=True, + ) + ] + ) + # Allow CORS requests only for API urls + app = web.Application( + middelwares=[ + cors_middleware( + origins=CORS_ALLOW_ORIGINS, + urls=[re.compile(r"^\/api")], + ) + ] + ) + # Allow CORS requests for POST & PATCH methods, and for all + # default headers and `X-Client-UID` + app = web.Application( + middlewares=[ + cors_middleware( + origings=CORS_ALLOW_ORIGINS, + allow_methods=("POST", "PATCH"), + allow_headers=DEFAULT_ALLOW_HEADERS + + ("X-Client-UID",), + ) + ] + ) +""" + +import logging +import re +from typing import Pattern, Tuple + +from aiohttp import web + +from aiohttp_middlewares.annotations import ( + Handler, + Middleware, + StrCollection, + UrlCollection, +) +from aiohttp_middlewares.utils import match_path + + +ACCESS_CONTROL = "Access-Control" +ACCESS_CONTROL_ALLOW = f"{ACCESS_CONTROL}-Allow" +ACCESS_CONTROL_ALLOW_CREDENTIALS = f"{ACCESS_CONTROL_ALLOW}-Credentials" +ACCESS_CONTROL_ALLOW_HEADERS = f"{ACCESS_CONTROL_ALLOW}-Headers" +ACCESS_CONTROL_ALLOW_METHODS = f"{ACCESS_CONTROL_ALLOW}-Methods" +ACCESS_CONTROL_ALLOW_ORIGIN = f"{ACCESS_CONTROL_ALLOW}-Origin" +ACCESS_CONTROL_EXPOSE_HEADERS = f"{ACCESS_CONTROL}-Expose-Headers" +ACCESS_CONTROL_MAX_AGE = f"{ACCESS_CONTROL}-Max-Age" +ACCESS_CONTROL_REQUEST_METHOD = f"{ACCESS_CONTROL}-Request-Method" + +DEFAULT_ALLOW_HEADERS = ( + "accept", + "accept-encoding", + "authorization", + "content-type", + "dnt", + "origin", + "user-agent", + "x-csrftoken", + "x-requested-with", +) +DEFAULT_ALLOW_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT") +DEFAULT_URLS: Tuple[Pattern[str]] = (re.compile(r".*"),) + +logger = logging.getLogger(__name__) + + +def cors_middleware( + *, + allow_all: bool = False, + origins: UrlCollection = None, + urls: UrlCollection = None, + expose_headers: StrCollection = None, + allow_headers: StrCollection = DEFAULT_ALLOW_HEADERS, + allow_methods: StrCollection = DEFAULT_ALLOW_METHODS, + allow_credentials: bool = False, + max_age: int = None, +) -> Middleware: + """Middleware to provide CORS headers for aiohttp applications. + :param allow_all: + When enabled, allow any Origin to access content from your aiohttp web + application. **Please be careful with enabling this option as it may + result in security issues for your application.** By default: ``False`` + :param origins: + Allow content access for given list of origins. Support supplying + strings for exact origin match or regex instances. By default: ``None`` + :param urls: + Allow contect access for given list of URLs in aiohttp application. 
+ By default: *apply CORS headers for all URLs* + :param expose_headers: + List of headers to be exposed with every CORS request. By default: + ``None`` + :param allow_headers: + List of allowed headers. By default: + .. code-block:: python + ( + "accept", + "accept-encoding", + "authorization", + "content-type", + "dnt", + "origin", + "user-agent", + "x-csrftoken", + "x-requested-with", + ) + :param allow_methods: + List of allowed methods. By default: + .. code-block:: python + ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT") + :param allow_credentials: + When enabled apply allow credentials header in response, which results + in sharing cookies on shared resources. **Please be careful with + allowing credentials for CORS requests.** By default: ``False`` + :param max_age: Access control max age in seconds. By default: ``None`` + """ + check_urls: UrlCollection = DEFAULT_URLS if urls is None else urls + + @web.middleware + async def middleware( + request: web.Request, handler: Handler + ) -> web.StreamResponse: + # Initial vars + request_method = request.method + request_path = request.rel_url.path + + # Is this an OPTIONS request + is_options_request = request_method == "OPTIONS" + + # Is this a preflight request + is_preflight_request = ( + is_options_request + and ACCESS_CONTROL_REQUEST_METHOD in request.headers + ) + + # Log extra data + log_extra = { + "is_preflight_request": is_preflight_request, + "method": request_method.lower(), + "path": request_path, + } + + # Check whether CORS should be enabled for given URL or not. By default + # CORS enabled for all URLs + if not match_items(check_urls, request_path): + logger.debug( + "Request should not be processed via CORS middleware", + extra=log_extra, + ) + return await handler(request) + + # If this is a preflight request - generate empty response + if is_preflight_request: + response = web.StreamResponse() + # Otherwise - call actual handler + else: + response = await handler(request) + + # Now check origin heaer + origin = request.headers.get("Origin") + # Empty origin - do nothing + if not origin: + logger.debug( + "Request does not have Origin header. 
CORS headers not " + "available for given requests", + extra=log_extra, + ) + return response + + # Set allow credentials header if necessary + if allow_credentials: + response.headers[ACCESS_CONTROL_ALLOW_CREDENTIALS] = "true" + + # Check whether current origin satisfies CORS policy + if not allow_all and not (origins and match_items(origins, origin)): + logger.debug( + "CORS headers not allowed for given Origin", extra=log_extra + ) + return response + + # Now start supplying CORS headers + # First one is Access-Control-Allow-Origin + if allow_all and not allow_credentials: + cors_origin = "*" + else: + cors_origin = origin + response.headers[ACCESS_CONTROL_ALLOW_ORIGIN] = cors_origin + + # Then Access-Control-Expose-Headers + if expose_headers: + response.headers[ACCESS_CONTROL_EXPOSE_HEADERS] = ", ".join( + expose_headers + ) + + # Now, if this is an options request, respond with extra Allow headers + if is_options_request: + response.headers[ACCESS_CONTROL_ALLOW_HEADERS] = ", ".join( + allow_headers + ) + response.headers[ACCESS_CONTROL_ALLOW_METHODS] = ", ".join( + allow_methods + ) + if max_age is not None: + response.headers[ACCESS_CONTROL_MAX_AGE] = str(max_age) + + # If this is preflight request - do not allow other middlewares to + # process this request + if is_preflight_request: + logger.debug( + "Provide CORS headers with empty response for preflight " + "request", + extra=log_extra, + ) + raise web.HTTPOk(text="", headers=response.headers) + + # Otherwise return normal response + logger.debug("Provide CORS headers for request", extra=log_extra) + return response + + return middleware + + +def match_items(items: UrlCollection, value: str) -> bool: + """Go through all items and try to match item with given value.""" + return any(match_path(item, value) for item in items) diff --git a/openpype/modules/webserver/host_console_listener.py b/openpype/modules/webserver/host_console_listener.py index 6138f9f097..fdfe1ba688 100644 --- a/openpype/modules/webserver/host_console_listener.py +++ b/openpype/modules/webserver/host_console_listener.py @@ -5,7 +5,7 @@ import logging from concurrent.futures import CancelledError from Qt import QtWidgets -from openpype_interfaces import ITrayService +from openpype.modules import ITrayService log = logging.getLogger(__name__) diff --git a/openpype/modules/webserver/server.py b/openpype/modules/webserver/server.py index 83a29e074e..120925a362 100644 --- a/openpype/modules/webserver/server.py +++ b/openpype/modules/webserver/server.py @@ -1,16 +1,19 @@ +import re import threading import asyncio from aiohttp import web -from openpype.lib import PypeLogger - -log = PypeLogger.get_logger("WebServer") +from openpype.lib import Logger +from .cors_middleware import cors_middleware class WebServerManager: """Manger that care about web server thread.""" + def __init__(self, port=None, host=None): + self._log = None + self.port = port or 8079 self.host = host or "localhost" @@ -18,12 +21,24 @@ class WebServerManager: self.handlers = {} self.on_stop_callbacks = [] - self.app = web.Application() + self.app = web.Application( + middlewares=[ + cors_middleware( + origins=[re.compile(r"^https?\:\/\/localhost")] + ) + ] + ) # add route with multiple methods for single "external app" self.webserver_thread = WebServerThread(self) + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + @property def url(self): return "http://{}:{}".format(self.host, self.port) @@ -42,12 +57,12 @@ class 
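# The lazily created `log` property added to WebServerManager (and below to
# WebServerThread) is the same few lines twice. A hypothetical mixin holding the
# pattern in one place could look like this; `LazyLogMixin` is an illustrative
# name, not an existing OpenPype class.
from openpype.lib import Logger


class LazyLogMixin(object):
    _log = None

    @property
    def log(self):
        # The logger is created on first access, keeping constructors cheap.
        if self._log is None:
            self._log = Logger.get_logger(self.__class__.__name__)
        return self._log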
WebServerManager: if not self.is_running: return try: - log.debug("Stopping Web server") + self.log.debug("Stopping Web server") self.webserver_thread.is_running = False self.webserver_thread.stop() except Exception: - log.warning( + self.log.warning( "Error has happened during Killing Web server", exc_info=True ) @@ -65,7 +80,10 @@ class WebServerManager: class WebServerThread(threading.Thread): """ Listener for requests in thread.""" + def __init__(self, manager): + self._log = None + super(WebServerThread, self).__init__() self.is_running = False @@ -75,6 +93,12 @@ class WebServerThread(threading.Thread): self.site = None self.tasks = [] + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + @property def port(self): return self.manager.port @@ -87,13 +111,13 @@ class WebServerThread(threading.Thread): self.is_running = True try: - log.info("Starting WebServer server") + self.log.info("Starting WebServer server") self.loop = asyncio.new_event_loop() # create new loop for thread asyncio.set_event_loop(self.loop) self.loop.run_until_complete(self.start_server()) - log.debug( + self.log.debug( "Running Web server on URL: \"localhost:{}\"".format(self.port) ) @@ -101,7 +125,7 @@ class WebServerThread(threading.Thread): self.loop.run_forever() except Exception: - log.warning( + self.log.warning( "Web Server service has failed", exc_info=True ) finally: @@ -109,7 +133,7 @@ class WebServerThread(threading.Thread): self.is_running = False self.manager.thread_stopped() - log.info("Web server stopped") + self.log.info("Web server stopped") async def start_server(self): """ Starts runner and TCPsite """ @@ -129,17 +153,17 @@ class WebServerThread(threading.Thread): while self.is_running: while self.tasks: task = self.tasks.pop(0) - log.debug("waiting for task {}".format(task)) + self.log.debug("waiting for task {}".format(task)) await task - log.debug("returned value {}".format(task.result)) + self.log.debug("returned value {}".format(task.result)) await asyncio.sleep(0.5) - log.debug("Starting shutdown") + self.log.debug("Starting shutdown") await self.site.stop() - log.debug("Site stopped") + self.log.debug("Site stopped") await self.runner.cleanup() - log.debug("Runner stopped") + self.log.debug("Runner stopped") tasks = [ task for task in asyncio.all_tasks() @@ -147,7 +171,9 @@ class WebServerThread(threading.Thread): ] list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks results = await asyncio.gather(*tasks, return_exceptions=True) - log.debug(f'Finished awaiting cancelled tasks, results: {results}...') + self.log.debug( + f'Finished awaiting cancelled tasks, results: {results}...' 
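# WebServerThread runs the aiohttp site in its own thread with a dedicated event
# loop. A stripped-down, self-contained analogue of that pattern - illustrative
# only, not the OpenPype class itself:
import asyncio
import threading

from aiohttp import web


class BackgroundSite(threading.Thread):
    def __init__(self, app, host="localhost", port=8079):
        super(BackgroundSite, self).__init__(daemon=True)
        self.app = app
        self.host = host
        self.port = port
        self.loop = None

    def run(self):
        # Each thread needs its own event loop.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(self._start())
        self.loop.run_forever()

    async def _start(self):
        runner = web.AppRunner(self.app)
        await runner.setup()
        site = web.TCPSite(runner, self.host, self.port)
        await site.start()

    def stop(self):
        # Ask the loop to stop from the caller's thread.
        if self.loop is not None:
            self.loop.call_soon_threadsafe(self.loop.stop)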
+ ) await self.loop.shutdown_asyncgens() # to really make sure everything else has time to stop await asyncio.sleep(0.07) diff --git a/openpype/modules/webserver/webserver_module.py b/openpype/modules/webserver/webserver_module.py index 686bd27bfd..354ab1e4f9 100644 --- a/openpype/modules/webserver/webserver_module.py +++ b/openpype/modules/webserver/webserver_module.py @@ -24,8 +24,7 @@ import os import socket from openpype import resources -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayService +from openpype.modules import OpenPypeModule, ITrayService class WebServerModule(OpenPypeModule, ITrayService): @@ -53,9 +52,12 @@ class WebServerModule(OpenPypeModule, ITrayService): try: module.webserver_initialization(self.server_manager) except Exception: - self.log.warning(( - "Failed to connect module \"{}\" to webserver." - ).format(module.name)) + self.log.warning( + ( + "Failed to connect module \"{}\" to webserver." + ).format(module.name), + exc_info=True + ) def tray_init(self): self.create_server_manager() diff --git a/openpype/pipeline/__init__.py b/openpype/pipeline/__init__.py index 2e441fbf27..f5319c5a48 100644 --- a/openpype/pipeline/__init__.py +++ b/openpype/pipeline/__init__.py @@ -6,6 +6,7 @@ from .constants import ( from .mongodb import ( AvalonMongoDB, ) +from .anatomy import Anatomy from .create import ( BaseCreator, @@ -84,6 +85,7 @@ from .context_tools import ( register_host, registered_host, deregister_host, + get_process_id, ) install = install_host uninstall = uninstall_host @@ -96,6 +98,9 @@ __all__ = ( # --- MongoDB --- "AvalonMongoDB", + # --- Anatomy --- + "Anatomy", + # --- Create --- "BaseCreator", "Creator", diff --git a/openpype/pipeline/anatomy.py b/openpype/pipeline/anatomy.py new file mode 100644 index 0000000000..908dc2b187 --- /dev/null +++ b/openpype/pipeline/anatomy.py @@ -0,0 +1,1415 @@ +import os +import re +import copy +import platform +import collections +import numbers + +import six +import time + +from openpype.settings.lib import ( + get_project_settings, + get_local_settings, +) +from openpype.settings.constants import ( + DEFAULT_PROJECT_KEY +) + +from openpype.client import get_project +from openpype.lib.path_templates import ( + TemplateUnsolved, + TemplateResult, + TemplatesDict, + FormatObject, +) +from openpype.lib.log import Logger + +log = Logger.get_logger(__name__) + + +class ProjectNotSet(Exception): + """Exception raised when is created Anatomy without project name.""" + + +class RootCombinationError(Exception): + """This exception is raised when templates has combined root types.""" + + def __init__(self, roots): + joined_roots = ", ".join( + ["\"{}\"".format(_root) for _root in roots] + ) + # TODO better error message + msg = ( + "Combination of root with and" + " without root name in AnatomyTemplates. {}" + ).format(joined_roots) + + super(RootCombinationError, self).__init__(msg) + + +class BaseAnatomy(object): + """Anatomy module helps to keep project settings. + + Wraps key project specifications, AnatomyTemplates and Roots. 
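# With Anatomy now exported from openpype.pipeline, typical usage is roughly the
# sketch below. It assumes a configured OpenPype environment with the project in
# the database; the project name, template keys and fill data are illustrative.
from openpype.pipeline import Anatomy

anatomy = Anatomy("my_project")

# Roots and templates come from the project document and settings.
print(anatomy.roots)
print(anatomy.templates.get("work", {}).get("folder"))

# `format` wraps AnatomyTemplates.format and fills roots automatically.
filled = anatomy.format({
    "project": {"name": "my_project", "code": "mp"},
    "asset": "sh010",
    "task": {"name": "animation", "type": "Animation"},
})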
+ """ + root_key_regex = re.compile(r"{(root?[^}]+)}") + root_name_regex = re.compile(r"root\[([^]]+)\]") + + def __init__(self, project_doc, local_settings, site_name): + project_name = project_doc["name"] + self.project_name = project_name + + self._site_name = site_name + + self._data = self._prepare_anatomy_data( + project_doc, local_settings, site_name + ) + self._templates_obj = AnatomyTemplates(self) + self._roots_obj = Roots(self) + + # Anatomy used as dictionary + # - implemented only getters returning copy + def __getitem__(self, key): + return copy.deepcopy(self._data[key]) + + def get(self, key, default=None): + return copy.deepcopy(self._data).get(key, default) + + def keys(self): + return copy.deepcopy(self._data).keys() + + def values(self): + return copy.deepcopy(self._data).values() + + def items(self): + return copy.deepcopy(self._data).items() + + def _prepare_anatomy_data(self, project_doc, local_settings, site_name): + """Prepare anatomy data for further processing. + + Method added to replace `{task}` with `{task[name]}` in templates. + """ + project_name = project_doc["name"] + anatomy_data = self._project_doc_to_anatomy_data(project_doc) + + templates_data = anatomy_data.get("templates") + if templates_data: + # Replace `{task}` with `{task[name]}` in templates + value_queue = collections.deque() + value_queue.append(templates_data) + while value_queue: + item = value_queue.popleft() + if not isinstance(item, dict): + continue + + self._apply_local_settings_on_anatomy_data(anatomy_data, + local_settings, + project_name, + site_name) + + return anatomy_data + + @property + def templates(self): + """Wrap property `templates` of Anatomy's AnatomyTemplates instance.""" + return self._templates_obj.templates + + @property + def templates_obj(self): + """Return `AnatomyTemplates` object of current Anatomy instance.""" + return self._templates_obj + + def format(self, *args, **kwargs): + """Wrap `format` method of Anatomy's `templates_obj`.""" + return self._templates_obj.format(*args, **kwargs) + + def format_all(self, *args, **kwargs): + """Wrap `format_all` method of Anatomy's `templates_obj`.""" + return self._templates_obj.format_all(*args, **kwargs) + + @property + def roots(self): + """Wrap `roots` property of Anatomy's `roots_obj`.""" + return self._roots_obj.roots + + @property + def roots_obj(self): + """Return `Roots` object of current Anatomy instance.""" + return self._roots_obj + + def root_environments(self): + """Return OPENPYPE_ROOT_* environments for current project in dict.""" + return self._roots_obj.root_environments() + + def root_environmets_fill_data(self, template=None): + """Environment variable values in dictionary for rootless path. + + Args: + template (str): Template for environment variable key fill. + By default is set to `"${}"`. 
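# The dictionary-style getters above always return deep copies, so callers can
# freely mutate what they receive without affecting the Anatomy instance.
# Continuing the illustrative `anatomy` object from the previous sketch:
roots_copy = anatomy["roots"]
roots_copy.clear()

# A fresh copy of the untouched internal data is returned on the next access.
assert anatomy["roots"]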
+ """ + return self.roots_obj.root_environmets_fill_data(template) + + def find_root_template_from_path(self, *args, **kwargs): + """Wrapper for Roots `find_root_template_from_path`.""" + return self.roots_obj.find_root_template_from_path(*args, **kwargs) + + def path_remapper(self, *args, **kwargs): + """Wrapper for Roots `path_remapper`.""" + return self.roots_obj.path_remapper(*args, **kwargs) + + def all_root_paths(self): + """Wrapper for Roots `all_root_paths`.""" + return self.roots_obj.all_root_paths() + + def set_root_environments(self): + """Set OPENPYPE_ROOT_* environments for current project.""" + self._roots_obj.set_root_environments() + + def root_names(self): + """Return root names for current project.""" + return self.root_names_from_templates(self.templates) + + def _root_keys_from_templates(self, data): + """Extract root key from templates in data. + + Args: + data (dict): Data that may contain templates as string. + + Return: + set: Set of all root names from templates as strings. + + Output example: `{"root[work]", "root[publish]"}` + """ + + output = set() + if isinstance(data, dict): + for value in data.values(): + for root in self._root_keys_from_templates(value): + output.add(root) + + elif isinstance(data, str): + for group in re.findall(self.root_key_regex, data): + output.add(group) + + return output + + def root_value_for_template(self, template): + """Returns value of root key from template.""" + root_templates = [] + for group in re.findall(self.root_key_regex, template): + root_templates.append("{" + group + "}") + + if not root_templates: + return None + + return root_templates[0].format(**{"root": self.roots}) + + def root_names_from_templates(self, templates): + """Extract root names form anatomy templates. + + Returns None if values in templates contain only "{root}". + Empty list is returned if there is no "root" in templates. + Else returns all root names from templates in list. + + RootCombinationError is raised when templates contain both root types, + basic "{root}" and with root name specification "{root[work]}". + + Args: + templates (dict): Anatomy templates where roots are not filled. + + Return: + list/None: List of all root names from templates as strings when + multiroot setup is used, otherwise None is returned. + """ + roots = list(self._root_keys_from_templates(templates)) + # Return empty list if no roots found in templates + if not roots: + return roots + + # Raise exception when root keys have roots with and without root name. + # Invalid output example: ["root", "root[project]", "root[render]"] + if len(roots) > 1 and "root" in roots: + raise RootCombinationError(roots) + + # Return None if "root" without root name in templates + if len(roots) == 1 and roots[0] == "root": + return None + + names = set() + for root in roots: + for group in re.findall(self.root_name_regex, root): + names.add(group) + return list(names) + + def fill_root(self, template_path): + """Fill template path where is only "root" key unfilled. + + Args: + template_path (str): Path with "root" key in. + Example path: "{root}/projects/MyProject/Shot01/Lighting/..." + + Return: + str: formatted path + """ + # NOTE does not care if there are different keys than "root" + return template_path.format(**{"root": self.roots}) + + @classmethod + def fill_root_with_path(cls, rootless_path, root_path): + """Fill path without filled "root" key with passed path. 
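# What the two class-level regexes extract from a template value, shown on an
# illustrative template string:
import re

root_key_regex = re.compile(r"{(root?[^}]+)}")
root_name_regex = re.compile(r"root\[([^]]+)\]")

template = "{root[work]}/{project[name]}/{asset}/publish/{subset}"

groups = re.findall(root_key_regex, template)
# ['root[work]'] - only keys starting with "root" are picked up

names = [
    name
    for group in groups
    for name in re.findall(root_name_regex, group)
]
# ['work'] - the named root that root_names_from_templates() reports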
+ + This is helper to fill root with different directory path than anatomy + has defined no matter if is single or multiroot. + + Output path is same as input path if `rootless_path` does not contain + unfilled root key. + + Args: + rootless_path (str): Path without filled "root" key. Example: + "{root[work]}/MyProject/..." + root_path (str): What should replace root key in `rootless_path`. + + Returns: + str: Path with filled root. + """ + output = str(rootless_path) + for group in re.findall(cls.root_key_regex, rootless_path): + replacement = "{" + group + "}" + output = output.replace(replacement, root_path) + + return output + + def replace_root_with_env_key(self, filepath, template=None): + """Replace root of path with environment key. + + # Example: + ## Project with roots: + ``` + { + "nas": { + "windows": P:/projects", + ... + } + ... + } + ``` + + ## Entered filepath + "P:/projects/project/asset/task/animation_v001.ma" + + ## Entered template + "<{}>" + + ## Output + "/project/asset/task/animation_v001.ma" + + Args: + filepath (str): Full file path where root should be replaced. + template (str): Optional template for environment key. Must + have one index format key. + Default value if not entered: "${}" + + Returns: + str: Path where root is replaced with environment root key. + + Raise: + ValueError: When project's roots were not found in entered path. + """ + success, rootless_path = self.find_root_template_from_path(filepath) + if not success: + raise ValueError( + "{}: Project's roots were not found in path: {}".format( + self.project_name, filepath + ) + ) + + data = self.root_environmets_fill_data(template) + return rootless_path.format(**data) + + def _project_doc_to_anatomy_data(self, project_doc): + """Convert project document to anatomy data. + + Probably should fill missing keys and values. + """ + + output = copy.deepcopy(project_doc["config"]) + output["attributes"] = copy.deepcopy(project_doc["data"]) + + return output + + def _apply_local_settings_on_anatomy_data( + self, anatomy_data, local_settings, project_name, site_name + ): + """Apply local settings on anatomy data. + + ATM local settings can modify project roots. Project name is required + as local settings have data stored data by project's name. + + Local settings override root values in this order: + 1.) Check if local settings contain overrides for default project and + apply it's values on roots if there are any. + 2.) If passed `project_name` is not None then check project specific + overrides in local settings for the project and apply it's value on + roots if there are any. + + NOTE: Root values of default project from local settings are always + applied if are set. + + Args: + anatomy_data (dict): Data for anatomy. + local_settings (dict): Data of local settings. + project_name (str): Name of project for which anatomy data are. 
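# `fill_root_with_path` is a classmethod and needs no database access, so it can
# be tried directly (assuming the openpype package is importable; paths are
# illustrative):
from openpype.pipeline import Anatomy

rootless = "{root[work]}/MyProject/shots/sh010/work/animation"
Anatomy.fill_root_with_path(rootless, "/mnt/projects")
# '/mnt/projects/MyProject/shots/sh010/work/animation'

# A path without an unfilled root key is returned unchanged.
Anatomy.fill_root_with_path("/already/filled/path", "/mnt/projects")
# '/already/filled/path'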
+ """ + if not local_settings: + return + + local_project_settings = local_settings.get("projects") or {} + + # Check for roots existence in local settings first + roots_project_locals = ( + local_project_settings + .get(project_name, {}) + ) + roots_default_locals = ( + local_project_settings + .get(DEFAULT_PROJECT_KEY, {}) + ) + + # Skip rest of processing if roots are not set + if not roots_project_locals and not roots_default_locals: + return + + # Combine roots from local settings + roots_locals = roots_default_locals.get(site_name) or {} + roots_locals.update(roots_project_locals.get(site_name) or {}) + # Skip processing if roots for current active site are not available in + # local settings + if not roots_locals: + return + + current_platform = platform.system().lower() + + root_data = anatomy_data["roots"] + for root_name, path in roots_locals.items(): + if root_name not in root_data: + continue + anatomy_data["roots"][root_name][current_platform] = ( + path + ) + + +class Anatomy(BaseAnatomy): + _project_cache = {} + _site_cache = {} + + def __init__(self, project_name=None, site_name=None): + if not project_name: + project_name = os.environ.get("AVALON_PROJECT") + + if not project_name: + raise ProjectNotSet(( + "Implementation bug: Project name is not set. Anatomy requires" + " to load data for specific project." + )) + + project_doc = self.get_project_doc_from_cache(project_name) + local_settings = get_local_settings() + if not site_name: + site_name = self.get_site_name_from_cache( + project_name, local_settings + ) + + super(Anatomy, self).__init__( + project_doc, + local_settings, + site_name + ) + + @classmethod + def get_project_doc_from_cache(cls, project_name): + project_cache = cls._project_cache.get(project_name) + if project_cache is not None: + if time.time() - project_cache["start"] > 10: + cls._project_cache.pop(project_name) + project_cache = None + + if project_cache is None: + project_cache = { + "project_doc": get_project(project_name), + "start": time.time() + } + cls._project_cache[project_name] = project_cache + + return copy.deepcopy( + cls._project_cache[project_name]["project_doc"] + ) + + @classmethod + def get_site_name_from_cache(cls, project_name, local_settings): + site_cache = cls._site_cache.get(project_name) + if site_cache is not None: + if time.time() - site_cache["start"] > 10: + cls._site_cache.pop(project_name) + site_cache = None + + if site_cache: + return site_cache["site_name"] + + local_project_settings = local_settings.get("projects") + if not local_project_settings: + return + + project_locals = local_project_settings.get(project_name) or {} + default_locals = local_project_settings.get(DEFAULT_PROJECT_KEY) or {} + active_site = ( + project_locals.get("active_site") + or default_locals.get("active_site") + ) + if not active_site: + project_settings = get_project_settings(project_name) + active_site = ( + project_settings + ["global"] + ["sync_server"] + ["config"] + ["active_site"] + ) + + cls._site_cache[project_name] = { + "site_name": active_site, + "start": time.time() + } + return active_site + + +class AnatomyTemplateUnsolved(TemplateUnsolved): + """Exception for unsolved template when strict is set to True.""" + + msg = "Anatomy template \"{0}\" is unsolved.{1}{2}" + + +class AnatomyTemplateResult(TemplateResult): + rootless = None + + def __new__(cls, result, rootless_path): + new_obj = super(AnatomyTemplateResult, cls).__new__( + cls, + str(result), + result.template, + result.solved, + result.used_values, + 
result.missing_keys, + result.invalid_types + ) + new_obj.rootless = rootless_path + return new_obj + + def validate(self): + if not self.solved: + raise AnatomyTemplateUnsolved( + self.template, + self.missing_keys, + self.invalid_types + ) + + def copy(self): + tmp = TemplateResult( + str(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + return self.__class__(tmp, self.rootless) + + def normalized(self): + """Convert to normalized path.""" + + tmp = TemplateResult( + os.path.normpath(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + return self.__class__(tmp, self.rootless) + + +class AnatomyTemplates(TemplatesDict): + inner_key_pattern = re.compile(r"(\{@.*?[^{}0]*\})") + inner_key_name_pattern = re.compile(r"\{@(.*?[^{}0]*)\}") + + def __init__(self, anatomy): + super(AnatomyTemplates, self).__init__() + self.anatomy = anatomy + self.loaded_project = None + + def __getitem__(self, key): + return self.templates[key] + + def get(self, key, default=None): + return self.templates.get(key, default) + + def reset(self): + self._raw_templates = None + self._templates = None + self._objected_templates = None + + @property + def project_name(self): + return self.anatomy.project_name + + @property + def roots(self): + return self.anatomy.roots + + @property + def templates(self): + self._validate_discovery() + return self._templates + + @property + def objected_templates(self): + self._validate_discovery() + return self._objected_templates + + def _validate_discovery(self): + if self.project_name != self.loaded_project: + self.reset() + + if self._templates is None: + self._discover() + self.loaded_project = self.project_name + + def _format_value(self, value, data): + if isinstance(value, RootItem): + return self._solve_dict(value, data) + + result = super(AnatomyTemplates, self)._format_value(value, data) + if isinstance(result, TemplateResult): + rootless_path = self._rootless_path(result, data) + result = AnatomyTemplateResult(result, rootless_path) + return result + + def set_templates(self, templates): + if not templates: + self.reset() + return + + self._raw_templates = copy.deepcopy(templates) + templates = copy.deepcopy(templates) + v_queue = collections.deque() + v_queue.append(templates) + while v_queue: + item = v_queue.popleft() + if not isinstance(item, dict): + continue + + for key in tuple(item.keys()): + value = item[key] + if isinstance(value, dict): + v_queue.append(value) + + elif ( + isinstance(value, six.string_types) + and "{task}" in value + ): + item[key] = value.replace("{task}", "{task[name]}") + + solved_templates = self.solve_template_inner_links(templates) + self._templates = solved_templates + self._objected_templates = self.create_ojected_templates( + solved_templates + ) + + def default_templates(self): + """Return default templates data with solved inner keys.""" + return self.solve_template_inner_links( + self.anatomy["templates"] + ) + + def _discover(self): + """ Loads anatomy templates from yaml. + Default templates are loaded if project is not set or project does + not have set it's own. + TODO: create templates if not exist. + + Returns: + TemplatesResultDict: Contain templates data for current project of + default templates. + """ + + if self.project_name is None: + # QUESTION create project specific if not found? + raise AssertionError(( + "Project \"{0}\" does not have his own templates." + " Trying to use default." 
+ ).format(self.project_name)) + + self.set_templates(self.anatomy["templates"]) + + @classmethod + def replace_inner_keys(cls, matches, value, key_values, key): + """Replacement of inner keys in template values.""" + for match in matches: + anatomy_sub_keys = ( + cls.inner_key_name_pattern.findall(match) + ) + if key in anatomy_sub_keys: + raise ValueError(( + "Unsolvable recursion in inner keys, " + "key: \"{}\" is in his own value." + " Can't determine source, please check Anatomy templates." + ).format(key)) + + for anatomy_sub_key in anatomy_sub_keys: + replace_value = key_values.get(anatomy_sub_key) + if replace_value is None: + raise KeyError(( + "Anatomy templates can't be filled." + " Anatomy key `{0}` has" + " invalid inner key `{1}`." + ).format(key, anatomy_sub_key)) + + if not ( + isinstance(replace_value, numbers.Number) + or isinstance(replace_value, six.string_types) + ): + raise ValueError(( + "Anatomy templates can't be filled." + " Anatomy key `{0}` has" + " invalid inner key `{1}`" + " with value `{2}`." + ).format(key, anatomy_sub_key, str(replace_value))) + + value = value.replace(match, str(replace_value)) + + return value + + @classmethod + def prepare_inner_keys(cls, key_values): + """Check values of inner keys. + + Check if inner key exist in template group and has valid value. + It is also required to avoid infinite loop with unsolvable recursion + when first inner key's value refers to second inner key's value where + first is used. + """ + keys_to_solve = set(key_values.keys()) + while True: + found = False + for key in tuple(keys_to_solve): + value = key_values[key] + + if isinstance(value, six.string_types): + matches = cls.inner_key_pattern.findall(value) + if not matches: + keys_to_solve.remove(key) + continue + + found = True + key_values[key] = cls.replace_inner_keys( + matches, value, key_values, key + ) + continue + + elif not isinstance(value, dict): + keys_to_solve.remove(key) + continue + + subdict_found = False + for _key, _value in tuple(value.items()): + matches = cls.inner_key_pattern.findall(_value) + if not matches: + continue + + subdict_found = True + found = True + key_values[key][_key] = cls.replace_inner_keys( + matches, _value, key_values, + "{}.{}".format(key, _key) + ) + + if not subdict_found: + keys_to_solve.remove(key) + + if not found: + break + + return key_values + + @classmethod + def solve_template_inner_links(cls, templates): + """Solve templates inner keys identified by "{@*}". + + Process is split into 2 parts. + First is collecting all global keys (keys in top hierarchy where value + is not dictionary). All global keys are set for all group keys (keys + in top hierarchy where value is dictionary). Value of a key is not + overridden in group if already contain value for the key. + + In second part all keys with "at" symbol in value are replaced with + value of the key afterward "at" symbol from the group. + + Args: + templates (dict): Raw templates data. 
+ + Example: + templates:: + key_1: "value_1", + key_2: "{@key_1}/{filling_key}" + + group_1: + key_3: "value_3/{@key_2}" + + group_2: + key_2": "value_2" + key_4": "value_4/{@key_2}" + + output:: + key_1: "value_1" + key_2: "value_1/{filling_key}" + + group_1: { + key_1: "value_1" + key_2: "value_1/{filling_key}" + key_3: "value_3/value_1/{filling_key}" + + group_2: { + key_1: "value_1" + key_2: "value_2" + key_4: "value_3/value_2" + """ + default_key_values = templates.pop("defaults", {}) + for key, value in tuple(templates.items()): + if isinstance(value, dict): + continue + default_key_values[key] = templates.pop(key) + + # Pop "others" key before before expected keys are processed + other_templates = templates.pop("others") or {} + + keys_by_subkey = {} + for sub_key, sub_value in templates.items(): + key_values = {} + key_values.update(default_key_values) + key_values.update(sub_value) + keys_by_subkey[sub_key] = cls.prepare_inner_keys(key_values) + + for sub_key, sub_value in other_templates.items(): + if sub_key in keys_by_subkey: + log.warning(( + "Key \"{}\" is duplicated in others. Skipping." + ).format(sub_key)) + continue + + key_values = {} + key_values.update(default_key_values) + key_values.update(sub_value) + keys_by_subkey[sub_key] = cls.prepare_inner_keys(key_values) + + default_keys_by_subkeys = cls.prepare_inner_keys(default_key_values) + + for key, value in default_keys_by_subkeys.items(): + keys_by_subkey[key] = value + + return keys_by_subkey + + def _dict_to_subkeys_list(self, subdict, pre_keys=None): + if pre_keys is None: + pre_keys = [] + output = [] + for key in subdict: + value = subdict[key] + result = list(pre_keys) + result.append(key) + if isinstance(value, dict): + for item in self._dict_to_subkeys_list(value, result): + output.append(item) + else: + output.append(result) + return output + + def _keys_to_dicts(self, key_list, value): + if not key_list: + return None + if len(key_list) == 1: + return {key_list[0]: value} + return {key_list[0]: self._keys_to_dicts(key_list[1:], value)} + + def _rootless_path(self, result, final_data): + used_values = result.used_values + missing_keys = result.missing_keys + template = result.template + invalid_types = result.invalid_types + if ( + "root" not in used_values + or "root" in missing_keys + or "{root" not in template + ): + return + + for invalid_type in invalid_types: + if "root" in invalid_type: + return + + root_keys = self._dict_to_subkeys_list({"root": used_values["root"]}) + if not root_keys: + return + + output = str(result) + for used_root_keys in root_keys: + if not used_root_keys: + continue + + used_value = used_values + root_key = None + for key in used_root_keys: + used_value = used_value[key] + if root_key is None: + root_key = key + else: + root_key += "[{}]".format(key) + + root_key = "{" + root_key + "}" + output = output.replace(str(used_value), root_key) + + return output + + def format(self, data, strict=True): + copy_data = copy.deepcopy(data) + roots = self.roots + if roots: + copy_data["root"] = roots + result = super(AnatomyTemplates, self).format(copy_data) + result.strict = strict + return result + + def format_all(self, in_data, only_keys=True): + """ Solves templates based on entered data. + + Args: + data (dict): Containing keys to be filled into template. + + Returns: + TemplatesResultDict: Output `TemplateResult` have `strict` + attribute set to False so accessing unfilled keys in templates + won't raise any exceptions. 
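# `solve_template_inner_links` is a classmethod, so the "{@key}" resolution shown
# in the docstring above can be reproduced without a project (assuming the
# openpype package is importable; template values are illustrative):
from openpype.pipeline.anatomy import AnatomyTemplates

raw_templates = {
    "defaults": {"version_padding": 3},
    "key_1": "value_1",
    "work": {
        "folder": "{root[work]}/{project[name]}/{@key_1}",
    },
    # "others" is popped unconditionally, so the key has to be present.
    "others": {},
}

solved = AnatomyTemplates.solve_template_inner_links(raw_templates)
# solved["work"]["folder"] == "{root[work]}/{project[name]}/value_1"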
+ """ + return self.format(in_data, strict=False) + + +class RootItem(FormatObject): + """Represents one item or roots. + + Holds raw data of root item specification. Raw data contain value + for each platform, but current platform value is used when object + is used for formatting of template. + + Args: + root_raw_data (dict): Dictionary containing root values by platform + names. ["windows", "linux" and "darwin"] + name (str, optional): Root name which is representing. Used with + multi root setup otherwise None value is expected. + parent_keys (list, optional): All dictionary parent keys. Values of + `parent_keys` are used for get full key which RootItem is + representing. Used for replacing root value in path with + formattable key. e.g. parent_keys == ["work"] -> {root[work]} + parent (object, optional): It is expected to be `Roots` object. + Value of `parent` won't affect code logic much. + """ + + def __init__( + self, root_raw_data, name=None, parent_keys=None, parent=None + ): + lowered_platform_keys = {} + for key, value in root_raw_data.items(): + lowered_platform_keys[key.lower()] = value + self.raw_data = lowered_platform_keys + self.cleaned_data = self._clean_roots(lowered_platform_keys) + self.name = name + self.parent_keys = parent_keys or [] + self.parent = parent + + self.available_platforms = list(lowered_platform_keys.keys()) + self.value = lowered_platform_keys.get(platform.system().lower()) + self.clean_value = self.clean_root(self.value) + + def __format__(self, *args, **kwargs): + return self.value.__format__(*args, **kwargs) + + def __str__(self): + return str(self.value) + + def __repr__(self): + return self.__str__() + + def __getitem__(self, key): + if isinstance(key, numbers.Number): + return self.value[key] + + additional_info = "" + if self.parent and self.parent.project_name: + additional_info += " for project \"{}\"".format( + self.parent.project_name + ) + + raise AssertionError( + "Root key \"{}\" is missing{}.".format( + key, additional_info + ) + ) + + def full_key(self): + """Full key value for dictionary formatting in template. + + Returns: + str: Return full replacement key for formatting. This helps when + multiple roots are set. In that case e.g. `"root[work]"` is + returned. + """ + if not self.name: + return "root" + + joined_parent_keys = "".join( + ["[{}]".format(key) for key in self.parent_keys] + ) + return "root{}".format(joined_parent_keys) + + def clean_path(self, path): + """Just replace backslashes with forward slashes.""" + return str(path).replace("\\", "/") + + def clean_root(self, root): + """Makes sure root value does not end with slash.""" + if root: + root = self.clean_path(root) + while root.endswith("/"): + root = root[:-1] + return root + + def _clean_roots(self, raw_data): + """Clean all values of raw root item values.""" + cleaned = {} + for key, value in raw_data.items(): + cleaned[key] = self.clean_root(value) + return cleaned + + def path_remapper(self, path, dst_platform=None, src_platform=None): + """Remap path for specific platform. + + Args: + path (str): Source path which need to be remapped. + dst_platform (str, optional): Specify destination platform + for which remapping should happen. + src_platform (str, optional): Specify source platform. This is + recommended to not use and keep unset until you really want + to use specific platform. + roots (dict/RootItem/None, optional): It is possible to remap + path with different roots then instance where method was + called has. 
+ + Returns: + str/None: When path does not contain known root then + None is returned else returns remapped path with "{root}" + or "{root[]}". + """ + cleaned_path = self.clean_path(path) + if dst_platform: + dst_root_clean = self.cleaned_data.get(dst_platform) + if not dst_root_clean: + key_part = "" + full_key = self.full_key() + if full_key != "root": + key_part += "\"{}\" ".format(full_key) + + log.warning( + "Root {}miss platform \"{}\" definition.".format( + key_part, dst_platform + ) + ) + return None + + if cleaned_path.startswith(dst_root_clean): + return cleaned_path + + if src_platform: + src_root_clean = self.cleaned_data.get(src_platform) + if src_root_clean is None: + log.warning( + "Root \"{}\" miss platform \"{}\" definition.".format( + self.full_key(), src_platform + ) + ) + return None + + if not cleaned_path.startswith(src_root_clean): + return None + + subpath = cleaned_path[len(src_root_clean):] + if dst_platform: + # `dst_root_clean` is used from upper condition + return dst_root_clean + subpath + return self.clean_value + subpath + + result, template = self.find_root_template_from_path(path) + if not result: + return None + + def parent_dict(keys, value): + if not keys: + return value + + key = keys.pop(0) + return {key: parent_dict(keys, value)} + + if dst_platform: + format_value = parent_dict(list(self.parent_keys), dst_root_clean) + else: + format_value = parent_dict(list(self.parent_keys), self.value) + + return template.format(**{"root": format_value}) + + def find_root_template_from_path(self, path): + """Replaces known root value with formattable key in path. + + All platform values are checked for this replacement. + + Args: + path (str): Path where root value should be found. + + Returns: + tuple: Tuple contain 2 values: `success` (bool) and `path` (str). + When success it True then path should contain replaced root + value with formattable key. + + Example: + When input path is:: + "C:/windows/path/root/projects/my_project/file.ext" + + And raw data of item looks like:: + { + "windows": "C:/windows/path/root", + "linux": "/mount/root" + } + + Output will be:: + (True, "{root}/projects/my_project/file.ext") + + If any of raw data value wouldn't match path's root output is:: + (False, "C:/windows/path/root/projects/my_project/file.ext") + """ + result = False + output = str(path) + + root_paths = list(self.cleaned_data.values()) + mod_path = self.clean_path(path) + for root_path in root_paths: + # Skip empty paths + if not root_path: + continue + + if mod_path.startswith(root_path): + result = True + replacement = "{" + self.full_key() + "}" + output = replacement + mod_path[len(root_path):] + break + + return (result, output) + + +class Roots: + """Object which should be used for formatting "root" key in templates. + + Args: + anatomy Anatomy: Anatomy object created for a specific project. + """ + + env_prefix = "OPENPYPE_PROJECT_ROOT" + roots_filename = "roots.json" + + def __init__(self, anatomy): + self.anatomy = anatomy + self.loaded_project = None + self._roots = None + + def __format__(self, *args, **kwargs): + return self.roots.__format__(*args, **kwargs) + + def __getitem__(self, key): + return self.roots[key] + + def reset(self): + """Reset current roots value.""" + self._roots = None + + def path_remapper( + self, path, dst_platform=None, src_platform=None, roots=None + ): + """Remap path for specific platform. + + Args: + path (str): Source path which need to be remapped. 
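# RootItem does not touch the database, so its path handling can be tried in
# isolation (assuming the openpype package is importable; paths are illustrative):
from openpype.pipeline.anatomy import RootItem

work_root = RootItem(
    {
        "windows": "P:/projects/work",
        "linux": "/mnt/share/projects/work",
        "darwin": "/Volumes/projects/work",
    },
    name="work",
    parent_keys=["work"],
)

work_root.full_key()
# 'root[work]'

work_root.find_root_template_from_path(
    "P:/projects/work/MyProject/sh010/scene.ma"
)
# (True, '{root[work]}/MyProject/sh010/scene.ma')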
+ dst_platform (str, optional): Specify destination platform + for which remapping should happen. + src_platform (str, optional): Specify source platform. This is + recommended to not use and keep unset until you really want + to use specific platform. + roots (dict/RootItem/None, optional): It is possible to remap + path with different roots then instance where method was + called has. + + Returns: + str/None: When path does not contain known root then + None is returned else returns remapped path with "{root}" + or "{root[]}". + """ + if roots is None: + roots = self.roots + + if roots is None: + raise ValueError("Roots are not set. Can't find path.") + + if "{root" in path: + path = path.format(**{"root": roots}) + # If `dst_platform` is not specified then return else continue. + if not dst_platform: + return path + + if isinstance(roots, RootItem): + return roots.path_remapper(path, dst_platform, src_platform) + + for _root in roots.values(): + result = self.path_remapper( + path, dst_platform, src_platform, _root + ) + if result is not None: + return result + + def find_root_template_from_path(self, path, roots=None): + """Find root value in entered path and replace it with formatting key. + + Args: + path (str): Source path where root will be searched. + roots (Roots/dict, optional): It is possible to use different + roots than instance where method was triggered has. + + Returns: + tuple: Output contains tuple with bool representing success as + first value and path with or without replaced root with + formatting key as second value. + + Raises: + ValueError: When roots are not entered and can't be loaded. + """ + if roots is None: + log.debug( + "Looking for matching root in path \"{}\".".format(path) + ) + roots = self.roots + + if roots is None: + raise ValueError("Roots are not set. Can't find path.") + + if isinstance(roots, RootItem): + return roots.find_root_template_from_path(path) + + for root_name, _root in roots.items(): + success, result = self.find_root_template_from_path(path, _root) + if success: + log.info("Found match in root \"{}\".".format(root_name)) + return success, result + + log.warning("No matching root was found in current setting.") + return (False, path) + + def set_root_environments(self): + """Set root environments for current project.""" + for key, value in self.root_environments().items(): + os.environ[key] = value + + def root_environments(self): + """Use root keys to create unique keys for environment variables. + + Concatenates prefix "OPENPYPE_ROOT" with root keys to create unique + keys. + + Returns: + dict: Result is `{(str): (str)}` dicitonary where key represents + unique key concatenated by keys and value is root value of + current platform root. 
+ + Example: + With raw root values:: + "work": { + "windows": "P:/projects/work", + "linux": "/mnt/share/projects/work", + "darwin": "/darwin/path/work" + }, + "publish": { + "windows": "P:/projects/publish", + "linux": "/mnt/share/projects/publish", + "darwin": "/darwin/path/publish" + } + + Result on windows platform:: + { + "OPENPYPE_ROOT_WORK": "P:/projects/work", + "OPENPYPE_ROOT_PUBLISH": "P:/projects/publish" + } + + Short example when multiroot is not used:: + { + "OPENPYPE_ROOT": "P:/projects" + } + """ + return self._root_environments() + + def all_root_paths(self, roots=None): + """Return all paths for all roots of all platforms.""" + if roots is None: + roots = self.roots + + output = [] + if isinstance(roots, RootItem): + for value in roots.raw_data.values(): + output.append(value) + return output + + for _roots in roots.values(): + output.extend(self.all_root_paths(_roots)) + return output + + def _root_environments(self, keys=None, roots=None): + if not keys: + keys = [] + if roots is None: + roots = self.roots + + if isinstance(roots, RootItem): + key_items = [self.env_prefix] + for _key in keys: + key_items.append(_key.upper()) + + key = "_".join(key_items) + # Make sure key and value does not contain unicode + # - can happen in Python 2 hosts + return {str(key): str(roots.value)} + + output = {} + for _key, _value in roots.items(): + _keys = list(keys) + _keys.append(_key) + output.update(self._root_environments(_keys, _value)) + return output + + def root_environmets_fill_data(self, template=None): + """Environment variable values in dictionary for rootless path. + + Args: + template (str): Template for environment variable key fill. + By default is set to `"${}"`. + """ + if template is None: + template = "${}" + return self._root_environmets_fill_data(template) + + def _root_environmets_fill_data(self, template, keys=None, roots=None): + if keys is None and roots is None: + return { + "root": self._root_environmets_fill_data( + template, [], self.roots + ) + } + + if isinstance(roots, RootItem): + key_items = [Roots.env_prefix] + for _key in keys: + key_items.append(_key.upper()) + key = "_".join(key_items) + return template.format(key) + + output = {} + for key, value in roots.items(): + _keys = list(keys) + _keys.append(key) + output[key] = self._root_environmets_fill_data( + template, _keys, value + ) + return output + + @property + def project_name(self): + """Return project name which will be used for loading root values.""" + return self.anatomy.project_name + + @property + def roots(self): + """Property for filling "root" key in templates. + + This property returns roots for current project or default root values. + Warning: + Default roots value may cause issues when project use different + roots settings. That may happen when project use multiroot + templates but default roots miss their keys. + """ + if self.project_name != self.loaded_project: + self._roots = None + + if self._roots is None: + self._roots = self._discover() + self.loaded_project = self.project_name + return self._roots + + def _discover(self): + """ Loads current project's roots or default. + + Default roots are loaded if project override's does not contain roots. + + Returns: + `RootItem` or `dict` with multiple `RootItem`s when multiroot + setting is used. + """ + + return self._parse_dict(self.anatomy["roots"], parent=self) + + @staticmethod + def _parse_dict(data, key=None, parent_keys=None, parent=None): + """Parse roots raw data into RootItem or dictionary with RootItems. 
+ + Converting raw roots data to `RootItem` helps to handle platform keys. + This method is recursive to be able handle multiroot setup and + is static to be able to load default roots without creating new object. + + Args: + data (dict): Should contain raw roots data to be parsed. + key (str, optional): Current root key. Set by recursion. + parent_keys (list): Parent dictionary keys. Set by recursion. + parent (Roots, optional): Parent object set in `RootItem` + helps to keep RootItem instance updated with `Roots` object. + + Returns: + `RootItem` or `dict` with multiple `RootItem`s when multiroot + setting is used. + """ + if not parent_keys: + parent_keys = [] + is_last = False + for value in data.values(): + if isinstance(value, six.string_types): + is_last = True + break + + if is_last: + return RootItem(data, key, parent_keys, parent=parent) + + output = {} + for _key, value in data.items(): + _parent_keys = list(parent_keys) + _parent_keys.append(_key) + output[_key] = Roots._parse_dict(value, _key, _parent_keys, parent) + return output diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py index 06bd639776..0ec19d50fe 100644 --- a/openpype/pipeline/context_tools.py +++ b/openpype/pipeline/context_tools.py @@ -1,38 +1,49 @@ """Core pipeline functionality""" import os -import sys import json import types import logging -import inspect import platform +import uuid import pyblish.api from pyblish.lib import MessageHandler import openpype -from openpype.modules import load_modules -from openpype.settings import get_project_settings -from openpype.lib import ( - Anatomy, - register_event_callback, - filter_pyblish_plugins, - change_timer_to_current_context, +from openpype.client import ( + get_project, + get_asset_by_id, + get_asset_by_name, + version_is_latest, ) +from openpype.lib.events import emit_event +from openpype.modules import load_modules, ModulesManager +from openpype.settings import get_project_settings +from .publish.lib import filter_pyblish_plugins +from .anatomy import Anatomy +from .template_data import get_template_data_with_names +from .workfile import ( + get_workfile_template_key, + get_custom_workfile_template_by_string_context, +) from . import ( legacy_io, register_loader_plugin_path, - register_inventory_action, + register_inventory_action_path, register_creator_plugin_path, deregister_loader_plugin_path, ) _is_installed = False +_process_id = None _registered_root = {"_": ""} _registered_host = {"_": None} +# Keep modules manager (and it's modules) in memory +# - that gives option to register modules' callbacks +_modules_manager = None log = logging.getLogger(__name__) @@ -44,6 +55,23 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") +def _get_modules_manager(): + """Get or create modules manager for host installation. + + This is not meant for public usage. Reason is to keep modules + in memory of process to be able trigger their event callbacks if they + need any. + + Returns: + ModulesManager: Manager wrapping discovered modules. 
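# Roots._parse_dict is what turns the raw "roots" dictionary into RootItem
# objects. Illustrative call on a two-root (multiroot) configuration:
from openpype.pipeline.anatomy import RootItem, Roots

raw_roots = {
    "work": {"windows": "P:/projects/work", "linux": "/mnt/projects/work"},
    "publish": {"windows": "P:/projects/publish", "linux": "/mnt/projects/publish"},
}

parsed = Roots._parse_dict(raw_roots)
# Multiroot data becomes a dict of RootItem objects keyed by root name.
assert isinstance(parsed["work"], RootItem)
assert parsed["work"].full_key() == "root[work]"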
+ """ + + global _modules_manager + if _modules_manager is None: + _modules_manager = ModulesManager() + return _modules_manager + + def register_root(path): """Register currently active root""" log.info("Registering root: %s" % path) @@ -74,6 +102,7 @@ def install_host(host): _is_installed = True legacy_io.install() + modules_manager = _get_modules_manager() missing = list() for key in ("AVALON_PROJECT", "AVALON_ASSET"): @@ -95,8 +124,6 @@ def install_host(host): register_host(host) - register_event_callback("taskChanged", _on_task_change) - def modified_emit(obj, record): """Method replacing `emit` in Pyblish's MessageHandler.""" record.msg = record.getMessage() @@ -104,10 +131,25 @@ def install_host(host): MessageHandler.emit = modified_emit - install_openpype_plugins() + if os.environ.get("OPENPYPE_REMOTE_PUBLISH"): + # target "farm" == rendering on farm, expects OPENPYPE_PUBLISH_DATA + # target "remote" == remote execution, installs host + print("Registering pyblish target: remote") + pyblish.api.register_target("remote") + else: + pyblish.api.register_target("local") + + project_name = os.environ.get("AVALON_PROJECT") + host_name = os.environ.get("AVALON_APP") + + # Give option to handle host installation + for module in modules_manager.get_enabled_modules(): + module.on_host_install(host, host_name, project_name) + + install_openpype_plugins(project_name, host_name) -def install_openpype_plugins(project_name=None): +def install_openpype_plugins(project_name=None, host_name=None): # Make sure modules are loaded load_modules() @@ -116,6 +158,18 @@ def install_openpype_plugins(project_name=None): pyblish.api.register_discovery_filter(filter_pyblish_plugins) register_loader_plugin_path(LOAD_PATH) + modules_manager = _get_modules_manager() + publish_plugin_dirs = modules_manager.collect_plugin_paths()["publish"] + for path in publish_plugin_dirs: + pyblish.api.register_plugin_path(path) + + if host_name is None: + host_name = os.environ.get("AVALON_APP") + + creator_paths = modules_manager.collect_creator_plugin_paths(host_name) + for creator_path in creator_paths: + register_creator_plugin_path(creator_path) + if project_name is None: project_name = os.environ.get("AVALON_PROJECT") @@ -145,11 +199,7 @@ def install_openpype_plugins(project_name=None): pyblish.api.register_plugin_path(path) register_loader_plugin_path(path) register_creator_plugin_path(path) - register_inventory_action(path) - - -def _on_task_change(): - change_timer_to_current_context() + register_inventory_action_path(path) def uninstall_host(): @@ -195,102 +245,17 @@ def register_host(host): required, or browse the source code. 
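# install_host() now calls `on_host_install(host, host_name, project_name)` on
# every enabled module before OpenPype plugins are registered. A hypothetical
# module using that hook (class and module names are illustrative):
import logging

from openpype.modules import OpenPypeModule

log = logging.getLogger(__name__)


class ExampleHostWatcher(OpenPypeModule):
    name = "example_host_watcher"

    def initialize(self, modules_settings):
        self.enabled = True

    def on_host_install(self, host, host_name, project_name):
        log.info(
            "Host '%s' installed for project '%s'", host_name, project_name
        )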
""" - signatures = { - "ls": [] - } - _validate_signature(host, signatures) _registered_host["_"] = host -def _validate_signature(module, signatures): - # Required signatures for each member - - missing = list() - invalid = list() - success = True - - for member in signatures: - if not hasattr(module, member): - missing.append(member) - success = False - - else: - attr = getattr(module, member) - if sys.version_info.major >= 3: - signature = inspect.getfullargspec(attr)[0] - else: - signature = inspect.getargspec(attr)[0] - required_signature = signatures[member] - - assert isinstance(signature, list) - assert isinstance(required_signature, list) - - if not all(member in signature - for member in required_signature): - invalid.append({ - "member": member, - "signature": ", ".join(signature), - "required": ", ".join(required_signature) - }) - success = False - - if not success: - report = list() - - if missing: - report.append( - "Incomplete interface for module: '%s'\n" - "Missing: %s" % (module, ", ".join( - "'%s'" % member for member in missing)) - ) - - if invalid: - report.append( - "'%s': One or more members were found, but didn't " - "have the right argument signature." % module.__name__ - ) - - for member in invalid: - report.append( - " Found: {member}({signature})".format(**member) - ) - report.append( - " Expected: {member}({required})".format(**member) - ) - - raise ValueError("\n".join(report)) - - def registered_host(): """Return currently registered host""" return _registered_host["_"] def deregister_host(): - _registered_host["_"] = default_host() - - -def default_host(): - """A default host, in place of anything better - - This may be considered as reference for the - interface a host must implement. It also ensures - that the system runs, even when nothing is there - to support it. - - """ - - host = types.ModuleType("defaultHost") - - def ls(): - return list() - - host.__dict__.update({ - "ls": ls - }) - - return host + _registered_host["_"] = None def debug_host(): @@ -332,3 +297,269 @@ def debug_host(): }) return host + + +def get_current_project(fields=None): + """Helper function to get project document based on global Session. + + This function should be called only in process where host is installed. + + Returns: + dict: Project document. + None: Project is not set. + """ + + project_name = legacy_io.active_project() + return get_project(project_name, fields=fields) + + +def get_current_project_asset(asset_name=None, asset_id=None, fields=None): + """Helper function to get asset document based on global Session. + + This function should be called only in process where host is installed. + + Asset is found out based on passed asset name or id (not both). Asset name + is not used for filtering if asset id is passed. When both asset name and + id are missing then asset name from current process is used. + + Args: + asset_name (str): Name of asset used for filter. + asset_id (Union[str, ObjectId]): Asset document id. If entered then + is used as only filter. + fields (Union[List[str], None]): Limit returned data of asset documents + to specific keys. + + Returns: + dict: Asset document. + None: Asset is not set or not exist. 
+ """ + + project_name = legacy_io.active_project() + if asset_id: + return get_asset_by_id(project_name, asset_id, fields=fields) + + if not asset_name: + asset_name = legacy_io.Session.get("AVALON_ASSET") + # Skip if is not set even on context + if not asset_name: + return None + return get_asset_by_name(project_name, asset_name, fields=fields) + + +def is_representation_from_latest(representation): + """Return whether the representation is from latest version + + Args: + representation (dict): The representation document from the database. + + Returns: + bool: Whether the representation is of latest version. + """ + + project_name = legacy_io.active_project() + return version_is_latest(project_name, representation["parent"]) + + +def get_template_data_from_session(session=None, system_settings=None): + """Template data for template fill from session keys. + + Args: + session (Union[Dict[str, str], None]): The Session to use. If not + provided use the currently active global Session. + system_settings (Union[Dict[str, Any], Any]): Prepared system settings. + Optional are auto received if not passed. + + Returns: + Dict[str, Any]: All available data from session. + """ + + if session is None: + session = legacy_io.Session + + project_name = session["AVALON_PROJECT"] + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] + host_name = session["AVALON_APP"] + + return get_template_data_with_names( + project_name, asset_name, task_name, host_name, system_settings + ) + + +def get_workdir_from_session(session=None, template_key=None): + """Template data for template fill from session keys. + + Args: + session (Union[Dict[str, str], None]): The Session to use. If not + provided use the currently active global Session. + template_key (str): Prepared template key from which workdir is + calculated. + + Returns: + str: Workdir path. + """ + + if session is None: + session = legacy_io.Session + project_name = session["AVALON_PROJECT"] + host_name = session["AVALON_APP"] + anatomy = Anatomy(project_name) + template_data = get_template_data_from_session(session) + anatomy_filled = anatomy.format(template_data) + + if not template_key: + task_type = template_data["task"]["type"] + template_key = get_workfile_template_key( + task_type, + host_name, + project_name=project_name + ) + path = anatomy_filled[template_key]["folder"] + if path: + path = os.path.normpath(path) + return path + + +def get_custom_workfile_template_from_session( + session=None, project_settings=None +): + """Filter and fill workfile template profiles by current context. + + Current context is defined by `legacy_io.Session`. That's why this + function should be used only inside host where context is set and stable. + + Args: + session (Union[None, Dict[str, str]]): Session from which are taken + data. + project_settings(Dict[str, Any]): Template profiles from settings. + + Returns: + str: Path to template or None if none of profiles match current + context. (Existence of formatted path is not validated.) + """ + + if session is None: + session = legacy_io.Session + + return get_custom_workfile_template_by_string_context( + session["AVALON_PROJECT"], + session["AVALON_ASSET"], + session["AVALON_TASK"], + session["AVALON_APP"], + project_settings=project_settings + ) + + +def compute_session_changes( + session, asset_doc, task_name, template_key=None +): + """Compute the changes for a session object on task under asset. + + Function does not change the session object, only returns changes. 
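# Inside a process where a host is installed (so legacy_io.Session carries
# AVALON_PROJECT / AVALON_ASSET), the asset helper can be used roughly like this;
# the projected field names are illustrative:
from openpype.pipeline.context_tools import get_current_project_asset

asset_doc = get_current_project_asset(
    fields=["name", "data.fps", "data.frameStart"]
)
if asset_doc:
    fps = asset_doc["data"]["fps"]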
+ + Args: + session (Dict[str, str]): The initial session to compute changes to. + This is required for computing the full Work Directory, as that + also depends on the values that haven't changed. + asset_doc (Dict[str, Any]): Asset document to switch to. + task_name (str): Name of task to switch to. + template_key (Union[str, None]): Prepare workfile template key in + anatomy templates. + + Returns: + Dict[str, str]: Changes in the Session dictionary. + """ + + changes = {} + + # Get asset document and asset + if not asset_doc: + task_name = None + asset_name = None + else: + asset_name = asset_doc["name"] + + # Detect any changes compared session + mapping = { + "AVALON_ASSET": asset_name, + "AVALON_TASK": task_name, + } + changes = { + key: value + for key, value in mapping.items() + if value != session.get(key) + } + if not changes: + return changes + + # Compute work directory (with the temporary changed session so far) + changed_session = session.copy() + changed_session.update(changes) + + workdir = None + if asset_doc: + workdir = get_workdir_from_session( + changed_session, template_key + ) + + changes["AVALON_WORKDIR"] = workdir + + return changes + + +def change_current_context(asset_doc, task_name, template_key=None): + """Update active Session to a new task work area. + + This updates the live Session to a different task under asset. + + Args: + asset_doc (Dict[str, Any]): The asset document to set. + task_name (str): The task to set under asset. + template_key (Union[str, None]): Prepared template key to be used for + workfile template in Anatomy. + + Returns: + Dict[str, str]: The changed key, values in the current Session. + """ + + changes = compute_session_changes( + legacy_io.Session, + asset_doc, + task_name, + template_key=template_key + ) + + # Update the Session and environments. Pop from environments all keys with + # value set to None. + for key, value in changes.items(): + legacy_io.Session[key] = value + if value is None: + os.environ.pop(key, None) + else: + os.environ[key] = value + + data = changes.copy() + # Convert env keys to human readable keys + data["project_name"] = legacy_io.Session["AVALON_PROJECT"] + data["asset_name"] = legacy_io.Session["AVALON_ASSET"] + data["task_name"] = legacy_io.Session["AVALON_TASK"] + + # Emit session change + emit_event("taskChanged", data) + + return changes + + +def get_process_id(): + """Fake process id created on demand using uuid. + + Can be used to create process specific folders in temp directory. + + Returns: + str: Process id. 
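A hedged sketch (not part of the patch) of switching the active task with the two functions above; the asset and task names are made up, and `get_asset_by_name` comes from `openpype.client` as used earlier in this module.

from openpype.client import get_asset_by_name

project_name = legacy_io.Session["AVALON_PROJECT"]
asset_doc = get_asset_by_name(project_name, "sh010")  # made up asset name

# Preview what would change without touching the Session
changes = compute_session_changes(legacy_io.Session, asset_doc, "animation")

# Or apply it directly, which also updates os.environ and emits "taskChanged"
if changes:
    change_current_context(asset_doc, "animation")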
+ """ + + global _process_id + if _process_id is None: + _process_id = str(uuid.uuid4()) + return _process_id diff --git a/openpype/pipeline/create/__init__.py b/openpype/pipeline/create/__init__.py index 1beeb4267b..b0877d0a29 100644 --- a/openpype/pipeline/create/__init__.py +++ b/openpype/pipeline/create/__init__.py @@ -1,19 +1,32 @@ from .constants import ( - SUBSET_NAME_ALLOWED_SYMBOLS + SUBSET_NAME_ALLOWED_SYMBOLS, + DEFAULT_SUBSET_TEMPLATE, + PRE_CREATE_THUMBNAIL_KEY, ) + +from .subset_name import ( + TaskNotSetError, + get_subset_name, +) + from .creator_plugins import ( CreatorError, BaseCreator, Creator, AutoCreator, + HiddenCreator, + + discover_legacy_creator_plugins, + get_legacy_creator_by_name, discover_creator_plugins, - discover_legacy_creator_plugins, register_creator_plugin, deregister_creator_plugin, register_creator_plugin_path, deregister_creator_plugin_path, + + cache_and_get_instances, ) from .context import ( @@ -29,15 +42,23 @@ from .legacy_create import ( __all__ = ( "SUBSET_NAME_ALLOWED_SYMBOLS", + "DEFAULT_SUBSET_TEMPLATE", + "PRE_CREATE_THUMBNAIL_KEY", + + "TaskNotSetError", + "get_subset_name", "CreatorError", "BaseCreator", "Creator", "AutoCreator", + "HiddenCreator", + + "discover_legacy_creator_plugins", + "get_legacy_creator_by_name", "discover_creator_plugins", - "discover_legacy_creator_plugins", "register_creator_plugin", "deregister_creator_plugin", "register_creator_plugin_path", diff --git a/openpype/pipeline/create/constants.py b/openpype/pipeline/create/constants.py index bfbbccfd12..375cfc4a12 100644 --- a/openpype/pipeline/create/constants.py +++ b/openpype/pipeline/create/constants.py @@ -1,6 +1,10 @@ SUBSET_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_." +DEFAULT_SUBSET_TEMPLATE = "{family}{Variant}" +PRE_CREATE_THUMBNAIL_KEY = "thumbnail_source" __all__ = ( "SUBSET_NAME_ALLOWED_SYMBOLS", + "DEFAULT_SUBSET_TEMPLATE", + "PRE_CREATE_THUMBNAIL_KEY", ) diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index 6f862e0588..4fd460ffea 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -1,11 +1,19 @@ import os +import sys import copy import logging +import traceback import collections import inspect from uuid import uuid4 from contextlib import contextmanager +from openpype.client import get_assets +from openpype.settings import ( + get_system_settings, + get_project_settings +) +from openpype.host import IPublishHost from openpype.pipeline import legacy_io from openpype.pipeline.mongodb import ( AvalonMongoDB, @@ -16,18 +24,21 @@ from .creator_plugins import ( Creator, AutoCreator, discover_creator_plugins, -) - -from openpype.api import ( - get_system_settings, - get_project_settings + discover_convertor_plugins, + CreatorError, ) UpdateData = collections.namedtuple("UpdateData", ["instance", "changes"]) +class UnavailableSharedData(Exception): + """Shared data are not available at the moment when are accessed.""" + pass + + class ImmutableKeyError(TypeError): """Accessed key is immutable so does not allow changes or removements.""" + def __init__(self, key, msg=None): self.immutable_key = key if not msg: @@ -39,6 +50,7 @@ class ImmutableKeyError(TypeError): class HostMissRequiredMethod(Exception): """Host does not have implemented required functions for creation.""" + def __init__(self, host, missing_methods): self.missing_methods = missing_methods self.host = host @@ -59,12 +71,119 @@ class HostMissRequiredMethod(Exception): super(HostMissRequiredMethod, self).__init__(msg) +class 
ConvertorsOperationFailed(Exception): + def __init__(self, msg, failed_info): + super(ConvertorsOperationFailed, self).__init__(msg) + self.failed_info = failed_info + + +class ConvertorsFindFailed(ConvertorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed to find incompatible subsets" + super(ConvertorsFindFailed, self).__init__( + msg, failed_info + ) + + +class ConvertorsConversionFailed(ConvertorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed to convert incompatible subsets" + super(ConvertorsConversionFailed, self).__init__( + msg, failed_info + ) + + +def prepare_failed_convertor_operation_info(identifier, exc_info): + exc_type, exc_value, exc_traceback = exc_info + formatted_traceback = "".join(traceback.format_exception( + exc_type, exc_value, exc_traceback + )) + + return { + "convertor_identifier": identifier, + "message": str(exc_value), + "traceback": formatted_traceback + } + + +class CreatorsOperationFailed(Exception): + """Raised when a creator process crashes in 'CreateContext'. + + The exception contains information about the creator and error. The data + are prepared using 'prepare_failed_creator_operation_info' and can be + serialized using json. + + Usage is for UI purposes which may not have access to exceptions directly + and would not have ability to catch exceptions 'per creator'. + + Args: + msg (str): General error message. + failed_info (list[dict[str, Any]]): List of failed creators with + exception message and optionally formatted traceback. + """ + + def __init__(self, msg, failed_info): + super(CreatorsOperationFailed, self).__init__(msg) + self.failed_info = failed_info + + +class CreatorsCollectionFailed(CreatorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed to collect instances" + super(CreatorsCollectionFailed, self).__init__( + msg, failed_info + ) + + +class CreatorsSaveFailed(CreatorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed update instance changes" + super(CreatorsSaveFailed, self).__init__( + msg, failed_info + ) + + +class CreatorsRemoveFailed(CreatorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed to remove instances" + super(CreatorsRemoveFailed, self).__init__( + msg, failed_info + ) + + +class CreatorsCreateFailed(CreatorsOperationFailed): + def __init__(self, failed_info): + msg = "Faled to create instances" + super(CreatorsCreateFailed, self).__init__( + msg, failed_info + ) + + +def prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback=True +): + formatted_traceback = None + exc_type, exc_value, exc_traceback = exc_info + if add_traceback: + formatted_traceback = "".join(traceback.format_exception( + exc_type, exc_value, exc_traceback + )) + + return { + "creator_identifier": identifier, + "creator_label": label, + "message": str(exc_value), + "traceback": formatted_traceback + } + + class InstanceMember: """Representation of instance member. TODO: Implement and use! """ + def __init__(self, instance, name): self.instance = instance @@ -93,6 +212,7 @@ class AttributeValues: values(dict): Values after possible conversion. origin_data(dict): Values loaded from host before conversion. 
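A sketch (not part of the patch) of how a UI layer might consume `failed_info` from these exceptions; `create_context` is assumed to be an existing `CreateContext` instance (the class defined later in this file).

try:
    create_context.reset()
except CreatorsOperationFailed as exc:
    for info in exc.failed_info:
        # Keys come from 'prepare_failed_creator_operation_info'
        print("Creator '{}' failed: {}".format(
            info["creator_identifier"], info["message"]
        ))
        if info["traceback"]:
            print(info["traceback"])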
""" + def __init__(self, attr_defs, values, origin_data=None): from openpype.lib.attribute_definitions import UnknownDef @@ -161,7 +281,10 @@ class AttributeValues: return self._data.pop(key, default) def reset_values(self): - self._data = [] + self._data = {} + + def mark_as_stored(self): + self._origin_data = copy.deepcopy(self._data) @property def attr_defs(self): @@ -173,6 +296,10 @@ class AttributeValues: output = {} for key in self._data: output[key] = self[key] + + for key, attr_def in self._attr_defs_by_key.items(): + if key not in output: + output[key] = attr_def.default return output @staticmethod @@ -188,6 +315,16 @@ class AttributeValues: def changes(self): return self.calculate_changes(self._data, self._origin_data) + def apply_changes(self, changes): + for key, item in changes.items(): + old_value, new_value = item + if new_value is None: + if key in self: + self.pop(key) + + elif self.get(key) != new_value: + self[key] = new_value + class CreatorAttributeValues(AttributeValues): """Creator specific attribute values of an instance. @@ -195,6 +332,7 @@ class CreatorAttributeValues(AttributeValues): Args: instance (CreatedInstance): Instance for which are values hold. """ + def __init__(self, instance, *args, **kwargs): self.instance = instance super(CreatorAttributeValues, self).__init__(*args, **kwargs) @@ -210,6 +348,7 @@ class PublishAttributeValues(AttributeValues): publish_attributes(PublishAttributes): Wrapper for multiple publish attributes is used as parent object. """ + def __init__(self, publish_attributes, *args, **kwargs): self.publish_attributes = publish_attributes super(PublishAttributeValues, self).__init__(*args, **kwargs) @@ -231,6 +370,7 @@ class PublishAttributes: attr_plugins(list): List of publish plugins that may have defined attribute definitions. """ + def __init__(self, parent, origin_data, attr_plugins=None): self.parent = parent self._origin_data = copy.deepcopy(origin_data) @@ -269,6 +409,7 @@ class PublishAttributes: key(str): Plugin name. default: Default value if plugin was not found. """ + if key not in self._data: return default @@ -286,11 +427,16 @@ class PublishAttributes: def plugin_names_order(self): """Plugin names order by their 'order' attribute.""" + for name in self._plugin_names_order: yield name + def mark_as_stored(self): + self._origin_data = copy.deepcopy(self._data) + def data_to_store(self): """Convert attribute values to "data to store".""" + output = {} for key, attr_value in self._data.items(): output[key] = attr_value.data_to_store() @@ -298,6 +444,7 @@ class PublishAttributes: def changes(self): """Return changes per each key.""" + changes = {} for key, attr_val in self._data.items(): attr_changes = attr_val.changes() @@ -311,8 +458,24 @@ class PublishAttributes: changes[key] = (value, None) return changes + def apply_changes(self, changes): + for key, item in changes.items(): + if isinstance(item, dict): + self._data[key].apply_changes(item) + continue + + old_value, new_value = item + if new_value is not None: + raise ValueError( + "Unexpected type \"{}\" expected None".format( + str(type(new_value)) + ) + ) + self.pop(key) + def set_publish_plugins(self, attr_plugins): """Set publish plugins attribute definitions.""" + self._plugin_names_order = [] self._missing_plugins = [] self.attr_plugins = attr_plugins or [] @@ -364,6 +527,7 @@ class CreatedInstance: `openpype.pipeline.registered_host`. new(bool): Is instance new. """ + # Keys that can't be changed or removed from data after loading using # creator. 
# - 'creator_attributes' and 'publish_attributes' can change values of @@ -383,8 +547,12 @@ class CreatedInstance: self.creator = creator # Instance members may have actions on them + # TODO implement members logic self._members = [] + # Data that can be used for lifetime of object + self._transient_data = {} + # Create a copy of passed data to avoid changing them on the fly data = copy.deepcopy(data or {}) # Store original value of passed data @@ -495,6 +663,20 @@ class CreatedInstance: def subset_name(self): return self._data["subset"] + @property + def label(self): + label = self._data.get("label") + if not label: + label = self.subset_name + return label + + @property + def group_label(self): + label = self._data.get("group") + if label: + return label + return self.creator.get_group_label() + @property def creator_identifier(self): return self.creator.identifier @@ -551,6 +733,7 @@ class CreatedInstance: @property def id(self): """Instance identifier.""" + return self._data["instance_id"] @property @@ -559,10 +742,32 @@ class CreatedInstance: Access to data is needed to modify values. """ + return self + @property + def transient_data(self): + """Data stored for lifetime of instance object. + + These data are not stored to scene and will be lost on object + deletion. + + Can be used to store objects. In some host implementations is not + possible to reference to object in scene with some unique identifier + (e.g. node in Fusion.). In that case it is handy to store the object + here. Should be used that way only if instance data are stored on the + node itself. + + Returns: + Dict[str, Any]: Dictionary object where you can store data related + to instance for lifetime of instance object. + """ + + return self._transient_data + def changes(self): """Calculate and return changes.""" + changes = {} new_keys = set() for key, new_value in self._data.items(): @@ -587,6 +792,25 @@ class CreatedInstance: changes[key] = (old_value, None) return changes + def mark_as_stored(self): + """Should be called when instance data are stored. + + Origin data are replaced by current data so changes are cleared. + """ + + orig_keys = set(self._orig_data.keys()) + for key, value in self._data.items(): + orig_keys.discard(key) + if key in ("creator_attributes", "publish_attributes"): + continue + self._orig_data[key] = copy.deepcopy(value) + + for key in orig_keys: + self._orig_data.pop(key) + + self.creator_attributes.mark_as_stored() + self.publish_attributes.mark_as_stored() + @property def creator_attributes(self): return self._data["creator_attributes"] @@ -600,6 +824,18 @@ class CreatedInstance: return self._data["publish_attributes"] def data_to_store(self): + """Collect data that contain json parsable types. + + It is possible to recreate the instance using these data. + + Todo: + We probably don't need OrderedDict. When data are loaded they + are not ordered anymore. + + Returns: + OrderedDict: Ordered dictionary with instance data. + """ + output = collections.OrderedDict() for key, value in self._data.items(): if key in ("creator_attributes", "publish_attributes"): @@ -634,6 +870,128 @@ class CreatedInstance: if member not in self._members: self._members.append(member) + def serialize_for_remote(self): + return { + "data": self.data_to_store(), + "orig_data": copy.deepcopy(self._orig_data) + } + + @classmethod + def deserialize_on_remote(cls, serialized_data, creator_items): + """Convert instance data to CreatedInstance. + + This is fake instance in remote process e.g. in UI process. 
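A small sketch (not part of the patch) of the `transient_data` usage described above; `scene_node` stands in for any host specific object and is never serialized with the instance.

def attach_scene_node(instance, scene_node):
    # Kept only for the lifetime of the CreatedInstance object
    instance.transient_data["node"] = scene_node

def get_scene_node(instance):
    # Returns None when no node was attached by the creator
    return instance.transient_data.get("node")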
The creator + is not a full creator and should not be used for calling methods when + instance is created from this method (matters on implementation). + + Args: + serialized_data (Dict[str, Any]): Serialized data for remote + recreating. Should contain 'data' and 'orig_data'. + creator_items (Dict[str, Any]): Mapping of creator identifier and + objects that behave like a creator for most of attribute + access. + """ + + instance_data = copy.deepcopy(serialized_data["data"]) + creator_identifier = instance_data["creator_identifier"] + creator_item = creator_items[creator_identifier] + + family = instance_data.get("family", None) + if family is None: + family = creator_item.family + subset_name = instance_data.get("subset", None) + + obj = cls( + family, subset_name, instance_data, creator_item, new=False + ) + obj._orig_data = serialized_data["orig_data"] + + return obj + + def remote_changes(self): + """Prepare serializable changes on remote side. + + Returns: + Dict[str, Any]: Prepared changes that can be send to client side. + """ + + return { + "changes": self.changes(), + "asset_is_valid": self._asset_is_valid, + "task_is_valid": self._task_is_valid, + } + + def update_from_remote(self, remote_changes): + """Apply changes from remote side on client side. + + Args: + remote_changes (Dict[str, Any]): Changes created on remote side. + """ + + self._asset_is_valid = remote_changes["asset_is_valid"] + self._task_is_valid = remote_changes["task_is_valid"] + + changes = remote_changes["changes"] + creator_attributes = changes.pop("creator_attributes", None) or {} + publish_attributes = changes.pop("publish_attributes", None) or {} + if changes: + self.apply_changes(changes) + + if creator_attributes: + self.creator_attributes.apply_changes(creator_attributes) + + if publish_attributes: + self.publish_attributes.apply_changes(publish_attributes) + + def apply_changes(self, changes): + """Apply changes created via 'changes'. + + Args: + Dict[str, Tuple[Any, Any]]: Instance changes to apply. Same values + are kept untouched. + """ + + for key, item in changes.items(): + old_value, new_value = item + if new_value is None: + if key in self: + self.pop(key) + else: + current_value = self.get(key) + if current_value != new_value: + self[key] = new_value + + +class ConvertorItem(object): + """Item representing convertor plugin. + + Args: + identifier (str): Identifier of convertor. + label (str): Label which will be shown in UI. + """ + + def __init__(self, identifier, label): + self._id = str(uuid4()) + self.identifier = identifier + self.label = label + + @property + def id(self): + return self._id + + def to_data(self): + return { + "id": self.id, + "identifier": self.identifier, + "label": self.label + } + + @classmethod + def from_data(cls, data): + obj = cls(data["identifier"], data["label"]) + obj._id = data["id"] + return obj + class CreateContext: """Context of instance creation. @@ -651,12 +1009,6 @@ class CreateContext: discover_publish_plugins(bool): Discover publish plugins during reset phase. """ - # Methods required in host implementaion to be able create instances - # or change context data. 
- required_methods = ( - "get_context_data", - "update_context_data" - ) def __init__( self, host, dbcon=None, headless=False, reset=True, @@ -706,7 +1058,11 @@ class CreateContext: # Manual creators self.manual_creators = {} + self.convertors_plugins = {} + self.convertor_items_by_id = {} + self.publish_discover_result = None + self.publish_plugins_mismatch_targets = [] self.publish_plugins = [] self.plugins_with_defs = [] self._attr_plugins_by_family = {} @@ -718,6 +1074,11 @@ class CreateContext: self._bulk_counter = 0 self._bulk_instances_to_process = [] + # Shared data across creators during collection phase + self._collection_shared_data = None + + self.thumbnail_paths_by_instance_id = {} + # Trigger reset if was enabled if reset: self.reset(discover_publish_plugins) @@ -726,6 +1087,10 @@ class CreateContext: def instances(self): return self._instances_by_id.values() + @property + def instances_by_id(self): + return self._instances_by_id + @property def publish_attributes(self): """Access to global publish attributes.""" @@ -738,10 +1103,10 @@ class CreateContext: Args: host(ModuleType): Host implementaion. """ - missing = set() - for attr_name in cls.required_methods: - if not hasattr(host, attr_name): - missing.add(attr_name) + + missing = set( + IPublishHost.get_missing_publish_methods(host) + ) return missing @property @@ -749,6 +1114,14 @@ class CreateContext: """Is host valid for creation.""" return self._host_is_valid + @property + def host_name(self): + return os.environ["AVALON_APP"] + + @property + def project_name(self): + return self.dbcon.active_project() + @property def log(self): """Dynamic access to logger.""" @@ -761,14 +1134,56 @@ class CreateContext: All changes will be lost if were not saved explicitely. """ + + self.reset_preparation() + self.reset_avalon_context() self.reset_plugins(discover_publish_plugins) self.reset_context_data() with self.bulk_instances_collection(): self.reset_instances() + self.find_convertor_items() self.execute_autocreators() + self.reset_finalization() + + def refresh_thumbnails(self): + """Cleanup thumbnail paths. + + Remove all thumbnail filepaths that are empty or lead to files which + does not exists or of instances that are not available anymore. + """ + + invalid = set() + for instance_id, path in self.thumbnail_paths_by_instance_id.items(): + instance_available = True + if instance_id is not None: + instance_available = instance_id in self._instances_by_id + + if ( + not instance_available + or not path + or not os.path.exists(path) + ): + invalid.add(instance_id) + + for instance_id in invalid: + self.thumbnail_paths_by_instance_id.pop(instance_id) + + def reset_preparation(self): + """Prepare attributes that must be prepared/cleaned before reset.""" + + # Give ability to store shared data for collection phase + self._collection_shared_data = {} + + def reset_finalization(self): + """Cleanup of attributes after reset.""" + + # Stop access to collection shared data + self._collection_shared_data = None + self.refresh_thumbnails() + def reset_avalon_context(self): """Give ability to reset avalon context. @@ -807,6 +1222,12 @@ class CreateContext: Reloads creators from preregistered paths and can load publish plugins if it's enabled on context. 
""" + + self._reset_publish_plugins(discover_publish_plugins) + self._reset_creator_plugins() + self._reset_convertor_plugins() + + def _reset_publish_plugins(self, discover_publish_plugins): import pyblish.logic from openpype.pipeline import OpenPypePyblishPluginMixin @@ -821,27 +1242,37 @@ class CreateContext: discover_result = DiscoverResult() plugins_with_defs = [] plugins_by_targets = [] + plugins_mismatch_targets = [] if discover_publish_plugins: discover_result = publish_plugins_discover() publish_plugins = discover_result.plugins - targets = pyblish.logic.registered_targets() or ["default"] + targets = set(pyblish.logic.registered_targets()) + targets.add("default") plugins_by_targets = pyblish.logic.plugins_by_targets( - publish_plugins, targets + publish_plugins, list(targets) ) + # Collect plugins that can have attribute definitions for plugin in publish_plugins: if OpenPypePyblishPluginMixin in inspect.getmro(plugin): plugins_with_defs.append(plugin) + plugins_mismatch_targets = [ + plugin + for plugin in publish_plugins + if plugin not in plugins_by_targets + ] + + self.publish_plugins_mismatch_targets = plugins_mismatch_targets self.publish_discover_result = discover_result self.publish_plugins = plugins_by_targets self.plugins_with_defs = plugins_with_defs + def _reset_creator_plugins(self): # Prepare settings - project_name = self.dbcon.Session["AVALON_PROJECT"] system_settings = get_system_settings() - project_settings = get_project_settings(project_name) + project_settings = get_project_settings(self.project_name) # Discover and prepare creators creators = {} @@ -861,10 +1292,22 @@ class CreateContext: "Using first and skipping following" )) continue + + # Filter by host name + if ( + creator_class.host_name + and creator_class.host_name != self.host_name + ): + self.log.info(( + "Creator's host name \"{}\"" + " is not supported for current host \"{}\"" + ).format(creator_class.host_name, self.host_name)) + continue + creator = creator_class( - self, - system_settings, project_settings, + system_settings, + self, self.headless ) creators[creator_identifier] = creator @@ -878,6 +1321,27 @@ class CreateContext: self.creators = creators + def _reset_convertor_plugins(self): + convertors_plugins = {} + for convertor_class in discover_convertor_plugins(): + if inspect.isabstract(convertor_class): + self.log.info( + "Skipping abstract Creator {}".format(str(convertor_class)) + ) + continue + + convertor_identifier = convertor_class.identifier + if convertor_identifier in convertors_plugins: + self.log.warning(( + "Duplicated Converter identifier. " + "Using first and skipping following" + )) + continue + + convertors_plugins[convertor_identifier] = convertor_class(self) + + self.convertors_plugins = convertors_plugins + def reset_context_data(self): """Reload context data using host implementation. @@ -946,9 +1410,75 @@ class CreateContext: with self.bulk_instances_collection(): self._bulk_instances_to_process.append(instance) + def create(self, identifier, *args, **kwargs): + """Wrapper for creators to trigger created. + + Different types of creators may expect different arguments thus the + hints for args are blind. + + Args: + identifier (str): Creator's identifier. + *args (Tuple[Any]): Arguments for create method. + **kwargs (Dict[Any, Any]): Keyword argument for create method. + """ + + error_message = "Failed to run Creator with identifier \"{}\". 
{}" + creator = self.creators.get(identifier) + label = getattr(creator, "label", None) + failed = False + add_traceback = False + exc_info = None + try: + # Fake CreatorError (Could be maybe specific exception?) + if creator is None: + raise CreatorError( + "Creator {} was not found".format(identifier) + ) + + creator.create(*args, **kwargs) + + except CreatorError: + failed = True + exc_info = sys.exc_info() + self.log.warning(error_message.format(identifier, exc_info[1])) + + except: + failed = True + add_traceback = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, ""), + exc_info=True + ) + + if failed: + raise CreatorsCreateFailed([ + prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + ]) + def creator_removed_instance(self, instance): + """When creator removes instance context should be acknowledged. + + If creator removes instance conext should know about it to avoid + possible issues in the session. + + Args: + instance (CreatedInstance): Object of instance which was removed + from scene metadata. + """ + self._instances_by_id.pop(instance.id, None) + def add_convertor_item(self, convertor_identifier, label): + self.convertor_items_by_id[convertor_identifier] = ConvertorItem( + convertor_identifier, label + ) + + def remove_convertor_item(self, convertor_identifier): + self.convertor_items_by_id.pop(convertor_identifier, None) + @contextmanager def bulk_instances_collection(self): """Validate context of instances in bulk. @@ -981,24 +1511,112 @@ class CreateContext: self._instances_by_id = {} # Collect instances + error_message = "Collection of instances for creator {} failed. {}" + failed_info = [] for creator in self.creators.values(): - creator.collect_instances() + label = creator.label + identifier = creator.identifier + failed = False + add_traceback = False + exc_info = None + try: + creator.collect_instances() + + except CreatorError: + failed = True + exc_info = sys.exc_info() + self.log.warning(error_message.format(identifier, exc_info[1])) + + except: + failed = True + add_traceback = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, ""), + exc_info=True + ) + + if failed: + failed_info.append( + prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + ) + + if failed_info: + raise CreatorsCollectionFailed(failed_info) + + def find_convertor_items(self): + """Go through convertor plugins to look for items to convert. + + Raises: + ConvertorsFindFailed: When one or more convertors fails during + finding. + """ + + self.convertor_items_by_id = {} + + failed_info = [] + for convertor in self.convertors_plugins.values(): + try: + convertor.find_instances() + + except: + failed_info.append( + prepare_failed_convertor_operation_info( + convertor.identifier, sys.exc_info() + ) + ) + self.log.warning( + "Failed to find instances of convertor \"{}\"".format( + convertor.identifier + ), + exc_info=True + ) + + if failed_info: + raise ConvertorsFindFailed(failed_info) def execute_autocreators(self): """Execute discovered AutoCreator plugins. Reset instances if any autocreator executed properly. """ + + error_message = "Failed to run AutoCreator with identifier \"{}\". 
{}" + failed_info = [] for identifier, creator in self.autocreators.items(): + label = creator.label + failed = False + add_traceback = False try: creator.create() - except Exception: - # TODO raise report exception if any crashed - msg = ( - "Failed to run AutoCreator with identifier \"{}\" ({})." - ).format(identifier, inspect.getfile(creator.__class__)) - self.log.warning(msg, exc_info=True) + except CreatorError: + failed = True + exc_info = sys.exc_info() + self.log.warning(error_message.format(identifier, exc_info[1])) + + # Use bare except because some hosts raise their exceptions that + # do not inherit from python's `BaseException` + except: + failed = True + add_traceback = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, ""), + exc_info=True + ) + + if failed: + failed_info.append( + prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + ) + + if failed_info: + raise CreatorsCreateFailed(failed_info) def validate_instances_context(self, instances=None): """Validate 'asset' and 'task' instance context.""" @@ -1024,15 +1642,10 @@ class CreateContext: for asset_name in task_names_by_asset_name.keys() if asset_name is not None ] - asset_docs = list(self.dbcon.find( - { - "type": "asset", - "name": {"$in": asset_names} - }, - { - "name": True, - "data.tasks": True - } + asset_docs = list(get_assets( + self.project_name, + asset_names=asset_names, + fields=["name", "data.tasks"] )) task_names_by_asset_name = {} @@ -1080,17 +1693,48 @@ class CreateContext: identifier = instance.creator_identifier instances_by_identifier[identifier].append(instance) - for identifier, cretor_instances in instances_by_identifier.items(): + error_message = "Instances update of creator \"{}\" failed. {}" + failed_info = [] + for identifier, creator_instances in instances_by_identifier.items(): update_list = [] - for instance in cretor_instances: + for instance in creator_instances: instance_changes = instance.changes() if instance_changes: update_list.append(UpdateData(instance, instance_changes)) creator = self.creators[identifier] - if update_list: + if not update_list: + continue + + label = creator.label + failed = False + add_traceback = False + exc_info = None + try: creator.update_instances(update_list) + except CreatorError: + failed = True + exc_info = sys.exc_info() + self.log.warning(error_message.format(identifier, exc_info[1])) + + except: + failed = True + add_traceback = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, ""), exc_info=True) + + if failed: + failed_info.append( + prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + ) + + if failed_info: + raise CreatorsSaveFailed(failed_info) + def remove_instances(self, instances): """Remove instances from context. @@ -1098,14 +1742,48 @@ class CreateContext: instances(list): Instances that should be removed from context. """ + instances_by_identifier = collections.defaultdict(list) for instance in instances: identifier = instance.creator_identifier instances_by_identifier[identifier].append(instance) + error_message = "Instances removement of creator \"{}\" failed. 
{}" + failed_info = [] for identifier, creator_instances in instances_by_identifier.items(): creator = self.creators.get(identifier) - creator.remove_instances(creator_instances) + label = creator.label + failed = False + add_traceback = False + exc_info = None + try: + creator.remove_instances(creator_instances) + + except CreatorError: + failed = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, exc_info[1]) + ) + + except: + failed = True + add_traceback = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, ""), + exc_info=True + ) + + if failed: + failed_info.append( + prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + ) + + if failed_info: + raise CreatorsRemoveFailed(failed_info) def _get_publish_plugins_with_attr_for_family(self, family): """Publish plugin attributes for passed family. @@ -1137,3 +1815,68 @@ class CreateContext: if not plugin.__instanceEnabled__: plugins.append(plugin) return plugins + + @property + def collection_shared_data(self): + """Access to shared data that can be used during creator's collection. + + Retruns: + Dict[str, Any]: Shared data. + + Raises: + UnavailableSharedData: When called out of collection phase. + """ + + if self._collection_shared_data is None: + raise UnavailableSharedData( + "Accessed Collection shared data out of collection phase" + ) + return self._collection_shared_data + + def run_convertor(self, convertor_identifier): + """Run convertor plugin by it's idenfitifier. + + Conversion is skipped if convertor is not available. + + Args: + convertor_identifier (str): Identifier of convertor. + """ + + convertor = self.convertors_plugins.get(convertor_identifier) + if convertor is not None: + convertor.convert() + + def run_convertors(self, convertor_identifiers): + """Run convertor plugins by idenfitifiers. + + Conversion is skipped if convertor is not available. It is recommended + to trigger reset after conversion to reload instances. + + Args: + convertor_identifiers (Iterator[str]): Identifiers of convertors + to run. + + Raises: + ConvertorsConversionFailed: When one or more convertors fails. 
+ """ + + failed_info = [] + for convertor_identifier in convertor_identifiers: + try: + self.run_convertor(convertor_identifier) + + except: + failed_info.append( + prepare_failed_convertor_operation_info( + convertor_identifier, sys.exc_info() + ) + ) + self.log.warning( + "Failed to convert instances of convertor \"{}\"".format( + convertor_identifier + ), + exc_info=True + ) + + if failed_info: + raise ConvertorsConversionFailed(failed_info) diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index cbe19da064..bb5ce00452 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -1,17 +1,17 @@ +import os import copy -import logging +import collections from abc import ( ABCMeta, abstractmethod, abstractproperty ) + import six -from openpype.lib import ( - get_subset_name_with_asset_doc, - set_plugin_attributes_from_settings, -) +from openpype.settings import get_system_settings, get_project_settings +from openpype.lib import Logger from openpype.pipeline.plugin_discover import ( discover, register_plugin, @@ -20,6 +20,7 @@ from openpype.pipeline.plugin_discover import ( deregister_plugin_path ) +from .subset_name import get_subset_name from .legacy_create import LegacyCreator @@ -33,6 +34,111 @@ class CreatorError(Exception): super(CreatorError, self).__init__(message) +@six.add_metaclass(ABCMeta) +class SubsetConvertorPlugin(object): + """Helper for conversion of instances created using legacy creators. + + Conversion from legacy creators would mean to loose legacy instances, + convert them automatically or write a script which must user run. All of + these solutions are workign but will happen without asking or user must + know about them. This plugin can be used to show legacy instances in + Publisher and give user ability to run conversion script. + + Convertor logic should be very simple. Method 'find_instances' is to + look for legacy instances in scene a possibly call + pre-implemented 'add_convertor_item'. + + User will have ability to trigger conversion which is executed by calling + 'convert' which should call 'remove_convertor_item' when is done. + + It does make sense to add only one or none legacy item to create context + for convertor as it's not possible to choose which instace are converted + and which are not. + + Convertor can use 'collection_shared_data' property like creators. Also + can store any information to it's object for conversion purposes. + + Args: + create_context + """ + + _log = None + + def __init__(self, create_context): + self._create_context = create_context + + @property + def log(self): + """Logger of the plugin. + + Returns: + logging.Logger: Logger with name of the plugin. + """ + + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + @abstractproperty + def identifier(self): + """Converted identifier. + + Returns: + str: Converted identifier unique for all converters in host. + """ + + pass + + @abstractmethod + def find_instances(self): + """Look for legacy instances in the scene. + + Should call 'add_convertor_item' if there is at least one instance to + convert. + """ + + pass + + @abstractmethod + def convert(self): + """Conversion code.""" + + pass + + @property + def create_context(self): + """Quick access to create context.""" + + return self._create_context + + @property + def collection_shared_data(self): + """Access to shared data that can be used during 'find_instances'. 
+ + Retruns: + Dict[str, Any]: Shared data. + + Raises: + UnavailableSharedData: When called out of collection phase. + """ + + return self._create_context.collection_shared_data + + def add_convertor_item(self, label): + """Add item to CreateContext. + + Args: + label (str): Label of item which will show in UI. + """ + + self._create_context.add_convertor_item(self.identifier, label) + + def remove_convertor_item(self): + """Remove legacy item from create context when conversion finished.""" + + self._create_context.remove_convertor_item(self.identifier) + + @six.add_metaclass(ABCMeta) class BaseCreator: """Plugin that create and modify instance data before publishing process. @@ -47,6 +153,9 @@ class BaseCreator: # Label shown in UI label = None + group_label = None + # Cached group label after first call 'get_group_label' + _cached_group_label = None # Variable to store logger _log = None @@ -63,42 +172,113 @@ class BaseCreator: # `openpype.pipeline.attribute_definitions` instance_attr_defs = [] + # Filtering by host name - can be used to be filtered by host name + # - used on all hosts when set to 'None' for Backwards compatibility + # - was added afterwards + # QUESTION make this required? + host_name = None + def __init__( - self, create_context, system_settings, project_settings, headless=False + self, project_settings, system_settings, create_context, headless=False ): # Reference to CreateContext self.create_context = create_context + self.project_settings = project_settings # Creator is running in headless mode (without UI elemets) # - we may use UI inside processing this attribute should be checked self.headless = headless + self.apply_settings(project_settings, system_settings) + + def apply_settings(self, project_settings, system_settings): + """Method called on initialization of plugin to apply settings.""" + + pass + @property def identifier(self): """Identifier of creator (must be unique). Default implementation returns plugin's family. """ + return self.family @abstractproperty def family(self): """Family that plugin represents.""" + pass @property - def log(self): - if self._log is None: - from openpype.api import Logger + def project_name(self): + """Family that plugin represents.""" + return self.create_context.project_name + + @property + def host(self): + return self.create_context.host + + def get_group_label(self): + """Group label under which are instances grouped in UI. + + Default implementation use attributes in this order: + - 'group_label' -> 'label' -> 'identifier' + Keep in mind that 'identifier' use 'family' by default. + + Returns: + str: Group label that can be used for grouping of instances in UI. + Group label can be overriden by instance itself. + """ + + if self._cached_group_label is None: + label = self.identifier + if self.group_label: + label = self.group_label + elif self.label: + label = self.label + self._cached_group_label = label + return self._cached_group_label + + @property + def log(self): + """Logger of the plugin. + + Returns: + logging.Logger: Logger with name of the plugin. + """ + + if self._log is None: self._log = Logger.get_logger(self.__class__.__name__) return self._log def _add_instance_to_context(self, instance): - """Helper method to ad d""" + """Helper method to add instance to create context. + + Instances should be stored to DCC workfile metadata to be able reload + them and also stored to CreateContext in which is creator plugin + existing at the moment to be able use it without refresh of + CreateContext. 
+ + Args: + instance (CreatedInstance): New created instance. + """ + self.create_context.creator_adds_instance(instance) def _remove_instance_from_context(self, instance): + """Helper method to remove instance from create context. + + Instances must be removed from DCC workfile metadat aand from create + context in which plugin is existing at the moment of removement to + propagate the change without restarting create context. + + Args: + instance (CreatedInstance): Instance which should be removed. + """ + self.create_context.creator_removed_instance(instance) @abstractmethod @@ -109,6 +289,7 @@ class BaseCreator: - must expect all data that were passed to init in previous implementation """ + pass @abstractmethod @@ -135,6 +316,7 @@ class BaseCreator: self._add_instance_to_context(instance) ``` """ + pass @abstractmethod @@ -142,9 +324,10 @@ class BaseCreator: """Store changes of existing instances so they can be recollected. Args: - update_list(list): Gets list of tuples. Each item + update_list(List[UpdateData]): Gets list of tuples. Each item contain changed instance and it's changes. """ + pass @abstractmethod @@ -155,9 +338,10 @@ class BaseCreator: 'True' if did so. Args: - instance(list): Instance objects which should be + instance(List[CreatedInstance]): Instance objects which should be removed. """ + pass def get_icon(self): @@ -165,20 +349,28 @@ class BaseCreator: Can return path to image file or awesome icon name. """ + return self.icon def get_dynamic_data( - self, variant, task_name, asset_doc, project_name, host_name + self, variant, task_name, asset_doc, project_name, host_name, instance ): """Dynamic data for subset name filling. These may be get dynamically created based on current context of workfile. """ + return {} def get_subset_name( - self, variant, task_name, asset_doc, project_name, host_name=None + self, + variant, + task_name, + asset_doc, + project_name, + host_name=None, + instance=None ): """Return subset name for passed context. @@ -192,25 +384,33 @@ class BaseCreator: Asset document is not used yet but is required if would like to use task type in subset templates. + Method is also called on subset name update. In that case origin + instance is passed in. + Args: variant(str): Subset name variant. In most of cases user input. task_name(str): For which task subset is created. asset_doc(dict): Asset document for which subset is created. project_name(str): Project name. host_name(str): Which host creates subset. + instance(CreatedInstance|None): Object of 'CreatedInstance' for + which is subset name updated. Passed only on subset name + update. """ + dynamic_data = self.get_dynamic_data( - variant, task_name, asset_doc, project_name, host_name + variant, task_name, asset_doc, project_name, host_name, instance ) - return get_subset_name_with_asset_doc( + return get_subset_name( self.family, variant, task_name, asset_doc, project_name, host_name, - dynamic_data=dynamic_data + dynamic_data=dynamic_data, + project_settings=self.project_settings ) def get_instance_attr_defs(self): @@ -225,11 +425,32 @@ class BaseCreator: keys/values when plugin attributes change. Returns: - list: Attribute definitions that can be tweaked for + List[AbtractAttrDef]: Attribute definitions that can be tweaked for created instance. """ + return self.instance_attr_defs + @property + def collection_shared_data(self): + """Access to shared data that can be used during creator's collection. + + Retruns: + Dict[str, Any]: Shared data. 
+ + Raises: + UnavailableSharedData: When called out of collection phase. + """ + + return self.create_context.collection_shared_data + + def set_instance_thumbnail_path(self, instance_id, thumbnail_path=None): + """Set path to thumbnail for instance.""" + + self.create_context.thumbnail_paths_by_instance_id[instance_id] = ( + thumbnail_path + ) + class Creator(BaseCreator): """Creator that has more information for artist to show in UI. @@ -256,6 +477,13 @@ class Creator(BaseCreator): # - in some cases it may confuse artists because it would not be used # e.g. for buld creators create_allow_context_change = True + # A thumbnail can be passed in precreate attributes + # - if is set to True is should expect that a thumbnail path under key + # PRE_CREATE_THUMBNAIL_KEY can be sent in data with precreate data + # - is disabled by default because the feature was added in later stages + # and creators who would not expect PRE_CREATE_THUMBNAIL_KEY could + # cause issues with instance data + create_allow_thumbnail = False # Precreate attribute definitions showed before creation # - similar to instance attribute definitions @@ -285,6 +513,7 @@ class Creator(BaseCreator): Returns: str: Short description of family. """ + return self.description def get_detail_description(self): @@ -295,6 +524,7 @@ class Creator(BaseCreator): Returns: str: Detailed description of family for artist. """ + return self.detailed_description def get_default_variants(self): @@ -306,8 +536,9 @@ class Creator(BaseCreator): By default returns `default_variants` value. Returns: - list: Whisper variants for user input. + List[str]: Whisper variants for user input. """ + return copy.deepcopy(self.default_variants) def get_default_variant(self): @@ -326,16 +557,24 @@ class Creator(BaseCreator): """Plugin attribute definitions needed for creation. Attribute definitions of plugin that define how creation will work. Values of these definitions are passed to `create` method. - NOTE: - Convert method should be implemented which should care about updating - keys/values when plugin attributes change. + + Note: + Convert method should be implemented which should care about + updating keys/values when plugin attributes change. + Returns: - list: Attribute definitions that can be tweaked for + List[AbtractAttrDef]: Attribute definitions that can be tweaked for created instance. """ return self.pre_create_attr_defs +class HiddenCreator(BaseCreator): + @abstractmethod + def create(self, instance_data, source_data): + pass + + class AutoCreator(BaseCreator): """Creator which is automatically triggered without user interaction. @@ -351,12 +590,60 @@ def discover_creator_plugins(): return discover(BaseCreator) +def discover_convertor_plugins(): + return discover(SubsetConvertorPlugin) + + def discover_legacy_creator_plugins(): + from openpype.lib import Logger + + log = Logger.get_logger("CreatorDiscover") + plugins = discover(LegacyCreator) - set_plugin_attributes_from_settings(plugins, LegacyCreator) + project_name = os.environ.get("AVALON_PROJECT") + system_settings = get_system_settings() + project_settings = get_project_settings(project_name) + for plugin in plugins: + try: + plugin.apply_settings(project_settings, system_settings) + except Exception: + log.warning( + "Failed to apply settings to loader {}".format( + plugin.__name__ + ), + exc_info=True + ) return plugins +def get_legacy_creator_by_name(creator_name, case_sensitive=False): + """Find creator plugin by name. 
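A hedged skeleton (not part of the patch) of a `Creator` subclass; the `create` signature follows the common `(subset_name, instance_data, pre_create_data)` pattern, `CreatedInstance` is assumed to be importable from the neighbouring context module, and scene persistence is left out entirely.

class ExampleCreator(Creator):
    identifier = "example.creator"
    family = "example"
    label = "Example"

    def create(self, subset_name, instance_data, pre_create_data):
        instance = CreatedInstance(self.family, subset_name, instance_data, self)
        self._add_instance_to_context(instance)
        # Host specific storage of instance.data_to_store() would go here

    def collect_instances(self):
        pass  # would recreate instances from scene metadata

    def update_instances(self, update_list):
        pass  # would write changed data back to the scene

    def remove_instances(self, instances):
        for instance in instances:
            self._remove_instance_from_context(instance)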
+ + Args: + creator_name (str): Name of creator class that should be returned. + case_sensitive (bool): Match of creator plugin name is case sensitive. + Set to `False` by default. + + Returns: + Creator: Return first matching plugin or `None`. + """ + + # Lower input creator name if is not case sensitive + if not case_sensitive: + creator_name = creator_name.lower() + + for creator_plugin in discover_legacy_creator_plugins(): + _creator_name = creator_plugin.__name__ + + # Lower creator plugin name if is not case sensitive + if not case_sensitive: + _creator_name = _creator_name.lower() + + if _creator_name == creator_name: + return creator_plugin + return None + + def register_creator_plugin(plugin): if issubclass(plugin, BaseCreator): register_plugin(BaseCreator, plugin) @@ -364,6 +651,9 @@ def register_creator_plugin(plugin): elif issubclass(plugin, LegacyCreator): register_plugin(LegacyCreator, plugin) + elif issubclass(plugin, SubsetConvertorPlugin): + register_plugin(SubsetConvertorPlugin, plugin) + def deregister_creator_plugin(plugin): if issubclass(plugin, BaseCreator): @@ -372,12 +662,48 @@ def deregister_creator_plugin(plugin): elif issubclass(plugin, LegacyCreator): deregister_plugin(LegacyCreator, plugin) + elif issubclass(plugin, SubsetConvertorPlugin): + deregister_plugin(SubsetConvertorPlugin, plugin) + def register_creator_plugin_path(path): register_plugin_path(BaseCreator, path) register_plugin_path(LegacyCreator, path) + register_plugin_path(SubsetConvertorPlugin, path) def deregister_creator_plugin_path(path): deregister_plugin_path(BaseCreator, path) deregister_plugin_path(LegacyCreator, path) + deregister_plugin_path(SubsetConvertorPlugin, path) + + +def cache_and_get_instances(creator, shared_key, list_instances_func): + """Common approach to cache instances in shared data. + + This is helper function which does not handle cases when a 'shared_key' is + used for different list instances functions. The same approach of caching + instances into 'collection_shared_data' is not required but is so common + we've decided to unify it to some degree. + + Function 'list_instances_func' is called only if 'shared_key' is not + available in 'collection_shared_data' on creator. + + Args: + creator (Creator): Plugin which would like to get instance data. + shared_key (str): Key under which output of function will be stored. + list_instances_func (Function): Function that will return instance data + if data were not yet stored under 'shared_key'. + + Returns: + Dict[str, Dict[str, Any]]: Cached instances by creator identifier from + result of passed function. 
+ """ + + if shared_key not in creator.collection_shared_data: + value = collections.defaultdict(list) + for instance in list_instances_func(): + identifier = instance.get("creator_identifier") + value[identifier].append(instance) + creator.collection_shared_data[shared_key] = value + return creator.collection_shared_data[shared_key] diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py index 46e0e3d663..82e5de7a8f 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/openpype/pipeline/create/legacy_create.py @@ -5,10 +5,13 @@ Renamed classes and functions - 'create' -> 'legacy_create' """ +import os import logging import collections -from openpype.lib import get_subset_name +from openpype.client import get_asset_by_id + +from .subset_name import get_subset_name class LegacyCreator(object): @@ -37,6 +40,48 @@ class LegacyCreator(object): self.data.update(data or {}) + @classmethod + def apply_settings(cls, project_settings, system_settings): + """Apply OpenPype settings to a plugin class.""" + + host_name = os.environ.get("AVALON_APP") + plugin_type = "create" + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + setattr(cls, "active", False) + print(" - is disabled by preset") + else: + setattr(cls, option, value) + print(" - setting `{}`: `{}`".format(option, value)) + def process(self): pass @@ -104,11 +149,15 @@ class LegacyCreator(object): variant, task_name, asset_id, project_name, host_name ) + asset_doc = get_asset_by_id( + project_name, asset_id, fields=["data.tasks"] + ) + return get_subset_name( cls.family, variant, task_name, - asset_id, + asset_doc, project_name, host_name, dynamic_data=dynamic_data diff --git a/openpype/pipeline/create/subset_name.py b/openpype/pipeline/create/subset_name.py new file mode 100644 index 0000000000..f508263708 --- /dev/null +++ b/openpype/pipeline/create/subset_name.py @@ -0,0 +1,109 @@ +import os + +from openpype.settings import get_project_settings +from openpype.lib import filter_profiles, prepare_template_data +from openpype.pipeline import legacy_io + +from .constants import DEFAULT_SUBSET_TEMPLATE + + +class TaskNotSetError(KeyError): + def __init__(self, msg=None): + if not msg: + msg = "Creator's subset name template requires task name." + super(TaskNotSetError, self).__init__(msg) + + +def get_subset_name( + family, + variant, + task_name, + asset_doc, + project_name=None, + host_name=None, + default_template=None, + dynamic_data=None, + project_settings=None +): + """Calculate subset name based on passed context and OpenPype settings. + + Subst name templates are defined in `project_settings/global/tools/creator + /subset_name_profiles` where are profiles with host name, family, task name + and task type filters. 
If context does not match any profile then + `DEFAULT_SUBSET_TEMPLATE` is used as default template. + + That's main reason why so many arguments are required to calculate subset + name. + + Args: + family (str): Instance family. + variant (str): In most of cases it is user input during creation. + task_name (str): Task name on which context is instance created. + asset_doc (dict): Queried asset document with it's tasks in data. + Used to get task type. + project_name (str): Name of project on which is instance created. + Important for project settings that are loaded. + host_name (str): One of filtering criteria for template profile + filters. + default_template (str): Default template if any profile does not match + passed context. Constant 'DEFAULT_SUBSET_TEMPLATE' is used if + is not passed. + dynamic_data (dict): Dynamic data specific for a creator which creates + instance. + dbcon (AvalonMongoDB): Mongo connection to be able query asset document + if 'asset_doc' is not passed. + """ + + if not family: + return "" + + if not host_name: + host_name = os.environ["AVALON_APP"] + + # Use only last part of class family value split by dot (`.`) + family = family.rsplit(".", 1)[-1] + + if project_name is None: + project_name = legacy_io.Session["AVALON_PROJECT"] + + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + task_info = asset_tasks.get(task_name) or {} + task_type = task_info.get("type") + + # Get settings + if not project_settings: + project_settings = get_project_settings(project_name) + tools_settings = project_settings["global"]["tools"] + profiles = tools_settings["creator"]["subset_name_profiles"] + filtering_criteria = { + "families": family, + "hosts": host_name, + "tasks": task_name, + "task_types": task_type + } + + matching_profile = filter_profiles(profiles, filtering_criteria) + template = None + if matching_profile: + template = matching_profile["template"] + + # Make sure template is set (matching may have empty string) + if not template: + template = default_template or DEFAULT_SUBSET_TEMPLATE + + # Simple check of task name existence for template with {task} in + # - missing task should be possible only in Standalone publisher + if not task_name and "{task" in template.lower(): + raise TaskNotSetError() + + fill_pairs = { + "variant": variant, + "family": family, + "task": task_name + } + if dynamic_data: + # Dynamic data may override default values + for key, value in dynamic_data.items(): + fill_pairs[key] = value + + return template.format(**prepare_template_data(fill_pairs)) diff --git a/openpype/pipeline/delivery.py b/openpype/pipeline/delivery.py new file mode 100644 index 0000000000..8cf9a43aac --- /dev/null +++ b/openpype/pipeline/delivery.py @@ -0,0 +1,310 @@ +"""Functions useful for delivery of published representations.""" +import os +import shutil +import glob +import clique +import collections + +from openpype.lib import create_hard_link + + +def _copy_file(src_path, dst_path): + """Hardlink file if possible(to save space), copy if not. + + Because of using hardlinks should not be function used in other parts + of pipeline. + """ + + if os.path.exists(dst_path): + return + try: + create_hard_link( + src_path, + dst_path + ) + except OSError: + shutil.copyfile(src_path, dst_path) + + +def get_format_dict(anatomy, location_path): + """Returns replaced root values from user provider value. + + Args: + anatomy (Anatomy): Project anatomy. + location_path (str): User provided value. + + Returns: + (dict): Prepared data for formatting of a template. 
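A self-contained sketch (not part of the patch) showing how the default template resolves; the project, asset and settings values are made up, and the empty profile list forces the `DEFAULT_SUBSET_TEMPLATE` fallback.

asset_doc = {"data": {"tasks": {"modeling": {"type": "Modeling"}}}}
project_settings = {
    "global": {"tools": {"creator": {"subset_name_profiles": []}}}
}

subset_name = get_subset_name(
    "model",
    "Main",
    "modeling",
    asset_doc,
    project_name="demo_project",
    host_name="maya",
    project_settings=project_settings,
)
# "{family}{Variant}" -> "modelMain"; prepare_template_data also provides
# capitalized/uppercase variants of each key for the template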
+    """
+
+    format_dict = {}
+    if not location_path:
+        return format_dict
+
+    location_path = location_path.replace("\\", "/")
+    root_names = anatomy.root_names_from_templates(
+        anatomy.templates["delivery"]
+    )
+    format_dict["root"] = {}
+    for name in root_names:
+        format_dict["root"][name] = location_path
+    return format_dict
+
+
+def check_destination_path(
+    repre_id,
+    anatomy,
+    anatomy_data,
+    datetime_data,
+    template_name
+):
+    """Try to create destination path based on 'template_name'.
+
+    If the path cannot be filled because the template contains unmatched
+    keys, an error message is provided so the representation can be
+    filtered out later.
+
+    Args:
+        repre_id (str): Representation id.
+        anatomy (Anatomy): Project anatomy.
+        anatomy_data (dict): Template data used to fill anatomy templates.
+        datetime_data (dict): Values with the actual date.
+        template_name (str): Name of the anatomy template which should be
+            used.
+
+    Returns:
+        Dict[str, List[str]]: Report of errors that occurred. Key is the
+            message title, value is detailed information.
+    """
+
+    anatomy_data.update(datetime_data)
+    anatomy_filled = anatomy.format_all(anatomy_data)
+    dest_path = anatomy_filled["delivery"][template_name]
+    report_items = collections.defaultdict(list)
+
+    if not dest_path.solved:
+        msg = (
+            "Missing keys in Representation's context"
+            " for anatomy template \"{}\"."
+        ).format(template_name)
+
+        sub_msg = (
+            "Representation: {}<br>"
+        ).format(repre_id)
+
+        if dest_path.missing_keys:
+            keys = ", ".join(dest_path.missing_keys)
+            sub_msg += (
+                "- Missing keys: \"{}\"<br>"
+            ).format(keys)
+
+        if dest_path.invalid_types:
+            items = []
+            for key, value in dest_path.invalid_types.items():
+                items.append("\"{}\" {}".format(key, str(value)))
+
+            keys = ", ".join(items)
+            sub_msg += (
+                "- Invalid value DataType: \"{}\"<br>
" + ).format(keys) + + report_items[msg].append(sub_msg) + + return report_items + + +def deliver_single_file( + src_path, + repre, + anatomy, + template_name, + anatomy_data, + format_dict, + report_items, + log +): + """Copy single file to calculated path based on template + + Args: + src_path(str): path of source representation file + repre (dict): full repre, used only in deliver_sequence, here only + as to share same signature + anatomy (Anatomy) + template_name (string): user selected delivery template name + anatomy_data (dict): data from repre to fill anatomy with + format_dict (dict): root dictionary with names and values + report_items (collections.defaultdict): to return error messages + log (logging.Logger): for log printing + + Returns: + (collections.defaultdict, int) + """ + + # Make sure path is valid for all platforms + src_path = os.path.normpath(src_path.replace("\\", "/")) + + if not os.path.exists(src_path): + msg = "{} doesn't exist for {}".format(src_path, repre["_id"]) + report_items["Source file was not found"].append(msg) + return report_items, 0 + + anatomy_filled = anatomy.format(anatomy_data) + if format_dict: + template_result = anatomy_filled["delivery"][template_name] + delivery_path = template_result.rootless.format(**format_dict) + else: + delivery_path = anatomy_filled["delivery"][template_name] + + # Backwards compatibility when extension contained `.` + delivery_path = delivery_path.replace("..", ".") + # Make sure path is valid for all platforms + delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) + + delivery_folder = os.path.dirname(delivery_path) + if not os.path.exists(delivery_folder): + os.makedirs(delivery_folder) + + log.debug("Copying single: {} -> {}".format(src_path, delivery_path)) + _copy_file(src_path, delivery_path) + + return report_items, 1 + + +def deliver_sequence( + src_path, + repre, + anatomy, + template_name, + anatomy_data, + format_dict, + report_items, + log +): + """ For Pype2(mainly - works in 3 too) where representation might not + contain files. + + Uses listing physical files (not 'files' on repre as a)might not be + present, b)might not be reliable for representation and copying them. + + TODO Should be refactored when files are sufficient to drive all + representations. 
+ + Args: + src_path(str): path of source representation file + repre (dict): full representation + anatomy (Anatomy) + template_name (string): user selected delivery template name + anatomy_data (dict): data from repre to fill anatomy with + format_dict (dict): root dictionary with names and values + report_items (collections.defaultdict): to return error messages + log (logging.Logger): for log printing + + Returns: + (collections.defaultdict, int) + """ + + src_path = os.path.normpath(src_path.replace("\\", "/")) + + def hash_path_exist(myPath): + res = myPath.replace('#', '*') + glob_search_results = glob.glob(res) + if len(glob_search_results) > 0: + return True + return False + + if not hash_path_exist(src_path): + msg = "{} doesn't exist for {}".format( + src_path, repre["_id"]) + report_items["Source file was not found"].append(msg) + return report_items, 0 + + delivery_templates = anatomy.templates.get("delivery") or {} + delivery_template = delivery_templates.get(template_name) + if delivery_template is None: + msg = ( + "Delivery template \"{}\" in anatomy of project \"{}\"" + " was not found" + ).format(template_name, anatomy.project_name) + report_items[""].append(msg) + return report_items, 0 + + # Check if 'frame' key is available in template which is required + # for sequence delivery + if "{frame" not in delivery_template: + msg = ( + "Delivery template \"{}\" in anatomy of project \"{}\"" + "does not contain '{{frame}}' key to fill. Delivery of sequence" + " can't be processed." + ).format(template_name, anatomy.project_name) + report_items[""].append(msg) + return report_items, 0 + + dir_path, file_name = os.path.split(str(src_path)) + + context = repre["context"] + ext = context.get("ext", context.get("representation")) + + if not ext: + msg = "Source extension not found, cannot find collection" + report_items[msg].append(src_path) + log.warning("{} <{}>".format(msg, context)) + return report_items, 0 + + ext = "." 
+ ext + # context.representation could be .psd + ext = ext.replace("..", ".") + + src_collections, remainder = clique.assemble(os.listdir(dir_path)) + src_collection = None + for col in src_collections: + if col.tail != ext: + continue + + src_collection = col + break + + if src_collection is None: + msg = "Source collection of files was not found" + report_items[msg].append(src_path) + log.warning("{} <{}>".format(msg, src_path)) + return report_items, 0 + + frame_indicator = "@####@" + + anatomy_data["frame"] = frame_indicator + anatomy_filled = anatomy.format(anatomy_data) + + if format_dict: + template_result = anatomy_filled["delivery"][template_name] + delivery_path = template_result.rootless.format(**format_dict) + else: + delivery_path = anatomy_filled["delivery"][template_name] + + delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) + delivery_folder = os.path.dirname(delivery_path) + dst_head, dst_tail = delivery_path.split(frame_indicator) + dst_padding = src_collection.padding + dst_collection = clique.Collection( + head=dst_head, + tail=dst_tail, + padding=dst_padding + ) + + if not os.path.exists(delivery_folder): + os.makedirs(delivery_folder) + + src_head = src_collection.head + src_tail = src_collection.tail + uploaded = 0 + for index in src_collection.indexes: + src_padding = src_collection.format("{padding}") % index + src_file_name = "{}{}{}".format(src_head, src_padding, src_tail) + src = os.path.normpath( + os.path.join(dir_path, src_file_name) + ) + + dst_padding = dst_collection.format("{padding}") % index + dst = "{}{}{}".format(dst_head, dst_padding, dst_tail) + log.debug("Copying single: {} -> {}".format(src, dst)) + _copy_file(src, dst) + uploaded += 1 + + return report_items, uploaded diff --git a/openpype/lib/editorial.py b/openpype/pipeline/editorial.py similarity index 91% rename from openpype/lib/editorial.py rename to openpype/pipeline/editorial.py index bf868953ea..564d78ea6f 100644 --- a/openpype/lib/editorial.py +++ b/openpype/pipeline/editorial.py @@ -1,23 +1,16 @@ import os import re import clique -from .import_utils import discover_host_vendor_module -try: - import opentimelineio as otio - from opentimelineio import opentime as _ot -except ImportError: - if not os.environ.get("AVALON_APP"): - raise - otio = discover_host_vendor_module("opentimelineio") - _ot = discover_host_vendor_module("opentimelineio.opentime") +import opentimelineio as otio +from opentimelineio import opentime as _ot def otio_range_to_frame_range(otio_range): start = _ot.to_frames( otio_range.start_time, otio_range.start_time.rate) end = start + _ot.to_frames( - otio_range.duration, otio_range.duration.rate) - 1 + otio_range.duration, otio_range.duration.rate) return start, end @@ -125,9 +118,9 @@ def range_from_frames(start, duration, fps): ) -def frames_to_secons(frames, framerate): +def frames_to_seconds(frames, framerate): """ - Returning secons. + Returning seconds. 
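+
+    Example (illustrative):
+        >>> frames_to_seconds(48, 24)
+        2.0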
Args: frames (int): frame @@ -135,8 +128,8 @@ def frames_to_secons(frames, framerate): Returns: float: second value - """ + rt = _ot.from_frames(frames, framerate) return _ot.to_seconds(rt) @@ -168,7 +161,7 @@ def make_sequence_collection(path, otio_range, metadata): first, last = otio_range_to_frame_range(otio_range) collection = clique.Collection( head=head, tail=tail, padding=metadata["padding"]) - collection.indexes.update([i for i in range(first, (last + 1))]) + collection.indexes.update([i for i in range(first, last)]) return dir_path, collection @@ -218,6 +211,7 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end): "name": name } tw_node.update(metadata) + tw_node["lookup"] = list(lookup) # get first and last frame offsets offset_in += lookup[0] @@ -269,16 +263,17 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end): "retime": True, "speed": time_scalar, "timewarps": time_warp_nodes, - "handleStart": handle_start, - "handleEnd": handle_end + "handleStart": int(round(handle_start)), + "handleEnd": int(round(handle_end)) } } returning_dict = { "mediaIn": media_in_trimmed, "mediaOut": media_out_trimmed, - "handleStart": handle_start, - "handleEnd": handle_end + "handleStart": int(round(handle_start)), + "handleEnd": int(round(handle_end)), + "speed": time_scalar } # add version data only if retime diff --git a/openpype/pipeline/legacy_io.py b/openpype/pipeline/legacy_io.py index c8e7e79600..bde2b24c2a 100644 --- a/openpype/pipeline/legacy_io.py +++ b/openpype/pipeline/legacy_io.py @@ -18,9 +18,13 @@ _database = database = None log = logging.getLogger(__name__) +def is_installed(): + return module._is_installed + + def install(): """Establish a persistent connection to the database""" - if module._is_installed: + if is_installed(): return session = session_data_from_environment(context_keys=True) @@ -55,7 +59,7 @@ def uninstall(): def requires_install(func): @functools.wraps(func) def decorated(*args, **kwargs): - if not module._is_installed: + if not is_installed(): install() return func(*args, **kwargs) return decorated @@ -144,3 +148,12 @@ def parenthood(*args, **kwargs): @requires_install def bulk_write(*args, **kwargs): return _connection_object.bulk_write(*args, **kwargs) + + +@requires_install +def active_project(*args, **kwargs): + return _connection_object.active_project(*args, **kwargs) + + +def current_project(*args, **kwargs): + return Session.get("AVALON_PROJECT") diff --git a/openpype/pipeline/load/__init__.py b/openpype/pipeline/load/__init__.py index 6e7612d4c1..e96f64f2a4 100644 --- a/openpype/pipeline/load/__init__.py +++ b/openpype/pipeline/load/__init__.py @@ -1,8 +1,11 @@ from .utils import ( HeroVersionType, + IncompatibleLoaderError, + InvalidRepresentationContext, get_repres_contexts, + get_contexts_for_repre_docs, get_subset_contexts, get_representation_context, @@ -16,14 +19,20 @@ from .utils import ( switch_container, get_loader_identifier, + get_loaders_by_name, get_representation_path_from_context, get_representation_path, + get_representation_path_with_anatomy, is_compatible_loader, loaders_from_repre_context, loaders_from_representation, + + any_outdated_containers, + get_outdated_containers, + filter_containers, ) from .plugins import ( @@ -41,9 +50,12 @@ from .plugins import ( __all__ = ( # utils.py "HeroVersionType", + "IncompatibleLoaderError", + "InvalidRepresentationContext", "get_repres_contexts", + "get_contexts_for_repre_docs", "get_subset_contexts", "get_representation_context", @@ -57,15 +69,21 @@ __all__ 
= ( "switch_container", "get_loader_identifier", + "get_loaders_by_name", "get_representation_path_from_context", "get_representation_path", + "get_representation_path_with_anatomy", "is_compatible_loader", "loaders_from_repre_context", "loaders_from_representation", + "any_outdated_containers", + "get_outdated_containers", + "filter_containers", + # plugins.py "LoaderPlugin", "SubsetLoaderPlugin", diff --git a/openpype/pipeline/load/plugins.py b/openpype/pipeline/load/plugins.py index a30a2188a4..8cba8d8217 100644 --- a/openpype/pipeline/load/plugins.py +++ b/openpype/pipeline/load/plugins.py @@ -1,6 +1,8 @@ +import os import logging -from openpype.lib import set_plugin_attributes_from_settings +from openpype.settings import get_system_settings, get_project_settings +from openpype.pipeline import legacy_io from openpype.pipeline.plugin_discover import ( discover, register_plugin, @@ -37,6 +39,46 @@ class LoaderPlugin(list): def __init__(self, context): self.fname = self.filepath_from_context(context) + @classmethod + def apply_settings(cls, project_settings, system_settings): + host_name = os.environ.get("AVALON_APP") + plugin_type = "load" + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + setattr(cls, "active", False) + print(" - is disabled by preset") + else: + setattr(cls, option, value) + print(" - setting `{}`: `{}`".format(option, value)) + @classmethod def get_representations(cls): return cls.representations @@ -110,9 +152,25 @@ class SubsetLoaderPlugin(LoaderPlugin): pass -def discover_loader_plugins(): +def discover_loader_plugins(project_name=None): + from openpype.lib import Logger + + log = Logger.get_logger("LoaderDiscover") plugins = discover(LoaderPlugin) - set_plugin_attributes_from_settings(plugins, LoaderPlugin) + if not project_name: + project_name = legacy_io.active_project() + system_settings = get_system_settings() + project_settings = get_project_settings(project_name) + for plugin in plugins: + try: + plugin.apply_settings(project_settings, system_settings) + except Exception: + log.warning( + "Failed to apply settings to loader {}".format( + plugin.__name__ + ), + exc_info=True + ) return plugins diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py index 99e5d11f82..784d4628f3 100644 --- a/openpype/pipeline/load/utils.py +++ b/openpype/pipeline/load/utils.py @@ -4,19 +4,42 @@ import copy import getpass import logging import inspect +import collections import numbers -import six -from bson.objectid import ObjectId - -from openpype.lib import Anatomy +from openpype.host import ILoadHost +from openpype.client import ( + get_project, + get_assets, + get_subsets, + get_versions, + get_version_by_id, + get_last_version_by_subset_id, + get_hero_version_by_subset_id, + get_version_by_name, + 
get_last_versions, + get_representations, + get_representation_by_id, + get_representation_by_name, + get_representation_parents +) +from openpype.lib import ( + StringTemplate, + TemplateUnsolved, +) from openpype.pipeline import ( schema, legacy_io, + Anatomy, ) log = logging.getLogger(__name__) +ContainersFilterResult = collections.namedtuple( + "ContainersFilterResult", + ["latest", "outdated", "not_found", "invalid"] +) + class HeroVersionType(object): def __init__(self, version): @@ -42,6 +65,11 @@ class IncompatibleLoaderError(ValueError): pass +class InvalidRepresentationContext(ValueError): + """Representation path can't be received using representation document.""" + pass + + def get_repres_contexts(representation_ids, dbcon=None): """Return parenthood context for representation. @@ -52,40 +80,36 @@ def get_repres_contexts(representation_ids, dbcon=None): Returns: dict: The full representation context by representation id. - keys are repre_id, value is dictionary with full: - asset_doc - version_doc - subset_doc - repre_doc - + keys are repre_id, value is dictionary with full documents of + asset, subset, version and representation. """ + if not dbcon: dbcon = legacy_io - contexts = {} if not representation_ids: + return {} + + project_name = dbcon.active_project() + repre_docs = get_representations(project_name, representation_ids) + + return get_contexts_for_repre_docs(project_name, repre_docs) + + +def get_contexts_for_repre_docs(project_name, repre_docs): + contexts = {} + if not repre_docs: return contexts - _representation_ids = [] - for repre_id in representation_ids: - if isinstance(repre_id, six.string_types): - repre_id = ObjectId(repre_id) - _representation_ids.append(repre_id) - - repre_docs = dbcon.find({ - "type": "representation", - "_id": {"$in": _representation_ids} - }) repre_docs_by_id = {} version_ids = set() for repre_doc in repre_docs: version_ids.add(repre_doc["parent"]) repre_docs_by_id[repre_doc["_id"]] = repre_doc - version_docs = dbcon.find({ - "type": {"$in": ["version", "hero_version"]}, - "_id": {"$in": list(version_ids)} - }) + version_docs = get_versions( + project_name, version_ids, hero=True + ) version_docs_by_id = {} hero_version_docs = [] @@ -99,10 +123,7 @@ def get_repres_contexts(representation_ids, dbcon=None): subset_ids.add(version_doc["parent"]) if versions_for_hero: - _version_docs = dbcon.find({ - "type": "version", - "_id": {"$in": list(versions_for_hero)} - }) + _version_docs = get_versions(project_name, versions_for_hero) _version_data_by_id = { version_doc["_id"]: version_doc["data"] for version_doc in _version_docs @@ -114,26 +135,20 @@ def get_repres_contexts(representation_ids, dbcon=None): version_data = copy.deepcopy(_version_data_by_id[version_id]) version_docs_by_id[hero_version_id]["data"] = version_data - subset_docs = dbcon.find({ - "type": "subset", - "_id": {"$in": list(subset_ids)} - }) + subset_docs = get_subsets(project_name, subset_ids) subset_docs_by_id = {} asset_ids = set() for subset_doc in subset_docs: subset_docs_by_id[subset_doc["_id"]] = subset_doc asset_ids.add(subset_doc["parent"]) - asset_docs = dbcon.find({ - "type": "asset", - "_id": {"$in": list(asset_ids)} - }) + asset_docs = get_assets(project_name, asset_ids) asset_docs_by_id = { asset_doc["_id"]: asset_doc for asset_doc in asset_docs } - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) for repre_id, repre_doc in repre_docs_by_id.items(): version_doc = version_docs_by_id[repre_doc["parent"]] @@ -173,32 
+188,21 @@ def get_subset_contexts(subset_ids, dbcon=None): if not subset_ids: return contexts - _subset_ids = set() - for subset_id in subset_ids: - if isinstance(subset_id, six.string_types): - subset_id = ObjectId(subset_id) - _subset_ids.add(subset_id) - - subset_docs = dbcon.find({ - "type": "subset", - "_id": {"$in": list(_subset_ids)} - }) + project_name = dbcon.active_project() + subset_docs = get_subsets(project_name, subset_ids) subset_docs_by_id = {} asset_ids = set() for subset_doc in subset_docs: subset_docs_by_id[subset_doc["_id"]] = subset_doc asset_ids.add(subset_doc["parent"]) - asset_docs = dbcon.find({ - "type": "asset", - "_id": {"$in": list(asset_ids)} - }) + asset_docs = get_assets(project_name, asset_ids) asset_docs_by_id = { asset_doc["_id"]: asset_doc for asset_doc in asset_docs } - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) for subset_id, subset_doc in subset_docs_by_id.items(): asset_doc = asset_docs_by_id[subset_doc["parent"]] @@ -224,20 +228,30 @@ def get_representation_context(representation): Returns: dict: The full representation context. - """ assert representation is not None, "This is a bug" - if isinstance(representation, (six.string_types, ObjectId)): - representation = legacy_io.find_one( - {"_id": ObjectId(str(representation))}) + project_name = legacy_io.active_project() + if not isinstance(representation, dict): + representation = get_representation_by_id( + project_name, representation + ) - version, subset, asset, project = legacy_io.parenthood(representation) + if not representation: + raise AssertionError("Representation was not found in database") - assert all([representation, version, subset, asset, project]), ( - "This is a bug" + version, subset, asset, project = get_representation_parents( + project_name, representation ) + if not version: + raise AssertionError("Version was not found in database") + if not subset: + raise AssertionError("Subset was not found in database") + if not asset: + raise AssertionError("Asset was not found in database") + if not project: + raise AssertionError("Project was not found in database") context = { "project": { @@ -378,6 +392,20 @@ def get_loader_identifier(loader): return loader.__name__ +def get_loaders_by_name(): + from .plugins import discover_loader_plugins + + loaders_by_name = {} + for loader in discover_loader_plugins(): + loader_name = loader.__name__ + if loader_name in loaders_by_name: + raise KeyError( + "Duplicated loader name {} !".format(loader_name) + ) + loaders_by_name[loader_name] = loader + return loaders_by_name + + def _get_container_loader(container): """Return the Loader corresponding to the container""" from .plugins import discover_loader_plugins @@ -405,42 +433,36 @@ def update_container(container, version=-1): """Update a container""" # Compute the different version from 'representation' - current_representation = legacy_io.find_one({ - "_id": ObjectId(container["representation"]) - }) + project_name = legacy_io.active_project() + current_representation = get_representation_by_id( + project_name, container["representation"] + ) assert current_representation is not None, "This is a bug" - current_version, subset, asset, project = legacy_io.parenthood( - current_representation) - + current_version = get_version_by_id( + project_name, current_representation["parent"], fields=["parent"] + ) if version == -1: - new_version = legacy_io.find_one({ - "type": "version", - "parent": subset["_id"] - }, sort=[("name", -1)]) + new_version = 
get_last_version_by_subset_id( + project_name, current_version["parent"], fields=["_id"] + ) + + elif isinstance(version, HeroVersionType): + new_version = get_hero_version_by_subset_id( + project_name, current_version["parent"], fields=["_id"] + ) + else: - if isinstance(version, HeroVersionType): - version_query = { - "parent": subset["_id"], - "type": "hero_version" - } - else: - version_query = { - "parent": subset["_id"], - "type": "version", - "name": version - } - new_version = legacy_io.find_one(version_query) + new_version = get_version_by_name( + project_name, version, current_version["parent"], fields=["_id"] + ) assert new_version is not None, "This is a bug" - new_representation = legacy_io.find_one({ - "type": "representation", - "parent": new_version["_id"], - "name": current_representation["name"] - }) - + new_representation = get_representation_by_name( + project_name, current_representation["name"], new_version["_id"] + ) assert new_representation is not None, "Representation wasn't found" path = get_representation_path(new_representation) @@ -482,10 +504,10 @@ def switch_container(container, representation, loader_plugin=None): )) # Get the new representation to switch to - new_representation = legacy_io.find_one({ - "type": "representation", - "_id": representation["_id"], - }) + project_name = legacy_io.active_project() + new_representation = get_representation_by_id( + project_name, representation["_id"] + ) new_context = get_representation_context(new_representation) if not is_compatible_loader(loader_plugin, new_context): @@ -509,6 +531,52 @@ def get_representation_path_from_context(context): return get_representation_path(representation, root) +def get_representation_path_with_anatomy(repre_doc, anatomy): + """Receive representation path using representation document and anatomy. + + Anatomy is used to replace 'root' key in representation file. Ideally + should be used instead of 'get_representation_path' which is based on + "current context". + + Future notes: + We want also be able store resources into representation and I can + imagine the result should also contain paths to possible resources. + + Args: + repre_doc (Dict[str, Any]): Representation document. + anatomy (Anatomy): Project anatomy object. + + Returns: + Union[None, TemplateResult]: None if path can't be received + + Raises: + InvalidRepresentationContext: When representation data are probably + invalid or not available. + """ + + try: + template = repre_doc["data"]["template"] + + except KeyError: + raise InvalidRepresentationContext(( + "Representation document does not" + " contain template in data ('data.template')" + )) + + try: + context = repre_doc["context"] + context["root"] = anatomy.roots + path = StringTemplate.format_strict_template(template, context) + + except TemplateUnsolved as exc: + raise InvalidRepresentationContext(( + "Couldn't resolve representation template with available data." 
+ " Reason: {}".format(str(exc)) + )) + + return path.normalized() + + def get_representation_path(representation, root=None, dbcon=None): """Get filename from representation document @@ -527,8 +595,6 @@ def get_representation_path(representation, root=None, dbcon=None): """ - from openpype.lib import StringTemplate, TemplateUnsolved - if dbcon is None: dbcon = legacy_io @@ -708,3 +774,165 @@ def loaders_from_representation(loaders, representation): context = get_representation_context(representation) return loaders_from_repre_context(loaders, context) + + +def any_outdated_containers(host=None, project_name=None): + """Check if there are any outdated containers in scene.""" + + if get_outdated_containers(host, project_name): + return True + return False + + +def get_outdated_containers(host=None, project_name=None): + """Collect outdated containers from host scene. + + Currently registered host and project in global session are used if + arguments are not passed. + + Args: + host (ModuleType): Host implementation with 'ls' function available. + project_name (str): Name of project in which context we are. + """ + + if host is None: + from openpype.pipeline import registered_host + + host = registered_host() + + if project_name is None: + project_name = legacy_io.active_project() + + if isinstance(host, ILoadHost): + containers = host.get_containers() + else: + containers = host.ls() + return filter_containers(containers, project_name).outdated + + +def filter_containers(containers, project_name): + """Filter containers and split them into 4 categories. + + Categories are 'latest', 'outdated', 'invalid' and 'not_found'. + The 'lastest' containers are from last version, 'outdated' are not, + 'invalid' are invalid containers (invalid content) and 'not_found' has + some missing entity in database. + + Args: + containers (Iterable[dict]): List of containers referenced into scene. + project_name (str): Name of project in which context shoud look for + versions. + + Returns: + ContainersFilterResult: Named tuple with 'latest', 'outdated', + 'invalid' and 'not_found' containers. 
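+
+    Example (illustrative; "host" stands for the registered host integration
+    and the project name is a placeholder):
+        >>> result = filter_containers(host.ls(), "my_project")
+        >>> for container in result.outdated:
+        ...     print(container["objectName"])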
+ """ + + # Make sure containers is list that won't change + containers = list(containers) + + outdated_containers = [] + uptodate_containers = [] + not_found_containers = [] + invalid_containers = [] + output = ContainersFilterResult( + uptodate_containers, + outdated_containers, + not_found_containers, + invalid_containers + ) + # Query representation docs to get it's version ids + repre_ids = { + container["representation"] + for container in containers + if container["representation"] + } + if not repre_ids: + if containers: + invalid_containers.extend(containers) + return output + + repre_docs = get_representations( + project_name, + representation_ids=repre_ids, + fields=["_id", "parent"] + ) + # Store representations by stringified representation id + repre_docs_by_str_id = {} + repre_docs_by_version_id = collections.defaultdict(list) + for repre_doc in repre_docs: + repre_id = str(repre_doc["_id"]) + version_id = repre_doc["parent"] + repre_docs_by_str_id[repre_id] = repre_doc + repre_docs_by_version_id[version_id].append(repre_doc) + + # Query version docs to get it's subset ids + # - also query hero version to be able identify if representation + # belongs to existing version + version_docs = get_versions( + project_name, + version_ids=repre_docs_by_version_id.keys(), + hero=True, + fields=["_id", "parent", "type"] + ) + verisons_by_id = {} + versions_by_subset_id = collections.defaultdict(list) + hero_version_ids = set() + for version_doc in version_docs: + version_id = version_doc["_id"] + # Store versions by their ids + verisons_by_id[version_id] = version_doc + # There's no need to query subsets for hero versions + # - they are considered as latest? + if version_doc["type"] == "hero_version": + hero_version_ids.add(version_id) + continue + subset_id = version_doc["parent"] + versions_by_subset_id[subset_id].append(version_doc) + + last_versions = get_last_versions( + project_name, + subset_ids=versions_by_subset_id.keys(), + fields=["_id"] + ) + # Figure out which versions are outdated + outdated_version_ids = set() + for subset_id, last_version_doc in last_versions.items(): + for version_doc in versions_by_subset_id[subset_id]: + version_id = version_doc["_id"] + if version_id != last_version_doc["_id"]: + outdated_version_ids.add(version_id) + + # Based on all collected data figure out which containers are outdated + # - log out if there are missing representation or version documents + for container in containers: + container_name = container["objectName"] + repre_id = container["representation"] + if not repre_id: + invalid_containers.append(container) + continue + + repre_doc = repre_docs_by_str_id.get(repre_id) + if not repre_doc: + log.debug(( + "Container '{}' has an invalid representation." + " It is missing in the database." + ).format(container_name)) + not_found_containers.append(container) + continue + + version_id = repre_doc["parent"] + if version_id in outdated_version_ids: + outdated_containers.append(container) + + elif version_id not in verisons_by_id: + log.debug(( + "Representation on container '{}' has an invalid version." + " It is missing in the database." 
+ ).format(container_name)) + not_found_containers.append(container) + + else: + uptodate_containers.append(container) + + return output diff --git a/openpype/pipeline/mongodb.py b/openpype/pipeline/mongodb.py index 565e26b966..be2b67a5e7 100644 --- a/openpype/pipeline/mongodb.py +++ b/openpype/pipeline/mongodb.py @@ -5,6 +5,8 @@ import logging import pymongo from uuid import uuid4 +from openpype.client import OpenPypeMongoConnection + from . import schema @@ -156,8 +158,6 @@ class AvalonMongoDB: @property def mongo_client(self): - from openpype.lib import OpenPypeMongoConnection - return OpenPypeMongoConnection.get_mongo_client() @property @@ -199,6 +199,10 @@ class AvalonMongoDB: """Return the name of the active project""" return self.Session["AVALON_PROJECT"] + def current_project(self): + """Currently set project in Session without triggering installation.""" + return self.Session.get("AVALON_PROJECT") + @requires_install @auto_reconnect def projects(self, projection=None, only_active=True): diff --git a/openpype/pipeline/plugin_discover.py b/openpype/pipeline/plugin_discover.py index 004e530b1c..7edd9ac290 100644 --- a/openpype/pipeline/plugin_discover.py +++ b/openpype/pipeline/plugin_discover.py @@ -2,7 +2,7 @@ import os import inspect import traceback -from openpype.api import Logger +from openpype.lib import Logger from openpype.lib.python_module_tools import ( modules_from_path, classes_from_module, diff --git a/openpype/pipeline/project_folders.py b/openpype/pipeline/project_folders.py new file mode 100644 index 0000000000..1bcba5c320 --- /dev/null +++ b/openpype/pipeline/project_folders.py @@ -0,0 +1,107 @@ +import os +import re +import json + +import six + +from openpype.settings import get_project_settings +from openpype.lib import Logger + +from .anatomy import Anatomy +from .template_data import get_project_template_data + + +def concatenate_splitted_paths(split_paths, anatomy): + log = Logger.get_logger("concatenate_splitted_paths") + pattern_array = re.compile(r"\[.*\]") + output = [] + for path_items in split_paths: + clean_items = [] + if isinstance(path_items, str): + path_items = [path_items] + + for path_item in path_items: + if not re.match(r"{.+}", path_item): + path_item = re.sub(pattern_array, "", path_item) + clean_items.append(path_item) + + # backward compatibility + if "__project_root__" in path_items: + for root, root_path in anatomy.roots.items(): + if not os.path.exists(str(root_path)): + log.debug("Root {} path path {} not exist on \ + computer!".format(root, root_path)) + continue + clean_items = ["{{root[{}]}}".format(root), + r"{project[name]}"] + clean_items[1:] + output.append(os.path.normpath(os.path.sep.join(clean_items))) + continue + + output.append(os.path.normpath(os.path.sep.join(clean_items))) + + return output + + +def fill_paths(path_list, anatomy): + format_data = get_project_template_data(project_name=anatomy.project_name) + format_data["root"] = anatomy.roots + filled_paths = [] + + for path in path_list: + new_path = path.format(**format_data) + filled_paths.append(new_path) + + return filled_paths + + +def create_project_folders(project_name, basic_paths=None): + log = Logger.get_logger("create_project_folders") + anatomy = Anatomy(project_name) + if basic_paths is None: + basic_paths = get_project_basic_paths(project_name) + + if not basic_paths: + return + + concat_paths = concatenate_splitted_paths(basic_paths, anatomy) + filled_paths = fill_paths(concat_paths, anatomy) + + # Create folders + for path in filled_paths: + if 
os.path.exists(path): + log.debug("Folder already exists: {}".format(path)) + else: + log.debug("Creating folder: {}".format(path)) + os.makedirs(path) + + +def _list_path_items(folder_structure): + output = [] + for key, value in folder_structure.items(): + if not value: + output.append(key) + continue + + paths = _list_path_items(value) + for path in paths: + if not isinstance(path, (list, tuple)): + path = [path] + + item = [key] + item.extend(path) + output.append(item) + + return output + + +def get_project_basic_paths(project_name): + project_settings = get_project_settings(project_name) + folder_structure = ( + project_settings["global"]["project_folder_structure"] + ) + if not folder_structure: + return [] + + if isinstance(folder_structure, six.string_types): + folder_structure = json.loads(folder_structure) + return _list_path_items(folder_structure) diff --git a/openpype/pipeline/publish/__init__.py b/openpype/pipeline/publish/__init__.py index af5d7c4a91..04b1a66f3a 100644 --- a/openpype/pipeline/publish/__init__.py +++ b/openpype/pipeline/publish/__init__.py @@ -1,28 +1,85 @@ +from .constants import ( + ValidatePipelineOrder, + ValidateContentsOrder, + ValidateSceneOrder, + ValidateMeshOrder, +) + from .publish_plugins import ( + AbstractMetaInstancePlugin, + AbstractMetaContextPlugin, + PublishValidationError, PublishXmlValidationError, KnownPublishError, OpenPypePyblishPluginMixin, OptionalPyblishPluginMixin, + + RepairAction, + RepairContextAction, + + Extractor, ) from .lib import ( + get_publish_template_name, + DiscoverResult, publish_plugins_discover, load_help_content_from_plugin, load_help_content_from_filepath, + + get_errored_instances_from_context, + get_errored_plugins_from_context, + + filter_instances_for_context_plugin, + context_plugin_should_run, + get_instance_staging_dir, +) + +from .abstract_expected_files import ExpectedFiles +from .abstract_collect_render import ( + RenderInstance, + AbstractCollectRender, ) __all__ = ( + "ValidatePipelineOrder", + "ValidateContentsOrder", + "ValidateSceneOrder", + "ValidateMeshOrder", + + "AbstractMetaInstancePlugin", + "AbstractMetaContextPlugin", + "PublishValidationError", "PublishXmlValidationError", "KnownPublishError", "OpenPypePyblishPluginMixin", "OptionalPyblishPluginMixin", + "RepairAction", + "RepairContextAction", + + "Extractor", + + "get_publish_template_name", + "DiscoverResult", "publish_plugins_discover", "load_help_content_from_plugin", "load_help_content_from_filepath", + + "get_errored_instances_from_context", + "get_errored_plugins_from_context", + + "filter_instances_for_context_plugin", + "context_plugin_should_run", + "get_instance_staging_dir", + + "ExpectedFiles", + + "RenderInstance", + "AbstractCollectRender", ) diff --git a/openpype/lib/abstract_collect_render.py b/openpype/pipeline/publish/abstract_collect_render.py similarity index 98% rename from openpype/lib/abstract_collect_render.py rename to openpype/pipeline/publish/abstract_collect_render.py index 3d81f6d794..ccb2415346 100644 --- a/openpype/lib/abstract_collect_render.py +++ b/openpype/pipeline/publish/abstract_collect_render.py @@ -12,8 +12,7 @@ import six import pyblish.api from openpype.pipeline import legacy_io - -from .abstract_metaplugins import AbstractMetaContextPlugin +from .publish_plugins import AbstractMetaContextPlugin @attr.s @@ -64,6 +63,8 @@ class RenderInstance(object): family = attr.ib(default="renderlayer") families = attr.ib(default=["renderlayer"]) # list of families + # True if should be rendered on farm, eg 
not integrate + farm = attr.ib(default=False) # format settings multipartExr = attr.ib(default=False) # flag for multipart exrs diff --git a/openpype/lib/abstract_expected_files.py b/openpype/pipeline/publish/abstract_expected_files.py similarity index 100% rename from openpype/lib/abstract_expected_files.py rename to openpype/pipeline/publish/abstract_expected_files.py diff --git a/openpype/pipeline/publish/constants.py b/openpype/pipeline/publish/constants.py new file mode 100644 index 0000000000..dcd3445200 --- /dev/null +++ b/openpype/pipeline/publish/constants.py @@ -0,0 +1,7 @@ +import pyblish.api + + +ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05 +ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1 +ValidateSceneOrder = pyblish.api.ValidatorOrder + 0.2 +ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3 diff --git a/openpype/pipeline/publish/contants.py b/openpype/pipeline/publish/contants.py new file mode 100644 index 0000000000..169eca2e5c --- /dev/null +++ b/openpype/pipeline/publish/contants.py @@ -0,0 +1,2 @@ +DEFAULT_PUBLISH_TEMPLATE = "publish" +DEFAULT_HERO_PUBLISH_TEMPLATE = "hero" diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py index 739b2c8806..c76671fa39 100644 --- a/openpype/pipeline/publish/lib.py +++ b/openpype/pipeline/publish/lib.py @@ -2,10 +2,198 @@ import os import sys import types import inspect +import copy +import tempfile import xml.etree.ElementTree import six import pyblish.plugin +import pyblish.api + +from openpype.lib import Logger, filter_profiles +from openpype.settings import ( + get_project_settings, + get_system_settings, +) + +from .contants import ( + DEFAULT_PUBLISH_TEMPLATE, + DEFAULT_HERO_PUBLISH_TEMPLATE, +) + + +def get_template_name_profiles( + project_name, project_settings=None, logger=None +): + """Receive profiles for publish template keys. + + At least one of arguments must be passed. + + Args: + project_name (str): Name of project where to look for templates. + project_settings(Dic[str, Any]): Prepared project settings. + + Returns: + List[Dict[str, Any]]: Publish template profiles. + """ + + if not project_name and not project_settings: + raise ValueError(( + "Both project name and project settings are missing." + " At least one must be entered." + )) + + if not project_settings: + project_settings = get_project_settings(project_name) + + profiles = ( + project_settings + ["global"] + ["tools"] + ["publish"] + ["template_name_profiles"] + ) + if profiles: + return copy.deepcopy(profiles) + + # Use legacy approach for cases new settings are not filled yet for the + # project + legacy_profiles = ( + project_settings + ["global"] + ["publish"] + ["IntegrateAssetNew"] + ["template_name_profiles"] + ) + if legacy_profiles: + if not logger: + logger = Logger.get_logger("get_template_name_profiles") + + logger.warning(( + "Project \"{}\" is using legacy access to publish template." + " It is recommended to move settings to new location" + " 'project_settings/global/tools/publish/template_name_profiles'." + ).format(project_name)) + + # Replace "tasks" key with "task_names" + profiles = [] + for profile in copy.deepcopy(legacy_profiles): + profile["task_names"] = profile.pop("tasks", []) + profiles.append(profile) + return profiles + + +def get_hero_template_name_profiles( + project_name, project_settings=None, logger=None +): + """Receive profiles for hero publish template keys. + + At least one of arguments must be passed. 
+ + Args: + project_name (str): Name of project where to look for templates. + project_settings(Dic[str, Any]): Prepared project settings. + + Returns: + List[Dict[str, Any]]: Publish template profiles. + """ + + if not project_name and not project_settings: + raise ValueError(( + "Both project name and project settings are missing." + " At least one must be entered." + )) + + if not project_settings: + project_settings = get_project_settings(project_name) + + profiles = ( + project_settings + ["global"] + ["tools"] + ["publish"] + ["hero_template_name_profiles"] + ) + if profiles: + return copy.deepcopy(profiles) + + # Use legacy approach for cases new settings are not filled yet for the + # project + legacy_profiles = copy.deepcopy( + project_settings + ["global"] + ["publish"] + ["IntegrateHeroVersion"] + ["template_name_profiles"] + ) + if legacy_profiles: + if not logger: + logger = Logger.get_logger("get_hero_template_name_profiles") + + logger.warning(( + "Project \"{}\" is using legacy access to hero publish template." + " It is recommended to move settings to new location" + " 'project_settings/global/tools/publish/" + "hero_template_name_profiles'." + ).format(project_name)) + return legacy_profiles + + +def get_publish_template_name( + project_name, + host_name, + family, + task_name, + task_type, + project_settings=None, + hero=False, + logger=None +): + """Get template name which should be used for passed context. + + Publish templates are filtered by host name, family, task name and + task type. + + Default template which is used at if profiles are not available or profile + has empty value is defined by 'DEFAULT_PUBLISH_TEMPLATE' constant. + + Args: + project_name (str): Name of project where to look for settings. + host_name (str): Name of host integration. + family (str): Family for which should be found template. + task_name (str): Task name on which is intance working. + task_type (str): Task type on which is intance working. + project_setting (Dict[str, Any]): Prepared project settings. + logger (logging.Logger): Custom logger used for 'filter_profiles' + function. + + Returns: + str: Template name which should be used for integration. + """ + + template = None + filter_criteria = { + "hosts": host_name, + "families": family, + "task_names": task_name, + "task_types": task_type, + } + if hero: + default_template = DEFAULT_HERO_PUBLISH_TEMPLATE + profiles = get_hero_template_name_profiles( + project_name, project_settings, logger + ) + + else: + profiles = get_template_name_profiles( + project_name, project_settings, logger + ) + default_template = DEFAULT_PUBLISH_TEMPLATE + + profile = filter_profiles(profiles, filter_criteria, logger=logger) + if profile: + template = profile["template_name"] + return template or default_template class DiscoverResult: @@ -180,3 +368,267 @@ def publish_plugins_discover(paths=None): result.plugins = plugins return result + + +def filter_pyblish_plugins(plugins): + """Pyblish plugin filter which applies OpenPype settings. + + Apply OpenPype settings on discovered plugins. On plugin with implemented + class method 'def apply_settings(cls, project_settings, system_settings)' + is called the method. Default behavior looks for plugin name and current + host name to look for + + Args: + plugins (List[pyblish.plugin.Plugin]): Discovered plugins on which + are applied settings. + """ + + log = Logger.get_logger("filter_pyblish_plugins") + + # TODO: Don't use host from 'pyblish.api' but from defined host by us. 
+ # - kept becau on farm is probably used host 'shell' which propably + # affect how settings are applied there + host = pyblish.api.current_host() + project_name = os.environ.get("AVALON_PROJECT") + + project_setting = get_project_settings(project_name) + system_settings = get_system_settings() + + # iterate over plugins + for plugin in plugins[:]: + if hasattr(plugin, "apply_settings"): + try: + # Use classmethod 'apply_settings' + # - can be used to target settings from custom settings place + # - skip default behavior when successful + plugin.apply_settings(project_setting, system_settings) + continue + + except Exception: + log.warning( + ( + "Failed to apply settings on plugin {}" + ).format(plugin.__name__), + exc_info=True + ) + + try: + config_data = ( + project_setting + [host] + ["publish"] + [plugin.__name__] + ) + except KeyError: + # host determined from path + file = os.path.normpath(inspect.getsourcefile(plugin)) + file = os.path.normpath(file) + + split_path = file.split(os.path.sep) + if len(split_path) < 4: + log.warning( + 'plugin path too short to extract host {}'.format(file) + ) + continue + + host_from_file = split_path[-4] + plugin_kind = split_path[-2] + + # TODO: change after all plugins are moved one level up + if host_from_file == "openpype": + host_from_file = "global" + + try: + config_data = ( + project_setting + [host_from_file] + [plugin_kind] + [plugin.__name__] + ) + except KeyError: + continue + + for option, value in config_data.items(): + if option == "enabled" and value is False: + log.info('removing plugin {}'.format(plugin.__name__)) + plugins.remove(plugin) + else: + log.info('setting {}:{} on plugin {}'.format( + option, value, plugin.__name__)) + + setattr(plugin, option, value) + + +def find_close_plugin(close_plugin_name, log): + if close_plugin_name: + plugins = pyblish.api.discover() + for plugin in plugins: + if plugin.__name__ == close_plugin_name: + return plugin + + log.debug("Close plugin not found, app might not close.") + + +def remote_publish(log, close_plugin_name=None, raise_error=False): + """Loops through all plugins, logs to console. Used for tests. + + Args: + log (openpype.lib.Logger) + close_plugin_name (str): name of plugin with responsibility to + close host app + """ + # Error exit as soon as any error occurs. + error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" + + close_plugin = find_close_plugin(close_plugin_name, log) + + for result in pyblish.util.publish_iter(): + for record in result["records"]: + log.info("{}: {}".format( + result["plugin"].label, record.msg)) + + if result["error"]: + error_message = error_format.format(**result) + log.error(error_message) + if close_plugin: # close host app explicitly after error + context = pyblish.api.Context() + close_plugin().process(context) + if raise_error: + # Fatal Error is because of Deadline + error_message = "Fatal Error: " + error_format.format(**result) + raise RuntimeError(error_message) + + +def get_errored_instances_from_context(context): + """Collect failed instances from pyblish context. + + Args: + context (pyblish.api.Context): Publish context where we're looking + for failed instances. + + Returns: + List[pyblish.lib.Instance]: Instances which failed during processing. 
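+
+    Example (illustrative; "context" and "plugin" are provided by the
+    running publish, as in the repair actions below):
+        >>> errored_instances = get_errored_instances_from_context(context)
+        >>> instances = pyblish.api.instances_by_plugin(
+        ...     errored_instances, plugin
+        ... )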
+ """ + + instances = list() + for result in context.data["results"]: + if result["instance"] is None: + # When instance is None we are on the "context" result + continue + + if result["error"]: + instances.append(result["instance"]) + + return instances + + +def get_errored_plugins_from_context(context): + """Collect failed plugins from pyblish context. + + Args: + context (pyblish.api.Context): Publish context where we're looking + for failed plugins. + + Returns: + List[pyblish.api.Plugin]: Plugins which failed during processing. + """ + + plugins = list() + results = context.data.get("results", []) + for result in results: + if result["success"] is True: + continue + plugins.append(result["plugin"]) + + return plugins + + +def filter_instances_for_context_plugin(plugin, context): + """Filter instances on context by context plugin filters. + + This is for cases when context plugin need similar filtering like instance + plugin have, but for some reason must run on context or should find out + if there is at least one instance with a family. + + Args: + plugin (pyblish.api.Plugin): Plugin with filters. + context (pyblish.api.Context): Pyblish context with insances. + + Returns: + Iterator[pyblish.lib.Instance]: Iteration of valid instances. + """ + + instances = [] + plugin_families = set() + all_families = False + if plugin.families: + instances = context + plugin_families = set(plugin.families) + all_families = "*" in plugin_families + + for instance in instances: + # Ignore inactive instances + if ( + not instance.data.get("publish", True) + or not instance.data.get("active", True) + ): + continue + + family = instance.data.get("family") + families = instance.data.get("families") or [] + if ( + all_families + or (family and family in plugin_families) + or any(f in plugin_families for f in families) + ): + yield instance + + +def context_plugin_should_run(plugin, context): + """Return whether the ContextPlugin should run on the given context. + + This is a helper function to work around a bug pyblish-base#250 + Whenever a ContextPlugin sets specific families it will still trigger even + when no instances are present that have those families. + + This actually checks it correctly and returns whether it should run. + + Args: + plugin (pyblish.api.Plugin): Plugin with filters. + context (pyblish.api.Context): Pyblish context with insances. + + Returns: + bool: Context plugin should run based on valid instances. + """ + + for _ in filter_instances_for_context_plugin(plugin, context): + return True + return False + + +def get_instance_staging_dir(instance): + """Unified way how staging dir is stored and created on instances. + + First check if 'stagingDir' is already set in instance data. If there is + not create new in tempdir. + + Note: + Staging dir does not have to be necessarily in tempdir so be carefull + about it's usage. + + Args: + instance (pyblish.lib.Instance): Instance for which we want to get + staging dir. + + Returns: + str: Path to staging dir of instance. 
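+
+    Example (illustrative; "instance" is a pyblish instance):
+        >>> staging_dir = get_instance_staging_dir(instance)
+        >>> staging_dir == instance.data["stagingDir"]
+        True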
+ """ + + staging_dir = instance.data.get("stagingDir") + if not staging_dir: + staging_dir = os.path.normpath( + tempfile.mkdtemp(prefix="pyblish_tmp_") + ) + instance.data["stagingDir"] = staging_dir + + return staging_dir diff --git a/openpype/pipeline/publish/publish_plugins.py b/openpype/pipeline/publish/publish_plugins.py index 2402a005c2..6e2be1ce2c 100644 --- a/openpype/pipeline/publish/publish_plugins.py +++ b/openpype/pipeline/publish/publish_plugins.py @@ -1,5 +1,24 @@ +from abc import ABCMeta + +import pyblish.api +from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin + from openpype.lib import BoolDef -from .lib import load_help_content_from_plugin + +from .lib import ( + load_help_content_from_plugin, + get_errored_instances_from_context, + get_errored_plugins_from_context, + get_instance_staging_dir, +) + + +class AbstractMetaInstancePlugin(ABCMeta, MetaPlugin): + pass + + +class AbstractMetaContextPlugin(ABCMeta, ExplicitMetaPlugin): + pass class PublishValidationError(Exception): @@ -16,6 +35,7 @@ class PublishValidationError(Exception): description(str): Detailed description of an error. It is possible to use Markdown syntax. """ + def __init__(self, message, title=None, description=None, detail=None): self.message = message self.title = title or "< Missing title >" @@ -49,6 +69,7 @@ class KnownPublishError(Exception): Message will be shown in UI for artist. """ + pass @@ -92,6 +113,7 @@ class OpenPypePyblishPluginMixin: Returns: list: Attribute definitions for plugin. """ + return [] @classmethod @@ -116,6 +138,7 @@ class OpenPypePyblishPluginMixin: Args: data(dict): Data from instance or context. """ + return ( data .get("publish_attributes", {}) @@ -170,3 +193,74 @@ class OptionalPyblishPluginMixin(OpenPypePyblishPluginMixin): if active is None: active = getattr(self, "active", True) return active + + +class RepairAction(pyblish.api.Action): + """Repairs the action + + To process the repairing this requires a static `repair(instance)` method + is available on the plugin. + """ + + label = "Repair" + on = "failed" # This action is only available on a failed plug-in + icon = "wrench" # Icon from Awesome Icon + + def process(self, context, plugin): + if not hasattr(plugin, "repair"): + raise RuntimeError("Plug-in does not have repair method.") + + # Get the errored instances + self.log.info("Finding failed instances..") + errored_instances = get_errored_instances_from_context(context) + + # Apply pyblish.logic to get the instances for the plug-in + instances = pyblish.api.instances_by_plugin(errored_instances, plugin) + for instance in instances: + plugin.repair(instance) + + +class RepairContextAction(pyblish.api.Action): + """Repairs the action + + To process the repairing this requires a static `repair(instance)` method + is available on the plugin. + """ + + label = "Repair" + on = "failed" # This action is only available on a failed plug-in + + def process(self, context, plugin): + if not hasattr(plugin, "repair"): + raise RuntimeError("Plug-in does not have repair method.") + + # Get the errored instances + self.log.info("Finding failed instances..") + errored_plugins = get_errored_plugins_from_context(context) + + # Apply pyblish.logic to get the instances for the plug-in + if plugin in errored_plugins: + self.log.info("Attempting fix ...") + plugin.repair(context) + + +class Extractor(pyblish.api.InstancePlugin): + """Extractor base class. 
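+
+    Example (sketch of a minimal subclass; plugin and family names are
+    illustrative):
+        >>> class ExtractExample(Extractor):
+        ...     label = "Extract Example"
+        ...     families = ["model"]
+        ...
+        ...     def process(self, instance):
+        ...         staging_dir = self.staging_dir(instance)
+        ...         # write extracted files into 'staging_dir' here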
+ + The extractor base class implements a "staging_dir" function used to + generate a temporary directory for an instance to extract to. + + This temporary directory is generated through `tempfile.mkdtemp()` + + """ + + order = 2.0 + + def staging_dir(self, instance): + """Provide a temporary directory in which to store extracted files + + Upon calling this method the staging directory is stored inside + the instance.data['stagingDir'] + """ + + return get_instance_staging_dir(instance) diff --git a/openpype/pipeline/template_data.py b/openpype/pipeline/template_data.py new file mode 100644 index 0000000000..627eba5c3d --- /dev/null +++ b/openpype/pipeline/template_data.py @@ -0,0 +1,238 @@ +from openpype.client import get_project, get_asset_by_name +from openpype.settings import get_system_settings +from openpype.lib.local_settings import get_openpype_username + + +def get_general_template_data(system_settings=None): + """General template data based on system settings or machine. + + Output contains formatting keys: + - 'studio[name]' - Studio name filled from system settings + - 'studio[code]' - Studio code filled from system settings + - 'user' - User's name using 'get_openpype_username' + + Args: + system_settings (Dict[str, Any]): System settings. + """ + + if not system_settings: + system_settings = get_system_settings() + studio_name = system_settings["general"]["studio_name"] + studio_code = system_settings["general"]["studio_code"] + return { + "studio": { + "name": studio_name, + "code": studio_code + }, + "user": get_openpype_username() + } + + +def get_project_template_data(project_doc=None, project_name=None): + """Extract data from project document that are used in templates. + + Project document must have 'name' and (at this moment) optional + key 'data.code'. + + One of 'project_name' or 'project_doc' must be passed. With prepared + project document is function much faster because don't have to query. + + Output contains formatting keys: + - 'project[name]' - Project name + - 'project[code]' - Project code + + Args: + project_doc (Dict[str, Any]): Queried project document. + project_name (str): Name of project. + + Returns: + Dict[str, Dict[str, str]]: Template data based on project document. + """ + + if not project_name: + project_name = project_doc["name"] + + if not project_doc: + project_doc = get_project(project_name, fields=["data.code"]) + + project_code = project_doc.get("data", {}).get("code") + return { + "project": { + "name": project_name, + "code": project_code + } + } + + +def get_asset_template_data(asset_doc, project_name): + """Extract data from asset document that are used in templates. + + Output dictionary contains keys: + - 'asset' - asset name + - 'hierarchy' - parent asset names joined with '/' + - 'parent' - direct parent name, project name used if is under project + + Required document fields: + Asset: 'name', 'data.parents' + + Args: + asset_doc (Dict[str, Any]): Queried asset document. + project_name (str): Is used for 'parent' key if asset doc does not have + any. + + Returns: + Dict[str, str]: Data that are based on asset document and can be used + in templates. + """ + + asset_parents = asset_doc["data"]["parents"] + hierarchy = "/".join(asset_parents) + if asset_parents: + parent_name = asset_parents[-1] + else: + parent_name = project_name + + return { + "asset": asset_doc["name"], + "hierarchy": hierarchy, + "parent": parent_name + } + + +def get_task_type(asset_doc, task_name): + """Get task type based on asset document and task name. 
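+
+    Example (illustrative; the returned type depends on the asset's
+    task data):
+        >>> get_task_type(asset_doc, "animation")
+        'Animation'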
+ + Required document fields: + Asset: 'data.tasks' + + Args: + asset_doc (Dict[str, Any]): Queried asset document. + task_name (str): Task name which is under asset. + + Returns: + str: Task type name. + None: Task was not found on asset document. + """ + + asset_tasks_info = asset_doc["data"]["tasks"] + return asset_tasks_info.get(task_name, {}).get("type") + + +def get_task_template_data(project_doc, asset_doc, task_name): + """"Extract task specific data from project and asset documents. + + Required document fields: + Project: 'config.tasks' + Asset: 'data.tasks'. + + Args: + project_doc (Dict[str, Any]): Queried project document. + asset_doc (Dict[str, Any]): Queried asset document. + tas_name (str): Name of task for which data should be returned. + + Returns: + Dict[str, Dict[str, str]]: Template data + """ + + project_task_types = project_doc["config"]["tasks"] + task_type = get_task_type(asset_doc, task_name) + task_code = project_task_types.get(task_type, {}).get("short_name") + + return { + "task": { + "name": task_name, + "type": task_type, + "short": task_code, + } + } + + +def get_template_data( + project_doc, + asset_doc=None, + task_name=None, + host_name=None, + system_settings=None +): + """Prepare data for templates filling from entered documents and info. + + This function does not "auto fill" any values except system settings and + it's on purpose. + + Universal function to receive template data from passed arguments. Only + required argument is project document all other arguments are optional + and their values won't be added to template data if are not passed. + + Required document fields: + Project: 'name', 'data.code', 'config.tasks' + Asset: 'name', 'data.parents', 'data.tasks' + + Args: + project_doc (Dict[str, Any]): Mongo document of project from MongoDB. + asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB. + task_name (Union[str, None]): Task name under passed asset. + host_name (Union[str, None]): Used to fill '{app}' key. + system_settings (Union[Dict, None]): Prepared system settings. + They're queried if not passed (may be slower). + + Returns: + Dict[str, Any]: Data prepared for filling workdir template. + """ + + template_data = get_general_template_data(system_settings) + template_data.update(get_project_template_data(project_doc)) + if asset_doc: + template_data.update(get_asset_template_data( + asset_doc, project_doc["name"] + )) + if task_name: + template_data.update(get_task_template_data( + project_doc, asset_doc, task_name + )) + + if host_name: + template_data["app"] = host_name + + return template_data + + +def get_template_data_with_names( + project_name, + asset_name=None, + task_name=None, + host_name=None, + system_settings=None +): + """Prepare data for templates filling from entered entity names and info. + + Copy of 'get_template_data' but based on entity names instead of documents. + Only difference is that documents are queried. + + Args: + project_name (str): Project name for which template data are + calculated. + asset_name (Union[str, None]): Asset name for which template data are + calculated. + task_name (Union[str, None]): Task name under passed asset. + host_name (Union[str, None]):Used to fill '{app}' key. + because workdir template may contain `{app}` key. + system_settings (Union[Dict, None]): Prepared system settings. + They're queried if not passed. + + Returns: + Dict[str, Any]: Data prepared for filling workdir template. 
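+
+    Example:
+        Illustrative call with placeholder entity names::
+
+            data = get_template_data_with_names(
+                "MyProject", "sh010", "animation", "maya"
+            )
+            data["project"]["name"]   # "MyProject"
+            data["task"]["name"]      # "animation"
+            data["app"]               # "maya"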
+ """ + + project_doc = get_project( + project_name, fields=["name", "data.code", "config.tasks"] + ) + asset_doc = None + if asset_name: + asset_doc = get_asset_by_name( + project_name, + asset_name, + fields=["name", "data.parents", "data.tasks"] + ) + return get_template_data( + project_doc, asset_doc, task_name, host_name, system_settings + ) diff --git a/openpype/pipeline/thumbnail.py b/openpype/pipeline/thumbnail.py index ec97b36954..39f3e17893 100644 --- a/openpype/pipeline/thumbnail.py +++ b/openpype/pipeline/thumbnail.py @@ -2,7 +2,9 @@ import os import copy import logging +from openpype.client import get_project from . import legacy_io +from .anatomy import Anatomy from .plugin_discover import ( discover, register_plugin, @@ -72,26 +74,22 @@ class ThumbnailResolver(object): class TemplateResolver(ThumbnailResolver): - priority = 90 def process(self, thumbnail_entity, thumbnail_type): - - if not os.environ.get("AVALON_THUMBNAIL_ROOT"): - return - template = thumbnail_entity["data"].get("template") if not template: self.log.debug("Thumbnail entity does not have set template") return - project = self.dbcon.find_one( - {"type": "project"}, - { - "name": True, - "data.code": True - } - ) + thumbnail_root_format_key = "{thumbnail_root}" + thumbnail_root = os.environ.get("AVALON_THUMBNAIL_ROOT") or "" + # Check if template require thumbnail root and if is avaiable + if thumbnail_root_format_key in template and not thumbnail_root: + return + + project_name = self.dbcon.active_project() + project = get_project(project_name, fields=["name", "data.code"]) template_data = copy.deepcopy( thumbnail_entity["data"].get("template_data") or {} @@ -99,12 +97,16 @@ class TemplateResolver(ThumbnailResolver): template_data.update({ "_id": str(thumbnail_entity["_id"]), "thumbnail_type": thumbnail_type, - "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"), + "thumbnail_root": thumbnail_root, "project": { "name": project["name"], "code": project["data"].get("code") - } + }, }) + # Add anatomy roots if is in template + if "{root" in template: + anatomy = Anatomy(project_name) + template_data["root"] = anatomy.roots try: filepath = os.path.normpath(template.format(**template_data)) diff --git a/openpype/pipeline/workfile/__init__.py b/openpype/pipeline/workfile/__init__.py new file mode 100644 index 0000000000..94ecc81bd6 --- /dev/null +++ b/openpype/pipeline/workfile/__init__.py @@ -0,0 +1,34 @@ +from .path_resolving import ( + get_workfile_template_key_from_context, + get_workfile_template_key, + get_workdir_with_workdir_data, + get_workdir, + + get_last_workfile_with_version, + get_last_workfile, + + get_custom_workfile_template, + get_custom_workfile_template_by_string_context, + + create_workdir_extra_folders, +) + +from .build_workfile import BuildWorkfile + + +__all__ = ( + "get_workfile_template_key_from_context", + "get_workfile_template_key", + "get_workdir_with_workdir_data", + "get_workdir", + + "get_last_workfile_with_version", + "get_last_workfile", + + "get_custom_workfile_template", + "get_custom_workfile_template_by_string_context", + + "create_workdir_extra_folders", + + "BuildWorkfile", +) diff --git a/openpype/pipeline/workfile/build_workfile.py b/openpype/pipeline/workfile/build_workfile.py new file mode 100644 index 0000000000..87b9df158f --- /dev/null +++ b/openpype/pipeline/workfile/build_workfile.py @@ -0,0 +1,704 @@ +"""Workfile build based on settings. + +Workfile builder will do stuff based on project settings. Advantage is that +it need only access to settings. 
Disadvantage is that it is hard to focus +build per context and being explicit about loaded content. + +For more explicit workfile build is recommended 'AbstractTemplateBuilder' +from '~/openpype/pipeline/workfile/workfile_template_builder'. Which gives +more abilities to define how build happens but require more code to achive it. +""" + +import os +import re +import collections +import json + +from openpype.client import ( + get_asset_by_name, + get_subsets, + get_last_versions, + get_representations, + get_linked_assets, +) +from openpype.settings import get_project_settings +from openpype.lib import ( + filter_profiles, + Logger, +) +from openpype.pipeline import legacy_io +from openpype.pipeline.load import ( + discover_loader_plugins, + IncompatibleLoaderError, + load_container, +) + + +class BuildWorkfile: + """Wrapper for build workfile process. + + Load representations for current context by build presets. Build presets + are host related, since each host has it's loaders. + """ + + _log = None + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + @staticmethod + def map_subsets_by_family(subsets): + subsets_by_family = collections.defaultdict(list) + for subset in subsets: + family = subset["data"].get("family") + if not family: + families = subset["data"].get("families") + if not families: + continue + family = families[0] + + subsets_by_family[family].append(subset) + return subsets_by_family + + def process(self): + """Main method of this wrapper. + + Building of workfile is triggered and is possible to implement + post processing of loaded containers if necessary. + + Returns: + List[Dict[str, Any]]: Loaded containers during build. + """ + + return self.build_workfile() + + def build_workfile(self): + """Prepares and load containers into workfile. + + Loads latest versions of current and linked assets to workfile by logic + stored in Workfile profiles from presets. Profiles are set by host, + filtered by current task name and used by families. + + Each family can specify representation names and loaders for + representations and first available and successful loaded + representation is returned as container. + + At the end you'll get list of loaded containers per each asset. + + loaded_containers [{ + "asset_entity": , + "containers": [, , ...] + }, { + "asset_entity": , + "containers": [, ...] + }, { + ... + }] + + Returns: + List[Dict[str, Any]]: Loaded containers during build. 
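+
+        Example:
+            Minimal usage sketch; assumes a registered host and an active
+            session (project, asset and task set in 'legacy_io.Session')::
+
+                builder = BuildWorkfile()
+                loaded_containers = builder.process()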
+ """ + + loaded_containers = [] + + # Get current asset name and entity + project_name = legacy_io.active_project() + current_asset_name = legacy_io.Session["AVALON_ASSET"] + current_asset_entity = get_asset_by_name( + project_name, current_asset_name + ) + # Skip if asset was not found + if not current_asset_entity: + print("Asset entity with name `{}` was not found".format( + current_asset_name + )) + return loaded_containers + + # Prepare available loaders + loaders_by_name = {} + for loader in discover_loader_plugins(): + loader_name = loader.__name__ + if loader_name in loaders_by_name: + raise KeyError( + "Duplicated loader name {0}!".format(loader_name) + ) + loaders_by_name[loader_name] = loader + + # Skip if there are any loaders + if not loaders_by_name: + self.log.warning("There are no registered loaders.") + return loaded_containers + + # Get current task name + current_task_name = legacy_io.Session["AVALON_TASK"] + + # Load workfile presets for task + self.build_presets = self.get_build_presets( + current_task_name, current_asset_entity + ) + + # Skip if there are any presets for task + if not self.build_presets: + self.log.warning( + "Current task `{}` does not have any loading preset.".format( + current_task_name + ) + ) + return loaded_containers + + # Get presets for loading current asset + current_context_profiles = self.build_presets.get("current_context") + # Get presets for loading linked assets + link_context_profiles = self.build_presets.get("linked_assets") + # Skip if both are missing + if not current_context_profiles and not link_context_profiles: + self.log.warning( + "Current task `{}` has empty loading preset.".format( + current_task_name + ) + ) + return loaded_containers + + elif not current_context_profiles: + self.log.warning(( + "Current task `{}` doesn't have any loading" + " preset for it's context." + ).format(current_task_name)) + + elif not link_context_profiles: + self.log.warning(( + "Current task `{}` doesn't have any" + "loading preset for it's linked assets." + ).format(current_task_name)) + + # Prepare assets to process by workfile presets + assets = [] + current_asset_id = None + if current_context_profiles: + # Add current asset entity if preset has current context set + assets.append(current_asset_entity) + current_asset_id = current_asset_entity["_id"] + + if link_context_profiles: + # Find and append linked assets if preset has set linked mapping + link_assets = get_linked_assets(current_asset_entity) + if link_assets: + assets.extend(link_assets) + + # Skip if there are no assets. This can happen if only linked mapping + # is set and there are no links for his asset. + if not assets: + self.log.warning( + "Asset does not have linked assets. Nothing to process." 
+ ) + return loaded_containers + + # Prepare entities from database for assets + prepared_entities = self._collect_last_version_repres(assets) + + # Load containers by prepared entities and presets + # - Current asset containers + if current_asset_id and current_asset_id in prepared_entities: + current_context_data = prepared_entities.pop(current_asset_id) + loaded_data = self.load_containers_by_asset_data( + current_context_data, current_context_profiles, loaders_by_name + ) + if loaded_data: + loaded_containers.append(loaded_data) + + # - Linked assets container + for linked_asset_data in prepared_entities.values(): + loaded_data = self.load_containers_by_asset_data( + linked_asset_data, link_context_profiles, loaders_by_name + ) + if loaded_data: + loaded_containers.append(loaded_data) + + # Return list of loaded containers + return loaded_containers + + def get_build_presets(self, task_name, asset_doc): + """ Returns presets to build workfile for task name. + + Presets are loaded for current project set in + io.Session["AVALON_PROJECT"], filtered by registered host + and entered task name. + + Args: + task_name (str): Task name used for filtering build presets. + + Returns: + Dict[str, Any]: preset per entered task name + """ + + host_name = os.environ["AVALON_APP"] + project_settings = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) + + host_settings = project_settings.get(host_name) or {} + # Get presets for host + wb_settings = host_settings.get("workfile_builder") + if not wb_settings: + # backward compatibility + wb_settings = host_settings.get("workfile_build") or {} + + builder_profiles = wb_settings.get("profiles") + if not builder_profiles: + return None + + task_type = ( + asset_doc + .get("data", {}) + .get("tasks", {}) + .get(task_name, {}) + .get("type") + ) + filter_data = { + "task_types": task_type, + "tasks": task_name + } + return filter_profiles(builder_profiles, filter_data) + + def _filter_build_profiles(self, build_profiles, loaders_by_name): + """ Filter build profiles by loaders and prepare process data. + + Valid profile must have "loaders", "families" and "repre_names" keys + with valid values. + - "loaders" expects list of strings representing possible loaders. + - "families" expects list of strings for filtering + by main subset family. + - "repre_names" expects list of strings for filtering by + representation name. + + Lowered "families" and "repre_names" are prepared for each profile with + all required keys. + + Args: + build_profiles (Dict[str, Any]): Profiles for building workfile. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + List[Dict[str, Any]]: Filtered and prepared profiles. 
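+
+        Example:
+            A valid profile could look like this (loader and representation
+            names are illustrative)::
+
+                {
+                    "loaders": ["ReferenceLoader"],
+                    "families": ["model", "rig"],
+                    "repre_names": ["ma", "abc"]
+                }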
+ """ + + valid_profiles = [] + for profile in build_profiles: + # Check loaders + profile_loaders = profile.get("loaders") + if not profile_loaders: + self.log.warning(( + "Build profile has missing loaders configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check if any loader is available + loaders_match = False + for loader_name in profile_loaders: + if loader_name in loaders_by_name: + loaders_match = True + break + + if not loaders_match: + self.log.warning(( + "All loaders from Build profile are not available: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check families + profile_families = profile.get("families") + if not profile_families: + self.log.warning(( + "Build profile is missing families configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check representation names + profile_repre_names = profile.get("repre_names") + if not profile_repre_names: + self.log.warning(( + "Build profile is missing" + " representation names filtering: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Prepare lowered families and representation names + profile["families_lowered"] = [ + fam.lower() for fam in profile_families + ] + profile["repre_names_lowered"] = [ + name.lower() for name in profile_repre_names + ] + + valid_profiles.append(profile) + + return valid_profiles + + def _prepare_profile_for_subsets(self, subsets, profiles): + """Select profile for each subset by it's data. + + Profiles are filtered for each subset individually. + Profile is filtered by subset's family, optionally by name regex and + representation names set in profile. + It is possible to not find matching profile for subset, in that case + subset is skipped and it is possible that none of subsets have + matching profile. + + Args: + subsets (List[Dict[str, Any]]): Subset documents. + profiles (List[Dict[str, Any]]): Build profiles. + + Returns: + Dict[str, Any]: Profile by subset's id. + """ + + # Prepare subsets + subsets_by_family = self.map_subsets_by_family(subsets) + + profiles_per_subset_id = {} + for family, subsets in subsets_by_family.items(): + family_low = family.lower() + for profile in profiles: + # Skip profile if does not contain family + if family_low not in profile["families_lowered"]: + continue + + # Precompile name filters as regexes + profile_regexes = profile.get("subset_name_filters") + if profile_regexes: + _profile_regexes = [] + for regex in profile_regexes: + _profile_regexes.append(re.compile(regex)) + profile_regexes = _profile_regexes + + # TODO prepare regex compilation + for subset in subsets: + # Verify regex filtering (optional) + if profile_regexes: + valid = False + for pattern in profile_regexes: + if re.match(pattern, subset["name"]): + valid = True + break + + if not valid: + continue + + profiles_per_subset_id[subset["_id"]] = profile + + # break profiles loop on finding the first matching profile + break + return profiles_per_subset_id + + def load_containers_by_asset_data( + self, asset_entity_data, build_profiles, loaders_by_name + ): + """Load containers for entered asset entity by Build profiles. + + Args: + asset_entity_data (Dict[str, Any]): Prepared data with subsets, + last versions and representations for specific asset. + build_profiles (Dict[str, Any]): Build profiles. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + Dict[str, Any]: Output contains asset document + and loaded containers. 
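+
+        Example:
+            Shape of the returned value (entities are illustrative)::
+
+                {
+                    "asset_entity": <asset document>,
+                    "containers": [<container>, <container>, ...]
+                }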
+ """ + + # Make sure all data are not empty + if not asset_entity_data or not build_profiles or not loaders_by_name: + return + + asset_entity = asset_entity_data["asset_entity"] + + valid_profiles = self._filter_build_profiles( + build_profiles, loaders_by_name + ) + if not valid_profiles: + self.log.warning( + "There are not valid Workfile profiles. Skipping process." + ) + return + + self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) + + subsets_by_id = {} + version_by_subset_id = {} + repres_by_version_id = {} + for subset_id, in_data in asset_entity_data["subsets"].items(): + subset_entity = in_data["subset_entity"] + subsets_by_id[subset_entity["_id"]] = subset_entity + + version_data = in_data["version"] + version_entity = version_data["version_entity"] + version_by_subset_id[subset_id] = version_entity + repres_by_version_id[version_entity["_id"]] = ( + version_data["repres"] + ) + + if not subsets_by_id: + self.log.warning("There are not subsets for asset {0}".format( + asset_entity["name"] + )) + return + + profiles_per_subset_id = self._prepare_profile_for_subsets( + subsets_by_id.values(), valid_profiles + ) + if not profiles_per_subset_id: + self.log.warning("There are not valid subsets.") + return + + valid_repres_by_subset_id = collections.defaultdict(list) + for subset_id, profile in profiles_per_subset_id.items(): + profile_repre_names = profile["repre_names_lowered"] + + version_entity = version_by_subset_id[subset_id] + version_id = version_entity["_id"] + repres = repres_by_version_id[version_id] + for repre in repres: + repre_name_low = repre["name"].lower() + if repre_name_low in profile_repre_names: + valid_repres_by_subset_id[subset_id].append(repre) + + # DEBUG message + msg = "Valid representations for Asset: `{}`".format( + asset_entity["name"] + ) + for subset_id, repres in valid_repres_by_subset_id.items(): + subset = subsets_by_id[subset_id] + msg += "\n# Subset Name/ID: `{}`/{}".format( + subset["name"], subset_id + ) + for repre in repres: + msg += "\n## Repre name: `{}`".format(repre["name"]) + + self.log.debug(msg) + + containers = self._load_containers( + valid_repres_by_subset_id, subsets_by_id, + profiles_per_subset_id, loaders_by_name + ) + + return { + "asset_entity": asset_entity, + "containers": containers + } + + def _load_containers( + self, repres_by_subset_id, subsets_by_id, + profiles_per_subset_id, loaders_by_name + ): + """Real load by collected data happens here. + + Loading of representations per subset happens here. Each subset can + loads one representation. Loading is tried in specific order. + Representations are tried to load by names defined in configuration. + If subset has representation matching representation name each loader + is tried to load it until any is successful. If none of them was + successful then next representation name is tried. + Subset process loop ends when any representation is loaded or + all matching representations were already tried. + + Args: + repres_by_subset_id (Dict[str, Dict[str, Any]]): Available + representations mapped by their parent (subset) id. + subsets_by_id (Dict[str, Dict[str, Any]]): Subset documents + mapped by their id. + profiles_per_subset_id (Dict[str, Dict[str, Any]]): Build profiles + mapped by subset id. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + List[Dict[str, Any]]: Objects of loaded containers. + """ + + loaded_containers = [] + + # Get subset id order from build presets. 
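+        # Families listed earlier in the build presets are matched first,
+        # so the resulting subset id order also defines the order in which
+        # representations are loaded below.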
+ build_presets = self.build_presets.get("current_context", []) + build_presets += self.build_presets.get("linked_assets", []) + subset_ids_ordered = [] + for preset in build_presets: + for preset_family in preset["families"]: + for id, subset in subsets_by_id.items(): + if preset_family not in subset["data"].get("families", []): + continue + + subset_ids_ordered.append(id) + + # Order representations from subsets. + print("repres_by_subset_id", repres_by_subset_id) + representations_ordered = [] + representations = [] + for id in subset_ids_ordered: + for subset_id, repres in repres_by_subset_id.items(): + if repres in representations: + continue + + if id == subset_id: + representations_ordered.append((subset_id, repres)) + representations.append(repres) + + print("representations", representations) + + # Load ordered representations. + for subset_id, repres in representations_ordered: + subset_name = subsets_by_id[subset_id]["name"] + + profile = profiles_per_subset_id[subset_id] + loaders_last_idx = len(profile["loaders"]) - 1 + repre_names_last_idx = len(profile["repre_names_lowered"]) - 1 + + repre_by_low_name = { + repre["name"].lower(): repre for repre in repres + } + + is_loaded = False + for repre_name_idx, profile_repre_name in enumerate( + profile["repre_names_lowered"] + ): + # Break iteration if representation was already loaded + if is_loaded: + break + + repre = repre_by_low_name.get(profile_repre_name) + if not repre: + continue + + for loader_idx, loader_name in enumerate(profile["loaders"]): + if is_loaded: + break + + loader = loaders_by_name.get(loader_name) + if not loader: + continue + try: + container = load_container( + loader, + repre["_id"], + name=subset_name + ) + loaded_containers.append(container) + is_loaded = True + + except Exception as exc: + if exc == IncompatibleLoaderError: + self.log.info(( + "Loader `{}` is not compatible with" + " representation `{}`" + ).format(loader_name, repre["name"])) + + else: + self.log.error( + "Unexpected error happened during loading", + exc_info=True + ) + + msg = "Loading failed." + if loader_idx < loaders_last_idx: + msg += " Trying next loader." + elif repre_name_idx < repre_names_last_idx: + msg += ( + " Loading of subset `{}` was not successful." + ).format(subset_name) + else: + msg += " Trying next representation." + self.log.info(msg) + + return loaded_containers + + def _collect_last_version_repres(self, asset_docs): + """Collect subsets, versions and representations for asset_entities. + + Args: + asset_docs (List[Dict[str, Any]]): Asset entities for which + want to find data. + + Returns: + Dict[str, Any]: collected entities + + Example output: + ``` + { + {Asset ID}: { + "asset_entity": , + "subsets": { + {Subset ID}: { + "subset_entity": , + "version": { + "version_entity": , + "repres": [ + , , ... + ] + } + }, + ... + } + }, + ... 
+ } + output[asset_id]["subsets"][subset_id]["version"]["repres"] + ``` + """ + + output = {} + if not asset_docs: + return output + + asset_docs_by_ids = {asset["_id"]: asset for asset in asset_docs} + + project_name = legacy_io.active_project() + subsets = list(get_subsets( + project_name, asset_ids=asset_docs_by_ids.keys() + )) + subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} + + last_version_by_subset_id = get_last_versions( + project_name, subset_entity_by_ids.keys() + ) + last_version_docs_by_id = { + version["_id"]: version + for version in last_version_by_subset_id.values() + } + repre_docs = get_representations( + project_name, version_ids=last_version_docs_by_id.keys() + ) + + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + version_doc = last_version_docs_by_id[version_id] + + subset_id = version_doc["parent"] + subset_doc = subset_entity_by_ids[subset_id] + + asset_id = subset_doc["parent"] + asset_doc = asset_docs_by_ids[asset_id] + + if asset_id not in output: + output[asset_id] = { + "asset_entity": asset_doc, + "subsets": {} + } + + if subset_id not in output[asset_id]["subsets"]: + output[asset_id]["subsets"][subset_id] = { + "subset_entity": subset_doc, + "version": { + "version_entity": version_doc, + "repres": [] + } + } + + output[asset_id]["subsets"][subset_id]["version"]["repres"].append( + repre_doc + ) + + return output diff --git a/openpype/pipeline/workfile/lock_workfile.py b/openpype/pipeline/workfile/lock_workfile.py new file mode 100644 index 0000000000..579840c07d --- /dev/null +++ b/openpype/pipeline/workfile/lock_workfile.py @@ -0,0 +1,74 @@ +import os +import json +from openpype.lib import Logger, filter_profiles +from openpype.lib.pype_info import get_workstation_info +from openpype.settings import get_project_settings +from openpype.pipeline import get_process_id + + +def _read_lock_file(lock_filepath): + if not os.path.exists(lock_filepath): + log = Logger.get_logger("_read_lock_file") + log.debug("lock file is not created or readable as expected!") + with open(lock_filepath, "r") as stream: + data = json.load(stream) + return data + + +def _get_lock_file(filepath): + return filepath + ".oplock" + + +def is_workfile_locked(filepath): + lock_filepath = _get_lock_file(filepath) + if not os.path.exists(lock_filepath): + return False + return True + + +def get_workfile_lock_data(filepath): + lock_filepath = _get_lock_file(filepath) + return _read_lock_file(lock_filepath) + + +def is_workfile_locked_for_current_process(filepath): + if not is_workfile_locked(filepath): + return False + + lock_filepath = _get_lock_file(filepath) + data = _read_lock_file(lock_filepath) + return data["process_id"] == get_process_id() + + +def delete_workfile_lock(filepath): + lock_filepath = _get_lock_file(filepath) + if os.path.exists(lock_filepath): + os.remove(lock_filepath) + + +def create_workfile_lock(filepath): + lock_filepath = _get_lock_file(filepath) + info = get_workstation_info() + info["process_id"] = get_process_id() + with open(lock_filepath, "w") as stream: + json.dump(info, stream) + + +def remove_workfile_lock(filepath): + if is_workfile_locked_for_current_process(filepath): + delete_workfile_lock(filepath) + + +def is_workfile_lock_enabled(host_name, project_name, project_setting=None): + if project_setting is None: + project_setting = get_project_settings(project_name) + workfile_lock_profiles = ( + project_setting + ["global"] + ["tools"] + ["Workfiles"] + ["workfile_lock_profiles"]) + profile = 
filter_profiles(workfile_lock_profiles, {"host_name": host_name}) + if not profile: + return False + return profile["enabled"] diff --git a/openpype/pipeline/workfile/path_resolving.py b/openpype/pipeline/workfile/path_resolving.py new file mode 100644 index 0000000000..801cb7223c --- /dev/null +++ b/openpype/pipeline/workfile/path_resolving.py @@ -0,0 +1,530 @@ +import os +import re +import copy +import platform + +from openpype.client import get_project, get_asset_by_name +from openpype.settings import get_project_settings +from openpype.lib import ( + filter_profiles, + Logger, + StringTemplate, +) +from openpype.pipeline import Anatomy +from openpype.pipeline.template_data import get_template_data + + +def get_workfile_template_key_from_context( + asset_name, task_name, host_name, project_name, project_settings=None +): + """Helper function to get template key for workfile template. + + Do the same as `get_workfile_template_key` but returns value for "session + context". + + Args: + asset_name(str): Name of asset document. + task_name(str): Task name for which is template key retrieved. + Must be available on asset document under `data.tasks`. + host_name(str): Name of host implementation for which is workfile + used. + project_name(str): Project name where asset and task is. + project_settings(Dict[str, Any]): Project settings for passed + 'project_name'. Not required at all but makes function faster. + """ + + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.tasks"] + ) + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + task_info = asset_tasks.get(task_name) or {} + task_type = task_info.get("type") + + return get_workfile_template_key( + task_type, host_name, project_name, project_settings + ) + + +def get_workfile_template_key( + task_type, host_name, project_name, project_settings=None +): + """Workfile template key which should be used to get workfile template. + + Function is using profiles from project settings to return right template + for passet task type and host name. + + Args: + task_type(str): Name of task type. + host_name(str): Name of host implementation (e.g. "maya", "nuke", ...) + project_name(str): Name of project in which context should look for + settings. + project_settings(Dict[str, Any]): Prepared project settings for + project name. Optional to make processing faster. + """ + + default = "work" + if not task_type or not host_name: + return default + + if not project_settings: + project_settings = get_project_settings(project_name) + + try: + profiles = ( + project_settings + ["global"] + ["tools"] + ["Workfiles"] + ["workfile_template_profiles"] + ) + except Exception: + profiles = [] + + if not profiles: + return default + + profile_filter = { + "task_types": task_type, + "hosts": host_name + } + profile = filter_profiles(profiles, profile_filter) + if profile: + return profile["workfile_template"] or default + return default + + +def get_workdir_with_workdir_data( + workdir_data, + project_name, + anatomy=None, + template_key=None, + project_settings=None +): + """Fill workdir path from entered data and project's anatomy. + + It is possible to pass only project's name instead of project's anatomy but + one of them **must** be entered. It is preferred to enter anatomy if is + available as initialization of a new Anatomy object may be time consuming. + + Args: + workdir_data (Dict[str, Any]): Data to fill workdir template. + project_name (str): Project's name. + anatomy (Anatomy): Anatomy object for specific project. 
Faster + processing if is passed. + template_key (str): Key of work templates in anatomy templates. If not + passed `get_workfile_template_key_from_context` is used to get it. + project_settings(Dict[str, Any]): Prepared project settings for + project name. Optional to make processing faster. Ans id used only + if 'template_key' is not passed. + + Returns: + TemplateResult: Workdir path. + """ + + if not anatomy: + anatomy = Anatomy(project_name) + + if not template_key: + template_key = get_workfile_template_key( + workdir_data["task"]["type"], + workdir_data["app"], + workdir_data["project"]["name"], + project_settings + ) + + anatomy_filled = anatomy.format(workdir_data) + # Output is TemplateResult object which contain useful data + output = anatomy_filled[template_key]["folder"] + if output: + return output.normalized() + return output + + +def get_workdir( + project_doc, + asset_doc, + task_name, + host_name, + anatomy=None, + template_key=None, + project_settings=None +): + """Fill workdir path from entered data and project's anatomy. + + Args: + project_doc (Dict[str, Any]): Mongo document of project from MongoDB. + asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB. + task_name (str): Task name for which are workdir data preapred. + host_name (str): Host which is used to workdir. This is required + because workdir template may contain `{app}` key. In `Session` + is stored under `AVALON_APP` key. + anatomy (Anatomy): Optional argument. Anatomy object is created using + project name from `project_doc`. It is preferred to pass this + argument as initialization of a new Anatomy object may be time + consuming. + template_key (str): Key of work templates in anatomy templates. Default + value is defined in `get_workdir_with_workdir_data`. + project_settings(Dict[str, Any]): Prepared project settings for + project name. Optional to make processing faster. Ans id used only + if 'template_key' is not passed. + + Returns: + TemplateResult: Workdir path. + """ + + if not anatomy: + anatomy = Anatomy(project_doc["name"]) + + workdir_data = get_template_data( + project_doc, asset_doc, task_name, host_name + ) + # Output is TemplateResult object which contain useful data + return get_workdir_with_workdir_data( + workdir_data, + anatomy.project_name, + anatomy, + template_key, + project_settings + ) + + +def get_last_workfile_with_version( + workdir, file_template, fill_data, extensions +): + """Return last workfile version. + + Usign workfile template and it's filling data find most possible last + version of workfile which was created for the context. + + Functionality is fully based on knowing which keys are optional or what + values are expected as value. + + The last modified file is used if more files can be considered as + last workfile. + + Args: + workdir (str): Path to dir where workfiles are stored. + file_template (str): Template of file name. + fill_data (Dict[str, Any]): Data for filling template. + extensions (Iterable[str]): All allowed file extensions of workfile. + + Returns: + Tuple[Union[str, None], Union[int, None]]: Last workfile with version + if there is any workfile otherwise None for both. 
+ """ + + if not os.path.exists(workdir): + return None, None + + dotted_extensions = set() + for ext in extensions: + if not ext.startswith("."): + ext = ".{}".format(ext) + dotted_extensions.add(ext) + + # Fast match on extension + filenames = [ + filename + for filename in os.listdir(workdir) + if os.path.splitext(filename)[-1] in dotted_extensions + ] + + # Build template without optionals, version to digits only regex + # and comment to any definable value. + # Escape extensions dot for regex + regex_exts = [ + "\\" + ext + for ext in dotted_extensions + ] + ext_expression = "(?:" + "|".join(regex_exts) + ")" + + # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end + file_template = re.sub(r"\.?{ext}", ext_expression, file_template) + # Replace optional keys with optional content regex + file_template = re.sub(r"<.*?>", r".*?", file_template) + # Replace `{version}` with group regex + file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) + file_template = re.sub(r"{comment.*?}", r".+?", file_template) + file_template = StringTemplate.format_strict_template( + file_template, fill_data + ) + + # Match with ignore case on Windows due to the Windows + # OS not being case-sensitive. This avoids later running + # into the error that the file did exist if it existed + # with a different upper/lower-case. + kwargs = {} + if platform.system().lower() == "windows": + kwargs["flags"] = re.IGNORECASE + + # Get highest version among existing matching files + version = None + output_filenames = [] + for filename in sorted(filenames): + match = re.match(file_template, filename, **kwargs) + if not match: + continue + + if not match.groups(): + output_filenames.append(filename) + continue + + file_version = int(match.group(1)) + if version is None or file_version > version: + output_filenames[:] = [] + version = file_version + + if file_version == version: + output_filenames.append(filename) + + output_filename = None + if output_filenames: + if len(output_filenames) == 1: + output_filename = output_filenames[0] + else: + last_time = None + for _output_filename in output_filenames: + full_path = os.path.join(workdir, _output_filename) + mod_time = os.path.getmtime(full_path) + if last_time is None or last_time < mod_time: + output_filename = _output_filename + last_time = mod_time + + return output_filename, version + + +def get_last_workfile( + workdir, file_template, fill_data, extensions, full_path=False +): + """Return last workfile filename. + + Returns file with version 1 if there is not workfile yet. + + Args: + workdir(str): Path to dir where workfiles are stored. + file_template(str): Template of file name. + fill_data(Dict[str, Any]): Data for filling template. + extensions(Iterable[str]): All allowed file extensions of workfile. + full_path(bool): Full path to file is returned if set to True. + + Returns: + str: Last or first workfile as filename of full path to filename. 
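+
+    Example:
+        With an illustrative template and no existing workfile::
+
+            get_last_workfile(
+                "/path/to/workdir",
+                "{task[name]}_v{version:0>3}.{ext}",
+                {"task": {"name": "animation"}},
+                ["ma"],
+                full_path=True
+            )
+            # -> "/path/to/workdir/animation_v001.ma"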
+ """ + + filename, version = get_last_workfile_with_version( + workdir, file_template, fill_data, extensions + ) + if filename is None: + data = copy.deepcopy(fill_data) + data["version"] = 1 + data.pop("comment", None) + if not data.get("ext"): + data["ext"] = extensions[0] + data["ext"] = data["ext"].replace('.', '') + filename = StringTemplate.format_strict_template(file_template, data) + + if full_path: + return os.path.normpath(os.path.join(workdir, filename)) + + return filename + + +def get_custom_workfile_template( + project_doc, + asset_doc, + task_name, + host_name, + anatomy=None, + project_settings=None +): + """Filter and fill workfile template profiles by passed context. + + Custom workfile template can be used as first version of workfiles. + Template is a file on a disk which is set in settings. Expected settings + structure to have this feature enabled is: + project settings + |- + |- workfile_builder + |- create_first_version - a bool which must be set to 'True' + |- custom_templates - profiles based on task name/type which + points to a file which is copied as + first workfile + + It is expected that passed argument are already queried documents of + project and asset as parents of processing task name. + + Args: + project_doc (Dict[str, Any]): Project document from MongoDB. + asset_doc (Dict[str, Any]): Asset document from MongoDB. + task_name (str): Name of task for which templates are filtered. + host_name (str): Name of host. + anatomy (Anatomy): Optionally passed anatomy object for passed project + name. + project_settings(Dict[str, Any]): Preloaded project settings. + + Returns: + str: Path to template or None if none of profiles match current + context. Existence of formatted path is not validated. + None: If no profile is matching context. + """ + + log = Logger.get_logger("CustomWorkfileResolve") + + project_name = project_doc["name"] + if project_settings is None: + project_settings = get_project_settings(project_name) + + host_settings = project_settings.get(host_name) + if not host_settings: + log.info("Host \"{}\" doesn't have settings".format(host_name)) + return None + + workfile_builder_settings = host_settings.get("workfile_builder") + if not workfile_builder_settings: + log.info(( + "Seems like old version of settings is used." + " Can't access custom templates in host \"{}\"." + ).format(host_name)) + return + + if not workfile_builder_settings["create_first_version"]: + log.info(( + "Project \"{}\" has turned off to create first workfile for" + " host \"{}\"" + ).format(project_name, host_name)) + return + + # Backwards compatibility + template_profiles = workfile_builder_settings.get("custom_templates") + if not template_profiles: + log.info( + "Custom templates are not filled. Skipping template copy." 
+ ) + return + + if anatomy is None: + anatomy = Anatomy(project_name) + + # get project, asset, task anatomy context data + anatomy_context_data = get_template_data( + project_doc, asset_doc, task_name, host_name + ) + # add root dict + anatomy_context_data["root"] = anatomy.roots + + # get task type for the task in context + current_task_type = anatomy_context_data["task"]["type"] + + # get path from matching profile + matching_item = filter_profiles( + template_profiles, + {"task_types": current_task_type} + ) + # when path is available try to format it in case + # there are some anatomy template strings + if matching_item: + # extend anatomy context with os.environ to + # also allow formatting against env + full_context_data = os.environ.copy() + full_context_data.update(anatomy_context_data) + + template = matching_item["path"][platform.system().lower()] + return StringTemplate.format_strict_template( + template, full_context_data + ).normalized() + + return None + + +def get_custom_workfile_template_by_string_context( + project_name, + asset_name, + task_name, + host_name, + anatomy=None, + project_settings=None +): + """Filter and fill workfile template profiles by passed context. + + Passed context are string representations of project, asset and task. + Function will query documents of project and asset to be able use + `get_custom_workfile_template` for rest of logic. + + Args: + project_name(str): Project name. + asset_name(str): Asset name. + task_name(str): Task name. + host_name (str): Name of host. + anatomy(Anatomy): Optionally prepared anatomy object for passed + project. + project_settings(Dict[str, Any]): Preloaded project settings. + + Returns: + str: Path to template or None if none of profiles match current + context. (Existence of formatted path is not validated.) + None: If no profile is matching context. + """ + + project_doc = get_project(project_name) + asset_doc = get_asset_by_name(project_name, asset_name) + + return get_custom_workfile_template( + project_doc, asset_doc, task_name, host_name, anatomy, project_settings + ) + + +def create_workdir_extra_folders( + workdir, + host_name, + task_type, + task_name, + project_name, + project_settings=None +): + """Create extra folders in work directory based on context. + + Args: + workdir (str): Path to workdir where workfiles is stored. + host_name (str): Name of host implementation. + task_type (str): Type of task for which extra folders should be + created. + task_name (str): Name of task for which extra folders should be + created. + project_name (str): Name of project on which task is. + project_settings (dict): Prepared project settings. Are loaded if not + passed. 
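+
+    Example:
+        With a matching profile containing '"folders": ["render", "cache"]'
+        the function creates '<workdir>/render' and '<workdir>/cache' when
+        they do not exist yet.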
+ """ + + # Load project settings if not set + if not project_settings: + project_settings = get_project_settings(project_name) + + # Load extra folders profiles + extra_folders_profiles = ( + project_settings["global"]["tools"]["Workfiles"]["extra_folders"] + ) + # Skip if are empty + if not extra_folders_profiles: + return + + # Prepare profiles filters + filter_data = { + "task_types": task_type, + "task_names": task_name, + "hosts": host_name + } + profile = filter_profiles(extra_folders_profiles, filter_data) + if profile is None: + return + + for subfolder in profile["folders"]: + # Make sure backslashes are converted to forwards slashes + # and does not start with slash + subfolder = subfolder.replace("\\", "/").lstrip("/") + # Skip empty strings + if not subfolder: + continue + + fullpath = os.path.join(workdir, subfolder) + if not os.path.exists(fullpath): + os.makedirs(fullpath) diff --git a/openpype/pipeline/workfile/workfile_template_builder.py b/openpype/pipeline/workfile/workfile_template_builder.py new file mode 100644 index 0000000000..582657c735 --- /dev/null +++ b/openpype/pipeline/workfile/workfile_template_builder.py @@ -0,0 +1,1451 @@ +"""Workfile build mechanism using workfile templates. + +Build templates are manually prepared using plugin definitions which create +placeholders inside the template which are populated on import. + +This approach is very explicit to achive very specific build logic that can be +targeted by task types and names. + +Placeholders are created using placeholder plugins which should care about +logic and data of placeholder items. 'PlaceholderItem' is used to keep track +about it's progress. +""" + +import os +import re +import collections +import copy +from abc import ABCMeta, abstractmethod + +import six + +from openpype.client import ( + get_asset_by_name, + get_linked_assets, + get_representations, +) +from openpype.settings import ( + get_project_settings, + get_system_settings, +) +from openpype.host import HostBase +from openpype.lib import ( + Logger, + StringTemplate, + filter_profiles, + attribute_definitions, +) +from openpype.lib.attribute_definitions import get_attributes_keys +from openpype.pipeline import legacy_io, Anatomy +from openpype.pipeline.load import ( + get_loaders_by_name, + get_contexts_for_repre_docs, + load_with_repre_context, +) +from openpype.pipeline.create import get_legacy_creator_by_name + + +class TemplateNotFound(Exception): + """Exception raised when template does not exist.""" + pass + + +class TemplateProfileNotFound(Exception): + """Exception raised when current profile + doesn't match any template profile""" + pass + + +class TemplateAlreadyImported(Exception): + """Error raised when Template was already imported by host for + this session""" + pass + + +class TemplateLoadFailed(Exception): + """Error raised whend Template loader was unable to load the template""" + pass + + +@six.add_metaclass(ABCMeta) +class AbstractTemplateBuilder(object): + """Abstraction of Template Builder. + + Builder cares about context, shared data, cache, discovery of plugins + and trigger logic. Provides public api for host workfile build systen. + + Rest of logic is based on plugins that care about collection and creation + of placeholder items. + + Population of placeholders happens in loops. Each loop will collect all + available placeholders, skip already populated, and populate the rest. + + Builder item has 2 types of shared data. 
Refresh lifetime which are cleared + on refresh and populate lifetime which are cleared after loop of + placeholder population. + + Args: + host (Union[HostBase, ModuleType]): Implementation of host. + """ + + _log = None + + def __init__(self, host): + # Get host name + if isinstance(host, HostBase): + host_name = host.name + else: + host_name = os.environ.get("AVALON_APP") + + self._host = host + self._host_name = host_name + + # Shared data across placeholder plugins + self._shared_data = {} + self._shared_populate_data = {} + + # Where created objects of placeholder plugins will be stored + self._placeholder_plugins = None + self._loaders_by_name = None + self._creators_by_name = None + + self._system_settings = None + self._project_settings = None + + self._current_asset_doc = None + self._linked_asset_docs = None + self._task_type = None + + @property + def project_name(self): + return legacy_io.active_project() + + @property + def current_asset_name(self): + return legacy_io.Session["AVALON_ASSET"] + + @property + def current_task_name(self): + return legacy_io.Session["AVALON_TASK"] + + @property + def system_settings(self): + if self._system_settings is None: + self._system_settings = get_system_settings() + return self._system_settings + + @property + def project_settings(self): + if self._project_settings is None: + self._project_settings = get_project_settings(self.project_name) + return self._project_settings + + @property + def current_asset_doc(self): + if self._current_asset_doc is None: + self._current_asset_doc = get_asset_by_name( + self.project_name, self.current_asset_name + ) + return self._current_asset_doc + + @property + def linked_asset_docs(self): + if self._linked_asset_docs is None: + self._linked_asset_docs = get_linked_assets( + self.current_asset_doc + ) + return self._linked_asset_docs + + @property + def current_task_type(self): + asset_doc = self.current_asset_doc + if not asset_doc: + return None + return ( + asset_doc + .get("data", {}) + .get("tasks", {}) + .get(self.current_task_name, {}) + .get("type") + ) + + def get_placeholder_plugin_classes(self): + """Get placeholder plugin classes that can be used to build template. + + Default implementation looks for method + 'get_workfile_build_placeholder_plugins' on host. + + Returns: + List[PlaceholderPlugin]: Plugin classes available for host. + """ + + if hasattr(self._host, "get_workfile_build_placeholder_plugins"): + return self._host.get_workfile_build_placeholder_plugins() + return [] + + @property + def host(self): + """Access to host implementation. + + Returns: + Union[HostBase, ModuleType]: Implementation of host. + """ + + return self._host + + @property + def host_name(self): + """Name of 'host' implementation. + + Returns: + str: Host's name. 
+ """ + + return self._host_name + + @property + def log(self): + """Dynamically created logger for the plugin.""" + + if self._log is None: + self._log = Logger.get_logger(repr(self)) + return self._log + + def refresh(self): + """Reset cached data.""" + + self._placeholder_plugins = None + self._loaders_by_name = None + self._creators_by_name = None + + self._current_asset_doc = None + self._linked_asset_docs = None + self._task_type = None + + self._system_settings = None + self._project_settings = None + + self.clear_shared_data() + self.clear_shared_populate_data() + + def get_loaders_by_name(self): + if self._loaders_by_name is None: + self._loaders_by_name = get_loaders_by_name() + return self._loaders_by_name + + def get_creators_by_name(self): + if self._creators_by_name is None: + self._creators_by_name = get_legacy_creator_by_name() + return self._creators_by_name + + def get_shared_data(self, key): + """Receive shared data across plugins and placeholders. + + This can be used to scroll scene only once to look for placeholder + items if the storing is unified but each placeholder plugin would have + to call it again. + + Args: + key (str): Key under which are shared data stored. + + Returns: + Union[None, Any]: None if key was not set. + """ + + return self._shared_data.get(key) + + def set_shared_data(self, key, value): + """Store share data across plugins and placeholders. + + Store data that can be afterwards accessed from any future call. It + is good practice to check if the same value is not already stored under + different key or if the key is not already used for something else. + + Key should be self explanatory to content. + - wrong: 'asset' + - good: 'asset_name' + + Args: + key (str): Key under which is key stored. + value (Any): Value that should be stored under the key. + """ + + self._shared_data[key] = value + + def clear_shared_data(self): + """Clear shared data. + + Method only clear shared data to default state. + """ + + self._shared_data = {} + + def clear_shared_populate_data(self): + """Receive shared data across plugins and placeholders. + + These data are cleared after each loop of populating of template. + + This can be used to scroll scene only once to look for placeholder + items if the storing is unified but each placeholder plugin would have + to call it again. + + Args: + key (str): Key under which are shared data stored. + + Returns: + Union[None, Any]: None if key was not set. + """ + + self._shared_populate_data = {} + + def get_shared_populate_data(self, key): + """Store share populate data across plugins and placeholders. + + These data are cleared after each loop of populating of template. + + Store data that can be afterwards accessed from any future call. It + is good practice to check if the same value is not already stored under + different key or if the key is not already used for something else. + + Key should be self explanatory to content. + - wrong: 'asset' + - good: 'asset_name' + + Args: + key (str): Key under which is key stored. + value (Any): Value that should be stored under the key. + """ + + return self._shared_populate_data.get(key) + + def set_shared_populate_data(self, key, value): + """Store share populate data across plugins and placeholders. + + These data are cleared after each loop of populating of template. + + Store data that can be afterwards accessed from any future call. 
It + is good practice to check if the same value is not already stored under + different key or if the key is not already used for something else. + + Key should be self explanatory to content. + - wrong: 'asset' + - good: 'asset_name' + + Args: + key (str): Key under which is key stored. + value (Any): Value that should be stored under the key. + """ + + self._shared_populate_data[key] = value + + @property + def placeholder_plugins(self): + """Access to initialized placeholder plugins. + + Returns: + List[PlaceholderPlugin]: Initialized plugins available for host. + """ + + if self._placeholder_plugins is None: + placeholder_plugins = {} + for cls in self.get_placeholder_plugin_classes(): + try: + plugin = cls(self) + placeholder_plugins[plugin.identifier] = plugin + + except Exception: + self.log.warning( + "Failed to initialize placeholder plugin {}".format( + cls.__name__ + ), + exc_info=True + ) + + self._placeholder_plugins = placeholder_plugins + return self._placeholder_plugins + + def create_placeholder(self, plugin_identifier, placeholder_data): + """Create new placeholder using plugin identifier and data. + + Args: + plugin_identifier (str): Identifier of plugin. That's how builder + know which plugin should be used. + placeholder_data (Dict[str, Any]): Placeholder item data. They + should match options required by the plugin. + + Returns: + PlaceholderItem: Created placeholder item. + """ + + plugin = self.placeholder_plugins[plugin_identifier] + return plugin.create_placeholder(placeholder_data) + + def get_placeholders(self): + """Collect placeholder items from scene. + + Each placeholder plugin can collect it's placeholders and return them. + This method does not use cached values but always go through the scene. + + Returns: + List[PlaceholderItem]: Sorted placeholder items. + """ + + placeholders = [] + for placeholder_plugin in self.placeholder_plugins.values(): + result = placeholder_plugin.collect_placeholders() + if result: + placeholders.extend(result) + + return list(sorted( + placeholders, + key=lambda i: i.order + )) + + def build_template(self, template_path=None, level_limit=None): + """Main callback for building workfile from template path. + + Todo: + Handle report of populated placeholders from + 'populate_scene_placeholders' to be shown to a user. + + Args: + template_path (str): Path to a template file with placeholders. + Template from settings 'get_template_path' used when not + passed. + level_limit (int): Limit of populate loops. Related to + 'populate_scene_placeholders' method. + """ + + if template_path is None: + template_path = self.get_template_path() + self.import_template(template_path) + self.populate_scene_placeholders(level_limit) + + def rebuild_template(self): + """Go through existing placeholders in scene and update them. + + This could not make sense for all plugin types so this is optional + logic for plugins. + + Note: + Logic is not importing the template again but using placeholders + that were already available. We should maybe change the method + name. + + Question: + Should this also handle subloops as it is possible that another + template is loaded during processing? 
+ """ + + if not self.placeholder_plugins: + self.log.info("There are no placeholder plugins available.") + return + + placeholders = self.get_placeholders() + if not placeholders: + self.log.info("No placeholders were found.") + return + + for placeholder in placeholders: + plugin = placeholder.plugin + plugin.repopulate_placeholder(placeholder) + + self.clear_shared_populate_data() + + @abstractmethod + def import_template(self, template_path): + """ + Import template in current host. + + Should load the content of template into scene so + 'populate_scene_placeholders' can be started. + + Args: + template_path (str): Fullpath for current task and + host's template file. + """ + + pass + + def _prepare_placeholders(self, placeholders): + """Run preparation part for placeholders on plugins. + + Args: + placeholders (List[PlaceholderItem]): Placeholder items that will + be processed. + """ + + # Prepare placeholder items by plugin + plugins_by_identifier = {} + placeholders_by_plugin_id = collections.defaultdict(list) + for placeholder in placeholders: + plugin = placeholder.plugin + identifier = plugin.identifier + plugins_by_identifier[identifier] = plugin + placeholders_by_plugin_id[identifier].append(placeholder) + + # Plugin should prepare data for passed placeholders + for identifier, placeholders in placeholders_by_plugin_id.items(): + plugin = plugins_by_identifier[identifier] + plugin.prepare_placeholders(placeholders) + + def populate_scene_placeholders(self, level_limit=None): + """Find placeholders in scene using plugins and process them. + + This should happen after 'import_template'. + + Collect available placeholders from scene. All of them are processed + after that shared data are cleared. Placeholder items are collected + again and if there are any new the loop happens again. This is possible + to change with defying 'level_limit'. + + Placeholders are marked as processed so they're not re-processed. To + identify which placeholders were already processed is used + placeholder's 'scene_identifier'. + + Args: + level_limit (int): Level of loops that can happen. Default is 1000. + """ + + if not self.placeholder_plugins: + self.log.warning("There are no placeholder plugins available.") + return + + placeholders = self.get_placeholders() + if not placeholders: + self.log.warning("No placeholders were found.") + return + + # Avoid infinite loop + # - 1000 iterations of placeholders processing must be enough + if not level_limit: + level_limit = 1000 + + placeholder_by_scene_id = { + placeholder.scene_identifier: placeholder + for placeholder in placeholders + } + all_processed = len(placeholders) == 0 + # Counter is checked at the ned of a loop so the loop happens at least + # once. + iter_counter = 0 + while not all_processed: + filtered_placeholders = [] + for placeholder in placeholders: + if placeholder.finished: + continue + + if placeholder.in_progress: + self.log.warning(( + "Placeholder that should be processed" + " is already in progress." 
+ )) + continue + filtered_placeholders.append(placeholder) + + self._prepare_placeholders(filtered_placeholders) + + for placeholder in filtered_placeholders: + placeholder.set_in_progress() + placeholder_plugin = placeholder.plugin + try: + placeholder_plugin.populate_placeholder(placeholder) + + except Exception as exc: + self.log.warning( + ( + "Failed to process placeholder {} with plugin {}" + ).format( + placeholder.scene_identifier, + placeholder_plugin.__class__.__name__ + ), + exc_info=True + ) + placeholder.set_failed(exc) + + placeholder.set_finished() + + # Clear shared data before getting new placeholders + self.clear_shared_populate_data() + + iter_counter += 1 + if iter_counter >= level_limit: + break + + all_processed = True + collected_placeholders = self.get_placeholders() + for placeholder in collected_placeholders: + identifier = placeholder.scene_identifier + if identifier in placeholder_by_scene_id: + continue + + all_processed = False + placeholder_by_scene_id[identifier] = placeholder + placeholders.append(placeholder) + + self.refresh() + + def _get_build_profiles(self): + """Get build profiles for workfile build template path. + + Returns: + List[Dict[str, Any]]: Profiles for template path resolving. + """ + + return ( + self.project_settings + [self.host_name] + ["templated_workfile_build"] + ["profiles"] + ) + + def get_template_path(self): + """Unified way how template path is received usign settings. + + Method is dependent on '_get_build_profiles' which should return filter + profiles to resolve path to a template. Default implementation looks + into host settings: + - 'project_settings/{host name}/templated_workfile_build/profiles' + + Returns: + str: Path to a template file with placeholders. + + Raises: + TemplateProfileNotFound: When profiles are not filled. + TemplateLoadFailed: Profile was found but path is not set. + TemplateNotFound: Path was set but file does not exists. 
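+
+        Example:
+            Minimal sketch of a matching profile from project settings; the
+            values are illustrative only:
+
+                {
+                    "task_types": [],
+                    "task_names": ["modeling"],
+                    "path": "{root[work]}/{project[name]}/templates/model.ma"
+                }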
+ """ + + host_name = self.host_name + project_name = self.project_name + task_name = self.current_task_name + task_type = self.current_task_type + + build_profiles = self._get_build_profiles() + profile = filter_profiles( + build_profiles, + { + "task_types": task_type, + "task_names": task_name + } + ) + + if not profile: + raise TemplateProfileNotFound(( + "No matching profile found for task '{}' of type '{}' " + "with host '{}'" + ).format(task_name, task_type, host_name)) + + path = profile["path"] + if not path: + raise TemplateLoadFailed(( + "Template path is not set.\n" + "Path need to be set in {}\\Template Workfile Build " + "Settings\\Profiles" + ).format(host_name.title())) + + # Try fill path with environments and anatomy roots + anatomy = Anatomy(project_name) + fill_data = { + key: value + for key, value in os.environ.items() + } + fill_data["root"] = anatomy.roots + result = StringTemplate.format_template(path, fill_data) + if result.solved: + path = result.normalized() + + if path and os.path.exists(path): + self.log.info("Found template at: '{}'".format(path)) + return path + + solved_path = None + while True: + try: + solved_path = anatomy.path_remapper(path) + except KeyError as missing_key: + raise KeyError( + "Could not solve key '{}' in template path '{}'".format( + missing_key, path)) + + if solved_path is None: + solved_path = path + if solved_path == path: + break + path = solved_path + + solved_path = os.path.normpath(solved_path) + if not os.path.exists(solved_path): + raise TemplateNotFound( + "Template found in openPype settings for task '{}' with host " + "'{}' does not exists. (Not found : {})".format( + task_name, host_name, solved_path)) + + self.log.info("Found template at: '{}'".format(solved_path)) + + return solved_path + + +@six.add_metaclass(ABCMeta) +class PlaceholderPlugin(object): + """Plugin which care about handling of placeholder items logic. + + Plugin create and update placeholders in scene and populate them on + template import. Populating means that based on placeholder data happens + a logic in the scene. Most common logic is to load representation using + loaders or to create instances in scene. + """ + + label = None + _log = None + + def __init__(self, builder): + self._builder = builder + + @property + def builder(self): + """Access to builder which initialized the plugin. + + Returns: + AbstractTemplateBuilder: Loader of template build. + """ + + return self._builder + + @property + def project_name(self): + return self._builder.project_name + + @property + def log(self): + """Dynamically created logger for the plugin.""" + + if self._log is None: + self._log = Logger.get_logger(repr(self)) + return self._log + + @property + def identifier(self): + """Identifier which will be stored to placeholder. + + Default implementation uses class name. + + Returns: + str: Unique identifier of placeholder plugin. + """ + + return self.__class__.__name__ + + @abstractmethod + def create_placeholder(self, placeholder_data): + """Create new placeholder in scene and get it's item. + + It matters on the plugin implementation if placeholder will use + selection in scene or create new node. + + Args: + placeholder_data (Dict[str, Any]): Data that were created + based on attribute definitions from 'get_placeholder_options'. + + Returns: + PlaceholderItem: Created placeholder item. + """ + + pass + + @abstractmethod + def update_placeholder(self, placeholder_item, placeholder_data): + """Update placeholder item with new data. 
+ + New data should be propagated to object of placeholder item itself + and also into the scene. + + Reason: + Some placeholder plugins may require some special way how the + updates should be propagated to object. + + Args: + placeholder_item (PlaceholderItem): Object of placeholder that + should be updated. + placeholder_data (Dict[str, Any]): Data related to placeholder. + Should match plugin options. + """ + + pass + + @abstractmethod + def collect_placeholders(self): + """Collect placeholders from scene. + + Returns: + List[PlaceholderItem]: Placeholder objects. + """ + + pass + + def get_placeholder_options(self, options=None): + """Placeholder options for data showed. + + Returns: + List[AbtractAttrDef]: Attribute definitions of placeholder options. + """ + + return [] + + def get_placeholder_keys(self): + """Get placeholder keys that are stored in scene. + + Returns: + Set[str]: Key of placeholder keys that are stored in scene. + """ + + option_keys = get_attributes_keys(self.get_placeholder_options()) + option_keys.add("plugin_identifier") + return option_keys + + def prepare_placeholders(self, placeholders): + """Preparation part of placeholders. + + Args: + placeholders (List[PlaceholderItem]): List of placeholders that + will be processed. + """ + + pass + + @abstractmethod + def populate_placeholder(self, placeholder): + """Process single placeholder item. + + Processing of placeholders is defined by their order thus can't be + processed in batch. + + Args: + placeholder (PlaceholderItem): Placeholder that should be + processed. + """ + + pass + + def repopulate_placeholder(self, placeholder): + """Update scene with current context for passed placeholder. + + Can be used to re-run placeholder logic (if it make sense). + """ + + pass + + def get_plugin_shared_data(self, key): + """Receive shared data across plugin and placeholders. + + Using shared data from builder but stored under plugin identifier. + + Args: + key (str): Key under which are shared data stored. + + Returns: + Union[None, Any]: None if key was not set. + """ + + plugin_data = self.builder.get_shared_data(self.identifier) + if plugin_data is None: + return None + return plugin_data.get(key) + + def set_plugin_shared_data(self, key, value): + """Store share data across plugin and placeholders. + + Using shared data from builder but stored under plugin identifier. + + Key should be self explanatory to content. + - wrong: 'asset' + - good: 'asset_name' + + Args: + key (str): Key under which is key stored. + value (Any): Value that should be stored under the key. + """ + + plugin_data = self.builder.get_shared_data(self.identifier) + if plugin_data is None: + plugin_data = {} + plugin_data[key] = value + self.builder.set_shared_data(self.identifier, plugin_data) + + def get_plugin_shared_populate_data(self, key): + """Receive shared data across plugin and placeholders. + + Using shared populate data from builder but stored under plugin + identifier. + + Shared populate data are cleaned up during populate while loop. + + Args: + key (str): Key under which are shared data stored. + + Returns: + Union[None, Any]: None if key was not set. + """ + + plugin_data = self.builder.get_shared_populate_data(self.identifier) + if plugin_data is None: + return None + return plugin_data.get(key) + + def set_plugin_shared_populate_data(self, key, value): + """Store share data across plugin and placeholders. + + Using shared data from builder but stored under plugin identifier. + + Key should be self explanatory to content. 
+ - wrong: 'asset' + - good: 'asset_name' + + Shared populate data are cleaned up during populate while loop. + + Args: + key (str): Key under which is key stored. + value (Any): Value that should be stored under the key. + """ + + plugin_data = self.builder.get_shared_populate_data(self.identifier) + if plugin_data is None: + plugin_data = {} + plugin_data[key] = value + self.builder.set_shared_populate_data(self.identifier, plugin_data) + + +class PlaceholderItem(object): + """Item representing single item in scene that is a placeholder to process. + + Items are always created and updated by their plugins. Each plugin can use + modified class of 'PlacehoderItem' but only to add more options instead of + new other. + + Scene identifier is used to avoid processing of the palceholder item + multiple times so must be unique across whole workfile builder. + + Args: + scene_identifier (str): Unique scene identifier. If placeholder is + created from the same "node" it must have same identifier. + data (Dict[str, Any]): Data related to placeholder. They're defined + by plugin. + plugin (PlaceholderPlugin): Plugin which created the placeholder item. + """ + + default_order = 100 + + def __init__(self, scene_identifier, data, plugin): + self._log = None + self._scene_identifier = scene_identifier + self._data = data + self._plugin = plugin + + # Keep track about state of Placeholder process + self._state = 0 + + # Error messages to be shown in UI + # - all other messages should be logged + self._errors = [] # -> List[str] + + @property + def plugin(self): + """Access to plugin which created placeholder. + + Returns: + PlaceholderPlugin: Plugin object. + """ + + return self._plugin + + @property + def builder(self): + """Access to builder. + + Returns: + AbstractTemplateBuilder: Builder which is the top part of + placeholder. + """ + + return self.plugin.builder + + @property + def data(self): + """Placeholder data which can modify how placeholder is processed. + + Possible general keys + - order: Can define the order in which is palceholder processed. + Lower == earlier. + + Other keys are defined by placeholder and should validate them on item + creation. + + Returns: + Dict[str, Any]: Placeholder item data. + """ + + return self._data + + def to_dict(self): + """Create copy of item's data. + + Returns: + Dict[str, Any]: Placeholder data. + """ + + return copy.deepcopy(self.data) + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(repr(self)) + return self._log + + def __repr__(self): + return "< {} {} >".format(self.__class__.__name__, self.name) + + @property + def order(self): + """Order of item processing.""" + + order = self._data.get("order") + if order is None: + return self.default_order + return order + + @property + def scene_identifier(self): + return self._scene_identifier + + @property + def finished(self): + """Item was already processed.""" + + return self._state == 2 + + @property + def in_progress(self): + """Processing is in progress.""" + + return self._state == 1 + + def set_in_progress(self): + """Change to in progress state.""" + + self._state = 1 + + def set_finished(self): + """Change to finished state.""" + + self._state = 2 + + def set_failed(self, exception): + self.add_error(str(exception)) + + def add_error(self, error): + """Set placeholder item as failed and mark it as finished.""" + + self._errors.append(error) + + def get_errors(self): + """Exception with which the placeholder process failed. 
+ + Gives ability to access the exception. + """ + + return self._errors + + +class PlaceholderLoadMixin(object): + """Mixin prepared for loading placeholder plugins. + + Implementation prepares options for placeholders with + 'get_load_plugin_options'. + + For placeholder population is implemented 'populate_load_placeholder'. + + PlaceholderItem can have implemented methods: + - 'load_failed' - called when loading of one representation failed + - 'load_succeed' - called when loading of one representation succeeded + """ + + def get_load_plugin_options(self, options=None): + """Unified attribute definitions for load placeholder. + + Common function for placeholder plugins used for loading of + repsentations. Use it in 'get_placeholder_options'. + + Args: + plugin (PlaceholderPlugin): Plugin used for loading of + representations. + options (Dict[str, Any]): Already available options which are used + as defaults for attributes. + + Returns: + List[AbtractAttrDef]: Attribute definitions common for load + plugins. + """ + + loaders_by_name = self.builder.get_loaders_by_name() + loader_items = [ + (loader_name, loader.label or loader_name) + for loader_name, loader in loaders_by_name.items() + ] + + loader_items = list(sorted(loader_items, key=lambda i: i[1])) + options = options or {} + return [ + attribute_definitions.UISeparatorDef(), + attribute_definitions.UILabelDef("Main attributes"), + attribute_definitions.UISeparatorDef(), + + attribute_definitions.EnumDef( + "builder_type", + label="Asset Builder Type", + default=options.get("builder_type"), + items=[ + ("context_asset", "Current asset"), + ("linked_asset", "Linked assets"), + ("all_assets", "All assets") + ], + tooltip=( + "Asset Builder Type\n" + "\nBuilder type describe what template loader will look" + " for." + "\ncontext_asset : Template loader will look for subsets" + " of current context asset (Asset bob will find asset)" + "\nlinked_asset : Template loader will look for assets" + " linked to current context asset." + "\nLinked asset are looked in database under" + " field \"inputLinks\"" + ) + ), + attribute_definitions.TextDef( + "family", + label="Family", + default=options.get("family"), + placeholder="model, look, ..." + ), + attribute_definitions.TextDef( + "representation", + label="Representation name", + default=options.get("representation"), + placeholder="ma, abc, ..." + ), + attribute_definitions.EnumDef( + "loader", + label="Loader", + default=options.get("loader"), + items=loader_items, + tooltip=( + "Loader" + "\nDefines what OpenPype loader will be used to" + " load assets." + "\nUseable loader depends on current host's loader list." + "\nField is case sensitive." + ) + ), + attribute_definitions.TextDef( + "loader_args", + label="Loader Arguments", + default=options.get("loader_args"), + placeholder='{"camera":"persp", "lights":True}', + tooltip=( + "Loader" + "\nDefines a dictionnary of arguments used to load assets." + "\nUseable arguments depend on current placeholder Loader." + "\nField should be a valid python dict." + " Anything else will be ignored." + ) + ), + attribute_definitions.NumberDef( + "order", + label="Order", + default=options.get("order") or 0, + decimals=0, + minimum=0, + maximum=999, + tooltip=( + "Order" + "\nOrder defines asset loading priority (0 to 999)" + "\nPriority rule is : \"lowest is first to load\"." 
+ ) + ), + attribute_definitions.UISeparatorDef(), + attribute_definitions.UILabelDef("Optional attributes"), + attribute_definitions.UISeparatorDef(), + attribute_definitions.TextDef( + "asset", + label="Asset filter", + default=options.get("asset"), + placeholder="regex filtering by asset name", + tooltip=( + "Filtering assets by matching field regex to asset's name" + ) + ), + attribute_definitions.TextDef( + "subset", + label="Subset filter", + default=options.get("subset"), + placeholder="regex filtering by subset name", + tooltip=( + "Filtering assets by matching field regex to subset's name" + ) + ), + attribute_definitions.TextDef( + "hierarchy", + label="Hierarchy filter", + default=options.get("hierarchy"), + placeholder="regex filtering by asset's hierarchy", + tooltip=( + "Filtering assets by matching field asset's hierarchy" + ) + ) + ] + + def parse_loader_args(self, loader_args): + """Helper function to parse string of loader arugments. + + Empty dictionary is returned if conversion fails. + + Args: + loader_args (str): Loader args filled by user. + + Returns: + Dict[str, Any]: Parsed arguments used as dictionary. + """ + + if not loader_args: + return {} + + try: + parsed_args = eval(loader_args) + if isinstance(parsed_args, dict): + return parsed_args + + except Exception as err: + print( + "Error while parsing loader arguments '{}'.\n{}: {}\n\n" + "Continuing with default arguments. . .".format( + loader_args, err.__class__.__name__, err)) + + return {} + + def _get_representations(self, placeholder): + """Prepared query of representations based on load options. + + This function is directly connected to options defined in + 'get_load_plugin_options'. + + Note: + This returns all representation documents from all versions of + matching subset. To filter for last version use + '_reduce_last_version_repre_docs'. + + Args: + placeholder (PlaceholderItem): Item which should be populated. + + Returns: + List[Dict[str, Any]]: Representation documents matching filters + from placeholder data. 
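+
+        Example:
+            Sketch of placeholder data this query is built from; the values
+            are illustrative only:
+
+                {
+                    "builder_type": "context_asset",
+                    "family": "model",
+                    "representation": "abc",
+                    "asset": ".*",
+                    "subset": ".*",
+                    "hierarchy": ".*"
+                }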
+ """ + + project_name = self.builder.project_name + current_asset_doc = self.builder.current_asset_doc + linked_asset_docs = self.builder.linked_asset_docs + + builder_type = placeholder.data["builder_type"] + if builder_type == "context_asset": + context_filters = { + "asset": [current_asset_doc["name"]], + "subset": [re.compile(placeholder.data["subset"])], + "hierarchy": [re.compile(placeholder.data["hierarchy"])], + "representation": [placeholder.data["representation"]], + "family": [placeholder.data["family"]] + } + + elif builder_type != "linked_asset": + context_filters = { + "asset": [re.compile(placeholder.data["asset"])], + "subset": [re.compile(placeholder.data["subset"])], + "hierarchy": [re.compile(placeholder.data["hierarchy"])], + "representation": [placeholder.data["representation"]], + "family": [placeholder.data["family"]] + } + + else: + asset_regex = re.compile(placeholder.data["asset"]) + linked_asset_names = [] + for asset_doc in linked_asset_docs: + asset_name = asset_doc["name"] + if asset_regex.match(asset_name): + linked_asset_names.append(asset_name) + + context_filters = { + "asset": linked_asset_names, + "subset": [re.compile(placeholder.data["subset"])], + "hierarchy": [re.compile(placeholder.data["hierarchy"])], + "representation": [placeholder.data["representation"]], + "family": [placeholder.data["family"]], + } + + return list(get_representations( + project_name, + context_filters=context_filters + )) + + def _before_repre_load(self, placeholder, representation): + """Can be overriden. Is called before representation is loaded.""" + + pass + + def _reduce_last_version_repre_docs(self, representations): + """Reduce representations to last verison.""" + + mapping = {} + for repre_doc in representations: + repre_context = repre_doc["context"] + + asset_name = repre_context["asset"] + subset_name = repre_context["subset"] + version = repre_context.get("version", -1) + + if asset_name not in mapping: + mapping[asset_name] = {} + + subset_mapping = mapping[asset_name] + if subset_name not in subset_mapping: + subset_mapping[subset_name] = collections.defaultdict(list) + + version_mapping = subset_mapping[subset_name] + version_mapping[version].append(repre_doc) + + output = [] + for subset_mapping in mapping.values(): + for version_mapping in subset_mapping.values(): + last_version = tuple(sorted(version_mapping.keys()))[-1] + output.extend(version_mapping[last_version]) + return output + + def populate_load_placeholder(self, placeholder, ignore_repre_ids=None): + """Load placeholder is goind to load matching representations. + + Note: + Ignore repre ids is to avoid loading the same representation again + on load. But the representation can be loaded with different loader + and there could be published new version of matching subset for the + representation. We should maybe expect containers. + + Also import loaders don't have containers at all... + + Args: + placeholder (PlaceholderItem): Placeholder item with information + about requested representations. + ignore_repre_ids (Iterable[Union[str, ObjectId]]): Representation + ids that should be skipped. 
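+
+        Example:
+            Meant to be called from a plugin which also inherits from
+            'PlaceholderPlugin', typically inside its 'populate_placeholder'
+            (sketch):
+
+                def populate_placeholder(self, placeholder):
+                    self.populate_load_placeholder(placeholder)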
+ """ + + if ignore_repre_ids is None: + ignore_repre_ids = set() + + # TODO check loader existence + loader_name = placeholder.data["loader"] + loader_args = placeholder.data["loader_args"] + + placeholder_representations = self._get_representations(placeholder) + + filtered_representations = [] + for representation in self._reduce_last_version_repre_docs( + placeholder_representations + ): + repre_id = str(representation["_id"]) + if repre_id not in ignore_repre_ids: + filtered_representations.append(representation) + + if not filtered_representations: + self.log.info(( + "There's no representation for this placeholder: {}" + ).format(placeholder.scene_identifier)) + return + + repre_load_contexts = get_contexts_for_repre_docs( + self.project_name, filtered_representations + ) + loaders_by_name = self.builder.get_loaders_by_name() + for repre_load_context in repre_load_contexts.values(): + representation = repre_load_context["representation"] + repre_context = representation["context"] + self._before_repre_load( + placeholder, representation + ) + self.log.info( + "Loading {} from {} with loader {}\n" + "Loader arguments used : {}".format( + repre_context["subset"], + repre_context["asset"], + loader_name, + loader_args + ) + ) + try: + container = load_with_repre_context( + loaders_by_name[loader_name], + repre_load_context, + options=self.parse_loader_args(loader_args) + ) + + except Exception: + failed = True + self.load_failed(placeholder, representation) + + else: + failed = False + self.load_succeed(placeholder, container) + self.cleanup_placeholder(placeholder, failed) + + def load_failed(self, placeholder, representation): + if hasattr(placeholder, "load_failed"): + placeholder.load_failed(representation) + + def load_succeed(self, placeholder, container): + if hasattr(placeholder, "load_succeed"): + placeholder.load_succeed(container) + + def cleanup_placeholder(self, placeholder, failed): + """Cleanup placeholder after load of single representation. + + Can be called multiple times during placeholder item populating and is + called even if loading failed. + + Args: + placeholder (PlaceholderItem): Item which was just used to load + representation. + failed (bool): Loading of representation failed. + """ + + pass + + +class LoadPlaceholderItem(PlaceholderItem): + """PlaceholderItem for plugin which is loading representations. + + Connected to 'PlaceholderLoadMixin'. 
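+
+    Example:
+        Plugins using 'PlaceholderLoadMixin' are expected to create and
+        collect items of this class (or a subclass). Sketch, where 'node_id'
+        and 'placeholder_data' are host-specific:
+
+            item = LoadPlaceholderItem(node_id, placeholder_data, self)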
+ """ + + def __init__(self, *args, **kwargs): + super(LoadPlaceholderItem, self).__init__(*args, **kwargs) + self._failed_representations = [] + + def get_errors(self): + if not self._failed_representations: + return [] + message = ( + "Failed to load {} representations using Loader {}" + ).format( + len(self._failed_representations), + self.data["loader"] + ) + return [message] + + def load_failed(self, representation): + self._failed_representations.append(representation) diff --git a/openpype/plugin.py b/openpype/plugin.py index bb9bc2ff85..7e906b4451 100644 --- a/openpype/plugin.py +++ b/openpype/plugin.py @@ -1,24 +1,91 @@ -import tempfile -import os +import functools +import warnings + import pyblish.api +# New location of orders: openpype.pipeline.publish.constants +# - can be imported as +# 'from openpype.pipeline.publish import ValidatePipelineOrder' ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05 ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1 ValidateSceneOrder = pyblish.api.ValidatorOrder + 0.2 ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3 +class PluginDeprecatedWarning(DeprecationWarning): + pass + + +def _deprecation_warning(item_name, warning_message): + warnings.simplefilter("always", PluginDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(item_name, warning_message), + category=PluginDeprecatedWarning, + stacklevel=4 + ) + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + _deprecation_warning(decorated_func.__name__, warning_message) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + +# Classes just inheriting from pyblish classes +# - seems to be unused in code (not 100% sure) +# - they should be removed but because it is not clear if they're used +# we'll keep then and log deprecation warning +# Deprecated since 3.14.* will be removed in 3.16.* class ContextPlugin(pyblish.api.ContextPlugin): - def process(cls, *args, **kwargs): - super(ContextPlugin, cls).process(cls, *args, **kwargs) + def __init__(self, *args, **kwargs): + _deprecation_warning( + "openpype.plugin.ContextPlugin", + " Please replace your usage with 'pyblish.api.ContextPlugin'." + ) + super(ContextPlugin, self).__init__(*args, **kwargs) +# Deprecated since 3.14.* will be removed in 3.16.* class InstancePlugin(pyblish.api.InstancePlugin): - def process(cls, *args, **kwargs): - super(InstancePlugin, cls).process(cls, *args, **kwargs) + def __init__(self, *args, **kwargs): + _deprecation_warning( + "openpype.plugin.ContextPlugin", + " Please replace your usage with 'pyblish.api.InstancePlugin'." + ) + super(InstancePlugin, self).__init__(*args, **kwargs) -class Extractor(InstancePlugin): +class Extractor(pyblish.api.InstancePlugin): """Extractor base class. 
The extractor base class implements a "staging_dir" function used to @@ -36,17 +103,13 @@ class Extractor(InstancePlugin): Upon calling this method the staging directory is stored inside the instance.data['stagingDir'] """ - staging_dir = instance.data.get('stagingDir', None) - if not staging_dir: - staging_dir = os.path.normpath( - tempfile.mkdtemp(prefix="pyblish_tmp_") - ) - instance.data['stagingDir'] = staging_dir + from openpype.pipeline.publish import get_instance_staging_dir - return staging_dir + return get_instance_staging_dir(instance) +@deprecated("openpype.pipeline.publish.context_plugin_should_run") def contextplugin_should_run(plugin, context): """Return whether the ContextPlugin should run on the given context. @@ -56,30 +119,10 @@ def contextplugin_should_run(plugin, context): This actually checks it correctly and returns whether it should run. + Deprecated: + Since 3.14.* will be removed in 3.16.* or later. """ - required = set(plugin.families) - # When no filter always run - if "*" in required: - return True + from openpype.pipeline.publish import context_plugin_should_run - for instance in context: - - # Ignore inactive instances - if (not instance.data.get("publish", True) or - not instance.data.get("active", True)): - continue - - families = instance.data.get("families", []) - if any(f in required for f in families): - return True - - family = instance.data.get("family") - if family and family in required: - return True - - return False - - -class ValidationException(Exception): - pass + return context_plugin_should_run(plugin, context) diff --git a/openpype/plugins/load/add_site.py b/openpype/plugins/load/add_site.py index 55fda55d17..ac931e41db 100644 --- a/openpype/plugins/load/add_site.py +++ b/openpype/plugins/load/add_site.py @@ -1,6 +1,6 @@ +from openpype.client import get_linked_representation_id from openpype.modules import ModulesManager from openpype.pipeline import load -from openpype.lib.avalon_context import get_linked_ids_for_representations from openpype.modules.sync_server.utils import SiteAlreadyPresentError @@ -45,9 +45,11 @@ class AddSyncSite(load.LoaderPlugin): force=True) if family == "workfile": - links = get_linked_ids_for_representations(project_name, - [repre_id], - link_type="reference") + links = get_linked_representation_id( + project_name, + repre_id=repre_id, + link_type="reference" + ) for link_repre_id in links: try: self.sync_server.add_site(project_name, link_repre_id, diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py index c3e9e9fa0a..b7ac015268 100644 --- a/openpype/plugins/load/delete_old_versions.py +++ b/openpype/plugins/load/delete_old_versions.py @@ -4,14 +4,18 @@ import uuid import clique from pymongo import UpdateOne -import ftrack_api import qargparse from Qt import QtWidgets, QtCore from openpype import style -from openpype.pipeline import load, AvalonMongoDB -from openpype.lib import StringTemplate -from openpype.api import Anatomy +from openpype.client import get_versions, get_representations +from openpype.modules import ModulesManager +from openpype.lib import format_file_size +from openpype.pipeline import load, AvalonMongoDB, Anatomy +from openpype.pipeline.load import ( + get_representation_path_with_anatomy, + InvalidRepresentationContext, +) class DeleteOldVersions(load.SubsetLoaderPlugin): @@ -38,13 +42,6 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): ) ] - def sizeof_fmt(self, num, suffix='B'): - for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 
'Zi']: - if abs(num) < 1024.0: - return "%3.1f%s%s" % (num, unit, suffix) - num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) - def delete_whole_dir_paths(self, dir_paths, delete=True): size = 0 @@ -80,27 +77,28 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): def path_from_representation(self, representation, anatomy): try: - template = representation["data"]["template"] - + context = representation["context"] except KeyError: return (None, None) + try: + path = get_representation_path_with_anatomy( + representation, anatomy + ) + except InvalidRepresentationContext: + return (None, None) + sequence_path = None - try: - context = representation["context"] - context["root"] = anatomy.roots - path = str(StringTemplate.format_template(template, context)) - if "frame" in context: - context["frame"] = self.sequence_splitter - sequence_path = os.path.normpath(str( - StringTemplate.format_template(template, context) - )) + if "frame" in context: + context["frame"] = self.sequence_splitter + sequence_path = get_representation_path_with_anatomy( + representation, anatomy + ) - except KeyError: - # Template references unavailable data - return (None, None) + if sequence_path: + sequence_path = sequence_path.normalized() - return (os.path.normpath(path), sequence_path) + return (path.normalized(), sequence_path) def delete_only_repre_files(self, dir_paths, file_paths, delete=True): size = 0 @@ -198,18 +196,10 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): def get_data(self, context, versions_count): subset = context["subset"] asset = context["asset"] - anatomy = Anatomy(context["project"]["name"]) + project_name = context["project"]["name"] + anatomy = Anatomy(project_name) - self.dbcon = AvalonMongoDB() - self.dbcon.Session["AVALON_PROJECT"] = context["project"]["name"] - self.dbcon.install() - - versions = list( - self.dbcon.find({ - "type": "version", - "parent": {"$in": [subset["_id"]]} - }) - ) + versions = list(get_versions(project_name, subset_ids=[subset["_id"]])) versions_by_parent = collections.defaultdict(list) for ent in versions: @@ -268,10 +258,9 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): print(msg) return - repres = list(self.dbcon.find({ - "type": "representation", - "parent": {"$in": version_ids} - })) + repres = list(get_representations( + project_name, version_ids=version_ids + )) self.log.debug( "Collected representations to remove ({})".format(len(repres)) @@ -330,7 +319,7 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): return data - def main(self, data, remove_publish_folder): + def main(self, project_name, data, remove_publish_folder): # Size of files. size = 0 if not data: @@ -367,30 +356,70 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): )) if mongo_changes_bulk: - self.dbcon.bulk_write(mongo_changes_bulk) + dbcon = AvalonMongoDB() + dbcon.Session["AVALON_PROJECT"] = project_name + dbcon.install() + dbcon.bulk_write(mongo_changes_bulk) + dbcon.uninstall() - self.dbcon.uninstall() + self._ftrack_delete_versions(data) + + return size + + def _ftrack_delete_versions(self, data): + """Delete version on ftrack. + + Handling of ftrack logic in this plugin is not ideal. But in OP3 it is + almost impossible to solve the issue other way. + + Note: + Asset versions on ftrack are not deleted but marked as + "not published" which cause that they're invisible. + + Args: + data (dict): Data sent to subset loader with full context. 
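+                The dictionary is expected to contain at least the "asset",
+                "subset" and "versions" documents (see 'get_data').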
+ """ + + # First check for ftrack id on asset document + # - skip if ther is none + asset_ftrack_id = data["asset"]["data"].get("ftrackId") + if not asset_ftrack_id: + self.log.info(( + "Asset does not have filled ftrack id. Skipped delete" + " of ftrack version." + )) + return + + # Check if ftrack module is enabled + modules_manager = ModulesManager() + ftrack_module = modules_manager.modules_by_name.get("ftrack") + if not ftrack_module or not ftrack_module.enabled: + return + + import ftrack_api + + session = ftrack_api.Session() + subset_name = data["subset"]["name"] + versions = { + '"{}"'.format(version_doc["name"]) + for version_doc in data["versions"] + } + asset_versions = session.query( + ( + "select id, is_published from AssetVersion where" + " asset.parent.id is \"{}\"" + " and asset.name is \"{}\"" + " and version in ({})" + ).format( + asset_ftrack_id, + subset_name, + ",".join(versions) + ) + ).all() # Set attribute `is_published` to `False` on ftrack AssetVersions - session = ftrack_api.Session() - query = ( - "AssetVersion where asset.parent.id is \"{}\"" - " and asset.name is \"{}\"" - " and version is \"{}\"" - ) - for v in data["versions"]: - try: - ftrack_version = session.query( - query.format( - data["asset"]["data"]["ftrackId"], - data["subset"]["name"], - v["name"] - ) - ).one() - except ftrack_api.exception.NoResultFoundError: - continue - - ftrack_version["is_published"] = False + for asset_version in asset_versions: + asset_version["is_published"] = False try: session.commit() @@ -403,8 +432,6 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): self.log.error(msg) self.message(msg) - return size - def load(self, contexts, name=None, namespace=None, options=None): try: size = 0 @@ -423,10 +450,11 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): if not data: continue - size += self.main(data, remove_publish_folder) + project_name = context["project"]["name"] + size += self.main(project_name, data, remove_publish_folder) print("Progressing {}/{}".format(count + 1, len(contexts))) - msg = "Total size of files: " + self.sizeof_fmt(size) + msg = "Total size of files: {}".format(format_file_size(size)) self.log.info(msg) self.message(msg) @@ -449,7 +477,7 @@ class CalculateOldVersions(DeleteOldVersions): ) ] - def main(self, data, remove_publish_folder): + def main(self, project_name, data, remove_publish_folder): size = 0 if not data: diff --git a/openpype/plugins/load/delivery.py b/openpype/plugins/load/delivery.py index 7df07e3f64..89c24f2402 100644 --- a/openpype/plugins/load/delivery.py +++ b/openpype/plugins/load/delivery.py @@ -3,18 +3,21 @@ from collections import defaultdict from Qt import QtWidgets, QtCore, QtGui -from openpype.pipeline import load, AvalonMongoDB -from openpype.api import Anatomy, config +from openpype.client import get_representations +from openpype.pipeline import load, Anatomy from openpype import resources, style -from openpype.lib.delivery import ( - sizeof_fmt, - path_from_representation, +from openpype.lib import ( + format_file_size, + collect_frames, + get_datetime_data, +) +from openpype.pipeline.load import get_representation_path_with_anatomy +from openpype.pipeline.delivery import ( get_format_dict, check_destination_path, - process_single_file, - process_sequence, - collect_frames + deliver_single_file, + deliver_sequence, ) @@ -68,17 +71,13 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): self.setStyleSheet(style.load_stylesheet()) - project = contexts[0]["project"]["name"] - self.anatomy = Anatomy(project) + 
project_name = contexts[0]["project"]["name"] + self.anatomy = Anatomy(project_name) self._representations = None self.log = log self.currently_uploaded = 0 - self.dbcon = AvalonMongoDB() - self.dbcon.Session["AVALON_PROJECT"] = project - self.dbcon.install() - - self._set_representations(contexts) + self._set_representations(project_name, contexts) dropdown = QtWidgets.QComboBox() self.templates = self._get_templates(self.anatomy) @@ -163,14 +162,16 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): selected_repres = self._get_selected_repres() - datetime_data = config.get_datetime_data() + datetime_data = get_datetime_data() template_name = self.dropdown.currentText() format_dict = get_format_dict(self.anatomy, self.root_line_edit.text()) for repre in self._representations: if repre["name"] not in selected_repres: continue - repre_path = path_from_representation(repre, self.anatomy) + repre_path = get_representation_path_with_anatomy( + repre, self.anatomy + ) anatomy_data = copy.deepcopy(repre["context"]) new_report_items = check_destination_path(str(repre["_id"]), @@ -205,7 +206,7 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): args[0] = src_path if frame: anatomy_data["frame"] = frame - new_report_items, uploaded = process_single_file(*args) + new_report_items, uploaded = deliver_single_file(*args) report_items.update(new_report_items) self._update_progress(uploaded) else: # fallback for Pype2 and representations without files @@ -214,9 +215,9 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): repre["context"]["frame"] = len(str(frame)) * "#" if not frame: - new_report_items, uploaded = process_single_file(*args) + new_report_items, uploaded = deliver_single_file(*args) else: - new_report_items, uploaded = process_sequence(*args) + new_report_items, uploaded = deliver_sequence(*args) report_items.update(new_report_items) self._update_progress(uploaded) @@ -238,13 +239,12 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): return templates - def _set_representations(self, contexts): + def _set_representations(self, project_name, contexts): version_ids = [context["version"]["_id"] for context in contexts] - repres = list(self.dbcon.find({ - "type": "representation", - "parent": {"$in": version_ids} - })) + repres = list(get_representations( + project_name, version_ids=version_ids + )) self._representations = repres @@ -267,8 +267,9 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): def _prepare_label(self): """Provides text with no of selected files and their size.""" - label = "{} files, size {}".format(self.files_selected, - sizeof_fmt(self.size_selected)) + label = "{} files, size {}".format( + self.files_selected, + format_file_size(self.size_selected)) return label def _get_selected_repres(self): diff --git a/openpype/plugins/load/open_djv.py b/openpype/plugins/load/open_djv.py index 273c77c93f..bc5fd64b87 100644 --- a/openpype/plugins/load/open_djv.py +++ b/openpype/plugins/load/open_djv.py @@ -1,5 +1,5 @@ import os -from openpype.api import ApplicationManager +from openpype.lib import ApplicationManager from openpype.pipeline import load diff --git a/openpype/plugins/load/open_file.py b/openpype/plugins/load/open_file.py index f21cd07c7f..00b2ecd7c5 100644 --- a/openpype/plugins/load/open_file.py +++ b/openpype/plugins/load/open_file.py @@ -15,8 +15,8 @@ def open(filepath): subprocess.call(('xdg-open', filepath)) -class Openfile(load.LoaderPlugin): - """Open Image Sequence with system default""" +class OpenFile(load.LoaderPlugin): + """Open Image Sequence or Video with system 
default""" families = ["render2d"] representations = ["*"] @@ -27,32 +27,10 @@ class Openfile(load.LoaderPlugin): color = "orange" def load(self, context, name, namespace, data): - import clique - directory = os.path.dirname(self.fname) - pattern = clique.PATTERNS["frames"] + path = self.fname + if not os.path.exists(path): + raise RuntimeError("File not found: {}".format(path)) - files = os.listdir(directory) - representation = context["representation"] - - ext = representation["name"] - path = representation["data"]["path"] - - if ext in ["#"]: - collections, remainder = clique.assemble(files, - patterns=[pattern], - minimum_items=1) - - seqeunce = collections[0] - - first_image = list(seqeunce)[0] - filepath = os.path.normpath(os.path.join(directory, first_image)) - else: - file = [f for f in files - if ext in f - if "#" not in f][0] - filepath = os.path.normpath(os.path.join(directory, file)) - - self.log.info("Opening : {}".format(filepath)) - - open(filepath) + self.log.info("Opening : {}".format(path)) + open(path) diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py index 0794adfb67..55ce8e06f4 100644 --- a/openpype/plugins/publish/collect_anatomy_context_data.py +++ b/openpype/plugins/publish/collect_anatomy_context_data.py @@ -15,10 +15,7 @@ Provides: import json import pyblish.api -from openpype.lib import ( - get_system_general_anatomy_data -) -from openpype.pipeline import legacy_io +from openpype.pipeline.template_data import get_template_data class CollectAnatomyContextData(pyblish.api.ContextPlugin): @@ -33,11 +30,15 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): "asset": "AssetName", "hierarchy": "path/to/asset", "task": "Working", + "user": "MeDespicable", + # Duplicated entry "username": "MeDespicable", + # Current host name + "app": "maya" + *** OPTIONAL *** - "app": "maya" # Current application base name - + mutliple keys from `datetimeData` # see it's collector + + mutliple keys from `datetimeData` (See it's collector) } """ @@ -45,52 +46,26 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): label = "Collect Anatomy Context Data" def process(self, context): + host_name = context.data["hostName"] + system_settings = context.data["system_settings"] project_entity = context.data["projectEntity"] - context_data = { - "project": { - "name": project_entity["name"], - "code": project_entity["data"].get("code") - }, - "username": context.data["user"], - "app": context.data["hostName"] - } - - context.data["anatomyData"] = context_data - - # add system general settings anatomy data - system_general_data = get_system_general_anatomy_data() - context_data.update(system_general_data) - - datetime_data = context.data.get("datetimeData") or {} - context_data.update(datetime_data) - asset_entity = context.data.get("assetEntity") + task_name = None if asset_entity: - task_name = legacy_io.Session["AVALON_TASK"] + task_name = context.data["task"] - asset_tasks = asset_entity["data"]["tasks"] - task_type = asset_tasks.get(task_name, {}).get("type") + anatomy_data = get_template_data( + project_entity, asset_entity, task_name, host_name, system_settings + ) + anatomy_data.update(context.data.get("datetimeData") or {}) - project_task_types = project_entity["config"]["tasks"] - task_code = project_task_types.get(task_type, {}).get("short_name") + username = context.data["user"] + anatomy_data["user"] = username + # Backwards compatibility for 'username' key + anatomy_data["username"] = 
username - asset_parents = asset_entity["data"]["parents"] - hierarchy = "/".join(asset_parents) - - parent_name = project_entity["name"] - if asset_parents: - parent_name = asset_parents[-1] - - context_data.update({ - "asset": asset_entity["name"], - "parent": parent_name, - "hierarchy": hierarchy, - "task": { - "name": task_name, - "type": task_type, - "short": task_code, - } - }) + # Store + context.data["anatomyData"] = anatomy_data self.log.info("Global anatomy Data collected") - self.log.debug(json.dumps(context_data, indent=4)) + self.log.debug(json.dumps(anatomy_data, indent=4)) diff --git a/openpype/plugins/publish/collect_anatomy_instance_data.py b/openpype/plugins/publish/collect_anatomy_instance_data.py index 6a6ea170b5..f67d3373d9 100644 --- a/openpype/plugins/publish/collect_anatomy_instance_data.py +++ b/openpype/plugins/publish/collect_anatomy_instance_data.py @@ -27,6 +27,11 @@ import collections import pyblish.api +from openpype.client import ( + get_assets, + get_subsets, + get_last_versions +) from openpype.pipeline import legacy_io @@ -44,13 +49,15 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): def process(self, context): self.log.info("Collecting anatomy data for all instances.") - self.fill_missing_asset_docs(context) - self.fill_latest_versions(context) + project_name = legacy_io.active_project() + self.fill_missing_asset_docs(context, project_name) + self.fill_instance_data_from_asset(context) + self.fill_latest_versions(context, project_name) self.fill_anatomy_data(context) self.log.info("Anatomy Data collection finished.") - def fill_missing_asset_docs(self, context): + def fill_missing_asset_docs(self, context, project_name): self.log.debug("Qeurying asset documents for instances.") context_asset_doc = context.data.get("assetEntity") @@ -84,10 +91,8 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): self.log.debug("Querying asset documents with names: {}".format( ", ".join(["\"{}\"".format(name) for name in asset_names]) )) - asset_docs = legacy_io.find({ - "type": "asset", - "name": {"$in": asset_names} - }) + + asset_docs = get_assets(project_name, asset_names=asset_names) asset_docs_by_name = { asset_doc["name"]: asset_doc for asset_doc in asset_docs @@ -111,7 +116,24 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): "Not found asset documents with names \"{}\"." ).format(joined_asset_names)) - def fill_latest_versions(self, context): + def fill_instance_data_from_asset(self, context): + for instance in context: + asset_doc = instance.data.get("assetEntity") + if not asset_doc: + continue + + asset_data = asset_doc["data"] + for key in ( + "fps", + "frameStart", + "frameEnd", + "handleStart", + "handleEnd", + ): + if key not in instance.data and key in asset_data: + instance.data[key] = asset_data[key] + + def fill_latest_versions(self, context, project_name): """Try to find latest version for each instance's subset. Key "latestVersion" is always set to latest version or `None`. 
@@ -126,7 +148,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): self.log.debug("Qeurying latest versions for instances.") hierarchy = {} - subset_filters = [] + names_by_asset_ids = collections.defaultdict(set) for instance in context: # Make sure `"latestVersion"` key is set latest_version = instance.data.get("latestVersion") @@ -147,67 +169,33 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): if subset_name not in hierarchy[asset_id]: hierarchy[asset_id][subset_name] = [] hierarchy[asset_id][subset_name].append(instance) - subset_filters.append({ - "parent": asset_id, - "name": subset_name - }) + names_by_asset_ids[asset_id].add(subset_name) subset_docs = [] - if subset_filters: - subset_docs = list(legacy_io.find({ - "type": "subset", - "$or": subset_filters - })) + if names_by_asset_ids: + subset_docs = list(get_subsets( + project_name, names_by_asset_ids=names_by_asset_ids + )) subset_ids = [ subset_doc["_id"] for subset_doc in subset_docs ] - last_version_by_subset_id = self._query_last_versions(subset_ids) + last_version_docs_by_subset_id = get_last_versions( + project_name, subset_ids, fields=["name"] + ) for subset_doc in subset_docs: subset_id = subset_doc["_id"] - last_version = last_version_by_subset_id.get(subset_id) - if last_version is None: + last_version_doc = last_version_docs_by_subset_id.get(subset_id) + if last_version_docs_by_subset_id is None: continue asset_id = subset_doc["parent"] subset_name = subset_doc["name"] _instances = hierarchy[asset_id][subset_name] for _instance in _instances: - _instance.data["latestVersion"] = last_version - - def _query_last_versions(self, subset_ids): - """Retrieve all latest versions for entered subset_ids. - - Args: - subset_ids (list): List of subset ids with type `ObjectId`. - - Returns: - dict: Key is subset id and value is last version name. - """ - _pipeline = [ - # Find all versions of those subsets - {"$match": { - "type": "version", - "parent": {"$in": subset_ids} - }}, - # Sorting versions all together - {"$sort": {"name": 1}}, - # Group them by "parent", but only take the last - {"$group": { - "_id": "$parent", - "_version_id": {"$last": "$_id"}, - "name": {"$last": "$name"} - }} - ] - - last_version_by_subset_id = {} - for doc in legacy_io.aggregate(_pipeline): - subset_id = doc["_id"] - last_version_by_subset_id[subset_id] = doc["name"] - - return last_version_by_subset_id + _instance.data["latestVersion"] = last_version_doc["name"] def fill_anatomy_data(self, context): self.log.debug("Storing anatomy data to instance data.") diff --git a/openpype/plugins/publish/collect_anatomy_object.py b/openpype/plugins/publish/collect_anatomy_object.py index 2c87918728..725cae2b14 100644 --- a/openpype/plugins/publish/collect_anatomy_object.py +++ b/openpype/plugins/publish/collect_anatomy_object.py @@ -1,29 +1,32 @@ """Collect Anatomy object. Requires: - os.environ -> AVALON_PROJECT + context -> projectName Provides: - context -> anatomy (pype.api.Anatomy) + context -> anatomy (openpype.pipeline.anatomy.Anatomy) """ -import os -from openpype.api import Anatomy + import pyblish.api +from openpype.pipeline import Anatomy, KnownPublishError class CollectAnatomyObject(pyblish.api.ContextPlugin): - """Collect Anatomy object into Context""" + """Collect Anatomy object into Context. + + Order offset could be changed to '-0.45'. 
+ """ order = pyblish.api.CollectorOrder - 0.4 label = "Collect Anatomy Object" def process(self, context): - project_name = os.environ.get("AVALON_PROJECT") + project_name = context.data.get("projectName") if project_name is None: - raise AssertionError( - "Environment `AVALON_PROJECT` is not set." + raise KnownPublishError(( + "Project name is not set in 'projectName'." "Could not initialize project's Anatomy." - ) + )) context.data["anatomy"] = Anatomy(project_name) diff --git a/openpype/plugins/publish/collect_audio.py b/openpype/plugins/publish/collect_audio.py new file mode 100644 index 0000000000..7d53b24e54 --- /dev/null +++ b/openpype/plugins/publish/collect_audio.py @@ -0,0 +1,105 @@ +import pyblish.api + +from openpype.client import ( + get_last_version_by_subset_name, + get_representations, +) +from openpype.pipeline import ( + legacy_io, + get_representation_path, +) + + +class CollectAudio(pyblish.api.InstancePlugin): + """Collect asset's last published audio. + + The audio subset name searched for is defined in: + project settings > Collect Audio + """ + label = "Collect Asset Audio" + order = pyblish.api.CollectorOrder + 0.1 + families = ["review"] + hosts = [ + "nuke", + "maya", + "shell", + "hiero", + "premiere", + "harmony", + "traypublisher", + "standalonepublisher", + "fusion", + "tvpaint", + "resolve", + "webpublisher", + "aftereffects", + "flame", + "unreal" + ] + + audio_subset_name = "audioMain" + + def process(self, instance): + if instance.data.get("audio"): + self.log.info( + "Skipping Audio collecion. It is already collected" + ) + return + + # Add audio to instance if exists. + self.log.info(( + "Searching for audio subset '{subset}'" + " in asset '{asset}'" + ).format( + subset=self.audio_subset_name, + asset=instance.data["asset"] + )) + + repre_doc = self._get_repre_doc(instance) + + # Add audio to instance if representation was found + if repre_doc: + instance.data["audio"] = [{ + "offset": 0, + "filename": get_representation_path(repre_doc) + }] + self.log.info("Audio Data added to instance ...") + + def _get_repre_doc(self, instance): + cache = instance.context.data.get("__cache_asset_audio") + if cache is None: + cache = {} + instance.context.data["__cache_asset_audio"] = cache + asset_name = instance.data["asset"] + + # first try to get it from cache + if asset_name in cache: + return cache[asset_name] + + project_name = legacy_io.active_project() + + # Find latest versions document + last_version_doc = get_last_version_by_subset_name( + project_name, + self.audio_subset_name, + asset_name=asset_name, + fields=["_id"] + ) + + repre_doc = None + if last_version_doc: + # Try to find it's representation (Expected there is only one) + repre_docs = list(get_representations( + project_name, version_ids=[last_version_doc["_id"]] + )) + if not repre_docs: + self.log.warning( + "Version document does not contain any representations" + ) + else: + repre_doc = repre_docs[0] + + # update cache + cache[asset_name] = repre_doc + + return repre_doc diff --git a/openpype/plugins/publish/collect_cleanup_keys.py b/openpype/plugins/publish/collect_cleanup_keys.py index 635b038387..b9cd1a9fc9 100644 --- a/openpype/plugins/publish/collect_cleanup_keys.py +++ b/openpype/plugins/publish/collect_cleanup_keys.py @@ -14,7 +14,7 @@ class CollectCleanupKeys(pyblish.api.ContextPlugin): """Prepare keys for 'ExplicitCleanUp' plugin.""" label = "Collect Cleanup Keys" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 def process(self, context): 
context.data["cleanupFullPaths"] = [] diff --git a/openpype/plugins/publish/collect_avalon_entities.py b/openpype/plugins/publish/collect_context_entities.py similarity index 72% rename from openpype/plugins/publish/collect_avalon_entities.py rename to openpype/plugins/publish/collect_context_entities.py index 3e7843407f..31fbeb5dbd 100644 --- a/openpype/plugins/publish/collect_avalon_entities.py +++ b/openpype/plugins/publish/collect_context_entities.py @@ -1,37 +1,39 @@ """Collect Anatomy and global anatomy data. Requires: - session -> AVALON_PROJECT, AVALON_ASSET + session -> AVALON_ASSET + context -> projectName + context -> asset + context -> task Provides: - context -> projectEntity - project entity from database - context -> assetEntity - asset entity from database + context -> projectEntity - Project document from database. + context -> assetEntity - Asset document from database only if 'asset' is + set in context. """ import pyblish.api -from openpype.pipeline import legacy_io +from openpype.client import get_project, get_asset_by_name +from openpype.pipeline import KnownPublishError -class CollectAvalonEntities(pyblish.api.ContextPlugin): - """Collect Anatomy into Context""" +class CollectContextEntities(pyblish.api.ContextPlugin): + """Collect entities into Context.""" order = pyblish.api.CollectorOrder - 0.1 - label = "Collect Avalon Entities" + label = "Collect Context Entities" def process(self, context): - legacy_io.install() - project_name = legacy_io.Session["AVALON_PROJECT"] - asset_name = legacy_io.Session["AVALON_ASSET"] - task_name = legacy_io.Session["AVALON_TASK"] + project_name = context.data["projectName"] + asset_name = context.data["asset"] + task_name = context.data["task"] - project_entity = legacy_io.find_one({ - "type": "project", - "name": project_name - }) - assert project_entity, ( - "Project '{0}' was not found." - ).format(project_name) + project_entity = get_project(project_name) + if not project_entity: + raise KnownPublishError( + "Project '{0}' was not found.".format(project_name) + ) self.log.debug("Collected Project \"{}\"".format(project_entity)) context.data["projectEntity"] = project_entity @@ -39,11 +41,8 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): if not asset_name: self.log.info("Context is not set. 
Can't collect global data.") return - asset_entity = legacy_io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project_entity["_id"] - }) + + asset_entity = get_asset_by_name(project_name, asset_name) assert asset_entity, ( "No asset found by the name '{0}' in project '{1}'" ).format(asset_name, project_name) diff --git a/openpype/plugins/publish/collect_context_label.py b/openpype/plugins/publish/collect_context_label.py index 8cf71882aa..6cdeba8418 100644 --- a/openpype/plugins/publish/collect_context_label.py +++ b/openpype/plugins/publish/collect_context_label.py @@ -1,5 +1,6 @@ """ -Requires: +Optional: + context -> hostName (str) context -> currentFile (str) Provides: context -> label (str) @@ -16,16 +17,27 @@ class CollectContextLabel(pyblish.api.ContextPlugin): label = "Context Label" def process(self, context): + # Add ability to use custom context label + label = context.data.get("label") + if label: + self.log.debug("Context label is already set to \"{}\"".format( + label + )) + return - # Get last registered host - host = pyblish.api.registered_hosts()[-1] + host_name = context.data.get("hostName") + if not host_name: + host_name = pyblish.api.registered_hosts()[-1] + # Use host name as base for label + label = host_name.title() - # Get scene name from "currentFile" - path = context.data.get("currentFile") or "" - base = os.path.basename(path) + # Get scene name from "currentFile" and use basename as ending of label + path = context.data.get("currentFile") + if path: + label += " - {}".format(os.path.basename(path)) # Set label - label = "{host} - {scene}".format(host=host.title(), scene=base) - if host == "standalonepublisher": - label = host.title() context.data["label"] = label + self.log.debug("Context label is changed to \"{}\"".format( + label + )) diff --git a/openpype/plugins/publish/collect_current_context.py b/openpype/plugins/publish/collect_current_context.py new file mode 100644 index 0000000000..7e42700d7d --- /dev/null +++ b/openpype/plugins/publish/collect_current_context.py @@ -0,0 +1,47 @@ +""" +Provides: + context -> projectName (str) + context -> asset (str) + context -> task (str) +""" + +import pyblish.api +from openpype.pipeline import legacy_io + + +class CollectCurrentContext(pyblish.api.ContextPlugin): + """Collect project context into publish context data. + + Plugin does not override any value if is already set. + """ + + order = pyblish.api.CollectorOrder - 0.5 + label = "Collect Current context" + + def process(self, context): + # Make sure 'legacy_io' is intalled + legacy_io.install() + + # Check if values are already set + project_name = context.data.get("projectName") + asset_name = context.data.get("asset") + task_name = context.data.get("task") + if not project_name: + project_name = legacy_io.current_project() + context.data["projectName"] = project_name + + if not asset_name: + asset_name = legacy_io.Session.get("AVALON_ASSET") + context.data["asset"] = asset_name + + if not task_name: + task_name = legacy_io.Session.get("AVALON_TASK") + context.data["task"] = task_name + + # QUESTION should we be explicit with keys? 
(the same on instances) + # - 'asset' -> 'assetName' + # - 'task' -> 'taskName' + + self.log.info(( + "Collected project context\nProject: {}\nAsset: {}\nTask: {}" + ).format(project_name, asset_name, task_name)) diff --git a/openpype/plugins/publish/collect_current_pype_user.py b/openpype/plugins/publish/collect_current_pype_user.py index 1a52a59012..2d507ba292 100644 --- a/openpype/plugins/publish/collect_current_pype_user.py +++ b/openpype/plugins/publish/collect_current_pype_user.py @@ -1,5 +1,3 @@ -import os -import getpass import pyblish.api from openpype.lib import get_openpype_username diff --git a/openpype/plugins/publish/collect_datetime_data.py b/openpype/plugins/publish/collect_datetime_data.py index 1675ae1a98..b3178ca3d2 100644 --- a/openpype/plugins/publish/collect_datetime_data.py +++ b/openpype/plugins/publish/collect_datetime_data.py @@ -5,14 +5,14 @@ Provides: """ import pyblish.api -from openpype.api import config +from openpype.lib.dateutils import get_datetime_data class CollectDateTimeData(pyblish.api.ContextPlugin): - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 label = "Collect DateTime data" def process(self, context): key = "datetimeData" if key not in context.data: - context.data[key] = config.get_datetime_data() + context.data[key] = get_datetime_data() diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py index f6ead98809..ddb6908a4c 100644 --- a/openpype/plugins/publish/collect_from_create_context.py +++ b/openpype/plugins/publish/collect_from_create_context.py @@ -19,10 +19,29 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): if not create_context: return + thumbnail_paths_by_instance_id = ( + create_context.thumbnail_paths_by_instance_id + ) + context.data["thumbnailSource"] = ( + thumbnail_paths_by_instance_id.get(None) + ) + + project_name = create_context.project_name + if project_name: + context.data["projectName"] = project_name + for created_instance in create_context.instances: instance_data = created_instance.data_to_store() if instance_data["active"]: - self.create_instance(context, instance_data) + thumbnail_path = thumbnail_paths_by_instance_id.get( + created_instance.id + ) + self.create_instance( + context, + instance_data, + created_instance.transient_data, + thumbnail_path + ) # Update global data to context context.data.update(create_context.context_data_to_store()) @@ -34,7 +53,13 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): legacy_io.Session[key] = value os.environ[key] = value - def create_instance(self, context, in_data): + def create_instance( + self, + context, + in_data, + transient_data, + thumbnail_path + ): subset = in_data["subset"] # If instance data already contain families then use it instance_families = in_data.get("families") or [] @@ -44,15 +69,18 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): "subset": subset, "asset": in_data["asset"], "task": in_data["task"], - "label": subset, + "label": in_data.get("label") or subset, "name": subset, "family": in_data["family"], - "families": instance_families + "families": instance_families, + "representations": [], + "thumbnailSource": thumbnail_path }) for key, value in in_data.items(): if key not in instance.data: instance.data[key] = value + + instance.data["transientData"] = transient_data + self.log.info("collected instance: {}".format(instance.data)) self.log.info("parsing data: {}".format(in_data)) - - 
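For reference, the reworked create_instance() above fills explicit keys first, then copies any remaining created-instance keys only when they are not already set, and finally attaches the transient data. A minimal stand-alone sketch of that merge order, using plain dicts and illustrative names rather than the pyblish instance object:

    def build_instance_data(in_data, transient_data, thumbnail_path):
        # Explicit keys win; 'label' falls back to the subset name.
        subset = in_data["subset"]
        instance_data = {
            "subset": subset,
            "asset": in_data["asset"],
            "task": in_data["task"],
            "label": in_data.get("label") or subset,
            "name": subset,
            "family": in_data["family"],
            "families": in_data.get("families") or [],
            "representations": [],
            "thumbnailSource": thumbnail_path,
        }
        # Remaining keys from the created instance are copied only if missing.
        for key, value in in_data.items():
            instance_data.setdefault(key, value)
        # Transient data is attached last, under its own key.
        instance_data["transientData"] = transient_data
        return instance_data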
instance.data["representations"] = list() diff --git a/openpype/plugins/publish/collect_hierarchy.py b/openpype/plugins/publish/collect_hierarchy.py index a96d444be6..687397be8a 100644 --- a/openpype/plugins/publish/collect_hierarchy.py +++ b/openpype/plugins/publish/collect_hierarchy.py @@ -1,7 +1,5 @@ import pyblish.api -from openpype.pipeline import legacy_io - class CollectHierarchy(pyblish.api.ContextPlugin): """Collecting hierarchy from `parents`. @@ -20,7 +18,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): def process(self, context): temp_context = {} - project_name = legacy_io.Session["AVALON_PROJECT"] + project_name = context.data["projectName"] final_context = {} final_context[project_name] = {} final_context[project_name]['entity_type'] = 'Project' @@ -33,10 +31,6 @@ class CollectHierarchy(pyblish.api.ContextPlugin): family = instance.data["family"] families = instance.data["families"] - # filter out all unepropriate instances - if not instance.data["publish"]: - continue - # exclude other families then self.families with intersection if not set(self.families).intersection(set(families + [family])): continue @@ -62,7 +56,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): "frameEnd": instance.data["frameEnd"], "clipIn": instance.data["clipIn"], "clipOut": instance.data["clipOut"], - 'fps': instance.context.data["fps"], + "fps": instance.data["fps"], "resolutionWidth": instance.data["resolutionWidth"], "resolutionHeight": instance.data["resolutionHeight"], "pixelAspect": instance.data["pixelAspect"] diff --git a/openpype/plugins/publish/collect_input_representations_to_versions.py b/openpype/plugins/publish/collect_input_representations_to_versions.py new file mode 100644 index 0000000000..18a19bce80 --- /dev/null +++ b/openpype/plugins/publish/collect_input_representations_to_versions.py @@ -0,0 +1,47 @@ +import pyblish.api + +from bson.objectid import ObjectId + +from openpype.client import get_representations + + +class CollectInputRepresentationsToVersions(pyblish.api.ContextPlugin): + """Converts collected input representations to input versions. + + Any data in `instance.data["inputRepresentations"]` gets converted into + `instance.data["inputVersions"]` as supported in OpenPype v3. 
+ + """ + # This is a ContextPlugin because then we can query the database only once + # for the conversion of representation ids to version ids (optimization) + label = "Input Representations to Versions" + order = pyblish.api.CollectorOrder + 0.499 + hosts = ["*"] + + def process(self, context): + # Query all version ids for representation ids from the database once + representations = set() + for instance in context: + inst_repre = instance.data.get("inputRepresentations", []) + representations.update(inst_repre) + + representations_docs = get_representations( + project_name=context.data["projectEntity"]["name"], + representation_ids=representations, + fields=["_id", "parent"]) + + representation_id_to_version_id = { + repre["_id"]: repre["parent"] for repre in representations_docs + } + + for instance in context: + inst_repre = instance.data.get("inputRepresentations", []) + if not inst_repre: + continue + + input_versions = instance.data.get("inputVersions", []) + for repre_id in inst_repre: + repre_id = ObjectId(repre_id) + version_id = representation_id_to_version_id[repre_id] + input_versions.append(version_id) + instance.data["inputVersions"] = input_versions diff --git a/openpype/plugins/publish/collect_machine_name.py b/openpype/plugins/publish/collect_machine_name.py index 72ef68f8ed..8c25966031 100644 --- a/openpype/plugins/publish/collect_machine_name.py +++ b/openpype/plugins/publish/collect_machine_name.py @@ -11,7 +11,7 @@ import pyblish.api class CollectMachineName(pyblish.api.ContextPlugin): label = "Local Machine Name" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 hosts = ["*"] def process(self, context): diff --git a/openpype/plugins/publish/collect_modules.py b/openpype/plugins/publish/collect_modules.py index 2f6cb1ef0e..d76096bcd9 100644 --- a/openpype/plugins/publish/collect_modules.py +++ b/openpype/plugins/publish/collect_modules.py @@ -7,7 +7,7 @@ import pyblish.api class CollectModules(pyblish.api.ContextPlugin): """Collect OpenPype modules.""" - order = pyblish.api.CollectorOrder - 0.45 + order = pyblish.api.CollectorOrder - 0.5 label = "OpenPype Modules" def process(self, context): diff --git a/openpype/plugins/publish/collect_otio_frame_ranges.py b/openpype/plugins/publish/collect_otio_frame_ranges.py index ee7b7957ad..9a68b6e43d 100644 --- a/openpype/plugins/publish/collect_otio_frame_ranges.py +++ b/openpype/plugins/publish/collect_otio_frame_ranges.py @@ -8,8 +8,12 @@ Requires: # import os import opentimelineio as otio import pyblish.api -import openpype.lib from pprint import pformat +from openpype.pipeline.editorial import ( + get_media_range_with_retimes, + otio_range_to_frame_range, + otio_range_with_handles +) class CollectOtioFrameRanges(pyblish.api.InstancePlugin): @@ -20,20 +24,21 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin): label = "Collect OTIO Frame Ranges" order = pyblish.api.CollectorOrder - 0.08 families = ["shot", "clip"] - hosts = ["resolve", "hiero", "flame"] + hosts = ["resolve", "hiero", "flame", "traypublisher"] def process(self, instance): # get basic variables otio_clip = instance.data["otioClip"] workfile_start = instance.data["workfileFrameStart"] + workfile_source_duration = instance.data.get("shotDurationFromSource") # get ranges otio_tl_range = otio_clip.range_in_parent() otio_src_range = otio_clip.source_range otio_avalable_range = otio_clip.available_range() - otio_tl_range_handles = openpype.lib.otio_range_with_handles( + otio_tl_range_handles = otio_range_with_handles( 
otio_tl_range, instance) - otio_src_range_handles = openpype.lib.otio_range_with_handles( + otio_src_range_handles = otio_range_with_handles( otio_src_range, instance) # get source avalable start frame @@ -42,7 +47,7 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin): otio_avalable_range.start_time.rate) # convert to frames - range_convert = openpype.lib.otio_range_to_frame_range + range_convert = otio_range_to_frame_range tl_start, tl_end = range_convert(otio_tl_range) tl_start_h, tl_end_h = range_convert(otio_tl_range_handles) src_start, src_end = range_convert(otio_src_range) @@ -51,17 +56,29 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin): frame_end = frame_start + otio.opentime.to_frames( otio_tl_range.duration, otio_tl_range.duration.rate) - 1 + # in case of retimed clip and frame range should not be retimed + if workfile_source_duration: + # get available range trimmed with processed retimes + retimed_attributes = get_media_range_with_retimes( + otio_clip, 0, 0) + self.log.debug( + ">> retimed_attributes: {}".format(retimed_attributes)) + media_in = int(retimed_attributes["mediaIn"]) + media_out = int(retimed_attributes["mediaOut"]) + frame_end = frame_start + (media_out - media_in) + 1 + self.log.debug(frame_end) + data = { "frameStart": frame_start, "frameEnd": frame_end, "clipIn": tl_start, - "clipOut": tl_end, + "clipOut": tl_end - 1, "clipInH": tl_start_h, - "clipOutH": tl_end_h, + "clipOutH": tl_end_h - 1, "sourceStart": src_starting_from + src_start, - "sourceEnd": src_starting_from + src_end, + "sourceEnd": src_starting_from + src_end - 1, "sourceStartH": src_starting_from + src_start_h, - "sourceEndH": src_starting_from + src_end_h, + "sourceEndH": src_starting_from + src_end_h - 1, } instance.data.update(data) self.log.debug( diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py b/openpype/plugins/publish/collect_otio_subset_resources.py index 7c11462ef0..3387cd1176 100644 --- a/openpype/plugins/publish/collect_otio_subset_resources.py +++ b/openpype/plugins/publish/collect_otio_subset_resources.py @@ -10,8 +10,11 @@ import os import clique import opentimelineio as otio import pyblish.api -import openpype -from openpype.lib import editorial +from openpype.pipeline.editorial import ( + get_media_range_with_retimes, + range_from_frames, + make_sequence_collection +) class CollectOtioSubsetResources(pyblish.api.InstancePlugin): @@ -43,7 +46,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): available_duration = otio_avalable_range.duration.value # get available range trimmed with processed retimes - retimed_attributes = editorial.get_media_range_with_retimes( + retimed_attributes = get_media_range_with_retimes( otio_clip, handle_start, handle_end) self.log.debug( ">> retimed_attributes: {}".format(retimed_attributes)) @@ -65,8 +68,8 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): a_frame_end_h = media_out + handle_end # create trimmed otio time range - trimmed_media_range_h = editorial.range_from_frames( - a_frame_start_h, (a_frame_end_h - a_frame_start_h + 1), + trimmed_media_range_h = range_from_frames( + a_frame_start_h, (a_frame_end_h - a_frame_start_h) + 1, media_fps ) trimmed_duration = trimmed_media_range_h.duration.value @@ -113,13 +116,13 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): # check in two way if it is sequence if hasattr(otio.schema, "ImageSequenceReference"): # for OpenTimelineIO 0.13 and newer - if isinstance(media_ref, - otio.schema.ImageSequenceReference): - 
is_sequence = True - else: - # for OpenTimelineIO 0.12 and older - if metadata.get("padding"): + if isinstance( + media_ref, + otio.schema.ImageSequenceReference + ): is_sequence = True + elif metadata.get("padding"): + is_sequence = True self.log.info( "frame_start-frame_end: {}-{}".format(frame_start, frame_end)) @@ -136,22 +139,20 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): padding=media_ref.frame_zero_padding ) collection.indexes.update( - [i for i in range(a_frame_start_h, (a_frame_end_h + 1))]) + list(range(a_frame_start_h, (a_frame_end_h + 1))) + ) - self.log.debug(collection) - repre = self._create_representation( - frame_start, frame_end, collection=collection) else: # in case it is file sequence but not new OTIO schema # `ImageSequenceReference` path = media_ref.target_url - collection_data = openpype.lib.make_sequence_collection( + collection_data = make_sequence_collection( path, trimmed_media_range_h, metadata) self.staging_dir, collection = collection_data - self.log.debug(collection) - repre = self._create_representation( - frame_start, frame_end, collection=collection) + self.log.debug(collection) + repre = self._create_representation( + frame_start, frame_end, collection=collection) else: _trim = False dirname, filename = os.path.split(media_ref.target_url) @@ -195,7 +196,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): if kwargs.get("collection"): collection = kwargs.get("collection") - files = [f for f in collection] + files = list(collection) ext = collection.format("{tail}") representation_data.update({ "name": ext[1:], @@ -217,7 +218,5 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): }) if kwargs.get("trim") is True: - representation_data.update({ - "tags": ["trim"] - }) + representation_data["tags"] = ["trim"] return representation_data diff --git a/openpype/plugins/publish/collect_rendered_files.py b/openpype/plugins/publish/collect_rendered_files.py index 670e57ed10..8f8d0a5eeb 100644 --- a/openpype/plugins/publish/collect_rendered_files.py +++ b/openpype/plugins/publish/collect_rendered_files.py @@ -1,7 +1,7 @@ """Loads publishing context from json and continues in publish process. Requires: - anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11) + anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.4) Provides: context, instances -> All data from previous publishing process. @@ -12,7 +12,7 @@ import json import pyblish.api -from openpype.pipeline import legacy_io +from openpype.pipeline import legacy_io, KnownPublishError class CollectRenderedFiles(pyblish.api.ContextPlugin): @@ -20,7 +20,12 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): This collector will try to find json files in provided `OPENPYPE_PUBLISH_DATA`. Those files _MUST_ share same context. + Note: + We should split this collector and move the part which handle reading + of file and it's context from session data before collect anatomy + and instance creation dependent on anatomy can be done here. 
""" + order = pyblish.api.CollectorOrder - 0.2 # Keep "filesequence" for backwards compatibility of older jobs targets = ["filesequence", "farm"] @@ -118,23 +123,20 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): def process(self, context): self._context = context - assert os.environ.get("OPENPYPE_PUBLISH_DATA"), ( - "Missing `OPENPYPE_PUBLISH_DATA`") + if not os.environ.get("OPENPYPE_PUBLISH_DATA"): + raise KnownPublishError("Missing `OPENPYPE_PUBLISH_DATA`") + + # QUESTION + # Do we support (or want support) multiple files in the variable? + # - what if they have different context? paths = os.environ["OPENPYPE_PUBLISH_DATA"].split(os.pathsep) - project_name = os.environ.get("AVALON_PROJECT") - if project_name is None: - raise AssertionError( - "Environment `AVALON_PROJECT` was not found." - "Could not set project `root` which may cause issues." - ) - - # TODO root filling should happen after collect Anatomy + # Using already collected Anatomy + anatomy = context.data["anatomy"] self.log.info("Getting root setting for project \"{}\"".format( - project_name + anatomy.project_name )) - anatomy = context.data["anatomy"] self.log.info("anatomy: {}".format(anatomy.roots)) try: session_is_set = False diff --git a/openpype/plugins/publish/collect_resources_path.py b/openpype/plugins/publish/collect_resources_path.py index 89df031fb0..00f65b8b67 100644 --- a/openpype/plugins/publish/collect_resources_path.py +++ b/openpype/plugins/publish/collect_resources_path.py @@ -13,8 +13,6 @@ import copy import pyblish.api -from openpype.pipeline import legacy_io - class CollectResourcesPath(pyblish.api.InstancePlugin): """Generate directory path where the files and resources will be stored""" @@ -41,6 +39,7 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): "rig", "plate", "look", + "mvLook", "yetiRig", "yeticache", "nukenodes", @@ -57,7 +56,6 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): "effect", "staticMesh", "skeletalMesh" - ] def process(self, instance): @@ -85,11 +83,10 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): else: # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." 
- ).format(project_name)) + ).format(anatomy.project_name)) file_path = anatomy_filled["publish"]["path"] # Directory diff --git a/openpype/plugins/publish/collect_scene_loaded_versions.py b/openpype/plugins/publish/collect_scene_loaded_versions.py index bb34e3ce31..5ff2b46e3b 100644 --- a/openpype/plugins/publish/collect_scene_loaded_versions.py +++ b/openpype/plugins/publish/collect_scene_loaded_versions.py @@ -1,7 +1,6 @@ -from bson.objectid import ObjectId - import pyblish.api +from openpype.client import get_representations from openpype.pipeline import ( registered_host, legacy_io, @@ -39,23 +38,29 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): return loaded_versions = [] - _containers = list(host.ls()) - _repr_ids = [ObjectId(c["representation"]) for c in _containers] - repre_docs = legacy_io.find( - {"_id": {"$in": _repr_ids}}, - projection={"_id": 1, "parent": 1} + containers = list(host.ls()) + repre_ids = { + container["representation"] + for container in containers + } + + project_name = legacy_io.active_project() + repre_docs = get_representations( + project_name, + representation_ids=repre_ids, + fields=["_id", "parent"] ) - version_by_repr = { - str(doc["_id"]): doc["parent"] + repre_doc_by_str_id = { + str(doc["_id"]): doc for doc in repre_docs } # QUESTION should we add same representation id when loaded multiple # times? - for con in _containers: + for con in containers: repre_id = con["representation"] - version_id = version_by_repr.get(repre_id) - if version_id is None: + repre_doc = repre_doc_by_str_id.get(repre_id) + if repre_doc is None: self.log.warning(( "Skipping container," " did not find representation document. {}" @@ -66,8 +71,8 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): # may have more then one representation that are same version version = { "subsetName": con["name"], - "representation": ObjectId(repre_id), - "version": version_id, + "representation": repre_doc["_id"], + "version": repre_doc["parent"], } loaded_versions.append(version) diff --git a/openpype/plugins/publish/collect_scene_version.py b/openpype/plugins/publish/collect_scene_version.py index 917647c61a..a7cea6093a 100644 --- a/openpype/plugins/publish/collect_scene_version.py +++ b/openpype/plugins/publish/collect_scene_version.py @@ -1,6 +1,7 @@ import os import pyblish.api -import openpype.api as pype + +from openpype.lib import get_version_from_path class CollectSceneVersion(pyblish.api.ContextPlugin): @@ -46,7 +47,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin): if '' in filename: return - version = pype.get_version_from_path(filename) + version = get_version_from_path(filename) assert version, "Cannot determine version" rootVersion = int(version) diff --git a/openpype/plugins/publish/collect_settings.py b/openpype/plugins/publish/collect_settings.py index d56eabd1b5..a418a6400c 100644 --- a/openpype/plugins/publish/collect_settings.py +++ b/openpype/plugins/publish/collect_settings.py @@ -1,5 +1,8 @@ from pyblish import api -from openpype.api import get_current_project_settings, get_system_settings +from openpype.settings import ( + get_current_project_settings, + get_system_settings, +) class CollectSettings(api.ContextPlugin): diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py index 88093fb92f..4179199317 100644 --- a/openpype/plugins/publish/extract_burnin.py +++ b/openpype/plugins/publish/extract_burnin.py @@ -8,10 +8,10 @@ import shutil import clique import six -import pyblish +import 
pyblish.api -import openpype -import openpype.api +from openpype import resources, PACKAGE_DIR +from openpype.pipeline import publish from openpype.lib import ( run_openpype_process, @@ -23,7 +23,7 @@ from openpype.lib import ( ) -class ExtractBurnin(openpype.api.Extractor): +class ExtractBurnin(publish.Extractor): """ Extractor to create video with pre-defined burnins from existing extracted video representation. @@ -400,7 +400,7 @@ class ExtractBurnin(openpype.api.Extractor): # Use OpenPype default font if not font_filepath: - font_filepath = openpype.api.resources.get_liberation_font_path() + font_filepath = resources.get_liberation_font_path() burnin_options["font"] = font_filepath @@ -488,12 +488,6 @@ class ExtractBurnin(openpype.api.Extractor): "frame_end_handle": frame_end_handle } - # use explicit username for webpublishes as rewriting - # OPENPYPE_USERNAME might have side effects - webpublish_user_name = os.environ.get("WEBPUBLISH_OPENPYPE_USERNAME") - if webpublish_user_name: - burnin_data["username"] = webpublish_user_name - self.log.debug( "Basic burnin_data: {}".format(json.dumps(burnin_data, indent=4)) ) @@ -981,7 +975,7 @@ class ExtractBurnin(openpype.api.Extractor): """Return path to python script for burnin processing.""" scriptpath = os.path.normpath( os.path.join( - openpype.PACKAGE_DIR, + PACKAGE_DIR, "scripts", "otio_burnin.py" ) diff --git a/openpype/plugins/publish/extract_hierarchy_avalon.py b/openpype/plugins/publish/extract_hierarchy_avalon.py index 2f528d4469..6b4e5f48c5 100644 --- a/openpype/plugins/publish/extract_hierarchy_avalon.py +++ b/openpype/plugins/publish/extract_hierarchy_avalon.py @@ -1,7 +1,11 @@ from copy import deepcopy - import pyblish.api - +from openpype.client import ( + get_project, + get_asset_by_id, + get_asset_by_name, + get_archived_assets +) from openpype.pipeline import legacy_io @@ -17,35 +21,24 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if "hierarchyContext" not in context.data: self.log.info("skipping IntegrateHierarchyToAvalon") return - hierarchy_context = deepcopy(context.data["hierarchyContext"]) if not legacy_io.Session: legacy_io.install() - active_assets = [] - # filter only the active publishing insatnces - for instance in context: - if instance.data.get("publish") is False: - continue - - if not instance.data.get("asset"): - continue - - active_assets.append(instance.data["asset"]) - - # remove duplicity in list - self.active_assets = list(set(active_assets)) - self.log.debug("__ self.active_assets: {}".format(self.active_assets)) - - hierarchy_context = self._get_assets(hierarchy_context) - + project_name = legacy_io.active_project() + hierarchy_context = self._get_active_assets(context) self.log.debug("__ hierarchy_context: {}".format(hierarchy_context)) - input_data = context.data["hierarchyContext"] = hierarchy_context self.project = None - self.import_to_avalon(input_data) + self.import_to_avalon(context, project_name, hierarchy_context) - def import_to_avalon(self, input_data, parent=None): + def import_to_avalon( + self, + context, + project_name, + input_data, + parent=None, + ): for name in input_data: self.log.info("input_data[name]: {}".format(input_data[name])) entity_data = input_data[name] @@ -81,7 +74,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): update_data = True # Process project if entity_type.lower() == "project": - entity = legacy_io.find_one({"type": "project"}) + entity = get_project(project_name) # TODO: should be in validator? 
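The asset handling that follows boils down to a three-way decision: reuse an existing asset document, fall back to an archived one, or create a new one. A rough sketch of that order, where find_asset, find_archived and create_asset are hypothetical stand-ins for the get_asset_by_name, get_archived_assets and insert_one calls used in the patch:

    def resolve_asset(name, find_asset, find_archived, create_asset):
        # 1) Prefer an existing (non-archived) asset document.
        entity = find_asset(name)
        if entity is not None:
            return entity

        # 2) Otherwise try to reuse an archived document (the plugin would
        #    unarchive it before use).
        archived = next(iter(find_archived(name)), None)
        if archived is not None:
            return archived

        # 3) Nothing found, create a brand new asset document.
        return create_asset(name)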
assert (entity is not None), "Did not find project in DB" @@ -98,7 +91,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): ) # Else process assset else: - entity = legacy_io.find_one({"type": "asset", "name": name}) + entity = get_asset_by_name(project_name, name) if entity: # Do not override data, only update cur_entity_data = entity.get("data") or {} @@ -122,10 +115,10 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): # Skip updating data update_data = False - archived_entities = legacy_io.find({ - "type": "archived_asset", - "name": name - }) + archived_entities = get_archived_assets( + project_name, + asset_names=[name] + ) unarchive_entity = None for archived_entity in archived_entities: archived_parents = ( @@ -139,11 +132,20 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if unarchive_entity is None: # Create entity if doesn"t exist - entity = self.create_avalon_asset(name, data) + entity = self.create_avalon_asset( + name, data + ) else: # Unarchive if entity was archived entity = self.unarchive_entity(unarchive_entity, data) + # make sure all relative instances have correct avalon data + self._set_avalon_data_to_relative_instances( + context, + project_name, + entity + ) + if update_data: # Update entity data with input data legacy_io.update_many( @@ -152,7 +154,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): ) if "childs" in entity_data: - self.import_to_avalon(entity_data["childs"], entity) + self.import_to_avalon( + context, project_name, entity_data["childs"], entity + ) def unarchive_entity(self, entity, data): # Unarchived asset should not use same data @@ -168,38 +172,87 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): {"_id": entity["_id"]}, new_entity ) + return new_entity def create_avalon_asset(self, name, data): - item = { + asset_doc = { "schema": "openpype:asset-3.0", "name": name, "parent": self.project["_id"], "type": "asset", "data": data } - self.log.debug("Creating asset: {}".format(item)) - entity_id = legacy_io.insert_one(item).inserted_id + self.log.debug("Creating asset: {}".format(asset_doc)) + asset_doc["_id"] = legacy_io.insert_one(asset_doc).inserted_id - return legacy_io.find_one({"_id": entity_id}) + return asset_doc - def _get_assets(self, input_dict): + def _set_avalon_data_to_relative_instances( + self, + context, + project_name, + asset_doc + ): + for instance in context: + # Skip instance if has filled asset entity + if instance.data.get("assetEntity"): + continue + asset_name = asset_doc["name"] + inst_asset_name = instance.data["asset"] + + if asset_name == inst_asset_name: + instance.data["assetEntity"] = asset_doc + + # get parenting data + parents = asset_doc["data"].get("parents") or list() + + # equire only relative parent + parent_name = project_name + if parents: + parent_name = parents[-1] + + # update avalon data on instance + instance.data["anatomyData"].update({ + "hierarchy": "/".join(parents), + "task": {}, + "parent": parent_name + }) + + def _get_active_assets(self, context): """ Returns only asset dictionary. 
Usually the last part of deep dictionary which is not having any children """ - input_dict_copy = deepcopy(input_dict) - - for key in input_dict.keys(): - self.log.debug("__ key: {}".format(key)) - # check if child key is available - if input_dict[key].get("childs"): - # loop deeper - input_dict_copy[key]["childs"] = self._get_assets( - input_dict[key]["childs"]) - else: - # filter out unwanted assets - if key not in self.active_assets: + def get_pure_hierarchy_data(input_dict): + input_dict_copy = deepcopy(input_dict) + for key in input_dict.keys(): + self.log.debug("__ key: {}".format(key)) + # check if child key is available + if input_dict[key].get("childs"): + # loop deeper + input_dict_copy[ + key]["childs"] = get_pure_hierarchy_data( + input_dict[key]["childs"]) + elif key not in active_assets: input_dict_copy.pop(key, None) + return input_dict_copy - return input_dict_copy + hierarchy_context = context.data["hierarchyContext"] + + active_assets = [] + # filter only the active publishing insatnces + for instance in context: + if instance.data.get("publish") is False: + continue + + if not instance.data.get("asset"): + continue + + active_assets.append(instance.data["asset"]) + + # remove duplicity in list + active_assets = list(set(active_assets)) + self.log.debug("__ active_assets: {}".format(active_assets)) + + return get_pure_hierarchy_data(hierarchy_context) diff --git a/openpype/plugins/publish/extract_jpeg_exr.py b/openpype/plugins/publish/extract_jpeg_exr.py deleted file mode 100644 index d6d6854092..0000000000 --- a/openpype/plugins/publish/extract_jpeg_exr.py +++ /dev/null @@ -1,171 +0,0 @@ -import os - -import pyblish.api -from openpype.lib import ( - get_ffmpeg_tool_path, - - run_subprocess, - path_to_subprocess_arg, - - get_transcode_temp_directory, - convert_input_paths_for_ffmpeg, - should_convert_for_ffmpeg -) - -import shutil - - -class ExtractJpegEXR(pyblish.api.InstancePlugin): - """Create jpg thumbnail from sequence using ffmpeg""" - - label = "Extract Jpeg EXR" - order = pyblish.api.ExtractorOrder - families = [ - "imagesequence", "render", "render2d", - "source", "plate", "take" - ] - hosts = ["shell", "fusion", "resolve"] - enabled = False - - # presetable attribute - ffmpeg_args = None - - def process(self, instance): - self.log.info("subset {}".format(instance.data['subset'])) - - # skip crypto passes. - # TODO: This is just a quick fix and has its own side-effects - it is - # affecting every subset name with `crypto` in its name. - # This must be solved properly, maybe using tags on - # representation that can be determined much earlier and - # with better precision. - if 'crypto' in instance.data['subset'].lower(): - self.log.info("Skipping crypto passes.") - return - - # Skip if review not set. 
- if not instance.data.get("review", True): - self.log.info("Skipping - no review set on instance.") - return - - filtered_repres = self._get_filtered_repres(instance) - for repre in filtered_repres: - repre_files = repre["files"] - if not isinstance(repre_files, (list, tuple)): - input_file = repre_files - else: - file_index = int(float(len(repre_files)) * 0.5) - input_file = repre_files[file_index] - - stagingdir = os.path.normpath(repre["stagingDir"]) - - full_input_path = os.path.join(stagingdir, input_file) - self.log.info("input {}".format(full_input_path)) - - do_convert = should_convert_for_ffmpeg(full_input_path) - # If result is None the requirement of conversion can't be - # determined - if do_convert is None: - self.log.info(( - "Can't determine if representation requires conversion." - " Skipped." - )) - continue - - # Do conversion if needed - # - change staging dir of source representation - # - must be set back after output definitions processing - convert_dir = None - if do_convert: - convert_dir = get_transcode_temp_directory() - filename = os.path.basename(full_input_path) - convert_input_paths_for_ffmpeg( - [full_input_path], - convert_dir, - self.log - ) - full_input_path = os.path.join(convert_dir, filename) - - filename = os.path.splitext(input_file)[0] - if not filename.endswith('.'): - filename += "." - jpeg_file = filename + "jpg" - full_output_path = os.path.join(stagingdir, jpeg_file) - - self.log.info("output {}".format(full_output_path)) - - ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") - ffmpeg_args = self.ffmpeg_args or {} - - jpeg_items = [] - jpeg_items.append(path_to_subprocess_arg(ffmpeg_path)) - # override file if already exists - jpeg_items.append("-y") - # use same input args like with mov - jpeg_items.extend(ffmpeg_args.get("input") or []) - # input file - jpeg_items.append("-i {}".format( - path_to_subprocess_arg(full_input_path) - )) - # output arguments from presets - jpeg_items.extend(ffmpeg_args.get("output") or []) - - # If its a movie file, we just want one frame. - if repre["ext"] == "mov": - jpeg_items.append("-vframes 1") - - # output file - jpeg_items.append(path_to_subprocess_arg(full_output_path)) - - subprocess_command = " ".join(jpeg_items) - - # run subprocess - self.log.debug("{}".format(subprocess_command)) - try: # temporary until oiiotool is supported cross platform - run_subprocess( - subprocess_command, shell=True, logger=self.log - ) - except RuntimeError as exp: - if "Compression" in str(exp): - self.log.debug( - "Unsupported compression on input files. Skipping!!!" - ) - return - self.log.warning("Conversion crashed", exc_info=True) - raise - - new_repre = { - "name": "thumbnail", - "ext": "jpg", - "files": jpeg_file, - "stagingDir": stagingdir, - "thumbnail": True, - "tags": ["thumbnail"] - } - - # adding representation - self.log.debug("Adding: {}".format(new_repre)) - instance.data["representations"].append(new_repre) - - # Cleanup temp folder - if convert_dir is not None and os.path.exists(convert_dir): - shutil.rmtree(convert_dir) - - def _get_filtered_repres(self, instance): - filtered_repres = [] - src_repres = instance.data.get("representations") or [] - for repre in src_repres: - self.log.debug(repre) - tags = repre.get("tags") or [] - valid = "review" in tags or "thumb-nuke" in tags - if not valid: - continue - - if not repre.get("files"): - self.log.info(( - "Representation \"{}\" don't have files. 
Skipping" - ).format(repre["name"])) - continue - - filtered_repres.append(repre) - return filtered_repres diff --git a/openpype/plugins/publish/extract_otio_audio_tracks.py b/openpype/plugins/publish/extract_otio_audio_tracks.py index 00c1748cdc..e19b7eeb13 100644 --- a/openpype/plugins/publish/extract_otio_audio_tracks.py +++ b/openpype/plugins/publish/extract_otio_audio_tracks.py @@ -1,9 +1,8 @@ import os import pyblish -import openpype.api from openpype.lib import ( get_ffmpeg_tool_path, - path_to_subprocess_arg + run_subprocess ) import tempfile import opentimelineio as otio @@ -57,15 +56,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): audio_inputs.insert(0, empty) # create cmd - cmd = path_to_subprocess_arg(self.ffmpeg_path) + " " - cmd += self.create_cmd(audio_inputs) - cmd += path_to_subprocess_arg(audio_temp_fpath) - - # run subprocess - self.log.debug("Executing: {}".format(cmd)) - openpype.api.run_subprocess( - cmd, shell=True, logger=self.log - ) + self.mix_audio(audio_inputs, audio_temp_fpath) # remove empty os.remove(empty["mediaPath"]) @@ -110,9 +101,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): # run subprocess self.log.debug("Executing: {}".format(" ".join(cmd))) - openpype.api.run_subprocess( - cmd, logger=self.log - ) + run_subprocess(cmd, logger=self.log) else: audio_fpath = recycling_file.pop() @@ -233,7 +222,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): # run subprocess self.log.debug("Executing: {}".format(" ".join(cmd))) - openpype.api.run_subprocess( + run_subprocess( cmd, logger=self.log ) @@ -245,46 +234,80 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): "durationSec": max_duration_sec } - def create_cmd(self, inputs): + def mix_audio(self, audio_inputs, audio_temp_fpath): """Creating multiple input cmd string Args: - inputs (list): list of input dicts. Order mater. + audio_inputs (list): list of input dicts. Order mater. 
Returns: str: the command body - """ + + longest_input = 0 + for audio_input in audio_inputs: + audio_len = audio_input["durationSec"] + if audio_len > longest_input: + longest_input = audio_len + # create cmd segments - _inputs = "" - _filters = "-filter_complex \"" - _channels = "" - for index, input in enumerate(inputs): - input_format = input.copy() - input_format.update({"i": index}) - input_format["mediaPath"] = path_to_subprocess_arg( - input_format["mediaPath"] + input_args = [] + filters = [] + tag_names = [] + for index, audio_input in enumerate(audio_inputs): + input_args.extend([ + "-ss", str(audio_input["startSec"]), + "-t", str(audio_input["durationSec"]), + "-i", audio_input["mediaPath"] + ]) + + # Output tag of a filtered audio input + tag_name = "[r{}]".format(index) + tag_names.append(tag_name) + # Delay in audio by delay in item + filters.append("[{}]adelay={}:all=1{}".format( + index, audio_input["delayMilSec"], tag_name + )) + + # Mixing filter + # - dropout transition (when audio will get loader) is set to be + # higher then any input audio item + # - volume is set to number of inputs - each mix adds 1/n volume + # where n is input inder (to get more info read ffmpeg docs and + # send a giftcard to contributor) + filters.append( + ( + "{}amix=inputs={}:duration=first:" + "dropout_transition={},volume={}[a]" + ).format( + "".join(tag_names), + len(audio_inputs), + (longest_input * 1000) + 1000, + len(audio_inputs), ) + ) - _inputs += ( - "-ss {startSec} " - "-t {durationSec} " - "-i {mediaPath} " - ).format(**input_format) + # Store filters to a file (separated by ',') + # - this is to avoid "too long" command issue in ffmpeg + with tempfile.NamedTemporaryFile( + delete=False, mode="w", suffix=".txt" + ) as tmp_file: + filters_tmp_filepath = tmp_file.name + tmp_file.write(",".join(filters)) - _filters += "[{i}]adelay={delayMilSec}:all=1[r{i}]; ".format( - **input_format) - _channels += "[r{}]".format(index) + args = [self.ffmpeg_path] + args.extend(input_args) + args.extend([ + "-filter_complex_script", filters_tmp_filepath, + "-map", "[a]" + ]) + args.append(audio_temp_fpath) - # merge all cmd segments together - cmd = _inputs + _filters + _channels - cmd += str( - "amix=inputs={inputs}:duration=first:" - "dropout_transition=1000,volume={inputs}[a]\" " - ).format(inputs=len(inputs)) - cmd += "-map \"[a]\" " + # run subprocess + self.log.debug("Executing: {}".format(args)) + run_subprocess(args, logger=self.log) - return cmd + os.remove(filters_tmp_filepath) def create_temp_file(self, name): """Create temp wav file diff --git a/openpype/plugins/publish/extract_otio_file.py b/openpype/plugins/publish/extract_otio_file.py index 3bd217d5d4..1a6a82117d 100644 --- a/openpype/plugins/publish/extract_otio_file.py +++ b/openpype/plugins/publish/extract_otio_file.py @@ -1,10 +1,11 @@ import os import pyblish.api -import openpype.api import opentimelineio as otio +from openpype.pipeline import publish -class ExtractOTIOFile(openpype.api.Extractor): + +class ExtractOTIOFile(publish.Extractor): """ Extractor export OTIO file """ @@ -12,9 +13,11 @@ class ExtractOTIOFile(openpype.api.Extractor): label = "Extract OTIO file" order = pyblish.api.ExtractorOrder - 0.45 families = ["workfile"] - hosts = ["resolve", "hiero"] + hosts = ["resolve", "hiero", "traypublisher"] def process(self, instance): + if not instance.context.data.get("otioTimeline"): + return # create representation data if "representations" not in instance.data: instance.data["representations"] = [] diff --git 
a/openpype/plugins/publish/extract_otio_review.py b/openpype/plugins/publish/extract_otio_review.py index 35adc97442..169ff9e136 100644 --- a/openpype/plugins/publish/extract_otio_review.py +++ b/openpype/plugins/publish/extract_otio_review.py @@ -18,10 +18,22 @@ import os import clique import opentimelineio as otio from pyblish import api -import openpype + +from openpype.lib import ( + get_ffmpeg_tool_path, + run_subprocess, +) +from openpype.pipeline import publish +from openpype.pipeline.editorial import ( + otio_range_to_frame_range, + trim_media_range, + range_from_frames, + frames_to_seconds, + make_sequence_collection +) -class ExtractOTIOReview(openpype.api.Extractor): +class ExtractOTIOReview(publish.Extractor): """ Extract OTIO timeline into one concuted image sequence file. @@ -161,7 +173,7 @@ class ExtractOTIOReview(openpype.api.Extractor): dirname = media_ref.target_url_base head = media_ref.name_prefix tail = media_ref.name_suffix - first, last = openpype.lib.otio_range_to_frame_range( + first, last = otio_range_to_frame_range( available_range) collection = clique.Collection( head=head, @@ -180,7 +192,7 @@ class ExtractOTIOReview(openpype.api.Extractor): # in case it is file sequence but not new OTIO schema # `ImageSequenceReference` path = media_ref.target_url - collection_data = openpype.lib.make_sequence_collection( + collection_data = make_sequence_collection( path, available_range, metadata) dir_path, collection = collection_data @@ -305,8 +317,8 @@ class ExtractOTIOReview(openpype.api.Extractor): duration = avl_durtation # return correct trimmed range - return openpype.lib.trim_media_range( - avl_range, openpype.lib.range_from_frames(start, duration, fps) + return trim_media_range( + avl_range, range_from_frames(start, duration, fps) ) def _render_seqment(self, sequence=None, @@ -327,7 +339,7 @@ class ExtractOTIOReview(openpype.api.Extractor): otio.time.TimeRange: trimmed available range """ # get rendering app path - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") # create path and frame start to destination output_path, out_frame_start = self._get_ffmpeg_output() @@ -357,8 +369,8 @@ class ExtractOTIOReview(openpype.api.Extractor): frame_start = otio_range.start_time.value input_fps = otio_range.start_time.rate frame_duration = otio_range.duration.value - sec_start = openpype.lib.frames_to_secons(frame_start, input_fps) - sec_duration = openpype.lib.frames_to_secons( + sec_start = frames_to_seconds(frame_start, input_fps) + sec_duration = frames_to_seconds( frame_duration, input_fps ) @@ -370,8 +382,7 @@ class ExtractOTIOReview(openpype.api.Extractor): ]) elif gap: - sec_duration = openpype.lib.frames_to_secons( - gap, self.actual_fps) + sec_duration = frames_to_seconds(gap, self.actual_fps) # form command for rendering gap files command.extend([ @@ -391,7 +402,7 @@ class ExtractOTIOReview(openpype.api.Extractor): ]) # execute self.log.debug("Executing: {}".format(" ".join(command))) - output = openpype.api.run_subprocess( + output = run_subprocess( command, logger=self.log ) self.log.debug("Output: {}".format(output)) diff --git a/openpype/plugins/publish/extract_otio_trimming_video.py b/openpype/plugins/publish/extract_otio_trimming_video.py index 30b57e2c69..70726338aa 100644 --- a/openpype/plugins/publish/extract_otio_trimming_video.py +++ b/openpype/plugins/publish/extract_otio_trimming_video.py @@ -6,17 +6,24 @@ Requires: """ import os -from pyblish import api -import openpype from copy import deepcopy 
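Both OTIO extractors in this area convert frame ranges to seconds before handing them to ffmpeg. A small sketch of the trim command they build, assuming frames_to_seconds(frames, fps) is simply the frame count divided by the frame rate (which is how the value is used here); codec options are deliberately left out:

    def build_trim_args(ffmpeg_path, video_path, frame_start, frame_duration,
                        fps, output_path):
        sec_start = frame_start / float(fps)        # where the trim begins
        sec_duration = frame_duration / float(fps)  # how much to keep
        return [
            ffmpeg_path,
            "-ss", str(sec_start),
            "-t", str(sec_duration),
            "-i", video_path,
            output_path,
        ]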
+import pyblish.api -class ExtractOTIOTrimmingVideo(openpype.api.Extractor): +from openpype.lib import ( + get_ffmpeg_tool_path, + run_subprocess, +) +from openpype.pipeline import publish +from openpype.pipeline.editorial import frames_to_seconds + + +class ExtractOTIOTrimmingVideo(publish.Extractor): """ Trimming video file longer then required lenght """ - order = api.ExtractorOrder + order = pyblish.api.ExtractorOrder label = "Extract OTIO trim longer video" families = ["trim"] hosts = ["resolve", "hiero", "flame"] @@ -69,7 +76,7 @@ class ExtractOTIOTrimmingVideo(openpype.api.Extractor): """ # get rendering app path - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") # create path to destination output_path = self._get_ffmpeg_output(input_file_path) @@ -80,9 +87,9 @@ class ExtractOTIOTrimmingVideo(openpype.api.Extractor): video_path = input_file_path frame_start = otio_range.start_time.value input_fps = otio_range.start_time.rate - frame_duration = (otio_range.duration.value + 1) - sec_start = openpype.lib.frames_to_secons(frame_start, input_fps) - sec_duration = openpype.lib.frames_to_secons(frame_duration, input_fps) + frame_duration = otio_range.duration.value - 1 + sec_start = frames_to_seconds(frame_start, input_fps) + sec_duration = frames_to_seconds(frame_duration, input_fps) # form command for rendering gap files command.extend([ @@ -95,7 +102,7 @@ class ExtractOTIOTrimmingVideo(openpype.api.Extractor): # execute self.log.debug("Executing: {}".format(" ".join(command))) - output = openpype.api.run_subprocess( + output = run_subprocess( command, logger=self.log ) self.log.debug("Output: {}".format(output)) diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index 879125dac3..f299d1c6e9 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -3,25 +3,26 @@ import re import copy import json import shutil - from abc import ABCMeta, abstractmethod + import six - import clique - +import speedcopy import pyblish.api -import openpype.api + from openpype.lib import ( get_ffmpeg_tool_path, - get_ffprobe_streams, path_to_subprocess_arg, - + run_subprocess, +) +from openpype.lib.transcoding import ( + IMAGE_EXTENSIONS, + get_ffprobe_streams, should_convert_for_ffmpeg, convert_input_paths_for_ffmpeg, - get_transcode_temp_directory + get_transcode_temp_directory, ) -import speedcopy class ExtractReview(pyblish.api.InstancePlugin): @@ -128,6 +129,7 @@ class ExtractReview(pyblish.api.InstancePlugin): for repre in instance.data["representations"]: repre_name = str(repre.get("name")) tags = repre.get("tags") or [] + custom_tags = repre.get("custom_tags") if "review" not in tags: self.log.debug(( "Repre: {} - Didn't found \"review\" in tags. Skipping" @@ -158,18 +160,41 @@ class ExtractReview(pyblish.api.InstancePlugin): ) continue - # Filter output definition by representation tags (optional) - outputs = self.filter_outputs_by_tags(profile_outputs, tags) + # Filter output definition by representation's + # custom tags (optional) + outputs = self.filter_outputs_by_custom_tags( + profile_outputs, custom_tags) if not outputs: self.log.info(( "Skipped representation. All output definitions from" " selected profile does not match to representation's" - " tags. \"{}\"" + " custom tags. 
\"{}\"" ).format(str(tags))) continue + outputs_per_representations.append((repre, outputs)) return outputs_per_representations + def _single_frame_filter(self, input_filepaths, output_defs): + single_frame_image = False + if len(input_filepaths) == 1: + ext = os.path.splitext(input_filepaths[0])[-1] + single_frame_image = ext.lower() in IMAGE_EXTENSIONS + + filtered_defs = [] + for output_def in output_defs: + output_filters = output_def.get("filter") or {} + frame_filter = output_filters.get("single_frame_filter") + if ( + (not single_frame_image and frame_filter == "single_frame") + or (single_frame_image and frame_filter == "multi_frame") + ): + continue + + filtered_defs.append(output_def) + + return filtered_defs + @staticmethod def get_instance_label(instance): return ( @@ -190,7 +215,7 @@ class ExtractReview(pyblish.api.InstancePlugin): outputs_per_repres = self._get_outputs_per_representations( instance, profile_outputs ) - for repre, outpu_defs in outputs_per_repres: + for repre, output_defs in outputs_per_repres: # Check if input should be preconverted before processing # Store original staging dir (it's value may change) src_repre_staging_dir = repre["stagingDir"] @@ -211,6 +236,16 @@ class ExtractReview(pyblish.api.InstancePlugin): if first_input_path is None: first_input_path = filepath + filtered_output_defs = self._single_frame_filter( + input_filepaths, output_defs + ) + if not filtered_output_defs: + self.log.debug(( + "Repre: {} - All output definitions were filtered" + " out by single frame filter. Skipping" + ).format(repre["name"])) + continue + # Skip if file is not set if first_input_path is None: self.log.warning(( @@ -244,7 +279,10 @@ class ExtractReview(pyblish.api.InstancePlugin): try: self._render_output_definitions( - instance, repre, src_repre_staging_dir, outpu_defs + instance, + repre, + src_repre_staging_dir, + filtered_output_defs ) finally: @@ -258,10 +296,10 @@ class ExtractReview(pyblish.api.InstancePlugin): shutil.rmtree(new_staging_dir) def _render_output_definitions( - self, instance, repre, src_repre_staging_dir, outpu_defs + self, instance, repre, src_repre_staging_dir, output_defs ): fill_data = copy.deepcopy(instance.data["anatomyData"]) - for _output_def in outpu_defs: + for _output_def in output_defs: output_def = copy.deepcopy(_output_def) # Make sure output definition has "tags" key if "tags" not in output_def: @@ -350,9 +388,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # run subprocess self.log.debug("Executing: {}".format(subprcs_cmd)) - openpype.api.run_subprocess( - subprcs_cmd, shell=True, logger=self.log - ) + run_subprocess(subprcs_cmd, shell=True, logger=self.log) # delete files added to fill gaps if files_to_clean: @@ -360,6 +396,7 @@ class ExtractReview(pyblish.api.InstancePlugin): os.unlink(f) new_repre.update({ + "fps": temp_data["fps"], "name": "{}_{}".format(output_name, output_ext), "outputName": output_name, "outputDef": output_def, @@ -447,9 +484,24 @@ class ExtractReview(pyblish.api.InstancePlugin): input_is_sequence = self.input_is_sequence(repre) input_allow_bg = False + first_sequence_frame = None if input_is_sequence and repre["files"]: + # Calculate first frame that should be used + cols, _ = clique.assemble(repre["files"]) + input_frames = list(sorted(cols[0].indexes)) + first_sequence_frame = input_frames[0] + # WARNING: This is an issue as we don't know if first frame + # is with or without handles! 
+ # - handle start is added but how do not know if we should + output_duration = (output_frame_end - output_frame_start) + 1 + if ( + without_handles + and len(input_frames) - handle_start >= output_duration + ): + first_sequence_frame += handle_start + ext = os.path.splitext(repre["files"][0])[1].replace(".", "") - if ext in self.alpha_exts: + if ext.lower() in self.alpha_exts: input_allow_bg = True return { @@ -467,6 +519,7 @@ class ExtractReview(pyblish.api.InstancePlugin): "resolution_height": instance.data.get("resolutionHeight"), "origin_repre": repre, "input_is_sequence": input_is_sequence, + "first_sequence_frame": first_sequence_frame, "input_allow_bg": input_allow_bg, "with_audio": with_audio, "without_handles": without_handles, @@ -545,9 +598,9 @@ class ExtractReview(pyblish.api.InstancePlugin): if temp_data["input_is_sequence"]: # Set start frame of input sequence (just frame in filename) # - definition of input filepath - ffmpeg_input_args.append( - "-start_number {}".format(temp_data["output_frame_start"]) - ) + ffmpeg_input_args.extend([ + "-start_number", str(temp_data["first_sequence_frame"]) + ]) # TODO add fps mapping `{fps: fraction}` ? # - e.g.: { @@ -763,7 +816,8 @@ class ExtractReview(pyblish.api.InstancePlugin): start_frame = int(start_frame) end_frame = int(end_frame) collections = clique.assemble(files)[0] - assert len(collections) == 1, "Multiple collections found." + msg = "Multiple collections {} found.".format(collections) + assert len(collections) == 1, msg col = collections[0] # do nothing if no gap is found in input range @@ -880,6 +934,8 @@ class ExtractReview(pyblish.api.InstancePlugin): if output_ext.startswith("."): output_ext = output_ext[1:] + output_ext = output_ext.lower() + # Store extension to representation new_repre["ext"] = output_ext @@ -1192,7 +1248,6 @@ class ExtractReview(pyblish.api.InstancePlugin): # Get instance data pixel_aspect = temp_data["pixel_aspect"] - if reformat_in_baking: self.log.debug(( "Using resolution from input. It is already " @@ -1212,6 +1267,10 @@ class ExtractReview(pyblish.api.InstancePlugin): # - settings value can't have None but has value of 0 output_width = output_def.get("width") or output_width or None output_height = output_def.get("height") or output_height or None + # Force to use input resolution if output resolution was not defined + # in settings. Resolution from instance is not used when + # 'use_input_res' is set to 'True'. 
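Just below, the patch adds an optional pre-scale that bakes a non-square pixel aspect into the width before any cropping or padding happens, and flags the result with use_input_res so the instance resolution is not applied on top of it. Restated as a tiny helper with illustrative names:

    def pixel_aspect_filters(input_width, input_height, pixel_aspect,
                             scale_pixel_aspect=True):
        filters = []
        if scale_pixel_aspect and pixel_aspect != 1:
            # Widen the image so downstream filters work with square pixels.
            input_width = int(input_width * pixel_aspect)
            filters.append(
                "scale={}x{}:flags=lanczos".format(input_width, input_height)
            )
        return filters, input_width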
+ use_input_res = False # Overscal color overscan_color_value = "black" @@ -1223,6 +1282,17 @@ class ExtractReview(pyblish.api.InstancePlugin): ) self.log.debug("Overscan color: `{}`".format(overscan_color_value)) + # Scale input to have proper pixel aspect ratio + # - scale width by the pixel aspect ratio + scale_pixel_aspect = output_def.get("scale_pixel_aspect", True) + if scale_pixel_aspect and pixel_aspect != 1: + # Change input width after pixel aspect + input_width = int(input_width * pixel_aspect) + use_input_res = True + filters.append(( + "scale={}x{}:flags=lanczos".format(input_width, input_height) + )) + # Convert overscan value video filters overscan_crop = output_def.get("overscan_crop") overscan = OverscanCrop( @@ -1233,13 +1303,10 @@ class ExtractReview(pyblish.api.InstancePlugin): # resolution by it's values if overscan_crop_filters: filters.extend(overscan_crop_filters) + # Change input resolution after overscan crop input_width = overscan.width() input_height = overscan.height() - # Use output resolution as inputs after cropping to skip usage of - # instance data resolution - if output_width is None or output_height is None: - output_width = input_width - output_height = input_height + use_input_res = True # Make sure input width and height is not an odd number input_width_is_odd = bool(input_width % 2 != 0) @@ -1265,8 +1332,10 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("input_width: `{}`".format(input_width)) self.log.debug("input_height: `{}`".format(input_height)) - # Use instance resolution if output definition has not set it. - if output_width is None or output_height is None: + # Use instance resolution if output definition has not set it + # - use instance resolution only if there were not scale changes + # that may massivelly affect output 'use_input_res' + if not use_input_res and output_width is None or output_height is None: output_width = temp_data["resolution_width"] output_height = temp_data["resolution_height"] @@ -1308,7 +1377,6 @@ class ExtractReview(pyblish.api.InstancePlugin): output_width == input_width and output_height == input_height and not letter_box_enabled - and pixel_aspect == 1 ): self.log.debug( "Output resolution is same as input's" @@ -1318,66 +1386,16 @@ class ExtractReview(pyblish.api.InstancePlugin): new_repre["resolutionHeight"] = input_height return filters - # defining image ratios - input_res_ratio = ( - (float(input_width) * pixel_aspect) / input_height - ) - output_res_ratio = float(output_width) / float(output_height) - self.log.debug("input_res_ratio: `{}`".format(input_res_ratio)) - self.log.debug("output_res_ratio: `{}`".format(output_res_ratio)) - - # Round ratios to 2 decimal places for comparing - input_res_ratio = round(input_res_ratio, 2) - output_res_ratio = round(output_res_ratio, 2) - - # get scale factor - scale_factor_by_width = ( - float(output_width) / (input_width * pixel_aspect) - ) - scale_factor_by_height = ( - float(output_height) / input_height - ) - - self.log.debug( - "scale_factor_by_with: `{}`".format(scale_factor_by_width) - ) - self.log.debug( - "scale_factor_by_height: `{}`".format(scale_factor_by_height) - ) - # scaling none square pixels and 1920 width - if ( - input_height != output_height - or input_width != output_width - or pixel_aspect != 1 - ): - if input_res_ratio < output_res_ratio: - self.log.debug( - "Input's resolution ratio is lower then output's" - ) - width_scale = int(input_width * scale_factor_by_height) - width_half_pad = int((output_width - width_scale) / 2) 
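The manual scale-factor and half-padding arithmetic removed here is replaced, in the new code that follows, by letting ffmpeg compute the fit itself: scale with force_original_aspect_ratio=decrease shrinks the image to fit inside the target, and a centered pad fills the remainder. Roughly:

    def fit_and_pad_filters(output_width, output_height, pad_color="black"):
        return [
            ("scale={w}x{h}:flags=lanczos"
             ":force_original_aspect_ratio=decrease").format(
                w=output_width, h=output_height),
            "pad={w}:{h}:(ow-iw)/2:(oh-ih)/2:{c}".format(
                w=output_width, h=output_height, c=pad_color),
            "setsar=1",
        ]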
- height_scale = output_height - height_half_pad = 0 - else: - self.log.debug("Input is heigher then output") - width_scale = output_width - width_half_pad = 0 - height_scale = int(input_height * scale_factor_by_width) - height_half_pad = int((output_height - height_scale) / 2) - - self.log.debug("width_scale: `{}`".format(width_scale)) - self.log.debug("width_half_pad: `{}`".format(width_half_pad)) - self.log.debug("height_scale: `{}`".format(height_scale)) - self.log.debug("height_half_pad: `{}`".format(height_half_pad)) - + if input_height != output_height or input_width != output_width: filters.extend([ - "scale={}x{}:flags=lanczos".format( - width_scale, height_scale - ), - "pad={}:{}:{}:{}:{}".format( + ( + "scale={}x{}" + ":flags=lanczos" + ":force_original_aspect_ratio=decrease" + ).format(output_width, output_height), + "pad={}:{}:(ow-iw)/2:(oh-ih)/2:{}".format( output_width, output_height, - width_half_pad, height_half_pad, overscan_color_value ), "setsar=1" @@ -1479,6 +1497,8 @@ class ExtractReview(pyblish.api.InstancePlugin): output = -1 regexes = self.compile_list_of_regexes(in_list) for regex in regexes: + if not value: + continue if re.match(regex, value): output = 1 break @@ -1682,6 +1702,7 @@ class ExtractReview(pyblish.api.InstancePlugin): Args: profile (dict): Profile from presets matching current context. families (list): All families of current instance. + subset_name (str): name of subset Returns: list: Containg all output definitions matching entered families. @@ -1729,40 +1750,51 @@ class ExtractReview(pyblish.api.InstancePlugin): return filtered_outputs - def filter_outputs_by_tags(self, outputs, tags): - """Filter output definitions by entered representation tags. + def filter_outputs_by_custom_tags(self, outputs, custom_tags): + """Filter output definitions by entered representation custom_tags. - Output definitions without tags filter are marked as valid. + Output definitions without custom_tags filter are marked as invalid, + only in case representation is having any custom_tags defined. Args: outputs (list): Contain list of output definitions from presets. - tags (list): Tags of processed representation. + custom_tags (list): Custom Tags of processed representation. Returns: list: Containg all output definitions matching entered tags. 
""" - filtered_outputs = [] - repre_tags_low = [tag.lower() for tag in tags] - for output_def in outputs: - valid = True - output_filters = output_def.get("filter") - if output_filters: - # Check tag filters - tag_filters = output_filters.get("tags") - if tag_filters: - tag_filters_low = [tag.lower() for tag in tag_filters] - valid = False - for tag in repre_tags_low: - if tag in tag_filters_low: - valid = True - break - if not valid: - continue + filtered_outputs = [] + repre_c_tags_low = [tag.lower() for tag in (custom_tags or [])] + for output_def in outputs: + tag_filters = output_def.get("filter", {}).get("custom_tags") + + if not custom_tags and not tag_filters: + # Definition is valid if both tags are empty + valid = True + + elif not custom_tags or not tag_filters: + # Invalid if one is empty + valid = False + + else: + # Check if output definition tags are in representation tags + valid = False + # lower all filter tags + tag_filters_low = [tag.lower() for tag in tag_filters] + # check if any repre tag is not in filter tags + for tag in repre_c_tags_low: + if tag in tag_filters_low: + valid = True + break if valid: filtered_outputs.append(output_def) + self.log.debug("__ filtered_outputs: {}".format( + [_o["filename_suffix"] for _o in filtered_outputs] + )) + return filtered_outputs def add_video_filter_args(self, args, inserting_arg): diff --git a/openpype/plugins/publish/extract_review_slate.py b/openpype/plugins/publish/extract_review_slate.py index 49f0eac41d..fca3d96ca6 100644 --- a/openpype/plugins/publish/extract_review_slate.py +++ b/openpype/plugins/publish/extract_review_slate.py @@ -1,17 +1,22 @@ import os -import openpype.api -import pyblish +import re +from pprint import pformat + +import pyblish.api + from openpype.lib import ( path_to_subprocess_arg, + run_subprocess, get_ffmpeg_tool_path, get_ffprobe_data, get_ffprobe_streams, get_ffmpeg_codec_args, get_ffmpeg_format_args, ) +from openpype.pipeline import publish -class ExtractReviewSlate(openpype.api.Extractor): +class ExtractReviewSlate(publish.Extractor): """ Will add slate frame at the start of the video files """ @@ -21,6 +26,8 @@ class ExtractReviewSlate(openpype.api.Extractor): families = ["slate", "review"] match = pyblish.api.Subset + SUFFIX = "_slate" + hosts = ["nuke", "shell"] optional = True @@ -29,28 +36,19 @@ class ExtractReviewSlate(openpype.api.Extractor): if "representations" not in inst_data: raise RuntimeError("Burnin needs already created mov to work on.") - suffix = "_slate" - slate_path = inst_data.get("slateFrame") + # get slates frame from upstream + slates_data = inst_data.get("slateFrames") + if not slates_data: + # make it backward compatible and open for slates generator + # premium plugin + slates_data = { + "*": inst_data["slateFrame"] + } + + self.log.info("_ slates_data: {}".format(pformat(slates_data))) + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") - slate_streams = get_ffprobe_streams(slate_path, self.log) - # Try to find first stream with defined 'width' and 'height' - # - this is to avoid order of streams where audio can be as first - # - there may be a better way (checking `codec_type`?)+ - slate_width = None - slate_height = None - for slate_stream in slate_streams: - if "width" in slate_stream and "height" in slate_stream: - slate_width = int(slate_stream["width"]) - slate_height = int(slate_stream["height"]) - break - - # Raise exception of any stream didn't define input resolution - if slate_width is None: - raise AssertionError(( - "FFprobe couldn't read resolution from 
input file: \"{}\"" - ).format(slate_path)) - if "reviewToWidth" in inst_data: use_legacy_code = True else: @@ -58,6 +56,7 @@ class ExtractReviewSlate(openpype.api.Extractor): pixel_aspect = inst_data.get("pixelAspect", 1) fps = inst_data.get("fps") + self.log.debug("fps {} ".format(fps)) for idx, repre in enumerate(inst_data["representations"]): self.log.debug("repre ({}): `{}`".format(idx + 1, repre)) @@ -73,20 +72,22 @@ class ExtractReviewSlate(openpype.api.Extractor): os.path.normpath(stagingdir), repre["files"]) self.log.debug("__ input_path: {}".format(input_path)) - video_streams = get_ffprobe_streams( + streams = get_ffprobe_streams( input_path, self.log ) + # get slate data + slate_path = self._get_slate_path(input_file, slates_data) + self.log.info("_ slate_path: {}".format(slate_path)) - # Try to find first stream with defined 'width' and 'height' - # - this is to avoid order of streams where audio can be as first - # - there may be a better way (checking `codec_type`?) - input_width = None - input_height = None - for stream in video_streams: - if "width" in stream and "height" in stream: - input_width = int(stream["width"]) - input_height = int(stream["height"]) - break + slate_width, slate_height = self._get_slates_resolution(slate_path) + + # Get video metadata + ( + input_width, + input_height, + input_timecode, + input_frame_rate + ) = self._get_video_metadata(streams) # Raise exception of any stream didn't define input resolution if input_width is None: @@ -94,6 +95,14 @@ class ExtractReviewSlate(openpype.api.Extractor): "FFprobe couldn't read resolution from input file: \"{}\"" ).format(input_path)) + ( + audio_codec, + audio_channels, + audio_sample_rate, + audio_channel_layout, + input_audio + ) = self._get_audio_metadata(streams) + # values are set in ExtractReview if use_legacy_code: to_width = inst_data["reviewToWidth"] @@ -133,7 +142,7 @@ class ExtractReviewSlate(openpype.api.Extractor): _remove_at_end = [] ext = os.path.splitext(input_file)[1] - output_file = input_file.replace(ext, "") + suffix + ext + output_file = input_file.replace(ext, "") + self.SUFFIX + ext _remove_at_end.append(input_path) @@ -149,14 +158,26 @@ class ExtractReviewSlate(openpype.api.Extractor): input_args.extend(repre["_profile"].get('input', [])) else: input_args.extend(repre["outputDef"].get('input', [])) - input_args.append("-loop 1 -i {}".format( - path_to_subprocess_arg(slate_path) - )) + input_args.extend([ - "-r {}".format(fps), - "-t 0.04" + "-loop", "1", + "-i", path_to_subprocess_arg(slate_path), + "-r", str(input_frame_rate), + "-frames:v", "1", ]) + # add timecode from source to the slate, substract one frame + offset_timecode = "" + if input_timecode: + offset_timecode = self._tc_offset( + str(input_timecode), + framerate=fps, + frame_offset=-1 + ) + self.log.debug("Slate Timecode: `{}`".format( + offset_timecode + )) + if use_legacy_code: format_args = [] codec_args = repre["_profile"].get('codec', []) @@ -171,10 +192,9 @@ class ExtractReviewSlate(openpype.api.Extractor): # make sure colors are correct output_args.extend([ - "-vf scale=out_color_matrix=bt709", - "-color_primaries bt709", - "-color_trc bt709", - "-colorspace bt709" + "-color_primaries", "bt709", + "-color_trc", "bt709", + "-colorspace", "bt709", ]) # scaling none square pixels and 1920 width @@ -210,15 +230,25 @@ class ExtractReviewSlate(openpype.api.Extractor): "__ height_half_pad: `{}`".format(height_half_pad) ) - scaling_arg = ("scale={0}x{1}:flags=lanczos," - "pad={2}:{3}:{4}:{5}:black,setsar=1").format( - 
width_scale, height_scale, to_width, to_height, - width_half_pad, height_half_pad + scaling_arg = ( + "scale={0}x{1}:flags=lanczos" + ":out_color_matrix=bt709" + ",pad={2}:{3}:{4}:{5}:black" + ",setsar=1" + ",fps={6}" + ).format( + width_scale, + height_scale, + to_width, + to_height, + width_half_pad, + height_half_pad, + input_frame_rate ) - vf_back = self.add_video_filter_args(output_args, scaling_arg) - # add it to output_args - output_args.insert(0, vf_back) + vf_back = self.add_video_filter_args(output_args, scaling_arg) + # add it to output_args + output_args.insert(0, vf_back) # overrides output file output_args.append("-y") @@ -240,48 +270,88 @@ class ExtractReviewSlate(openpype.api.Extractor): self.log.debug( "Slate Executing: {}".format(slate_subprocess_cmd) ) - openpype.api.run_subprocess( + run_subprocess( slate_subprocess_cmd, shell=True, logger=self.log ) - # create ffmpeg concat text file path - conc_text_file = input_file.replace(ext, "") + "_concat" + ".txt" - conc_text_path = os.path.join( - os.path.normpath(stagingdir), conc_text_file) - _remove_at_end.append(conc_text_path) - self.log.debug("__ conc_text_path: {}".format(conc_text_path)) + # Create slate with silent audio track + if input_audio: + # silent slate output path + slate_silent_path = "_silent".join( + os.path.splitext(slate_v_path)) + _remove_at_end.append(slate_silent_path) + self._create_silent_slate( + ffmpeg_path, + slate_v_path, + slate_silent_path, + audio_codec, + audio_channels, + audio_sample_rate, + audio_channel_layout, + input_frame_rate + ) - new_line = "\n" - with open(conc_text_path, "w") as conc_text_f: - conc_text_f.writelines([ - "file {}".format( - slate_v_path.replace("\\", "/")), - new_line, - "file {}".format(input_path.replace("\\", "/")) - ]) + # replace slate with silent slate for concat + slate_v_path = slate_silent_path - # concat slate and videos together + # concat slate and videos together with concat filter + # this will reencode the output + if input_audio: + fmap = [ + "-filter_complex", + "[0:v] [0:a] [1:v] [1:a] concat=n=2:v=1:a=1 [v] [a]", + "-map", '[v]', + "-map", '[a]' + ] + else: + fmap = [ + "-filter_complex", + "[0:v] [1:v] concat=n=2:v=1:a=0 [v]", + "-map", '[v]' + ] concat_args = [ ffmpeg_path, "-y", - "-f", "concat", - "-safe", "0", - "-i", conc_text_path, - "-c", "copy", + "-i", slate_v_path, + "-i", input_path, ] + concat_args.extend(fmap) + if offset_timecode: + concat_args.extend(["-timecode", offset_timecode]) # NOTE: Added because of OP Atom demuxers # Add format arguments if there are any # - keep format of output if format_args: concat_args.extend(format_args) - # Add final output path + + if codec_args: + concat_args.extend(codec_args) + + # Use arguments from ffmpeg preset + source_ffmpeg_cmd = repre.get("ffmpeg_cmd") + if source_ffmpeg_cmd: + copy_args = ( + "-metadata", + "-metadata:s:v:0", + "-b:v", + "-b:a", + ) + args = source_ffmpeg_cmd.split(" ") + for indx, arg in enumerate(args): + if arg in copy_args: + concat_args.append(arg) + # assumes arg has one parameter + concat_args.append(args[indx + 1]) + + # add final output path concat_args.append(output_path) # ffmpeg concat subprocess self.log.debug( - "Executing concat: {}".format(" ".join(concat_args)) + "Executing concat filter: {}".format + (" ".join(concat_args)) ) - openpype.api.run_subprocess( + run_subprocess( concat_args, logger=self.log ) @@ -309,6 +379,167 @@ class ExtractReviewSlate(openpype.api.Extractor): self.log.debug(inst_data["representations"]) + def _get_slate_path(self, 
input_file, slates_data): + slate_path = None + for sl_n, _slate_path in slates_data.items(): + if "*" in sl_n: + slate_path = _slate_path + break + elif re.search(sl_n, input_file): + slate_path = _slate_path + break + + if not slate_path: + raise AttributeError( + "Missing slates paths: {}".format(slates_data)) + + return slate_path + + def _get_slates_resolution(self, slate_path): + slate_streams = get_ffprobe_streams(slate_path, self.log) + # Try to find first stream with defined 'width' and 'height' + # - this is to avoid order of streams where audio can be as first + # - there may be a better way (checking `codec_type`?)+ + slate_width = None + slate_height = None + for slate_stream in slate_streams: + if "width" in slate_stream and "height" in slate_stream: + slate_width = int(slate_stream["width"]) + slate_height = int(slate_stream["height"]) + break + + # Raise exception of any stream didn't define input resolution + if slate_width is None: + raise AssertionError(( + "FFprobe couldn't read resolution from input file: \"{}\"" + ).format(slate_path)) + + return (slate_width, slate_height) + + def _get_video_metadata(self, streams): + input_timecode = "" + input_width = None + input_height = None + input_frame_rate = None + for stream in streams: + if stream.get("codec_type") != "video": + continue + self.log.debug("FFprobe Video: {}".format(stream)) + + if "width" not in stream or "height" not in stream: + continue + width = int(stream["width"]) + height = int(stream["height"]) + if not width or not height: + continue + + # Make sure that width and height are captured even if frame rate + # is not available + input_width = width + input_height = height + + tags = stream.get("tags") or {} + input_timecode = tags.get("timecode") or "" + + input_frame_rate = stream.get("r_frame_rate") + if input_frame_rate is not None: + break + return ( + input_width, + input_height, + input_timecode, + input_frame_rate + ) + + def _get_audio_metadata(self, streams): + # Get audio metadata + audio_codec = None + audio_channels = None + audio_sample_rate = None + audio_channel_layout = None + input_audio = False + + for stream in streams: + if stream.get("codec_type") != "audio": + continue + self.log.debug("__Ffprobe Audio: {}".format(stream)) + + if all( + stream.get(key) + for key in ( + "codec_name", + "channels", + "sample_rate", + "channel_layout", + ) + ): + audio_codec = stream["codec_name"] + audio_channels = stream["channels"] + audio_sample_rate = stream["sample_rate"] + audio_channel_layout = stream["channel_layout"] + input_audio = True + break + + return ( + audio_codec, + audio_channels, + audio_sample_rate, + audio_channel_layout, + input_audio, + ) + + def _create_silent_slate( + self, + ffmpeg_path, + src_path, + dst_path, + audio_codec, + audio_channels, + audio_sample_rate, + audio_channel_layout, + input_frame_rate + ): + # Get duration of one frame in micro seconds + items = input_frame_rate.split("/") + if len(items) == 1: + one_frame_duration = 1.0 / float(items[0]) + elif len(items) == 2: + one_frame_duration = float(items[1]) / float(items[0]) + else: + one_frame_duration = None + + if one_frame_duration is None: + one_frame_duration = "40000us" + else: + one_frame_duration *= 1000000 + one_frame_duration = str(int(one_frame_duration)) + "us" + self.log.debug("One frame duration is {}".format(one_frame_duration)) + + slate_silent_args = [ + ffmpeg_path, + "-i", src_path, + "-f", "lavfi", "-i", + "anullsrc=r={}:cl={}:d={}".format( + audio_sample_rate, + audio_channel_layout, + 
one_frame_duration + ), + "-c:v", "copy", + "-c:a", audio_codec, + "-map", "0:v", + "-map", "1:a", + "-shortest", + "-y", + dst_path + ] + # run slate generation subprocess + self.log.debug("Silent Slate Executing: {}".format( + " ".join(slate_silent_args) + )) + run_subprocess( + slate_silent_args, logger=self.log + ) + def add_video_filter_args(self, args, inserting_arg): """ Fixing video filter argumets to be one long string @@ -375,3 +606,41 @@ class ExtractReviewSlate(openpype.api.Extractor): ) return format_args, codec_args + + def _tc_offset(self, timecode, framerate=24.0, frame_offset=-1): + """Offsets timecode by frame""" + def _seconds(value, framerate): + if isinstance(value, str): + _zip_ft = zip((3600, 60, 1, 1 / framerate), value.split(':')) + _s = sum(f * float(t) for f, t in _zip_ft) + elif isinstance(value, (int, float)): + _s = value / framerate + else: + _s = 0 + return _s + + def _frames(seconds, framerate, frame_offset): + _f = seconds * framerate + frame_offset + if _f < 0: + _f = framerate * 60 * 60 * 24 + _f + return _f + + def _timecode(seconds, framerate): + return '{h:02d}:{m:02d}:{s:02d}:{f:02d}'.format( + h=int(seconds / 3600), + m=int(seconds / 60 % 60), + s=int(seconds % 60), + f=int(round((seconds - int(seconds)) * framerate))) + drop = False + if ';' in timecode: + timecode = timecode.replace(';', ':') + drop = True + frames = _frames( + _seconds(timecode, framerate), + framerate, + frame_offset + ) + tc = _timecode(_seconds(frames, framerate), framerate) + if drop: + tc = ';'.join(tc.rsplit(':', 1)) + return tc diff --git a/openpype/plugins/publish/extract_scanline_exr.py b/openpype/plugins/publish/extract_scanline_exr.py index a7f7de5188..0e4c0ca65f 100644 --- a/openpype/plugins/publish/extract_scanline_exr.py +++ b/openpype/plugins/publish/extract_scanline_exr.py @@ -4,8 +4,8 @@ import os import shutil import pyblish.api -import openpype.api -import openpype.lib + +from openpype.lib import run_subprocess, get_oiio_tools_path class ExtractScanlineExr(pyblish.api.InstancePlugin): @@ -45,7 +45,7 @@ class ExtractScanlineExr(pyblish.api.InstancePlugin): stagingdir = os.path.normpath(repre.get("stagingDir")) - oiio_tool_path = openpype.lib.get_oiio_tools_path() + oiio_tool_path = get_oiio_tools_path() if not os.path.exists(oiio_tool_path): self.log.error( "OIIO tool not found in {}".format(oiio_tool_path)) @@ -65,7 +65,7 @@ class ExtractScanlineExr(pyblish.api.InstancePlugin): subprocess_exr = " ".join(oiio_cmd) self.log.info(f"running: {subprocess_exr}") - openpype.api.run_subprocess(subprocess_exr, logger=self.log) + run_subprocess(subprocess_exr, logger=self.log) # raise error if there is no ouptput if not os.path.exists(os.path.join(stagingdir, original_name)): diff --git a/openpype/plugins/publish/extract_thumbnail.py b/openpype/plugins/publish/extract_thumbnail.py new file mode 100644 index 0000000000..14b43beae8 --- /dev/null +++ b/openpype/plugins/publish/extract_thumbnail.py @@ -0,0 +1,231 @@ +import os +import tempfile + +import pyblish.api +from openpype.lib import ( + get_ffmpeg_tool_path, + get_oiio_tools_path, + is_oiio_supported, + + run_subprocess, + path_to_subprocess_arg, +) + + +class ExtractThumbnail(pyblish.api.InstancePlugin): + """Create jpg thumbnail from sequence using ffmpeg""" + + label = "Extract Thumbnail" + order = pyblish.api.ExtractorOrder + families = [ + "imagesequence", "render", "render2d", "prerender", + "source", "clip", "take" + ] + hosts = ["shell", "fusion", "resolve", "traypublisher"] + enabled = False + + # presetable 
attribute + ffmpeg_args = None + + def process(self, instance): + subset_name = instance.data["subset"] + instance_repres = instance.data.get("representations") + if not instance_repres: + self.log.debug(( + "Instance {} does not have representations. Skipping" + ).format(subset_name)) + return + + self.log.info( + "Processing instance with subset name {}".format(subset_name) + ) + + # Skip if instance have 'review' key in data set to 'False' + if not self._is_review_instance(instance): + self.log.info("Skipping - no review set on instance.") + return + + # Check if already has thumbnail created + if self._already_has_thumbnail(instance_repres): + self.log.info("Thumbnail representation already present.") + return + + # skip crypto passes. + # TODO: This is just a quick fix and has its own side-effects - it is + # affecting every subset name with `crypto` in its name. + # This must be solved properly, maybe using tags on + # representation that can be determined much earlier and + # with better precision. + if "crypto" in subset_name.lower(): + self.log.info("Skipping crypto passes.") + return + + filtered_repres = self._get_filtered_repres(instance) + if not filtered_repres: + self.log.info(( + "Instance don't have representations" + " that can be used as source for thumbnail. Skipping" + )) + return + + # Create temp directory for thumbnail + # - this is to avoid "override" of source file + dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + self.log.debug( + "Create temp directory {} for thumbnail".format(dst_staging) + ) + # Store new staging to cleanup paths + instance.context.data["cleanupFullPaths"].append(dst_staging) + + thumbnail_created = False + oiio_supported = is_oiio_supported() + for repre in filtered_repres: + repre_files = repre["files"] + if not isinstance(repre_files, (list, tuple)): + input_file = repre_files + else: + file_index = int(float(len(repre_files)) * 0.5) + input_file = repre_files[file_index] + + src_staging = os.path.normpath(repre["stagingDir"]) + full_input_path = os.path.join(src_staging, input_file) + self.log.info("input {}".format(full_input_path)) + filename = os.path.splitext(input_file)[0] + jpeg_file = filename + ".jpg" + full_output_path = os.path.join(dst_staging, jpeg_file) + + if oiio_supported: + self.log.info("Trying to convert with OIIO") + # If the input can read by OIIO then use OIIO method for + # conversion otherwise use ffmpeg + thumbnail_created = self.create_thumbnail_oiio( + full_input_path, full_output_path + ) + + # Try to use FFMPEG if OIIO is not supported or for cases when + # oiiotool isn't available + if not thumbnail_created: + if oiio_supported: + self.log.info(( + "Converting with FFMPEG because input" + " can't be read by OIIO." 
+ )) + + thumbnail_created = self.create_thumbnail_ffmpeg( + full_input_path, full_output_path + ) + + # Skip representation and try next one if wasn't created + if not thumbnail_created: + continue + + new_repre = { + "name": "thumbnail", + "ext": "jpg", + "files": jpeg_file, + "stagingDir": dst_staging, + "thumbnail": True, + "tags": ["thumbnail"] + } + + # adding representation + self.log.debug( + "Adding thumbnail representation: {}".format(new_repre) + ) + instance.data["representations"].append(new_repre) + # There is no need to create more then one thumbnail + break + + if not thumbnail_created: + self.log.warning("Thumbanil has not been created.") + + def _is_review_instance(self, instance): + # TODO: We should probably handle "not creating" of thumbnail + # other way then checking for "review" key on instance data? + if instance.data.get("review", True): + return True + return False + + def _already_has_thumbnail(self, repres): + for repre in repres: + self.log.info("repre {}".format(repre)) + if repre["name"] == "thumbnail": + return True + return False + + def _get_filtered_repres(self, instance): + filtered_repres = [] + src_repres = instance.data.get("representations") or [] + for repre in src_repres: + self.log.debug(repre) + tags = repre.get("tags") or [] + valid = "review" in tags or "thumb-nuke" in tags + if not valid: + continue + + if not repre.get("files"): + self.log.info(( + "Representation \"{}\" don't have files. Skipping" + ).format(repre["name"])) + continue + + filtered_repres.append(repre) + return filtered_repres + + def create_thumbnail_oiio(self, src_path, dst_path): + self.log.info("outputting {}".format(dst_path)) + oiio_tool_path = get_oiio_tools_path() + oiio_cmd = [ + oiio_tool_path, + "-a", src_path, + "-o", dst_path + ] + self.log.info("running: {}".format(" ".join(oiio_cmd))) + try: + run_subprocess(oiio_cmd, logger=self.log) + return True + except Exception: + self.log.warning( + "Failed to create thubmnail using oiiotool", + exc_info=True + ) + return False + + def create_thumbnail_ffmpeg(self, src_path, dst_path): + self.log.info("outputting {}".format(dst_path)) + + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") + ffmpeg_args = self.ffmpeg_args or {} + + jpeg_items = [] + jpeg_items.append(path_to_subprocess_arg(ffmpeg_path)) + # override file if already exists + jpeg_items.append("-y") + # flag for large file sizes + max_int = 2147483647 + jpeg_items.append("-analyzeduration {}".format(max_int)) + jpeg_items.append("-probesize {}".format(max_int)) + # use same input args like with mov + jpeg_items.extend(ffmpeg_args.get("input") or []) + # input file + jpeg_items.append("-i {}".format( + path_to_subprocess_arg(src_path) + )) + # output arguments from presets + jpeg_items.extend(ffmpeg_args.get("output") or []) + # we just want one frame from movie files + jpeg_items.append("-vframes 1") + # output file + jpeg_items.append(path_to_subprocess_arg(dst_path)) + subprocess_command = " ".join(jpeg_items) + try: + run_subprocess( + subprocess_command, shell=True, logger=self.log + ) + return True + except Exception: + self.log.warning( + "Failed to create thubmnail using ffmpeg", + exc_info=True + ) + return False diff --git a/openpype/plugins/publish/extract_thumbnail_from_source.py b/openpype/plugins/publish/extract_thumbnail_from_source.py new file mode 100644 index 0000000000..8da1213807 --- /dev/null +++ b/openpype/plugins/publish/extract_thumbnail_from_source.py @@ -0,0 +1,194 @@ +"""Create instance thumbnail from "thumbnailSource" on 
'instance.data'. + +Output is new representation with "thumbnail" name on instance. If instance +already have such representation the process is skipped. + +This way a collector can point to a file from which should be thumbnail +generated. This is different approach then what global plugin for thumbnails +does. The global plugin has specific logic which does not support + +Todos: + No size handling. Size of input is used for output thumbnail which can + cause issues. +""" + +import os +import tempfile + +import pyblish.api +from openpype.lib import ( + get_ffmpeg_tool_path, + get_oiio_tools_path, + is_oiio_supported, + + run_subprocess, +) + + +class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): + """Create jpg thumbnail for instance based on 'thumbnailSource'. + + Thumbnail source must be a single image or video filepath. + """ + + label = "Extract Thumbnail (from source)" + # Before 'ExtractThumbnail' in global plugins + order = pyblish.api.ExtractorOrder - 0.00001 + + def process(self, instance): + self._create_context_thumbnail(instance.context) + + subset_name = instance.data["subset"] + self.log.info( + "Processing instance with subset name {}".format(subset_name) + ) + thumbnail_source = instance.data.get("thumbnailSource") + if not thumbnail_source: + self.log.debug("Thumbnail source not filled. Skipping.") + return + + # Check if already has thumbnail created + if self._instance_has_thumbnail(instance): + self.log.info("Thumbnail representation already present.") + return + + dst_filepath = self._create_thumbnail( + instance.context, thumbnail_source + ) + if not dst_filepath: + return + + dst_staging, dst_filename = os.path.split(dst_filepath) + new_repre = { + "name": "thumbnail", + "ext": "jpg", + "files": dst_filename, + "stagingDir": dst_staging, + "thumbnail": True, + "tags": ["thumbnail"] + } + + # adding representation + self.log.debug( + "Adding thumbnail representation: {}".format(new_repre) + ) + instance.data["representations"].append(new_repre) + + def _create_thumbnail(self, context, thumbnail_source): + if not thumbnail_source: + self.log.debug("Thumbnail source not filled. Skipping.") + return + + if not os.path.exists(thumbnail_source): + self.log.debug(( + "Thumbnail source is set but file was not found {}. Skipping." + ).format(thumbnail_source)) + return + + # Create temp directory for thumbnail + # - this is to avoid "override" of source file + dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + self.log.debug( + "Create temp directory {} for thumbnail".format(dst_staging) + ) + # Store new staging to cleanup paths + context.data["cleanupFullPaths"].append(dst_staging) + + thumbnail_created = False + oiio_supported = is_oiio_supported() + + self.log.info("Thumbnail source: {}".format(thumbnail_source)) + src_basename = os.path.basename(thumbnail_source) + dst_filename = os.path.splitext(src_basename)[0] + ".jpg" + full_output_path = os.path.join(dst_staging, dst_filename) + + if oiio_supported: + self.log.info("Trying to convert with OIIO") + # If the input can read by OIIO then use OIIO method for + # conversion otherwise use ffmpeg + thumbnail_created = self.create_thumbnail_oiio( + thumbnail_source, full_output_path + ) + + # Try to use FFMPEG if OIIO is not supported or for cases when + # oiiotool isn't available + if not thumbnail_created: + if oiio_supported: + self.log.info(( + "Converting with FFMPEG because input" + " can't be read by OIIO." 
+ )) + + thumbnail_created = self.create_thumbnail_ffmpeg( + thumbnail_source, full_output_path + ) + + # Skip representation and try next one if wasn't created + if thumbnail_created: + return full_output_path + + self.log.warning("Thumbanil has not been created.") + + def _instance_has_thumbnail(self, instance): + if "representations" not in instance.data: + self.log.warning( + "Instance does not have 'representations' key filled" + ) + instance.data["representations"] = [] + + for repre in instance.data["representations"]: + if repre["name"] == "thumbnail": + return True + return False + + def create_thumbnail_oiio(self, src_path, dst_path): + self.log.info("outputting {}".format(dst_path)) + oiio_tool_path = get_oiio_tools_path() + oiio_cmd = [ + oiio_tool_path, + "-a", src_path, + "-o", dst_path + ] + self.log.info("Running: {}".format(" ".join(oiio_cmd))) + try: + run_subprocess(oiio_cmd, logger=self.log) + return True + except Exception: + self.log.warning( + "Failed to create thubmnail using oiiotool", + exc_info=True + ) + return False + + def create_thumbnail_ffmpeg(self, src_path, dst_path): + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") + + max_int = str(2147483647) + ffmpeg_cmd = [ + ffmpeg_path, + "-y", + "-analyzeduration", max_int, + "-probesize", max_int, + "-i", src_path, + "-vframes", "1", + dst_path + ] + + self.log.info("Running: {}".format(" ".join(ffmpeg_cmd))) + try: + run_subprocess(ffmpeg_cmd, logger=self.log) + return True + except Exception: + self.log.warning( + "Failed to create thubmnail using ffmpeg", + exc_info=True + ) + return False + + def _create_context_thumbnail(self, context): + if "thumbnailPath" in context.data: + return + + thumbnail_source = context.data.get("thumbnailSource") + thumbnail_path = self._create_thumbnail(context, thumbnail_source) + context.data["thumbnailPath"] = thumbnail_path diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py b/openpype/plugins/publish/extract_trim_video_audio.py similarity index 67% rename from openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py rename to openpype/plugins/publish/extract_trim_video_audio.py index f327895b83..b951136391 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py +++ b/openpype/plugins/publish/extract_trim_video_audio.py @@ -1,20 +1,22 @@ import os +from pprint import pformat + import pyblish.api -import openpype.api from openpype.lib import ( get_ffmpeg_tool_path, + run_subprocess, ) -from pprint import pformat +from openpype.pipeline import publish -class ExtractTrimVideoAudio(openpype.api.Extractor): +class ExtractTrimVideoAudio(publish.Extractor): """Trim with ffmpeg "mov" and "wav" files.""" # must be before `ExtractThumbnailSP` order = pyblish.api.ExtractorOrder - 0.01 label = "Extract Trim Video/Audio" - hosts = ["standalonepublisher"] + hosts = ["standalonepublisher", "traypublisher"] families = ["clip", "trimming"] # make sure it is enabled only if at least both families are available @@ -39,23 +41,35 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): # Generate mov file. 
fps = instance.data["fps"] video_file_path = instance.data["editorialSourcePath"] - extensions = instance.data.get("extensions", [".mov"]) + extensions = instance.data.get("extensions", ["mov"]) + output_file_type = instance.data.get("outputFileType") + reviewable = "review" in instance.data["families"] + + frame_start = int(instance.data["frameStart"]) + frame_end = int(instance.data["frameEnd"]) + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + + clip_start_h = float(instance.data["clipInH"]) + _dur = instance.data["clipDuration"] + handle_dur = (handle_start + handle_end) + clip_dur_h = float(_dur + handle_dur) + + if output_file_type: + extensions = [output_file_type] for ext in extensions: self.log.info("Processing ext: `{}`".format(ext)) + if not ext.startswith("."): + ext = "." + ext + clip_trimed_path = os.path.join( staging_dir, instance.data["name"] + ext) - # # check video file metadata - # input_data = plib.get_ffprobe_streams(video_file_path)[0] - # self.log.debug(f"__ input_data: `{input_data}`") - - start = float(instance.data["clipInH"]) - dur = float(instance.data["clipDurationH"]) if ext == ".wav": # offset time as ffmpeg is having bug - start += 0.5 + clip_start_h += 0.5 # remove "review" from families instance.data["families"] = [ fml for fml in instance.data["families"] @@ -64,9 +78,9 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): ffmpeg_args = [ ffmpeg_path, - "-ss", str(start / fps), + "-ss", str(clip_start_h / fps), "-i", video_file_path, - "-t", str(dur / fps) + "-t", str(clip_dur_h / fps) ] if ext in [".mov", ".mp4"]: ffmpeg_args.extend([ @@ -86,7 +100,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): joined_args = " ".join(ffmpeg_args) self.log.info(f"Processing: {joined_args}") - openpype.api.run_subprocess( + run_subprocess( ffmpeg_args, logger=self.log ) @@ -95,14 +109,15 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): "ext": ext[1:], "files": os.path.basename(clip_trimed_path), "stagingDir": staging_dir, - "frameStart": int(instance.data["frameStart"]), - "frameEnd": int(instance.data["frameEnd"]), - "frameStartFtrack": int(instance.data["frameStartH"]), - "frameEndFtrack": int(instance.data["frameEndH"]), + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartFtrack": frame_start - handle_start, + "frameEndFtrack": frame_end + handle_end, "fps": fps, + "tags": [] } - if ext in [".mov", ".mp4"]: + if ext in [".mov", ".mp4"] and reviewable: repre.update({ "thumbnail": True, "tags": ["review", "ftrackreview", "delete"]}) diff --git a/openpype/plugins/publish/help/validate_containers.xml b/openpype/plugins/publish/help/validate_containers.xml new file mode 100644 index 0000000000..5d18bb4c19 --- /dev/null +++ b/openpype/plugins/publish/help/validate_containers.xml @@ -0,0 +1,23 @@ + + + +Not up-to-date assets + +## Outdated containers found + +Scene contains one or more outdated loaded containers, eg. versions loaded into scene by Loader are not latest. + +### How to repair? + +Use 'Scene Inventory' and update all highlighted old container to latest OR + refresh Publish and switch 'Validate Containers' toggle on 'Options' tab. + + WARNING: Skipping this validator will result in publishing (and probably rendering) old version of loaded assets. + + +### __Detailed Info__ (optional) + +This validates whether you're working with the latest versions of published content loaded into your scene. This protects you from using outdated versions of an asset. 
+ + + \ No newline at end of file diff --git a/openpype/plugins/publish/integrate.py b/openpype/plugins/publish/integrate.py new file mode 100644 index 0000000000..401270a788 --- /dev/null +++ b/openpype/plugins/publish/integrate.py @@ -0,0 +1,890 @@ +import os +import logging +import sys +import copy +import clique +import six + +from bson.objectid import ObjectId +import pyblish.api + +from openpype.client.operations import ( + OperationsSession, + new_subset_document, + new_version_doc, + new_representation_doc, + prepare_subset_update_data, + prepare_version_update_data, + prepare_representation_update_data, +) + +from openpype.client import ( + get_representations, + get_subset_by_name, + get_version_by_name, +) +from openpype.lib import source_hash +from openpype.lib.file_transaction import FileTransaction +from openpype.pipeline import legacy_io +from openpype.pipeline.publish import ( + KnownPublishError, + get_publish_template_name, +) + +log = logging.getLogger(__name__) + + +def get_instance_families(instance): + """Get all families of the instance""" + # todo: move this to lib? + family = instance.data.get("family") + families = [] + if family: + families.append(family) + + for _family in (instance.data.get("families") or []): + if _family not in families: + families.append(_family) + + return families + + +def get_frame_padded(frame, padding): + """Return frame number as string with `padding` amount of padded zeros""" + return "{frame:0{padding}d}".format(padding=padding, frame=frame) + + +class IntegrateAsset(pyblish.api.InstancePlugin): + """Register publish in the database and transfer files to destinations. + + Steps: + 1) Register the subset and version + 2) Transfer the representation files to the destination + 3) Register the representation + + Requires: + instance.data['representations'] - must be a list and each member + must be a dictionary with following data: + 'files': list of filenames for sequence, string for single file. + Only the filename is allowed, without the folder path. + 'stagingDir': "path/to/folder/with/files" + 'name': representation name (usually the same as extension) + 'ext': file extension + optional data + "frameStart" + "frameEnd" + 'fps' + "data": additional metadata for each representation. 
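+
+        Example (illustrative only, not part of the plugin; the key names
+        follow the list above and the values are made up):
+            {
+                "name": "exr",
+                "ext": "exr",
+                "files": ["render.0001.exr", "render.0002.exr"],
+                "stagingDir": "/path/to/staging",
+                "frameStart": 1,
+                "frameEnd": 2
+            }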
+ """ + + label = "Integrate Asset" + order = pyblish.api.IntegratorOrder + families = ["workfile", + "pointcache", + "camera", + "animation", + "model", + "mayaAscii", + "mayaScene", + "setdress", + "layout", + "ass", + "vdbcache", + "scene", + "vrayproxy", + "vrayscene_layer", + "render", + "prerender", + "imagesequence", + "review", + "rendersetup", + "rig", + "plate", + "look", + "audio", + "yetiRig", + "yeticache", + "nukenodes", + "gizmo", + "source", + "matchmove", + "image", + "assembly", + "fbx", + "textures", + "action", + "harmony.template", + "harmony.palette", + "editorial", + "background", + "camerarig", + "redshiftproxy", + "effect", + "xgen", + "hda", + "usd", + "staticMesh", + "skeletalMesh", + "mvLook", + "mvUsd", + "mvUsdComposition", + "mvUsdOverride", + "simpleUnrealTexture", + "online" + ] + + default_template_name = "publish" + + # Representation context keys that should always be written to + # the database even if not used by the destination template + db_representation_context_keys = [ + "project", "asset", "task", "subset", "version", "representation", + "family", "hierarchy", "username", "user", "output" + ] + skip_host_families = [] + + def process(self, instance): + if self._temp_skip_instance_by_settings(instance): + return + + # Mark instance as processed for legacy integrator + instance.data["processedWithNewIntegrator"] = True + + # Instance should be integrated on a farm + if instance.data.get("farm"): + self.log.info( + "Instance is marked to be processed on farm. Skipping") + return + + filtered_repres = self.filter_representations(instance) + # Skip instance if there are not representations to integrate + # all representations should not be integrated + if not filtered_repres: + self.log.warning(( + "Skipping, there are no representations" + " to integrate for instance {}" + ).format(instance.data["family"])) + return + + file_transactions = FileTransaction(log=self.log) + try: + self.register(instance, file_transactions, filtered_repres) + except Exception: + # clean destination + # todo: preferably we'd also rollback *any* changes to the database + file_transactions.rollback() + self.log.critical("Error when registering", exc_info=True) + six.reraise(*sys.exc_info()) + + # Finalizing can't rollback safely so no use for moving it to + # the try, except. + file_transactions.finalize() + + def _temp_skip_instance_by_settings(self, instance): + """Decide if instance will be processed with new or legacy integrator. + + This is temporary solution until we test all usecases with new (this) + integrator plugin. 
+ """ + + host_name = instance.context.data["hostName"] + instance_family = instance.data["family"] + instance_families = set(instance.data.get("families") or []) + + skip = False + for item in self.skip_host_families: + if host_name not in item["host"]: + continue + + families = set(item["families"]) + if instance_family in families: + skip = True + break + + for family in instance_families: + if family in families: + skip = True + break + + if skip: + break + + if skip: + self.log.debug("Instance is marked to be skipped by settings.") + return skip + + def filter_representations(self, instance): + # Prepare repsentations that should be integrated + repres = instance.data.get("representations") + # Raise error if instance don't have any representations + if not repres: + raise KnownPublishError( + "Instance {} has no representations to integrate".format( + instance.data["family"] + ) + ) + + # Validate type of stored representations + if not isinstance(repres, (list, tuple)): + raise TypeError( + "Instance 'files' must be a list, got: {0} {1}".format( + str(type(repres)), str(repres) + ) + ) + + # Filter representations + filtered_repres = [] + for repre in repres: + if "delete" in repre.get("tags", []): + continue + filtered_repres.append(repre) + + return filtered_repres + + def register(self, instance, file_transactions, filtered_repres): + project_name = legacy_io.active_project() + + instance_stagingdir = instance.data.get("stagingDir") + if not instance_stagingdir: + self.log.info(( + "{0} is missing reference to staging directory." + " Will try to get it from representation." + ).format(instance)) + + else: + self.log.debug( + "Establishing staging directory " + "@ {0}".format(instance_stagingdir) + ) + + template_name = self.get_template_name(instance) + + op_session = OperationsSession() + subset = self.prepare_subset( + instance, op_session, project_name + ) + version = self.prepare_version( + instance, op_session, subset, project_name + ) + instance.data["versionEntity"] = version + + # Get existing representations (if any) + existing_repres_by_name = { + repre_doc["name"].lower(): repre_doc + for repre_doc in get_representations( + project_name, + version_ids=[version["_id"]], + fields=["_id", "name"] + ) + } + + # Prepare all representations + prepared_representations = [] + for repre in filtered_repres: + # todo: reduce/simplify what is returned from this function + prepared = self.prepare_representation( + repre, + template_name, + existing_repres_by_name, + version, + instance_stagingdir, + instance) + + for src, dst in prepared["transfers"]: + # todo: add support for hardlink transfers + file_transactions.add(src, dst) + + prepared_representations.append(prepared) + + # Each instance can also have pre-defined transfers not explicitly + # part of a representation - like texture resources used by a + # .ma representation. Those destination paths are pre-defined, etc. + # todo: should we move or simplify this logic? 
+ resource_destinations = set() + for src, dst in instance.data.get("transfers", []): + file_transactions.add(src, dst, mode=FileTransaction.MODE_COPY) + resource_destinations.add(os.path.abspath(dst)) + + for src, dst in instance.data.get("hardlinks", []): + file_transactions.add(src, dst, mode=FileTransaction.MODE_HARDLINK) + resource_destinations.add(os.path.abspath(dst)) + + # Bulk write to the database + # We write the subset and version to the database before the File + # Transaction to reduce the chances of another publish trying to + # publish to the same version number since that chance can greatly + # increase if the file transaction takes a long time. + op_session.commit() + + self.log.info("Subset {subset[name]} and Version {version[name]} " + "written to database..".format(subset=subset, + version=version)) + + # Process all file transfers of all integrations now + self.log.debug("Integrating source files to destination ...") + file_transactions.process() + self.log.debug( + "Backed up existing files: {}".format(file_transactions.backups)) + self.log.debug( + "Transferred files: {}".format(file_transactions.transferred)) + self.log.debug("Retrieving Representation Site Sync information ...") + + # Get the accessible sites for Site Sync + modules_by_name = instance.context.data["openPypeModules"] + sync_server_module = modules_by_name["sync_server"] + sites = sync_server_module.compute_resource_sync_sites( + project_name=instance.data["projectEntity"]["name"] + ) + self.log.debug("Sync Server Sites: {}".format(sites)) + + # Compute the resource file infos once (files belonging to the + # version instance instead of an individual representation) so + # we can re-use those file infos per representation + anatomy = instance.context.data["anatomy"] + resource_file_infos = self.get_files_info(resource_destinations, + sites=sites, + anatomy=anatomy) + + # Finalize the representations now the published files are integrated + # Get 'files' info for representations and its attached resources + new_repre_names_low = set() + for prepared in prepared_representations: + repre_doc = prepared["representation"] + repre_update_data = prepared["repre_doc_update_data"] + transfers = prepared["transfers"] + destinations = [dst for src, dst in transfers] + repre_doc["files"] = self.get_files_info( + destinations, sites=sites, anatomy=anatomy + ) + + # Add the version resource file infos to each representation + repre_doc["files"] += resource_file_infos + + # Set up representation for writing to the database. 
Since + # we *might* be overwriting an existing entry if the version + # already existed we'll use ReplaceOnce with `upsert=True` + if repre_update_data is None: + op_session.create_entity( + project_name, repre_doc["type"], repre_doc + ) + else: + op_session.update_entity( + project_name, + repre_doc["type"], + repre_doc["_id"], + repre_update_data + ) + + new_repre_names_low.add(repre_doc["name"].lower()) + + # Delete any existing representations that didn't get any new data + # if the instance is not set to append mode + if not instance.data.get("append", False): + for name, existing_repres in existing_repres_by_name.items(): + if name not in new_repre_names_low: + # We add the exact representation name because `name` is + # lowercase for name matching only and not in the database + op_session.delete_entity( + project_name, "representation", existing_repres["_id"] + ) + + self.log.debug("{}".format(op_session.to_data())) + op_session.commit() + + # Backwards compatibility + # todo: can we avoid the need to store this? + instance.data["published_representations"] = { + p["representation"]["_id"]: p for p in prepared_representations + } + + self.log.info("Registered {} representations" + "".format(len(prepared_representations))) + + def prepare_subset(self, instance, op_session, project_name): + asset_doc = instance.data["assetEntity"] + subset_name = instance.data["subset"] + family = instance.data["family"] + self.log.debug("Subset: {}".format(subset_name)) + + # Get existing subset if it exists + existing_subset_doc = get_subset_by_name( + project_name, subset_name, asset_doc["_id"] + ) + + # Define subset data + data = { + "families": get_instance_families(instance) + } + + subset_group = instance.data.get("subsetGroup") + if subset_group: + data["subsetGroup"] = subset_group + elif existing_subset_doc: + # Preserve previous subset group if new version does not set it + if "subsetGroup" in existing_subset_doc.get("data", {}): + subset_group = existing_subset_doc["data"]["subsetGroup"] + data["subsetGroup"] = subset_group + + subset_id = None + if existing_subset_doc: + subset_id = existing_subset_doc["_id"] + subset_doc = new_subset_document( + subset_name, family, asset_doc["_id"], data, subset_id + ) + + if existing_subset_doc is None: + # Create a new subset + self.log.info("Subset '%s' not found, creating ..." % subset_name) + op_session.create_entity( + project_name, subset_doc["type"], subset_doc + ) + + else: + # Update existing subset data with new data and set in database. 
+ # We also change the found subset in-place so we don't need to + # re-query the subset afterwards + subset_doc["data"].update(data) + update_data = prepare_subset_update_data( + existing_subset_doc, subset_doc + ) + op_session.update_entity( + project_name, + subset_doc["type"], + subset_doc["_id"], + update_data + ) + + self.log.info("Prepared subset: {}".format(subset_name)) + return subset_doc + + def prepare_version(self, instance, op_session, subset_doc, project_name): + version_number = instance.data["version"] + + existing_version = get_version_by_name( + project_name, + version_number, + subset_doc["_id"], + fields=["_id"] + ) + version_id = None + if existing_version: + version_id = existing_version["_id"] + + version_data = self.create_version_data(instance) + version_doc = new_version_doc( + version_number, + subset_doc["_id"], + version_data, + version_id + ) + + if existing_version: + self.log.debug("Updating existing version ...") + update_data = prepare_version_update_data( + existing_version, version_doc + ) + op_session.update_entity( + project_name, + version_doc["type"], + version_doc["_id"], + update_data + ) + else: + self.log.debug("Creating new version ...") + op_session.create_entity( + project_name, version_doc["type"], version_doc + ) + + self.log.info("Prepared version: v{0:03d}".format(version_doc["name"])) + + return version_doc + + def prepare_representation(self, repre, + template_name, + existing_repres_by_name, + version, + instance_stagingdir, + instance): + + # pre-flight validations + if repre["ext"].startswith("."): + raise KnownPublishError(( + "Extension must not start with a dot '.': {}" + ).format(repre["ext"])) + + if repre.get("transfers"): + raise KnownPublishError(( + "Representation is not allowed to have transfers" + "data before integration. They are computed in " + "the integrator. 
Got: {}" + ).format(repre["transfers"])) + + # create template data for Anatomy + template_data = copy.deepcopy(instance.data["anatomyData"]) + + # required representation keys + files = repre["files"] + template_data["representation"] = repre["name"] + template_data["ext"] = repre["ext"] + + # optionals + # retrieve additional anatomy data from representation if exists + for key, anatomy_key in { + # Representation Key: Anatomy data key + "resolutionWidth": "resolution_width", + "resolutionHeight": "resolution_height", + "fps": "fps", + "outputName": "output", + "originalBasename": "originalBasename" + }.items(): + # Allow to take value from representation + # if not found also consider instance.data + value = repre.get(key) + if value is None: + value = instance.data.get(key) + + if value is not None: + template_data[anatomy_key] = value + + stagingdir = repre.get("stagingDir") + if not stagingdir: + # Fall back to instance staging dir if not explicitly + # set for representation in the instance + self.log.debug(( + "Representation uses instance staging dir: {}" + ).format(instance_stagingdir)) + stagingdir = instance_stagingdir + + if not stagingdir: + raise KnownPublishError( + "No staging directory set for representation: {}".format(repre) + ) + + self.log.debug("Anatomy template name: {}".format(template_name)) + anatomy = instance.context.data["anatomy"] + publish_template_category = anatomy.templates[template_name] + template = os.path.normpath(publish_template_category["path"]) + + is_udim = bool(repre.get("udim")) + + is_sequence_representation = isinstance(files, (list, tuple)) + if is_sequence_representation: + # Collection of files (sequence) + if any(os.path.isabs(fname) for fname in files): + raise KnownPublishError("Given file names contain full paths") + + src_collections, remainders = clique.assemble(files) + if len(files) < 2 or len(src_collections) != 1 or remainders: + raise KnownPublishError(( + "Files of representation does not contain proper" + " sequence files.\nCollected collections: {}" + "\nCollected remainders: {}" + ).format( + ", ".join([str(col) for col in src_collections]), + ", ".join([str(rem) for rem in remainders]) + )) + + src_collection = src_collections[0] + destination_indexes = list(src_collection.indexes) + # Use last frame for minimum padding + # - that should cover both 'udim' and 'frame' minimum padding + destination_padding = len(str(destination_indexes[-1])) + if not is_udim: + # Change padding for frames if template has defined higher + # padding. + template_padding = int( + publish_template_category["frame_padding"] + ) + if template_padding > destination_padding: + destination_padding = template_padding + + # If the representation has `frameStart` set it renumbers the + # frame indices of the published collection. It will start from + # that `frameStart` index instead. Thus if that frame start + # differs from the collection we want to shift the destination + # frame indices from the source collection. + repre_frame_start = repre.get("frameStart") + if repre_frame_start is not None: + index_frame_start = int(repre["frameStart"]) + # Shift destination sequence to the start frame + destination_indexes = [ + index_frame_start + idx + for idx in range(len(destination_indexes)) + ] + + # To construct the destination template with anatomy we require + # a Frame or UDIM tile set for the template data. We use the first + # index of the destination for that because that could've shifted + # from the source indexes, etc. 
+ first_index_padded = get_frame_padded( + frame=destination_indexes[0], + padding=destination_padding + ) + + # Construct destination collection from template + repre_context = None + dst_filepaths = [] + for index in destination_indexes: + if is_udim: + template_data["udim"] = index + else: + template_data["frame"] = index + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled[template_name]["path"] + dst_filepaths.append(template_filled) + if repre_context is None: + self.log.debug( + "Template filled: {}".format(str(template_filled)) + ) + repre_context = template_filled.used_values + + # Make sure context contains frame + # NOTE: Frame would not be available only if template does not + # contain '{frame}' in template -> Do we want support it? + if not is_udim: + repre_context["frame"] = first_index_padded + + # Update the destination indexes and padding + dst_collection = clique.assemble(dst_filepaths)[0][0] + dst_collection.padding = destination_padding + if len(src_collection.indexes) != len(dst_collection.indexes): + raise KnownPublishError(( + "This is a bug. Source sequence frames length" + " does not match integration frames length" + )) + + # Multiple file transfers + transfers = [] + for src_file_name, dst in zip(src_collection, dst_collection): + src = os.path.join(stagingdir, src_file_name) + transfers.append((src, dst)) + + else: + # Single file + fname = files + if os.path.isabs(fname): + self.log.error( + "Filename in representation is filepath {}".format(fname) + ) + raise KnownPublishError( + "This is a bug. Representation file name is full path" + ) + + # Manage anatomy template data + template_data.pop("frame", None) + if is_udim: + template_data["udim"] = repre["udim"][0] + + # Construct destination filepath from template + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled[template_name]["path"] + repre_context = template_filled.used_values + dst = os.path.normpath(template_filled) + + # Single file transfer + src = os.path.join(stagingdir, fname) + transfers = [(src, dst)] + + # todo: Are we sure the assumption each representation + # ends up in the same folder is valid? + if not instance.data.get("publishDir"): + instance.data["publishDir"] = ( + anatomy_filled + [template_name] + ["folder"] + ) + + for key in self.db_representation_context_keys: + # Also add these values to the context even if not used by the + # destination template + value = template_data.get(key) + if value is not None: + repre_context[key] = value + + # Explicitly store the full list even though template data might + # have a different value because it uses just a single udim tile + if repre.get("udim"): + repre_context["udim"] = repre.get("udim") # store list + + # Use previous representation's id if there is a name match + existing = existing_repres_by_name.get(repre["name"].lower()) + repre_id = None + if existing: + repre_id = existing["_id"] + + # Store first transferred destination as published path data + # - used primarily for reviews that are integrated to custom modules + # TODO we should probably store all integrated files + # related to the representation? 
+ published_path = transfers[0][1] + repre["published_path"] = published_path + + # todo: `repre` is not the actual `representation` entity + # we should simplify/clarify difference between data above + # and the actual representation entity for the database + data = repre.get("data", {}) + data.update({"path": published_path, "template": template}) + repre_doc = new_representation_doc( + repre["name"], version["_id"], repre_context, data, repre_id + ) + update_data = None + if repre_id is not None: + update_data = prepare_representation_update_data( + existing, repre_doc + ) + + return { + "representation": repre_doc, + "repre_doc_update_data": update_data, + "anatomy_data": template_data, + "transfers": transfers, + # todo: avoid the need for 'published_files' used by Integrate Hero + # backwards compatibility + "published_files": [transfer[1] for transfer in transfers] + } + + def create_version_data(self, instance): + """Create the data dictionary for the version + + Args: + instance: the current instance being published + + Returns: + dict: the required information for version["data"] + """ + + context = instance.context + + # create relative source path for DB + if "source" in instance.data: + source = instance.data["source"] + else: + source = context.data["currentFile"] + anatomy = instance.context.data["anatomy"] + source = self.get_rootless_path(anatomy, source) + self.log.debug("Source: {}".format(source)) + + version_data = { + "families": get_instance_families(instance), + "time": context.data["time"], + "author": context.data["user"], + "source": source, + "comment": context.data.get("comment"), + "machine": context.data.get("machine"), + "fps": instance.data.get("fps", context.data.get("fps")) + } + + # todo: preferably we wouldn't need this "if dict" etc. logic and + # instead be able to rely what the input value is if it's set. + intent_value = context.data.get("intent") + if intent_value and isinstance(intent_value, dict): + intent_value = intent_value.get("value") + + if intent_value: + version_data["intent"] = intent_value + + # Include optional data if present in + optionals = [ + "frameStart", "frameEnd", "step", "handles", + "handleEnd", "handleStart", "sourceHashes" + ] + for key in optionals: + if key in instance.data: + version_data[key] = instance.data[key] + + # Include instance.data[versionData] directly + version_data_instance = instance.data.get("versionData") + if version_data_instance: + version_data.update(version_data_instance) + + return version_data + + def get_template_name(self, instance): + """Return anatomy template name to use for integration""" + + # Anatomy data is pre-filled by Collectors + + project_name = legacy_io.active_project() + + # Task can be optional in anatomy data + host_name = instance.context.data["hostName"] + anatomy_data = instance.data["anatomyData"] + family = anatomy_data["family"] + task_info = anatomy_data.get("task") or {} + + return get_publish_template_name( + project_name, + host_name, + family, + task_name=task_info.get("name"), + task_type=task_info.get("type"), + project_settings=instance.context.data["project_settings"], + logger=self.log + ) + + def get_rootless_path(self, anatomy, path): + """Returns, if possible, path without absolute portion from root + (eg. 'c:\' or '/opt/..') + + This information is platform dependent and shouldn't be captured. + Example: + 'c:/projects/MyProject1/Assets/publish...' > + '{root}/MyProject1/Assets...' 
+ + Args: + anatomy: anatomy part from instance + path: path (absolute) + Returns: + path: modified path if possible, or unmodified path + + warning logged + """ + + success, rootless_path = anatomy.find_root_template_from_path(path) + if success: + path = rootless_path + else: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(path)) + return path + + def get_files_info(self, destinations, sites, anatomy): + """Prepare 'files' info portion for representations. + + Arguments: + destinations (list): List of transferred file destinations + sites (list): array of published locations + anatomy: anatomy part from instance + Returns: + output_resources: array of dictionaries to be added to 'files' key + in representation + """ + + file_infos = [] + for file_path in destinations: + file_info = self.prepare_file_info(file_path, anatomy, sites=sites) + file_infos.append(file_info) + return file_infos + + def prepare_file_info(self, path, anatomy, sites): + """ Prepare information for one file (asset or resource) + + Arguments: + path: destination url of published file + anatomy: anatomy part from instance + sites: array of published locations, + [ {'name':'studio', 'created_dt':date} by default + keys expected ['studio', 'site1', 'gdrive1'] + + Returns: + dict: file info dictionary + """ + + return { + "_id": ObjectId(), + "path": self.get_rootless_path(anatomy, path), + "size": os.path.getsize(path), + "hash": source_hash(path), + "sites": sites + } diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index a706b653c4..5f4d284740 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -4,18 +4,25 @@ import clique import errno import shutil -from bson.objectid import ObjectId -from pymongo import InsertOne, ReplaceOne import pyblish.api -from openpype.lib import ( - create_hard_link, - filter_profiles +from openpype.client import ( + get_version_by_id, + get_hero_version_by_subset_id, + get_archived_representations, + get_representations, ) +from openpype.client.operations import ( + OperationsSession, + new_hero_version_doc, + prepare_hero_version_update_data, + prepare_representation_update_data, +) +from openpype.lib import create_hard_link from openpype.pipeline import ( - schema, - legacy_io, + schema ) +from openpype.pipeline.publish import get_publish_template_name class IntegrateHeroVersion(pyblish.api.InstancePlugin): @@ -40,7 +47,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): ignored_representation_names = [] db_representation_context_keys = [ "project", "asset", "task", "subset", "representation", - "family", "hierarchy", "task", "username" + "family", "hierarchy", "task", "username", "user" ] # QUESTION/TODO this process should happen on server if crashed due to # permissions error on files (files were used or user didn't have perms) @@ -62,10 +69,11 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): ) return - template_key = self._get_template_key(instance) - anatomy = instance.context.data["anatomy"] - project_name = legacy_io.Session["AVALON_PROJECT"] + project_name = anatomy.project_name + + template_key = self._get_template_key(project_name, instance) + if template_key not in anatomy.templates: self.log.warning(( "!!! 
Anatomy of project \"{}\" does not have set" @@ -85,9 +93,13 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): hero_template )) - self.integrate_instance(instance, template_key, hero_template) + self.integrate_instance( + instance, project_name, template_key, hero_template + ) - def integrate_instance(self, instance, template_key, hero_template): + def integrate_instance( + self, instance, project_name, template_key, hero_template + ): anatomy = instance.context.data["anatomy"] published_repres = instance.data["published_representations"] hero_publish_dir = self.get_publish_dir(instance, template_key) @@ -118,8 +130,8 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): "Published version entity was not sent in representation data." " Querying entity from database." )) - src_version_entity = ( - self.version_from_representations(published_repres) + src_version_entity = self.version_from_representations( + project_name, published_repres ) if not src_version_entity: @@ -170,43 +182,40 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): other_file_paths_mapping.append((file_path, dst_filepath)) # Current version - old_version, old_repres = ( - self.current_hero_ents(src_version_entity) + old_version, old_repres = self.current_hero_ents( + project_name, src_version_entity ) old_repres_by_name = { repre["name"].lower(): repre for repre in old_repres } + op_session = OperationsSession() + + entity_id = None if old_version: - new_version_id = old_version["_id"] - else: - new_version_id = ObjectId() - - new_hero_version = { - "_id": new_version_id, - "version_id": src_version_entity["_id"], - "parent": src_version_entity["parent"], - "type": "hero_version", - "schema": "openpype:hero_version-1.0" - } - schema.validate(new_hero_version) - - # Don't make changes in database until everything is O.K. 
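The hunk above swaps the raw pymongo bulk writes for the OperationsSession API. Condensed to its essence, and using only the calls that appear in this patch (the project name and version documents below are placeholder arguments), the create-or-update flow for the hero version looks roughly like this:

from openpype.client.operations import (
    OperationsSession,
    new_hero_version_doc,
    prepare_hero_version_update_data,
)

def upsert_hero_version(project_name, src_version, old_hero_version=None):
    op_session = OperationsSession()
    entity_id = old_hero_version["_id"] if old_hero_version else None
    new_doc = new_hero_version_doc(
        src_version["_id"], src_version["parent"], entity_id=entity_id
    )
    if old_hero_version:
        # Only the changed fields are sent to the database
        update_data = prepare_hero_version_update_data(
            old_hero_version, new_doc
        )
        op_session.update_entity(
            project_name, new_doc["type"], old_hero_version["_id"], update_data
        )
    else:
        op_session.create_entity(project_name, new_doc["type"], new_doc)
    # Nothing touches the database until commit, preserving the old
    # "don't make changes until everything is O.K." behaviour
    op_session.commit()
    return new_doc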
- bulk_writes = [] + entity_id = old_version["_id"] + new_hero_version = new_hero_version_doc( + src_version_entity["_id"], + src_version_entity["parent"], + entity_id=entity_id + ) if old_version: self.log.debug("Replacing old hero version.") - bulk_writes.append( - ReplaceOne( - {"_id": new_hero_version["_id"]}, - new_hero_version - ) + update_data = prepare_hero_version_update_data( + old_version, new_hero_version + ) + op_session.update_entity( + project_name, + new_hero_version["type"], + old_version["_id"], + update_data ) else: self.log.debug("Creating first hero version.") - bulk_writes.append( - InsertOne(new_hero_version) + op_session.create_entity( + project_name, new_hero_version["type"], new_hero_version ) # Separate old representations into `to replace` and `to delete` @@ -223,11 +232,11 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): if old_repres_by_name: old_repres_to_delete = old_repres_by_name - archived_repres = list(legacy_io.find({ + archived_repres = list(get_archived_representations( + project_name, # Check what is type of archived representation - "type": "archived_repsentation", - "parent": new_version_id - })) + version_ids=[new_hero_version["_id"]] + )) archived_repres_by_name = {} for repre in archived_repres: repre_name_low = repre["name"].lower() @@ -303,13 +312,9 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): } repre_context = template_filled.used_values for key in self.db_representation_context_keys: - if ( - key in repre_context or - key not in anatomy_data - ): - continue - - repre_context[key] = anatomy_data[key] + value = anatomy_data.get(key) + if value is not None: + repre_context[key] = value # Prepare new repre repre = copy.deepcopy(repre_info["representation"]) @@ -377,12 +382,15 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): # Replace current representation if repre_name_low in old_repres_to_replace: old_repre = old_repres_to_replace.pop(repre_name_low) + repre["_id"] = old_repre["_id"] - bulk_writes.append( - ReplaceOne( - {"_id": old_repre["_id"]}, - repre - ) + update_data = prepare_representation_update_data( + old_repre, repre) + op_session.update_entity( + project_name, + old_repre["type"], + old_repre["_id"], + update_data ) # Unarchive representation @@ -390,21 +398,21 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): archived_repre = archived_repres_by_name.pop( repre_name_low ) - old_id = archived_repre["old_id"] - repre["_id"] = old_id - bulk_writes.append( - ReplaceOne( - {"old_id": old_id}, - repre - ) + repre["_id"] = archived_repre["old_id"] + update_data = prepare_representation_update_data( + archived_repre, repre) + op_session.update_entity( + project_name, + old_repre["type"], + archived_repre["_id"], + update_data ) # Create representation else: - repre["_id"] = ObjectId() - bulk_writes.append( - InsertOne(repre) - ) + repre.pop("_id", None) + op_session.create_entity(project_name, "representation", + repre) self.path_checks = [] @@ -425,29 +433,22 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): archived_repre = archived_repres_by_name.pop( repre_name_low ) - repre["old_id"] = repre["_id"] - repre["_id"] = archived_repre["_id"] - repre["type"] = archived_repre["type"] - bulk_writes.append( - ReplaceOne( - {"_id": archived_repre["_id"]}, - repre - ) - ) + changes = {"old_id": repre["_id"], + "_id": archived_repre["_id"], + "type": archived_repre["type"]} + op_session.update_entity(project_name, + archived_repre["type"], + archived_repre["_id"], + changes) else: - 
repre["old_id"] = repre["_id"] - repre["_id"] = ObjectId() + repre["old_id"] = repre.pop("_id") repre["type"] = "archived_representation" - bulk_writes.append( - InsertOne(repre) - ) + op_session.create_entity(project_name, + "archived_representation", + repre) - if bulk_writes: - project_name = legacy_io.Session["AVALON_PROJECT"] - legacy_io.database[project_name].bulk_write( - bulk_writes - ) + op_session.commit() # Remove backuped previous hero if ( @@ -507,11 +508,10 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): anatomy_filled = anatomy.format(template_data) # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." - ).format(project_name)) + ).format(anatomy.project_name)) file_path = anatomy_filled[template_key]["path"] # Directory @@ -523,30 +523,24 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): return publish_folder - def _get_template_key(self, instance): + def _get_template_key(self, project_name, instance): anatomy_data = instance.data["anatomyData"] - task_data = anatomy_data.get("task") or {} - task_name = task_data.get("name") - task_type = task_data.get("type") + task_info = anatomy_data.get("task") or {} host_name = instance.context.data["hostName"] + # TODO raise error if Hero not set? family = self.main_family_from_instance(instance) - key_values = { - "families": family, - "task_names": task_name, - "task_types": task_type, - "hosts": host_name - } - profile = filter_profiles( - self.template_name_profiles, - key_values, + + return get_publish_template_name( + project_name, + host_name, + family, + task_info.get("name"), + task_info.get("type"), + project_settings=instance.context.data["project_settings"], + hero=True, logger=self.log ) - if profile: - template_name = profile["template_name"] - else: - template_name = self._default_template_name - return template_name def main_family_from_instance(self, instance): """Returns main family of entered instance.""" @@ -580,31 +574,32 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): return except OSError as exc: - # re-raise exception if different than cross drive path - if exc.errno != errno.EXDEV: + # re-raise exception if different than + # EXDEV - cross drive path + # EINVAL - wrong format, must be NTFS + self.log.debug("Hardlink failed with errno:'{}'".format(exc.errno)) + if exc.errno not in [errno.EXDEV, errno.EINVAL]: raise shutil.copy(src_path, dst_path) - def version_from_representations(self, repres): + def version_from_representations(self, project_name, repres): for repre in repres: - version = legacy_io.find_one({"_id": repre["parent"]}) + version = get_version_by_id(project_name, repre["parent"]) if version: return version - def current_hero_ents(self, version): - hero_version = legacy_io.find_one({ - "parent": version["parent"], - "type": "hero_version" - }) + def current_hero_ents(self, project_name, version): + hero_version = get_hero_version_by_subset_id( + project_name, version["parent"] + ) if not hero_version: return (None, []) - hero_repres = list(legacy_io.find({ - "parent": hero_version["_id"], - "type": "representation" - })) + hero_repres = list(get_representations( + project_name, version_ids=[hero_version["_id"]] + )) return (hero_version, hero_repres) def _update_path(self, anatomy, path, src_file, dst_file): diff --git 
a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_legacy.py similarity index 92% rename from openpype/plugins/publish/integrate_new.py rename to openpype/plugins/publish/integrate_legacy.py index bf13a4050e..536ab83f2c 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_legacy.py @@ -15,15 +15,26 @@ from bson.objectid import ObjectId from pymongo import DeleteOne, InsertOne import pyblish.api -import openpype.api -from openpype.lib.profiles_filtering import filter_profiles +from openpype.client import ( + get_asset_by_name, + get_subset_by_id, + get_subset_by_name, + get_version_by_id, + get_version_by_name, + get_representations, + get_archived_representations, +) from openpype.lib import ( prepare_template_data, create_hard_link, StringTemplate, - TemplateUnsolved + TemplateUnsolved, + source_hash, + filter_profiles, + get_local_site_id, ) from openpype.pipeline import legacy_io +from openpype.pipeline.publish import get_publish_template_name # this is needed until speedcopy for linux is fixed if sys.platform == "win32": @@ -60,8 +71,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "data": additional metadata for each representation. """ - label = "Integrate Asset New" - order = pyblish.api.IntegratorOrder + label = "Integrate Asset (legacy)" + # Make sure it happens after new integrator + order = pyblish.api.IntegratorOrder + 0.00001 families = ["workfile", "pointcache", "camera", @@ -92,7 +104,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "source", "matchmove", "image", - "source", "assembly", "fbx", "textures", @@ -109,14 +120,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "usd", "staticMesh", "skeletalMesh", - "usdComposition", - "usdOverride", + "mvLook", + "mvUsd", + "mvUsdComposition", + "mvUsdOverride", "simpleUnrealTexture" ] - exclude_families = ["clip", "render.farm"] + exclude_families = ["render.farm"] db_representation_context_keys = [ "project", "asset", "task", "subset", "version", "representation", - "family", "hierarchy", "task", "username" + "family", "hierarchy", "task", "username", "user" ] default_template_name = "publish" @@ -127,10 +140,13 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): integrated_file_sizes = {} # Attributes set by settings - template_name_profiles = None subset_grouping_profiles = None def process(self, instance): + if instance.data.get("processedWithNewIntegrator"): + self.log.info("Instance was already processed with new integrator") + return + for ef in self.exclude_families: if ( instance.data["family"] == ef or @@ -139,9 +155,47 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): ef, instance.data["family"], instance.data["families"])) return + # instance should be published on a farm + if instance.data.get("farm"): + return + + # Prepare repsentations that should be integrated + repres = instance.data.get("representations") + # Raise error if instance don't have any representations + if not repres: + raise ValueError( + "Instance {} has no files to transfer".format( + instance.data["family"] + ) + ) + + # Validate type of stored representations + if not isinstance(repres, (list, tuple)): + raise TypeError( + "Instance 'files' must be a list, got: {0} {1}".format( + str(type(repres)), str(repres) + ) + ) + + # Filter representations + filtered_repres = [] + for repre in repres: + if "delete" in repre.get("tags", []): + continue + filtered_repres.append(repre) + + # Skip instance if there are not representations to 
integrate + # all representations should not be integrated + if not filtered_repres: + self.log.warning(( + "Skipping, there are no representations" + " to integrate for instance {}" + ).format(instance.data["family"])) + return + self.integrated_file_sizes = {} try: - self.register(instance) + self.register(instance, filtered_repres) self.log.info("Integrated Asset in to the database ...") self.log.info("instance.data: {}".format(instance.data)) self.handle_destination_files(self.integrated_file_sizes, @@ -152,7 +206,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.handle_destination_files(self.integrated_file_sizes, 'remove') six.reraise(*sys.exc_info()) - def register(self, instance): + def register(self, instance, repres): # Required environment variables anatomy_data = instance.data["anatomyData"] @@ -161,6 +215,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): context = instance.context project_entity = instance.data["projectEntity"] + project_name = project_entity["name"] context_asset_name = None context_asset_doc = context.data.get("assetEntity") @@ -170,11 +225,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): asset_name = instance.data["asset"] asset_entity = instance.data.get("assetEntity") if not asset_entity or asset_entity["name"] != context_asset_name: - asset_entity = legacy_io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project_entity["_id"] - }) + asset_entity = get_asset_by_name(project_name, asset_name) assert asset_entity, ( "No asset found by the name \"{0}\" in project \"{1}\"" ).format(asset_name, project_entity["name"]) @@ -230,19 +281,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "Establishing staging directory @ {0}".format(stagingdir) ) - # Ensure at least one file is set up for transfer in staging dir. 
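Taken together, the guards added to `process()` above form a small pre-flight filter before the legacy integrator does any work. A rough standalone sketch of that logic, assuming an `instance.data` dictionary shaped like the one pyblish provides (the values are hypothetical):

def filter_integrable_representations(instance_data):
    """Return representations to integrate, or None when the instance is skipped."""
    if instance_data.get("processedWithNewIntegrator"):
        return None  # already handled by the new integrator
    if instance_data.get("farm"):
        return None  # instance will be published on a farm instead

    repres = instance_data.get("representations")
    if not repres:
        raise ValueError("Instance has no files to transfer")
    if not isinstance(repres, (list, tuple)):
        raise TypeError(
            "Instance 'files' must be a list, got: {}".format(type(repres))
        )

    # Representations tagged for deletion are never integrated
    return [
        repre for repre in repres
        if "delete" not in repre.get("tags", [])
    ]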
- repres = instance.data.get("representations") - repres = instance.data.get("representations") - msg = "Instance {} has no files to transfer".format( - instance.data["family"]) - assert repres, msg - assert isinstance(repres, (list, tuple)), ( - "Instance 'files' must be a list, got: {0} {1}".format( - str(type(repres)), str(repres) - ) - ) - - subset = self.get_subset(asset_entity, instance) + subset = self.get_subset(project_name, asset_entity, instance) instance.data["subsetEntity"] = subset version_number = instance.data["version"] @@ -264,13 +303,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("Creating version ...") - new_repre_names_low = [_repre["name"].lower() for _repre in repres] + new_repre_names_low = [ + _repre["name"].lower() + for _repre in repres + ] - existing_version = legacy_io.find_one({ - 'type': 'version', - 'parent': subset["_id"], - 'name': version_number - }) + existing_version = get_version_by_name( + project_name, version_number, subset["_id"] + ) if existing_version is None: version_id = legacy_io.insert_one(version).inserted_id @@ -291,10 +331,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): version_id = existing_version['_id'] # Find representations of existing version and archive them - current_repres = list(legacy_io.find({ - "type": "representation", - "parent": version_id - })) + current_repres = list(get_representations( + project_name, version_ids=[version_id] + )) bulk_writes = [] for repre in current_repres: if append_repres: @@ -314,18 +353,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # bulk updates if bulk_writes: - project_name = legacy_io.Session["AVALON_PROJECT"] legacy_io.database[project_name].bulk_write( bulk_writes ) - version = legacy_io.find_one({"_id": version_id}) + version = get_version_by_id(project_name, version_id) instance.data["versionEntity"] = version - existing_repres = list(legacy_io.find({ - "parent": version_id, - "type": "archived_representation" - })) + existing_repres = list(get_archived_representations( + project_name, + version_ids=[version_id] + )) instance.data['version'] = version['name'] @@ -351,34 +389,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): family = self.main_family_from_instance(instance) - key_values = { - "families": family, - "tasks": task_name, - "hosts": instance.context.data["hostName"], - "task_types": task_type - } - profile = filter_profiles( - self.template_name_profiles, - key_values, + template_name = get_publish_template_name( + project_name, + instance.context.data["hostName"], + family, + task_name=task_info.get("name"), + task_type=task_info.get("type"), + project_settings=instance.context.data["project_settings"], logger=self.log ) - template_name = "publish" - if profile: - template_name = profile["template_name"] - - - published_representations = {} - for idx, repre in enumerate(instance.data["representations"]): - # reset transfers for next representation - # instance.data['transfers'] is used as a global variable - # in current codebase - instance.data['transfers'] = list(orig_transfers) - - if "delete" in repre.get("tags", []): - continue - + for idx, repre in enumerate(repres): published_files = [] # create template data for Anatomy @@ -656,6 +678,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "published_files": published_files } self.log.debug("__ representations: {}".format(representations)) + # reset transfers for next representation + # instance.data['transfers'] is used as a global variable + # in current 
codebase + instance.data['transfers'] = list(orig_transfers) # Remove old representations if there are any (before insertion of new) if existing_repres: @@ -767,13 +793,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): create_hard_link(src, dst) - def get_subset(self, asset, instance): + def get_subset(self, project_name, asset, instance): subset_name = instance.data["subset"] - subset = legacy_io.find_one({ - "type": "subset", - "parent": asset["_id"], - "name": subset_name - }) + subset = get_subset_by_name(project_name, subset_name, asset["_id"]) if subset is None: self.log.info("Subset '%s' not found, creating ..." % subset_name) @@ -800,7 +822,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "parent": asset["_id"] }).inserted_id - subset = legacy_io.find_one({"_id": _id}) + subset = get_subset_by_id(project_name, _id) # QUESTION Why is changing of group and updating it's # families in 'get_subset'? @@ -934,9 +956,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): families += current_families # create relative source path for DB - if "source" in instance.data: - source = instance.data["source"] - else: + source = instance.data.get("source") + if not source: source = context.data["currentFile"] anatomy = instance.context.data["anatomy"] source = self.get_rootless_path(anatomy, source) @@ -1032,7 +1053,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): for _src, dest in resources: path = self.get_rootless_path(anatomy, dest) dest = self.get_dest_temp_url(dest) - file_hash = openpype.api.source_hash(dest) + file_hash = source_hash(dest) if self.TMP_FILE_EXT and \ ',{}'.format(self.TMP_FILE_EXT) in file_hash: file_hash = file_hash.replace(',{}'.format(self.TMP_FILE_EXT), @@ -1142,7 +1163,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): def _get_sites(self, sync_project_presets): """Returns tuple (local_site, remote_site)""" - local_site_id = openpype.api.get_local_site_id() + local_site_id = get_local_site_id() local_site = sync_project_presets["config"]. \ get("active_site", "studio").strip() diff --git a/openpype/plugins/publish/integrate_subset_group.py b/openpype/plugins/publish/integrate_subset_group.py new file mode 100644 index 0000000000..a24ebba3a5 --- /dev/null +++ b/openpype/plugins/publish/integrate_subset_group.py @@ -0,0 +1,98 @@ +"""Produces instance.data["subsetGroup"] data used during integration. + +Requires: + dict -> context["anatomyData"] *(pyblish.api.CollectorOrder + 0.49) + +Provides: + instance -> subsetGroup (str) + +""" +import pyblish.api + +from openpype.lib.profiles_filtering import filter_profiles +from openpype.lib import ( + prepare_template_data, + StringTemplate, + TemplateUnsolved +) + + +class IntegrateSubsetGroup(pyblish.api.InstancePlugin): + """Integrate Subset Group for publish.""" + + # Run after CollectAnatomyInstanceData + order = pyblish.api.IntegratorOrder - 0.1 + label = "Subset Group" + + # Attributes set by settings + subset_grouping_profiles = None + + def process(self, instance): + """Look into subset group profiles set by settings. + + Attribute 'subset_grouping_profiles' is defined by OpenPype settings. 
+ """ + + # Skip if 'subset_grouping_profiles' is empty + if not self.subset_grouping_profiles: + return + + if instance.data.get("subsetGroup"): + # If subsetGroup is already set then allow that value to remain + self.log.debug(( + "Skipping collect subset group due to existing value: {}" + ).format(instance.data["subsetGroup"])) + return + + # Skip if there is no matching profile + filter_criteria = self.get_profile_filter_criteria(instance) + profile = filter_profiles( + self.subset_grouping_profiles, + filter_criteria, + logger=self.log + ) + + if not profile: + return + + template = profile["template"] + + fill_pairs = prepare_template_data({ + "family": filter_criteria["families"], + "task": filter_criteria["tasks"], + "host": filter_criteria["hosts"], + "subset": instance.data["subset"], + "renderlayer": instance.data.get("renderlayer") + }) + + filled_template = None + try: + filled_template = StringTemplate.format_strict_template( + template, fill_pairs + ) + except (KeyError, TemplateUnsolved): + keys = fill_pairs.keys() + self.log.warning(( + "Subset grouping failed. Only {} are expected in Settings" + ).format(','.join(keys))) + + if filled_template: + instance.data["subsetGroup"] = filled_template + + def get_profile_filter_criteria(self, instance): + """Return filter criteria for `filter_profiles`""" + # TODO: This logic is used in much more plug-ins in one way or another + # Maybe better suited for lib? + # Anatomy data is pre-filled by Collectors + anatomy_data = instance.data["anatomyData"] + + # Task can be optional in anatomy data + task = anatomy_data.get("task", {}) + + # Return filter criteria + return { + "families": anatomy_data["family"], + "tasks": task.get("name"), + "hosts": instance.context.data["hostName"], + "task_types": task.get("type") + } diff --git a/openpype/plugins/publish/integrate_thumbnail.py b/openpype/plugins/publish/integrate_thumbnail.py index 5d6fc561ea..f74c3d9609 100644 --- a/openpype/plugins/publish/integrate_thumbnail.py +++ b/openpype/plugins/publish/integrate_thumbnail.py @@ -1,160 +1,291 @@ +""" Integrate Thumbnails for Openpype use in Loaders. + + This thumbnail is different from 'thumbnail' representation which could + be uploaded to Ftrack, or used as any other representation in Loaders to + pull into a scene. + + This one is used only as image describing content of published item and + shows up only in Loader in right column section. +""" + import os import sys import errno import shutil import copy +import collections import six import pyblish.api -from bson.objectid import ObjectId -from openpype.pipeline import legacy_io +from openpype.client import get_versions +from openpype.client.operations import OperationsSession, new_thumbnail_doc + +InstanceFilterResult = collections.namedtuple( + "InstanceFilterResult", + ["instance", "thumbnail_path", "version_id"] +) -class IntegrateThumbnails(pyblish.api.InstancePlugin): - """Integrate Thumbnails.""" +class IntegrateThumbnails(pyblish.api.ContextPlugin): + """Integrate Thumbnails for Openpype use in Loaders.""" label = "Integrate Thumbnails" order = pyblish.api.IntegratorOrder + 0.01 - families = ["review"] required_context_keys = [ "project", "asset", "task", "subset", "version" ] - def process(self, instance): - - if not os.environ.get("AVALON_THUMBNAIL_ROOT"): - self.log.warning( - "AVALON_THUMBNAIL_ROOT is not set." - " Skipping thumbnail integration." 
+ def process(self, context): + # Filter instances which can be used for integration + filtered_instance_items = self._prepare_instances(context) + if not filtered_instance_items: + self.log.info( + "All instances were filtered. Thumbnail integration skipped." ) return - published_repres = instance.data.get("published_representations") - if not published_repres: - self.log.debug( - "There are no published representations on the instance." - ) - return + # Initial validation of available templated and required keys + env_key = "AVALON_THUMBNAIL_ROOT" + thumbnail_root_format_key = "{thumbnail_root}" + thumbnail_root = os.environ.get(env_key) or "" - project_name = legacy_io.Session["AVALON_PROJECT"] - - anatomy = instance.context.data["anatomy"] + anatomy = context.data["anatomy"] + project_name = anatomy.project_name if "publish" not in anatomy.templates: - self.log.warning("Anatomy is missing the \"publish\" key!") + self.log.warning( + "Anatomy is missing the \"publish\" key. Skipping." + ) return if "thumbnail" not in anatomy.templates["publish"]: self.log.warning(( - "There is no \"thumbnail\" template set for the project \"{}\"" + "There is no \"thumbnail\" template set for the project" + " \"{}\". Skipping." ).format(project_name)) return - thumb_repre = None - thumb_repre_anatomy_data = None - for repre_info in published_repres.values(): - repre = repre_info["representation"] - if repre["name"].lower() == "thumbnail": - thumb_repre = repre - thumb_repre_anatomy_data = repre_info["anatomy_data"] + thumbnail_template = anatomy.templates["publish"]["thumbnail"] + if not thumbnail_template: + self.log.info("Thumbnail template is not filled. Skipping.") + return + + if ( + not thumbnail_root + and thumbnail_root_format_key in thumbnail_template + ): + self.log.warning(("{} is not set. Skipping.").format(env_key)) + return + + # Collect verion ids from all filtered instance + version_ids = { + instance_items.version_id + for instance_items in filtered_instance_items + } + # Query versions + version_docs = get_versions( + project_name, + version_ids=version_ids, + hero=True, + fields=["_id", "type", "name"] + ) + # Store version by their id (converted to string) + version_docs_by_str_id = { + str(version_doc["_id"]): version_doc + for version_doc in version_docs + } + self._integrate_thumbnails( + filtered_instance_items, + version_docs_by_str_id, + anatomy, + thumbnail_root + ) + + def _prepare_instances(self, context): + context_thumbnail_path = context.get("thumbnailPath") + valid_context_thumbnail = False + if context_thumbnail_path and os.path.exists(context_thumbnail_path): + valid_context_thumbnail = True + + filtered_instances = [] + for instance in context: + instance_label = self._get_instance_label(instance) + # Skip instances without published representations + # - there is no place where to put the thumbnail + published_repres = instance.data.get("published_representations") + if not published_repres: + self.log.debug(( + "There are no published representations" + " on the instance {}." + ).format(instance_label)) + continue + + # Find thumbnail path on instance + thumbnail_path = self._get_instance_thumbnail_path( + published_repres) + if thumbnail_path: + self.log.debug(( + "Found thumbnail path for instance \"{}\"." + " Thumbnail path: {}" + ).format(instance_label, thumbnail_path)) + + elif valid_context_thumbnail: + # Use context thumbnail path if is available + thumbnail_path = context_thumbnail_path + self.log.debug(( + "Using context thumbnail path for instance \"{}\"." 
+ " Thumbnail path: {}" + ).format(instance_label, thumbnail_path)) + + # Skip instance if thumbnail path is not available for it + if not thumbnail_path: + self.log.info(( + "Skipping thumbnail integration for instance \"{}\"." + " Instance and context" + " thumbnail paths are not available." + ).format(instance_label)) + continue + + version_id = str(self._get_version_id(published_repres)) + filtered_instances.append( + InstanceFilterResult(instance, thumbnail_path, version_id) + ) + return filtered_instances + + def _get_version_id(self, published_representations): + for repre_info in published_representations.values(): + return repre_info["representation"]["parent"] + + def _get_instance_thumbnail_path(self, published_representations): + thumb_repre_doc = None + for repre_info in published_representations.values(): + repre_doc = repre_info["representation"] + if repre_doc["name"].lower() == "thumbnail": + thumb_repre_doc = repre_doc break - if not thumb_repre: + if thumb_repre_doc is None: self.log.debug( "There is not representation with name \"thumbnail\"" ) - return + return None - legacy_io.install() - - thumbnail_template = anatomy.templates["publish"]["thumbnail"] - - version = legacy_io.find_one({"_id": thumb_repre["parent"]}) - if not version: - raise AssertionError( - "There does not exist version with id {}".format( - str(thumb_repre["parent"]) - ) + path = thumb_repre_doc["data"]["path"] + if not os.path.exists(path): + self.log.warning( + "Thumbnail file cannot be found. Path: {}".format(path) ) + return None + return os.path.normpath(path) - # Get full path to thumbnail file from representation - src_full_path = os.path.normpath(thumb_repre["data"]["path"]) - if not os.path.exists(src_full_path): - self.log.warning("Thumbnail file was not found. Path: {}".format( - src_full_path - )) - return + def _integrate_thumbnails( + self, + filtered_instance_items, + version_docs_by_str_id, + anatomy, + thumbnail_root + ): + op_session = OperationsSession() + project_name = anatomy.project_name - filename, file_extension = os.path.splitext(src_full_path) - # Create id for mongo entity now to fill anatomy template - thumbnail_id = ObjectId() - - # Prepare anatomy template fill data - template_data = copy.deepcopy(thumb_repre_anatomy_data) - template_data.update({ - "_id": str(thumbnail_id), - "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"), - "ext": file_extension[1:], - "thumbnail_type": "thumbnail" - }) - - anatomy_filled = anatomy.format(template_data) - template_filled = anatomy_filled["publish"]["thumbnail"] - - dst_full_path = os.path.normpath(str(template_filled)) - self.log.debug( - "Copying file .. {} -> {}".format(src_full_path, dst_full_path) - ) - dirname = os.path.dirname(dst_full_path) - try: - os.makedirs(dirname) - except OSError as e: - if e.errno != errno.EEXIST: - tp, value, tb = sys.exc_info() - six.reraise(tp, value, tb) - - shutil.copy(src_full_path, dst_full_path) - - # Clean template data from keys that are dynamic - template_data.pop("_id") - template_data.pop("thumbnail_root") - - repre_context = template_filled.used_values - for key in self.required_context_keys: - value = template_data.get(key) - if not value: + for instance_item in filtered_instance_items: + instance, thumbnail_path, version_id = instance_item + instance_label = self._get_instance_label(instance) + version_doc = version_docs_by_str_id.get(version_id) + if not version_doc: + self.log.warning(( + "Version entity for instance \"{}\" was not found." 
+ ).format(instance_label)) continue - repre_context[key] = template_data[key] - thumbnail_entity = { - "_id": thumbnail_id, - "type": "thumbnail", - "schema": "openpype:thumbnail-1.0", - "data": { + filename, file_extension = os.path.splitext(thumbnail_path) + # Create id for mongo entity now to fill anatomy template + thumbnail_doc = new_thumbnail_doc() + thumbnail_id = thumbnail_doc["_id"] + + # Prepare anatomy template fill data + template_data = copy.deepcopy(instance.data["anatomyData"]) + template_data.update({ + "_id": str(thumbnail_id), + "ext": file_extension[1:], + "name": "thumbnail", + "thumbnail_root": thumbnail_root, + "thumbnail_type": "thumbnail" + }) + + anatomy_filled = anatomy.format(template_data) + thumbnail_template = anatomy.templates["publish"]["thumbnail"] + template_filled = anatomy_filled["publish"]["thumbnail"] + + dst_full_path = os.path.normpath(str(template_filled)) + self.log.debug("Copying file .. {} -> {}".format( + thumbnail_path, dst_full_path + )) + dirname = os.path.dirname(dst_full_path) + try: + os.makedirs(dirname) + except OSError as e: + if e.errno != errno.EEXIST: + tp, value, tb = sys.exc_info() + six.reraise(tp, value, tb) + + shutil.copy(thumbnail_path, dst_full_path) + + # Clean template data from keys that are dynamic + for key in ("_id", "thumbnail_root"): + template_data.pop(key, None) + + repre_context = template_filled.used_values + for key in self.required_context_keys: + value = template_data.get(key) + if not value: + continue + repre_context[key] = template_data[key] + + thumbnail_doc["data"] = { "template": thumbnail_template, "template_data": repre_context } - } - # Create thumbnail entity - legacy_io.insert_one(thumbnail_entity) - self.log.debug( - "Creating entity in database {}".format(str(thumbnail_entity)) - ) - # Set thumbnail id for version - legacy_io.update_many( - {"_id": version["_id"]}, - {"$set": {"data.thumbnail_id": thumbnail_id}} - ) - self.log.debug("Setting thumbnail for version \"{}\" <{}>".format( - version["name"], str(version["_id"]) - )) + op_session.create_entity( + project_name, thumbnail_doc["type"], thumbnail_doc + ) + # Create thumbnail entity + self.log.debug( + "Creating entity in database {}".format(str(thumbnail_doc)) + ) - asset_entity = instance.data["assetEntity"] - legacy_io.update_many( - {"_id": asset_entity["_id"]}, - {"$set": {"data.thumbnail_id": thumbnail_id}} + # Set thumbnail id for version + op_session.update_entity( + project_name, + version_doc["type"], + version_doc["_id"], + {"data.thumbnail_id": thumbnail_id} + ) + if version_doc["type"] == "hero_version": + version_name = "Hero" + else: + version_name = version_doc["name"] + self.log.debug("Setting thumbnail for version \"{}\" <{}>".format( + version_name, version_id + )) + + asset_entity = instance.data["assetEntity"] + op_session.update_entity( + project_name, + asset_entity["type"], + asset_entity["_id"], + {"data.thumbnail_id": thumbnail_id} + ) + self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format( + asset_entity["name"], version_id + )) + + op_session.commit() + + def _get_instance_label(self, instance): + return ( + instance.data.get("label") + or instance.data.get("name") + or "N/A" ) - self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format( - asset_entity["name"], str(version["_id"]) - )) diff --git a/openpype/plugins/publish/preintegrate_thumbnail_representation.py b/openpype/plugins/publish/preintegrate_thumbnail_representation.py new file mode 100644 index 0000000000..b88ccee9dc --- /dev/null +++ 
b/openpype/plugins/publish/preintegrate_thumbnail_representation.py @@ -0,0 +1,71 @@ +""" Marks thumbnail representation for integrate to DB or not. + + Some hosts produce thumbnail representation, most of them do not create + them explicitly, but they created during extract phase. + + In some cases it might be useful to override implicit setting for host/task + + This plugin needs to run after extract phase, but before integrate.py as + thumbnail is part of review family and integrated there. + + It should be better to control integration of thumbnail in one place than + configure it in multiple places on host implementations. +""" +import pyblish.api + +from openpype.lib.profiles_filtering import filter_profiles + + +class PreIntegrateThumbnails(pyblish.api.InstancePlugin): + """Marks thumbnail representation for integrate to DB or not.""" + + label = "Override Integrate Thumbnail Representations" + order = pyblish.api.IntegratorOrder - 0.1 + + integrate_profiles = [] + + def process(self, instance): + repres = instance.data.get("representations") + if not repres: + return + + thumbnail_repre = None + for repre in repres: + if repre["name"] == "thumbnail": + thumbnail_repre = repre + break + + if not thumbnail_repre: + return + + family = instance.data["family"] + subset_name = instance.data["subset"] + host_name = instance.context.data["hostName"] + + anatomy_data = instance.data["anatomyData"] + task = anatomy_data.get("task", {}) + + found_profile = filter_profiles( + self.integrate_profiles, + { + "hosts": host_name, + "task_names": task.get("name"), + "task_types": task.get("type"), + "families": family, + "subsets": subset_name, + }, + logger=self.log + ) + + if not found_profile: + return + + if not found_profile["integrate_thumbnail"]: + if "delete" not in thumbnail_repre["tags"]: + thumbnail_repre["tags"].append("delete") + else: + if "delete" in thumbnail_repre["tags"]: + thumbnail_repre["tags"].remove("delete") + + self.log.debug( + "Thumbnail repre tags {}".format(thumbnail_repre["tags"])) diff --git a/openpype/plugins/publish/start_timer.py b/openpype/plugins/publish/start_timer.py deleted file mode 100644 index 112d92bef0..0000000000 --- a/openpype/plugins/publish/start_timer.py +++ /dev/null @@ -1,14 +0,0 @@ -import pyblish.api - -from openpype.lib import change_timer_to_current_context - - -class StartTimer(pyblish.api.ContextPlugin): - label = "Start Timer" - order = pyblish.api.IntegratorOrder + 1 - hosts = ["*"] - - def process(self, context): - modules_settings = context.data["system_settings"]["modules"] - if modules_settings["timers_manager"]["disregard_publishing"]: - change_timer_to_current_context() diff --git a/openpype/plugins/publish/stop_timer.py b/openpype/plugins/publish/stop_timer.py deleted file mode 100644 index 414e43a3c4..0000000000 --- a/openpype/plugins/publish/stop_timer.py +++ /dev/null @@ -1,17 +0,0 @@ -import os -import requests - -import pyblish.api - - -class StopTimer(pyblish.api.ContextPlugin): - label = "Stop Timer" - order = pyblish.api.ExtractorOrder - 0.49 - hosts = ["*"] - - def process(self, context): - modules_settings = context.data["system_settings"]["modules"] - if modules_settings["timers_manager"]["disregard_publishing"]: - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) - requests.post(rest_api_url) diff --git a/openpype/plugins/publish/validate_asset_docs.py b/openpype/plugins/publish/validate_asset_docs.py index bc1f9b9e6c..9a1ca5b8de 100644 --- 
a/openpype/plugins/publish/validate_asset_docs.py +++ b/openpype/plugins/publish/validate_asset_docs.py @@ -24,6 +24,10 @@ class ValidateAssetDocs(pyblish.api.InstancePlugin): if instance.data.get("assetEntity"): self.log.info("Instance has set asset document in its data.") + elif instance.data.get("newAssetPublishing"): + # skip if it is editorial + self.log.info("Editorial instance does not need an asset document check.") + else: raise PublishValidationError(( "Instance \"{}\" doesn't have asset document " diff --git a/openpype/plugins/publish/validate_containers.py b/openpype/plugins/publish/validate_containers.py index ce91bd3396..79759450e1 100644 --- a/openpype/plugins/publish/validate_containers.py +++ b/openpype/plugins/publish/validate_containers.py @@ -1,5 +1,9 @@ import pyblish.api -import openpype.lib +from openpype.pipeline.load import any_outdated_containers +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) class ShowInventory(pyblish.api.Action): @@ -14,15 +18,21 @@ class ShowInventory(pyblish.api.Action): host_tools.show_scene_inventory() -class ValidateContainers(pyblish.api.ContextPlugin): +class ValidateContainers(OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin): + """Containers must be updated to the latest version on publish.""" label = "Validate Containers" order = pyblish.api.ValidatorOrder - hosts = ["maya", "houdini", "nuke", "harmony", "photoshop"] + hosts = ["maya", "houdini", "nuke", "harmony", "photoshop", "aftereffects"] optional = True actions = [ShowInventory] def process(self, context): - if openpype.lib.any_outdated(): - raise ValueError("There are outdated containers in the scene.") + if not self.is_active(context.data): + return + + if any_outdated_containers(): + msg = "There are outdated containers in the scene."
+ raise PublishXmlValidationError(self, msg) diff --git a/openpype/plugins/publish/validate_editorial_asset_name.py b/openpype/plugins/publish/validate_editorial_asset_name.py index f9cdaebf0c..694788c414 100644 --- a/openpype/plugins/publish/validate_editorial_asset_name.py +++ b/openpype/plugins/publish/validate_editorial_asset_name.py @@ -3,6 +3,7 @@ from pprint import pformat import pyblish.api from openpype.pipeline import legacy_io +from openpype.client import get_assets class ValidateEditorialAssetName(pyblish.api.ContextPlugin): @@ -18,7 +19,8 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin): "hiero", "standalonepublisher", "resolve", - "flame" + "flame", + "traypublisher" ] def process(self, context): @@ -29,8 +31,10 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin): if not legacy_io.Session: legacy_io.install() - db_assets = list(legacy_io.find( - {"type": "asset"}, {"name": 1, "data.parents": 1})) + project_name = legacy_io.active_project() + db_assets = list(get_assets( + project_name, fields=["name", "data.parents"] + )) self.log.debug("__ db_assets: {}".format(db_assets)) asset_db_docs = { diff --git a/openpype/plugins/publish/validate_resources.py b/openpype/plugins/publish/validate_resources.py index 644977ecd4..7911c70c2d 100644 --- a/openpype/plugins/publish/validate_resources.py +++ b/openpype/plugins/publish/validate_resources.py @@ -1,7 +1,6 @@ -import pyblish.api -import openpype.api - import os +import pyblish.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateResources(pyblish.api.InstancePlugin): @@ -17,7 +16,7 @@ class ValidateResources(pyblish.api.InstancePlugin): """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Resources" def process(self, instance): diff --git a/openpype/plugins/publish/validate_version.py b/openpype/plugins/publish/validate_version.py index b94152ef2d..b91633430f 100644 --- a/openpype/plugins/publish/validate_version.py +++ b/openpype/plugins/publish/validate_version.py @@ -10,7 +10,8 @@ class ValidateVersion(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder label = "Validate Version" - hosts = ["nuke", "maya", "houdini", "blender", "standalonepublisher"] + hosts = ["nuke", "maya", "houdini", "blender", "standalonepublisher", + "photoshop", "aftereffects"] optional = False active = True diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index d945a1f697..d08a812c61 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -4,19 +4,7 @@ import os import sys import json import time - -from openpype.lib import PypeLogger -from openpype.api import get_app_environments_for_context -from openpype.lib.plugin_tools import parse_json, get_batch_asset_task_info -from openpype.lib.remote_publish import ( - get_webpublish_conn, - start_webpublish_log, - publish_and_log, - fail_batch, - find_variant_key, - get_task_data, - IN_PROGRESS_STATUS -) +import signal class PypeCommands: @@ -26,10 +14,11 @@ class PypeCommands: """ @staticmethod def launch_tray(): - PypeLogger.set_process_name("Tray") - + from openpype.lib import Logger from openpype.tools import tray + Logger.set_process_name("Tray") + tray.main() @staticmethod @@ -46,10 +35,12 @@ class PypeCommands: @staticmethod def add_modules(click_func): """Modules/Addons can add their cli commands dynamically.""" + + from openpype.lib import Logger from openpype.modules import ModulesManager manager = ModulesManager() - log = PypeLogger.get_logger("AddModulesCLI") + log 
= Logger.get_logger("CLI-AddModules") for module in manager.modules: try: module.cli(click_func) @@ -71,14 +62,9 @@ class PypeCommands: @staticmethod def launch_webpublisher_webservercli(*args, **kwargs): - from openpype.hosts.webpublisher.webserver_service.webserver_cli \ - import (run_webserver) - return run_webserver(*args, **kwargs) + from openpype.hosts.webpublisher.webserver_service import run_webserver - @staticmethod - def launch_standalone_publisher(): - from openpype.tools import standalonepublish - standalonepublish.main() + return run_webserver(*args, **kwargs) @staticmethod def launch_traypublisher(): @@ -100,10 +86,11 @@ class PypeCommands: Raises: RuntimeError: When there is no path to process. """ + + from openpype.lib import Logger + from openpype.lib.applications import get_app_environments_for_context from openpype.modules import ModulesManager from openpype.pipeline import install_openpype_plugins - - from openpype.api import Logger from openpype.tools.utils.host_tools import show_publish from openpype.tools.utils.lib import qt_app_context @@ -111,7 +98,7 @@ class PypeCommands: import pyblish.api import pyblish.util - log = Logger.get_logger() + log = Logger.get_logger("CLI-publish") install_openpype_plugins() @@ -144,6 +131,7 @@ class PypeCommands: pyblish.api.register_target("farm") os.environ["OPENPYPE_PUBLISH_DATA"] = os.pathsep.join(paths) + os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib log.info("Running publish ...") @@ -169,13 +157,15 @@ class PypeCommands: log.info("Publish finished.") @staticmethod - def remotepublishfromapp(project, batch_path, host_name, + def remotepublishfromapp(project_name, batch_path, host_name, user_email, targets=None): """Opens installed variant of 'host' and run remote publish there. + Eventually should be yanked out to Webpublisher cli. + Currently implemented and tested for Photoshop where customer wants to process uploaded .psd file and publish collected layers - from there. + from there. Triggered by Webpublisher. Checks if no other batches are running (status =='in_progress). If so, it sleeps for SLEEP (this is separate process), @@ -186,8 +176,8 @@ class PypeCommands: Runs publish process as user would, in automatic fashion. Args: - project (str): project to publish (only single context is expected - per call of remotepublish + project_name (str): project to publish (only single context is + expected per call of remotepublish batch_path (str): Path batch folder. Contains subfolders with resources (workfile, another subfolder 'renders' etc.) 
host_name (str): 'photoshop' @@ -196,84 +186,21 @@ class PypeCommands: targets (list): Pyblish targets (to choose validator for example) """ - import pyblish.api - from openpype.api import Logger - from openpype.lib import ApplicationManager - log = Logger.get_logger() - - log.info("remotepublishphotoshop command") - - task_data = get_task_data(batch_path) - - workfile_path = os.path.join(batch_path, - task_data["task"], - task_data["files"][0]) - - print("workfile_path {}".format(workfile_path)) - - batch_id = task_data["batch"] - dbcon = get_webpublish_conn() - # safer to start logging here, launch might be broken altogether - _id = start_webpublish_log(dbcon, batch_id, user_email) - - batches_in_progress = list(dbcon.find({"status": IN_PROGRESS_STATUS})) - if len(batches_in_progress) > 1: - fail_batch(_id, batches_in_progress, dbcon) - print("Another batch running, probably stuck, ask admin for help") - - asset, task_name, _ = get_batch_asset_task_info(task_data["context"]) - - application_manager = ApplicationManager() - found_variant_key = find_variant_key(application_manager, host_name) - app_name = "{}/{}".format(host_name, found_variant_key) - - # must have for proper launch of app - env = get_app_environments_for_context( - project, - asset, - task_name, - app_name + from openpype.hosts.webpublisher.publish_functions import ( + cli_publish_from_app ) - print("env:: {}".format(env)) - os.environ.update(env) - os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path - # must pass identifier to update log lines for a batch - os.environ["BATCH_LOG_ID"] = str(_id) - os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib - os.environ["USER_EMAIL"] = user_email - - pyblish.api.register_host(host_name) - if targets: - if isinstance(targets, str): - targets = [targets] - current_targets = os.environ.get("PYBLISH_TARGETS", "").split( - os.pathsep) - for target in targets: - current_targets.append(target) - - os.environ["PYBLISH_TARGETS"] = os.pathsep.join( - set(current_targets)) - - data = { - "last_workfile_path": workfile_path, - "start_last_workfile": True, - "project_name": project, - "asset_name": asset, - "task_name": task_name - } - - launched_app = application_manager.launch(app_name, **data) - - while launched_app.poll() is None: - time.sleep(0.5) + cli_publish_from_app( + project_name, batch_path, host_name, user_email, targets + ) @staticmethod def remotepublish(project, batch_path, user_email, targets=None): """Start headless publishing. - Used to publish rendered assets, workfiles etc. + Used to publish rendered assets, workfiles etc via Webpublisher. + Eventually should be yanked out to Webpublisher cli. Publish use json from passed paths argument. @@ -290,52 +217,24 @@ class PypeCommands: Raises: RuntimeError: When there is no path to process. 
""" - if not batch_path: - raise RuntimeError("No publish paths specified") - # Register target and host - import pyblish.api - import pyblish.util + from openpype.hosts.webpublisher.publish_functions import ( + cli_publish + ) - from openpype.pipeline import install_host - from openpype.hosts.webpublisher import api as webpublisher - - log = PypeLogger.get_logger() - - log.info("remotepublish command") - - host_name = "webpublisher" - os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path - os.environ["AVALON_PROJECT"] = project - os.environ["AVALON_APP"] = host_name - os.environ["USER_EMAIL"] = user_email - - pyblish.api.register_host(host_name) - - if targets: - if isinstance(targets, str): - targets = [targets] - for target in targets: - pyblish.api.register_target(target) - - install_host(webpublisher) - - log.info("Running publish ...") - - _, batch_id = os.path.split(batch_path) - dbcon = get_webpublish_conn() - _id = start_webpublish_log(dbcon, batch_id, user_email) - - publish_and_log(dbcon, _id, log, batch_id=batch_id) - - log.info("Publish finished.") + cli_publish(project, batch_path, user_email, targets) @staticmethod - def extractenvironments( - output_json_path, project, asset, task, app, env_group - ): + def extractenvironments(output_json_path, project, asset, task, app, + env_group): + """Produces json file with environment based on project and app. + + Called by Deadline plugin to propagate environment into render jobs. + """ + + from openpype.lib.applications import get_app_environments_for_context + if all((project, asset, task, app)): - from openpype.api import get_app_environments_for_context env = get_app_environments_for_context( project, asset, task, app, env_group ) @@ -417,8 +316,12 @@ class PypeCommands: pytest.main(args) def syncserver(self, active_site): - """Start running sync_server in background.""" - import signal + """Start running sync_server in background. + + This functionality is available in directly in module cli commands. 
+ `~/openpype_console module sync_server syncservice` + """ + os.environ["OPENPYPE_LOCAL_ID"] = active_site def signal_handler(sig, frame): @@ -437,7 +340,6 @@ class PypeCommands: sync_server_module.server_init() sync_server_module.server_start() - import time while True: time.sleep(1.0) diff --git a/openpype/resources/app_icons/hiero.png b/openpype/resources/app_icons/hiero.png index 04bbf6265b..ba666c2fe0 100644 Binary files a/openpype/resources/app_icons/hiero.png and b/openpype/resources/app_icons/hiero.png differ diff --git a/openpype/resources/app_icons/nuke.png b/openpype/resources/app_icons/nuke.png index 4234454096..e734b4984e 100644 Binary files a/openpype/resources/app_icons/nuke.png and b/openpype/resources/app_icons/nuke.png differ diff --git a/openpype/resources/app_icons/nukestudio.png b/openpype/resources/app_icons/nukestudio.png new file mode 100644 index 0000000000..601d4a591d Binary files /dev/null and b/openpype/resources/app_icons/nukestudio.png differ diff --git a/openpype/resources/app_icons/nukex.png b/openpype/resources/app_icons/nukex.png index 1c5a83c8ab..980f150124 100644 Binary files a/openpype/resources/app_icons/nukex.png and b/openpype/resources/app_icons/nukex.png differ diff --git a/openpype/resources/app_icons/shotgrid.png b/openpype/resources/app_icons/shotgrid.png new file mode 100644 index 0000000000..6d0cc047f9 Binary files /dev/null and b/openpype/resources/app_icons/shotgrid.png differ diff --git a/openpype/scripts/fusion_switch_shot.py b/openpype/scripts/fusion_switch_shot.py index 245fc665f0..fc22f060a2 100644 --- a/openpype/scripts/fusion_switch_shot.py +++ b/openpype/scripts/fusion_switch_shot.py @@ -3,6 +3,8 @@ import re import sys import logging +from openpype.client import get_asset_by_name, get_versions + # Pipeline imports from openpype.hosts.fusion import api import openpype.hosts.fusion.api.lib as fusion_lib @@ -15,13 +17,10 @@ from openpype.pipeline import ( legacy_io, ) -from openpype.lib.avalon_context import get_workdir_from_session +from openpype.pipeline.context_tools import get_workdir_from_session log = logging.getLogger("Update Slap Comp") -self = sys.modules[__name__] -self._project = None - def _format_version_folder(folder): """Format a version folder based on the filepath @@ -131,8 +130,8 @@ def update_frame_range(comp, representations): """ version_ids = [r["parent"] for r in representations] - versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}}) - versions = list(versions) + project_name = legacy_io.active_project() + versions = list(get_versions(project_name, version_ids=version_ids)) start = min(v["data"]["frameStart"] for v in versions) end = max(v["data"]["frameEnd"] for v in versions) @@ -162,15 +161,10 @@ def switch(asset_name, filepath=None, new=True): # Assert asset name exists # It is better to do this here then to wait till switch_shot does it - asset = legacy_io.find_one({"type": "asset", "name": asset_name}) + project_name = legacy_io.active_project() + asset = get_asset_by_name(project_name, asset_name) assert asset, "Could not find '%s' in the database" % asset_name - # Get current project - self._project = legacy_io.find_one({ - "type": "project", - "name": legacy_io.Session["AVALON_PROJECT"] - }) - # Go to comp if not filepath: current_comp = api.get_current_comp() diff --git a/openpype/scripts/otio_burnin.py b/openpype/scripts/otio_burnin.py index 1f57891b84..7223e8d4de 100644 --- a/openpype/scripts/otio_burnin.py +++ b/openpype/scripts/otio_burnin.py @@ -22,10 +22,6 @@ FFMPEG = ( 
'"{}"%(input_args)s -i "%(input)s" %(filters)s %(args)s%(output)s' ).format(ffmpeg_path) -FFPROBE = ( - '"{}" -v quiet -print_format json -show_format -show_streams "%(source)s"' -).format(ffprobe_path) - DRAWTEXT = ( "drawtext=fontfile='%(font)s':text=\\'%(text)s\\':" "x=%(x)s:y=%(y)s:fontcolor=%(color)s@%(opacity).1f:fontsize=%(size)d" @@ -48,8 +44,15 @@ def _get_ffprobe_data(source): :param str source: source media file :rtype: [{}, ...] """ - command = FFPROBE % {'source': source} - proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + command = [ + ffprobe_path, + "-v", "quiet", + "-print_format", "json", + "-show_format", + "-show_streams", + source + ] + proc = subprocess.Popen(command, stdout=subprocess.PIPE) out = proc.communicate()[0] if proc.returncode != 0: raise RuntimeError("Failed to run: %s" % command) @@ -113,11 +116,20 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): if not ffprobe_data: ffprobe_data = _get_ffprobe_data(source) + # Validate 'streams' before calling super to raise more specific + # error + source_streams = ffprobe_data.get("streams") + if not source_streams: + raise ValueError(( + "Input file \"{}\" does not contain any streams" + " with image/video content." + ).format(source)) + self.ffprobe_data = ffprobe_data self.first_frame = first_frame self.input_args = [] - super().__init__(source, ffprobe_data["streams"]) + super().__init__(source, source_streams) if options_init: self.options_init.update(options_init) @@ -568,6 +580,7 @@ def burnins_from_data( if source_ffmpeg_cmd: copy_args = ( "-metadata", + "-metadata:s:v:0", ) args = source_ffmpeg_cmd.split(" ") for idx, arg in enumerate(args): diff --git a/openpype/scripts/remote_publish.py b/openpype/scripts/remote_publish.py new file mode 100644 index 0000000000..37df35e36c --- /dev/null +++ b/openpype/scripts/remote_publish.py @@ -0,0 +1,12 @@ +try: + from openpype.lib import Logger + from openpype.pipeline.publish.lib import remote_publish +except ImportError as exc: + # Ensure Deadline fails by output an error that contains "Fatal Error:" + raise ImportError("Fatal Error: %s" % exc) + + +if __name__ == "__main__": + # Perform remote publish with thorough error checking + log = Logger.get_logger(__name__) + remote_publish(log, raise_error=True) diff --git a/openpype/settings/defaults/project_anatomy/templates.json b/openpype/settings/defaults/project_anatomy/templates.json index caf399a903..0ac56a4dad 100644 --- a/openpype/settings/defaults/project_anatomy/templates.json +++ b/openpype/settings/defaults/project_anatomy/templates.json @@ -29,7 +29,7 @@ "delivery": {}, "unreal": { "folder": "{root[work]}/{project[name]}/unreal/{task[name]}", - "file": "{project[code]}_{asset}", + "file": "{project[code]}_{asset}.{ext}", "path": "{@folder}/{@file}" }, "others": { @@ -48,10 +48,16 @@ "file": "{originalBasename}_{@version}.{ext}", "path": "{@folder}/{@file}" }, + "online": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}", + "file": "{originalBasename}<.{@frame}><_{udim}>.{ext}", + "path": "{@folder}/{@file}" + }, "__dynamic_keys_labels__": { "maya2unreal": "Maya to Unreal", "simpleUnrealTextureHero": "Simple Unreal Texture - Hero", - "simpleUnrealTexture": "Simple Unreal Texture" + "simpleUnrealTexture": "Simple Unreal Texture", + "online": "online" } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/blender.json b/openpype/settings/defaults/project_settings/blender.json index a7262dcb5d..7acecfaae0 
100644 --- a/openpype/settings/defaults/project_settings/blender.json +++ b/openpype/settings/defaults/project_settings/blender.json @@ -2,5 +2,69 @@ "workfile_builder": { "create_first_version": false, "custom_templates": [] + }, + "publish": { + "ValidateCameraZeroKeyframe": { + "enabled": true, + "optional": true, + "active": true + }, + "ValidateMeshHasUvs": { + "enabled": true, + "optional": true, + "active": true + }, + "ValidateMeshNoNegativeScale": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateTransformZero": { + "enabled": true, + "optional": false, + "active": true + }, + "ExtractBlend": { + "enabled": true, + "optional": true, + "active": true, + "families": [ + "model", + "camera", + "rig", + "action", + "layout" + ] + }, + "ExtractFBX": { + "enabled": true, + "optional": true, + "active": false + }, + "ExtractABC": { + "enabled": true, + "optional": true, + "active": false + }, + "ExtractBlendAnimation": { + "enabled": true, + "optional": true, + "active": true + }, + "ExtractAnimationFBX": { + "enabled": true, + "optional": true, + "active": false + }, + "ExtractCamera": { + "enabled": true, + "optional": true, + "active": true + }, + "ExtractLayout": { + "enabled": true, + "optional": true, + "active": false + } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index f0b2a7e555..a6e7b4a94a 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -83,9 +83,6 @@ "maya": [ ".*([Bb]eauty).*" ], - "nuke": [ - ".*" - ], "aftereffects": [ ".*" ], @@ -98,4 +95,4 @@ } } } -} +} \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/flame.json b/openpype/settings/defaults/project_settings/flame.json index dd8c05d460..34baf9ba06 100644 --- a/openpype/settings/defaults/project_settings/flame.json +++ b/openpype/settings/defaults/project_settings/flame.json @@ -1,4 +1,23 @@ { + "imageio": { + "project": { + "colourPolicy": "ACES 1.1", + "frameDepth": "16-bit fp", + "fieldDominance": "PROGRESSIVE" + }, + "profilesMapping": { + "inputs": [ + { + "flameName": "ACEScg", + "ocioName": "ACES - ACEScg" + }, + { + "flameName": "Rec.709 video", + "ocioName": "Output - Rec.709" + } + ] + } + }, "create": { "CreateShotClip": { "hierarchy": "{folder}/{sequence}", @@ -16,7 +35,10 @@ "vSyncOn": false, "workfileFrameStart": 1001, "handleStart": 5, - "handleEnd": 5 + "handleEnd": 5, + "includeHandles": false, + "retimedHandles": true, + "retimedFramerange": true } }, "publish": { @@ -97,7 +119,7 @@ ], "reel_group_name": "OpenPype_Reels", "reel_name": "Loaded", - "clip_name_template": "{asset}_{subset}_{output}" + "clip_name_template": "{asset}_{subset}<_{output}>" }, "LoadClipBatch": { "enabled": true, @@ -120,7 +142,7 @@ "exr16fpdwaa" ], "reel_name": "OP_LoadedReel", - "clip_name_template": "{asset}_{subset}_{output}" + "clip_name_template": "{batch}_{asset}_{subset}<_{output}>" } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index f9d16d6476..cdf861df4a 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -56,7 +56,7 @@ "Not Ready" ], "__ignore__": [ - "in prgoress", + "in progress", "omitted", "on hold" ] @@ -96,10 +96,6 @@ "mapping": {}, "asset_types_to_skip": 
[] }, - "first_version_status": { - "enabled": true, - "status": "" - }, "next_task_update": { "enabled": true, "mapping": { @@ -109,6 +105,27 @@ "Omitted" ], "name_sorting": false + }, + "transfer_values_of_hierarchical_attributes": { + "enabled": true, + "role_list": [ + "Administrator", + "Project manager" + ] + }, + "create_daily_review_session": { + "enabled": true, + "role_list": [ + "Administrator", + "Project Manager" + ], + "cycle_enabled": false, + "cycle_hour_start": [ + 0, + 0, + 0 + ], + "review_session_template": "{yy}{mm}{dd}" } }, "user_handlers": { @@ -252,6 +269,51 @@ } ] }, + { + "hosts": [ + "traypublisher" + ], + "families": [], + "task_types": [], + "tasks": [], + "add_ftrack_family": true, + "advanced_filtering": [] + }, + { + "hosts": [ + "traypublisher" + ], + "families": [ + "matchmove", + "shot" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": false, + "advanced_filtering": [] + }, + { + "hosts": [ + "traypublisher" + ], + "families": [ + "plate", + "review", + "audio" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": false, + "advanced_filtering": [ + { + "families": [ + "clip", + "review" + ], + "add_ftrack_family": true + } + ] + }, { "hosts": [ "maya" @@ -368,6 +430,9 @@ "enabled": false, "custom_attribute_keys": [] }, + "IntegrateHierarchyToFtrack": { + "create_task_status_profiles": [] + }, "IntegrateFtrackNote": { "enabled": true, "note_template": "{intent}: {comment}", @@ -383,11 +448,14 @@ "enabled": false, "ftrack_custom_attributes": {} }, + "IntegrateFtrackComponentOverwrite": { + "enabled": true + }, "IntegrateFtrackInstance": { "family_mapping": { "camera": "cam", "look": "look", - "mayaascii": "scene", + "mayaAscii": "scene", "model": "geo", "rig": "rig", "setdress": "setdress", @@ -419,7 +487,11 @@ "usd": "usd" }, "keep_first_subset_name_for_review": true, - "asset_versions_status_profiles": [] + "asset_versions_status_profiles": [], + "additional_metadata_keys": [] + }, + "IntegrateFtrackFarmStatus": { + "farm_status_profiles": [] } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/fusion.json b/openpype/settings/defaults/project_settings/fusion.json new file mode 100644 index 0000000000..1b4c4c55b5 --- /dev/null +++ b/openpype/settings/defaults/project_settings/fusion.json @@ -0,0 +1,12 @@ +{ + "imageio": { + "ocio": { + "enabled": false, + "configFilePath": { + "windows": [], + "darwin": [], + "linux": [] + } + } + } +} \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index 7b223798f1..b8995de99e 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -3,6 +3,10 @@ "CollectAnatomyInstanceData": { "follow_workfile_version": false }, + "CollectAudio": { + "enabled": false, + "audio_subset_name": "audioMain" + }, "CollectSceneVersion": { "hosts": [ "aftereffects", @@ -33,7 +37,7 @@ "enabled": false, "profiles": [] }, - "ExtractJpegEXR": { + "ExtractThumbnail": { "enabled": true, "ffmpeg_args": { "input": [ @@ -49,6 +53,62 @@ "families": [], "hosts": [], "outputs": { + "png": { + "ext": "png", + "tags": [ + "ftrackreview" + ], + "burnins": [], + "ffmpeg_args": { + "video_filters": [], + "audio_filters": [], + "input": [], + "output": [] + }, + "filter": { + "families": [ + "render", + "review", + "ftrack" + ], + "subsets": [], + "custom_tags": [], + "single_frame_filter": "single_frame" + }, + 
"overscan_crop": "", + "overscan_color": [ + 0, + 0, + 0, + 255 + ], + "width": 1920, + "height": 1080, + "scale_pixel_aspect": true, + "bg_color": [ + 0, + 0, + 0, + 0 + ], + "letter_box": { + "enabled": false, + "ratio": 0.0, + "fill_color": [ + 0, + 0, + 0, + 255 + ], + "line_thickness": 0, + "line_color": [ + 255, + 0, + 0, + 255 + ] + } + }, "h264": { "ext": "mp4", "tags": [ @@ -74,7 +134,9 @@ "review", "ftrack" ], - "subsets": [] + "subsets": [], + "custom_tags": [], + "single_frame_filter": "multi_frame" }, "overscan_crop": "", "overscan_color": [ @@ -85,6 +147,7 @@ ], "width": 0, "height": 0, + "scale_pixel_aspect": true, "bg_color": [ 0, 0, @@ -159,7 +222,31 @@ } ] }, + "PreIntegrateThumbnails": { + "enabled": true, + "integrate_profiles": [] + }, + "IntegrateSubsetGroup": { + "subset_grouping_profiles": [ + { + "families": [], + "hosts": [], + "task_types": [], + "tasks": [], + "template": "" + } + ] + }, "IntegrateAssetNew": { + "subset_grouping_profiles": [ + { + "families": [], + "hosts": [], + "task_types": [], + "tasks": [], + "template": "" + } + ], "template_name_profiles": [ { "families": [], @@ -202,17 +289,11 @@ "tasks": [], "template_name": "maya2unreal" } - ], - "subset_grouping_profiles": [ - { - "families": [], - "hosts": [], - "task_types": [], - "tasks": [], - "template": "" - } ] }, + "IntegrateAsset": { + "skip_host_families": [] + }, "IntegrateHeroVersion": { "enabled": true, "optional": true, @@ -377,7 +458,8 @@ "hosts": [], "task_types": [], "tasks": [], - "enabled": true + "enabled": true, + "use_last_published_workfile": false } ], "open_workfile_tool_on_startup": [ @@ -388,7 +470,8 @@ "enabled": false } ], - "extra_folders": [] + "extra_folders": [], + "workfile_lock_profiles": [] }, "loader": { "family_filter_profiles": [ @@ -399,9 +482,25 @@ "filter_families": [] } ] + }, + "publish": { + "template_name_profiles": [ + { + "families": [ + "online" + ], + "hosts": [ + "traypublisher" + ], + "task_types": [], + "task_names": [], + "template_name": "online" + } + ], + "hero_template_name_profiles": [] } }, - "project_folder_structure": "{\"__project_root__\": {\"prod\": {}, \"resources\": {\"footage\": {\"plates\": {}, \"offline\": {}}, \"audio\": {}, \"art_dept\": {}}, \"editorial\": {}, \"assets[ftrack.Library]\": {\"characters[ftrack]\": {}, \"locations[ftrack]\": {}}, \"shots[ftrack.Sequence]\": {\"scripts\": {}, \"editorial[ftrack.Folder]\": {}}}}", + "project_folder_structure": "{\"__project_root__\": {\"prod\": {}, \"resources\": {\"footage\": {\"plates\": {}, \"offline\": {}}, \"audio\": {}, \"art_dept\": {}}, \"editorial\": {}, \"assets\": {\"characters\": {}, \"locations\": {}}, \"shots\": {}}}", "sync_server": { "enabled": false, "config": { diff --git a/openpype/settings/defaults/project_settings/harmony.json b/openpype/settings/defaults/project_settings/harmony.json index 1508b02e1b..d843bc8e70 100644 --- a/openpype/settings/defaults/project_settings/harmony.json +++ b/openpype/settings/defaults/project_settings/harmony.json @@ -1,4 +1,20 @@ { + "load": { + "ImageSequenceLoader": { + "family": [ + "shot", + "render", + "image", + "plate", + "reference" + ], + "representations": [ + "jpeg", + "png", + "jpg" + ] + } + }, "publish": { "CollectPalettes": { "allowed_tasks": [ diff --git a/openpype/settings/defaults/project_settings/hiero.json b/openpype/settings/defaults/project_settings/hiero.json index 1dff3aac51..d2ba697305 100644 --- a/openpype/settings/defaults/project_settings/hiero.json +++ 
b/openpype/settings/defaults/project_settings/hiero.json @@ -1,4 +1,29 @@ { + "imageio": { + "workfile": { + "ocioConfigName": "nuke-default", + "ocioconfigpath": { + "windows": [], + "darwin": [], + "linux": [] + }, + "workingSpace": "linear", + "sixteenBitLut": "sRGB", + "eightBitLut": "sRGB", + "floatLut": "linear", + "logLut": "Cineon", + "viewerLut": "sRGB", + "thumbnailLut": "sRGB" + }, + "regexInputs": { + "inputs": [ + { + "regex": "[^-a-zA-Z0-9](plateRef).*(?=mp4)", + "colorspace": "sRGB" + } + ] + } + }, "create": { "CreateShotClip": { "hierarchy": "{folder}/{sequence}", @@ -51,5 +76,17 @@ ] } }, - "filters": {} + "filters": {}, + "scriptsmenu": { + "name": "OpenPype Tools", + "definition": [ + { + "type": "action", + "sourcetype": "python", + "title": "OpenPype Docs", + "command": "import webbrowser;webbrowser.open(url='https://openpype.io/docs/artist_hosts_hiero')", + "tooltip": "Open the OpenPype Hiero user doc page" + } + ] + } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/houdini.json b/openpype/settings/defaults/project_settings/houdini.json index 911bf82d9b..1517983569 100644 --- a/openpype/settings/defaults/project_settings/houdini.json +++ b/openpype/settings/defaults/project_settings/houdini.json @@ -1,4 +1,5 @@ { + "shelves": [], "create": { "CreateArnoldAss": { "enabled": true, @@ -47,6 +48,18 @@ } }, "publish": { + "ValidateWorkfilePaths": { + "enabled": true, + "optional": true, + "node_types": [ + "file", + "alembic" + ], + "prohibited_vars": [ + "$HIP", + "$JOB" + ] + }, "ValidateContainers": { "enabled": true, "optional": true, diff --git a/openpype/settings/defaults/project_settings/kitsu.json b/openpype/settings/defaults/project_settings/kitsu.json new file mode 100644 index 0000000000..3a9723b9c0 --- /dev/null +++ b/openpype/settings/defaults/project_settings/kitsu.json @@ -0,0 +1,13 @@ +{ + "entities_naming_pattern": { + "episode": "E##", + "sequence": "SQ##", + "shot": "SH##" + }, + "publish": { + "IntegrateKitsuNote": { + "set_status_note": false, + "note_status_shortname": "wfa" + } + } +} \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 4cdfe1ca5d..988c0e777a 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -1,4 +1,27 @@ { + "imageio": { + "colorManagementPreference_v2": { + "enabled": true, + "configFilePath": { + "windows": [], + "darwin": [], + "linux": [] + }, + "renderSpace": "ACEScg", + "displayName": "sRGB", + "viewName": "ACES 1.0 SDR-video" + }, + "colorManagementPreference": { + "configFilePath": { + "windows": [], + "darwin": [], + "linux": [] + }, + "renderSpace": "scene-linear Rec 709/sRGB", + "viewTransform": "sRGB gamma" + } + }, + "mel_workspace": "workspace -fr \"shaders\" \"renderData/shaders\";\nworkspace -fr \"images\" \"renders/maya\";\nworkspace -fr \"particles\" \"particles\";\nworkspace -fr \"mayaAscii\" \"\";\nworkspace -fr \"mayaBinary\" \"\";\nworkspace -fr \"scene\" \"\";\nworkspace -fr \"alembicCache\" \"cache/alembic\";\nworkspace -fr \"renderData\" \"renderData\";\nworkspace -fr \"sourceImages\" \"sourceimages\";\nworkspace -fr \"fileCache\" \"cache/nCache\";\n", "ext_mapping": { "model": "ma", "mayaAscii": "ma", @@ -8,6 +31,7 @@ "yetiRig": "ma" }, "maya-dirmap": { + "use_env_var_as_root": false, "enabled": false, "paths": { "source-path": [], @@ -30,6 +54,38 @@ } ] }, + "RenderSettings": { + 
"apply_render_settings": true, + "default_render_image_folder": "renders/maya", + "enable_all_lights": true, + "aov_separator": "underscore", + "reset_current_frame": false, + "arnold_renderer": { + "image_prefix": "//_", + "image_format": "exr", + "multilayer_exr": true, + "tiled": true, + "aov_list": [], + "additional_options": [] + }, + "vray_renderer": { + "image_prefix": "//", + "engine": "1", + "image_format": "exr", + "aov_list": [], + "additional_options": [] + }, + "redshift_renderer": { + "image_prefix": "//", + "primary_gi_engine": "0", + "secondary_gi_engine": "0", + "image_format": "exr", + "multilayer_exr": true, + "force_combine": true, + "aov_list": [], + "additional_options": [] + } + }, "create": { "CreateLook": { "enabled": true, @@ -42,9 +98,7 @@ "enabled": true, "defaults": [ "Main" - ], - "aov_separator": "underscore", - "default_render_image_folder": "renders" + ] }, "CreateUnrealStaticMesh": { "enabled": true, @@ -65,7 +119,49 @@ "defaults": [], "joint_hints": "jnt_org" }, + "CreateMultiverseLook": { + "enabled": true, + "publish_mip_map": true + }, "CreateAnimation": { + "enabled": true, + "write_color_sets": false, + "write_face_sets": false, + "defaults": [ + "Main" + ] + }, + "CreateModel": { + "enabled": true, + "write_color_sets": false, + "write_face_sets": false, + "defaults": [ + "Main", + "Proxy", + "Sculpt" + ] + }, + "CreatePointCache": { + "enabled": true, + "write_color_sets": false, + "write_face_sets": false, + "defaults": [ + "Main" + ] + }, + "CreateMultiverseUsd": { + "enabled": true, + "defaults": [ + "Main" + ] + }, + "CreateMultiverseUsdComp": { + "enabled": true, + "defaults": [ + "Main" + ] + }, + "CreateMultiverseUsdOver": { "enabled": true, "defaults": [ "Main" @@ -101,20 +197,6 @@ "Main" ] }, - "CreateModel": { - "enabled": true, - "defaults": [ - "Main", - "Proxy", - "Sculpt" - ] - }, - "CreatePointCache": { - "enabled": true, - "defaults": [ - "Main" - ] - }, "CreateRenderSetup": { "enabled": true, "defaults": [ @@ -165,6 +247,9 @@ "CollectMayaRender": { "sync_workfile_version": false }, + "CollectFbxCamera": { + "enabled": false + }, "ValidateInstanceInContext": { "enabled": true, "optional": true, @@ -178,10 +263,16 @@ "ValidateFrameRange": { "enabled": true, "optional": true, - "active": true + "active": true, + "exclude_families": [ + "model", + "rig", + "staticMesh" + ] }, "ValidateShaderName": { "enabled": false, + "optional": true, "regex": "(?P.*)_(.*)_SHD" }, "ValidateShadingEngine": { @@ -195,6 +286,7 @@ }, "ValidateLoadedPlugin": { "enabled": false, + "optional": true, "whitelist_native_plugins": false, "authorized_plugins": [] }, @@ -209,6 +301,7 @@ }, "ValidateUnrealStaticMeshName": { "enabled": true, + "optional": true, "validate_mesh": false, "validate_collision": true }, @@ -225,6 +318,81 @@ "redshift_render_attributes": [], "renderman_render_attributes": [] }, + "ValidateCurrentRenderLayerIsRenderable": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderImageRule": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderNoDefaultCameras": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderSingleCamera": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderLayerAOVs": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateStepSize": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVRayDistributedRendering": { + "enabled": true, + "optional": false, + "active": true 
+ }, + "ValidateVrayReferencedAOVs": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVRayTranslatorEnabled": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVrayProxy": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVrayProxyMembers": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRenderScriptCallbacks": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigCacheState": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigInputShapesInInstance": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigSettings": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateModelName": { "enabled": false, "database": true, @@ -243,6 +411,7 @@ }, "ValidateTransformNamingSuffix": { "enabled": true, + "optional": true, "SUFFIX_NAMING_TABLE": { "mesh": [ "_GEO", @@ -266,7 +435,7 @@ "ALLOW_IF_NOT_IN_SUFFIX_TABLE": true }, "ValidateColorSets": { - "enabled": false, + "enabled": true, "optional": true, "active": true }, @@ -310,6 +479,16 @@ "optional": true, "active": true }, + "ValidateMeshNoNegativeScale": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateMeshNonZeroEdgeLength": { + "enabled": true, + "optional": true, + "active": true + }, "ValidateMeshNormalsUnlocked": { "enabled": false, "optional": true, @@ -332,22 +511,22 @@ }, "ValidateNoNamespace": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNoNullTransforms": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNoUnknownNodes": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNodeNoGhosting": { "enabled": false, - "optional": true, + "optional": false, "active": true }, "ValidateShapeDefaultNames": { @@ -375,6 +554,33 @@ "optional": true, "active": true }, + "ValidateNoVRayMesh": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateUnrealMeshTriangulated": { + "enabled": false, + "optional": true, + "active": true + }, + "ValidateAlembicVisibleOnly": { + "enabled": true, + "optional": false, + "active": true + }, + "ExtractAlembic": { + "enabled": true, + "families": [ + "pointcache", + "model", + "vrayproxy" + ] + }, + "ExtractObj": { + "enabled": false, + "optional": true + }, "ValidateRigContents": { "enabled": false, "optional": true, @@ -390,8 +596,34 @@ "optional": true, "active": true }, + "ValidateAnimationContent": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateOutRelatedNodeIds": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRigControllersArnoldAttributes": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateSkeletalMeshHierarchy": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateSkinclusterDeformerSet": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateRigOutSetNodeIds": { "enabled": true, + "optional": false, "allow_history_only": false }, "ValidateCameraAttributes": { @@ -404,11 +636,46 @@ "optional": true, "active": true }, + "ValidateAssemblyNamespaces": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateAssemblyModelTransforms": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateAssRelativePaths": { "enabled": true, + "optional": false, + "active": true + }, + "ValidateInstancerContent": 
{ + "enabled": true, + "optional": false, + "active": true + }, + "ValidateInstancerFrameRanges": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateNoDefaultCameras": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateUnrealUpAxis": { + "enabled": false, "optional": true, "active": true }, + "ValidateCameraContents": { + "enabled": true, + "optional": false, + "validate_shapes": true + }, "ExtractPlayblast": { "capture_preset": { "Codec": { @@ -441,30 +708,42 @@ "isolate_view": true, "off_screen": true }, - "PanZoom": { - "pan_zoom": true - }, "Renderer": { "rendererName": "vp2Renderer" }, "Resolution": { "width": 1920, - "height": 1080, - "percent": 1.0, - "mode": "Custom" + "height": 1080 }, "Viewport Options": { "override_viewport_options": true, "displayLights": "default", + "displayTextures": true, "textureMaxResolution": 1024, - "multiSample": 4, + "renderDepthOfField": true, "shadows": true, - "textures": true, "twoSidedLighting": true, - "ssaoEnable": true, + "lineAAEnable": true, + "multiSample": 8, + "ssaoEnable": false, + "ssaoAmount": 1, + "ssaoRadius": 16, + "ssaoFilterRadius": 16, + "ssaoSamples": 16, + "fogging": false, + "hwFogFalloff": "0", + "hwFogDensity": 0.0, + "hwFogStart": 0, + "hwFogEnd": 100, + "hwFogAlpha": 0, + "hwFogColorR": 1.0, + "hwFogColorG": 1.0, + "hwFogColorB": 1.0, + "motionBlurEnable": false, + "motionBlurSampleCount": 8, + "motionBlurShutterOpenFraction": 0.2, "cameras": false, "clipGhosts": false, - "controlVertices": false, "deformers": false, "dimensions": false, "dynamicConstraints": false, @@ -476,8 +755,7 @@ "grid": false, "hairSystems": true, "handles": false, - "hud": false, - "hulls": false, + "headsUpDisplay": false, "ikHandles": false, "imagePlane": true, "joints": false, @@ -488,7 +766,9 @@ "nCloths": false, "nParticles": false, "nRigids": false, + "controlVertices": false, "nurbsCurves": false, + "hulls": false, "nurbsSurfaces": false, "particleInstancers": false, "pivots": false, @@ -496,7 +776,8 @@ "pluginShapes": false, "polymeshes": true, "strokes": false, - "subdivSurfaces": false + "subdivSurfaces": false, + "textures": false }, "Camera Options": { "displayGateMask": false, @@ -714,6 +995,9 @@ } ] }, + "templated_workfile_build": { + "profiles": [] + }, "filters": { "preset 1": { "ValidateNoAnimation": false, diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 128d440732..57a09086ca 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -8,6 +8,197 @@ "build_workfile": "ctrl+alt+b" } }, + "imageio": { + "enabled": false, + "viewer": { + "viewerProcess": "sRGB" + }, + "baking": { + "viewerProcess": "rec709" + }, + "workfile": { + "colorManagement": "Nuke", + "OCIO_config": "nuke-default", + "customOCIOConfigPath": { + "windows": [], + "darwin": [], + "linux": [] + }, + "workingSpaceLUT": "linear", + "monitorLut": "sRGB", + "int8Lut": "sRGB", + "int16Lut": "sRGB", + "logLut": "Cineon", + "floatLut": "linear" + }, + "nodes": { + "requiredNodes": [ + { + "plugins": [ + "CreateWriteRender" + ], + "nukeNodeClass": "Write", + "knobs": [ + { + "type": "text", + "name": "file_type", + "value": "exr" + }, + { + "type": "text", + "name": "datatype", + "value": "16 bit half" + }, + { + "type": "text", + "name": "compression", + "value": "Zip (1 scanline)" + }, + { + "type": "bool", + "name": "autocrop", + "value": true + }, + { + "type": 
"color_gui", + "name": "tile_color", + "value": [ + 186, + 35, + 35, + 255 + ] + }, + { + "type": "text", + "name": "channels", + "value": "rgb" + }, + { + "type": "text", + "name": "colorspace", + "value": "linear" + }, + { + "type": "bool", + "name": "create_directories", + "value": true + } + ] + }, + { + "plugins": [ + "CreateWritePrerender" + ], + "nukeNodeClass": "Write", + "knobs": [ + { + "type": "text", + "name": "file_type", + "value": "exr" + }, + { + "type": "text", + "name": "datatype", + "value": "16 bit half" + }, + { + "type": "text", + "name": "compression", + "value": "Zip (1 scanline)" + }, + { + "type": "bool", + "name": "autocrop", + "value": true + }, + { + "type": "color_gui", + "name": "tile_color", + "value": [ + 171, + 171, + 10, + 255 + ] + }, + { + "type": "text", + "name": "channels", + "value": "rgb" + }, + { + "type": "text", + "name": "colorspace", + "value": "linear" + }, + { + "type": "bool", + "name": "create_directories", + "value": true + } + ] + }, + { + "plugins": [ + "CreateWriteStill" + ], + "nukeNodeClass": "Write", + "knobs": [ + { + "type": "text", + "name": "file_type", + "value": "tiff" + }, + { + "type": "text", + "name": "datatype", + "value": "16 bit" + }, + { + "type": "text", + "name": "compression", + "value": "Deflate" + }, + { + "type": "color_gui", + "name": "tile_color", + "value": [ + 56, + 162, + 7, + 255 + ] + }, + { + "type": "text", + "name": "channels", + "value": "rgb" + }, + { + "type": "text", + "name": "colorspace", + "value": "sRGB" + }, + { + "type": "bool", + "name": "create_directories", + "value": true + } + ] + } + ], + "overrideNodes": [] + }, + "regexInputs": { + "inputs": [ + { + "regex": "(beauty).*(?=.exr)", + "colorspace": "linear" + } + ] + } + }, "nuke-dirmap": { "enabled": false, "paths": { @@ -15,6 +206,46 @@ "destination-path": [] } }, + "scriptsmenu": { + "name": "OpenPype Tools", + "definition": [ + { + "type": "action", + "sourcetype": "python", + "title": "OpenPype Docs", + "command": "import webbrowser;webbrowser.open(url='https://openpype.io/docs/artist_hosts_nuke_tut')", + "tooltip": "Open the OpenPype Nuke user doc page" + } + ] + }, + "gizmo": [ + { + "toolbar_menu_name": "OpenPype Gizmo", + "gizmo_source_dir": { + "windows": [], + "darwin": [], + "linux": [] + }, + "toolbar_icon_path": { + "windows": "", + "darwin": "", + "linux": "" + }, + "gizmo_definition": [ + { + "gizmo_toolbar_path": "/path/to/menu", + "sub_gizmo_list": [ + { + "sourcetype": "python", + "title": "Gizmo Note", + "command": "nuke.nodes.StickyNote(label='You can create your own toolbar menu in the Nuke GizmoMenu of OpenPype')", + "shortcut": "" + } + ] + } + ] + } + ], "create": { "CreateWriteRender": { "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}", @@ -87,10 +318,11 @@ "camera", "gizmo", "source", - "render" + "render", + "write" ] }, - "ValidateInstanceInContext": { + "ValidateCorrectAssetName": { "enabled": true, "optional": true, "active": true @@ -125,6 +357,9 @@ }, "ExtractThumbnail": { "enabled": true, + "use_rendered": true, + "bake_viewer_process": true, + "bake_viewer_input_process": true, "nodes": { "Reformat": [ [ @@ -199,7 +434,7 @@ } ], "extension": "mov", - "add_tags": [] + "add_custom_tags": [] } } }, @@ -243,7 +478,11 @@ "LoadClip": { "enabled": true, "_representations": [], - "node_name_template": "{class_name}_{ext}" + "node_name_template": "{class_name}_{ext}", + "options_defaults": { + "start_at_workfile": true, + "add_retime": true + } } }, "workfile_builder": { @@ -277,5 +516,8 @@ 
} ] }, + "templated_workfile_build": { + "profiles": [] + }, "filters": {} } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/photoshop.json b/openpype/settings/defaults/project_settings/photoshop.json index d9b7a8083f..fa0dc7b1c4 100644 --- a/openpype/settings/defaults/project_settings/photoshop.json +++ b/openpype/settings/defaults/project_settings/photoshop.json @@ -8,13 +8,19 @@ }, "publish": { "CollectColorCodedInstances": { - "create_flatten_image": false, + "create_flatten_image": "no", "flatten_subset_template": "", "color_code_mapping": [] }, "CollectInstances": { "flatten_subset_template": "" }, + "CollectReview": { + "publish": true + }, + "CollectVersion": { + "enabled": false + }, "ValidateContainers": { "enabled": true, "optional": true, @@ -32,8 +38,12 @@ }, "ExtractReview": { "make_image_sequence": false, + "max_downscale_size": 8192, "jpg_options": { - "tags": [] + "tags": [ + "review", + "ftrackreview" + ] }, "mov_options": { "tags": [ diff --git a/openpype/settings/defaults/project_settings/shotgrid.json b/openpype/settings/defaults/project_settings/shotgrid.json new file mode 100644 index 0000000000..774bce714b --- /dev/null +++ b/openpype/settings/defaults/project_settings/shotgrid.json @@ -0,0 +1,22 @@ +{ + "shotgrid_project_id": 0, + "shotgrid_server": "", + "event": { + "enabled": false + }, + "fields": { + "asset": { + "type": "sg_asset_type" + }, + "sequence": { + "episode_link": "episode" + }, + "shot": { + "episode_link": "sg_episode", + "sequence_link": "sg_sequence" + }, + "task": { + "step": "step" + } + } +} \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/standalonepublisher.json b/openpype/settings/defaults/project_settings/standalonepublisher.json index e36232d3f7..b6e2e056a1 100644 --- a/openpype/settings/defaults/project_settings/standalonepublisher.json +++ b/openpype/settings/defaults/project_settings/standalonepublisher.json @@ -257,12 +257,14 @@ ] }, "CollectHierarchyInstance": { + "shot_rename": true, "shot_rename_template": "{project[code]}_{_sequence_}_{_shot_}", "shot_rename_search_patterns": { - "_sequence_": "(\\d{4})(?=_\\d{4})", - "_shot_": "(\\d{4})(?!_\\d{4})" + "_sequence_": "(sc\\d{3})", + "_shot_": "(sh\\d{3})" }, "shot_add_hierarchy": { + "enabled": true, "parents_path": "{project}/{folder}/{sequence}", "parents": { "project": "{project[name]}", diff --git a/openpype/settings/defaults/project_settings/traypublisher.json b/openpype/settings/defaults/project_settings/traypublisher.json index 0b54cfd39e..e99b96b8c4 100644 --- a/openpype/settings/defaults/project_settings/traypublisher.json +++ b/openpype/settings/defaults/project_settings/traypublisher.json @@ -8,9 +8,10 @@ "default_variants": [ "Main" ], - "description": "Publish workfile backup", - "detailed_description": "", - "allow_sequences": true, + "description": "Backup of a working scene", + "detailed_description": "Workfiles are full scenes from any application that are directly edited by artists. 
They represent a state of work on a task at a given point and are usually not directly referenced into other scenes.", + "allow_sequences": false, + "allow_multiple_items": false, "extensions": [ ".ma", ".mb", @@ -30,6 +31,284 @@ ".psb", ".aep" ] + }, + { + "family": "model", + "identifier": "", + "label": "Model", + "icon": "fa.cubes", + "default_variants": [ + "Main", + "Proxy", + "Sculpt" + ], + "description": "Clean models", + "detailed_description": "Models should only contain geometry data, without any extras like cameras, locators or bones.\n\nKeep in mind that models published from tray publisher are not validated for correctness. ", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".ma", + ".mb", + ".obj", + ".abc", + ".fbx", + ".bgeo", + ".bgeogz", + ".bgeosc", + ".usd", + ".blend" + ] + }, + { + "family": "pointcache", + "identifier": "", + "label": "Pointcache", + "icon": "fa.gears", + "default_variants": [ + "Main" + ], + "description": "Geometry Caches", + "detailed_description": "Alembic or bgeo cache of animated data", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".abc", + ".bgeo", + ".bgeogz", + ".bgeosc" + ] + }, + { + "family": "plate", + "identifier": "", + "label": "Plate", + "icon": "mdi.camera-image", + "default_variants": [ + "Main", + "BG", + "Animatic", + "Reference", + "Offline" + ], + "description": "Footage Plates", + "detailed_description": "Any type of image sequence coming from outside of the studio. Usually camera footage, but could also be animatics used for reference.", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".png", + ".dpx", + ".jpg", + ".tiff", + ".tif", + ".mov", + ".mp4", + ".avi" + ] + }, + { + "family": "render", + "identifier": "", + "label": "Render", + "icon": "mdi.folder-multiple-image", + "default_variants": [], + "description": "Rendered images or video", + "detailed_description": "Sequence or single file renders", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".png", + ".dpx", + ".jpg", + ".jpeg", + ".tiff", + ".tif", + ".mov", + ".mp4", + ".avi" + ] + }, + { + "family": "camera", + "identifier": "", + "label": "Camera", + "icon": "fa.video-camera", + "default_variants": [], + "description": "3d Camera", + "detailed_description": "Ideally this should be only the camera itself with baked animation, however, it can technically also include helper geometry.", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".abc", + ".ma", + ".hip", + ".blend", + ".fbx", + ".usd" + ] + }, + { + "family": "image", + "identifier": "", + "label": "Image", + "icon": "fa.image", + "default_variants": [ + "Reference", + "Texture", + "Concept", + "Background" + ], + "description": "Single image", + "detailed_description": "Any image data can be published as image family. References, textures, concept art, matte paints. 
This is a fallback 2d family for everything that doesn't fit a more specific family.", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".jpg", + ".jpeg", + ".dpx", + ".bmp", + ".tif", + ".tiff", + ".png", + ".psb", + ".psd" + ] + }, + { + "family": "vdb", + "identifier": "", + "label": "VDB Volumes", + "icon": "fa.cloud", + "default_variants": [], + "description": "Sparse volumetric data", + "detailed_description": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".vdb" + ] + }, + { + "family": "matchmove", + "identifier": "", + "label": "Matchmove", + "icon": "fa.empire", + "default_variants": [ + "Camera", + "Object", + "Mocap" + ], + "description": "Matchmoving script", + "detailed_description": "Script exported from matchmoving application to be later processed into a tracked camera with additional data", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [] + }, + { + "family": "rig", + "identifier": "", + "label": "Rig", + "icon": "fa.wheelchair", + "default_variants": [], + "description": "CG rig file", + "detailed_description": "CG rigged character or prop. Rig should be clean of any extra data and directly loadable into its respective application", + "allow_sequences": false, + "allow_multiple_items": false, + "extensions": [ + ".ma", + ".blend", + ".hip", + ".hda" + ] + }, + { + "family": "simpleUnrealTexture", + "identifier": "", + "label": "Simple UE texture", + "icon": "fa.image", + "default_variants": [], + "description": "Simple Unreal Engine texture", + "detailed_description": "Texture files with Unreal Engine naming conventions", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [] } - ] + ], + "editorial_creators": { + "editorial_simple": { + "default_variants": [ + "Main" + ], + "clip_name_tokenizer": { + "_sequence_": "(sc\\d{3})", + "_shot_": "(sh\\d{3})" + }, + "shot_rename": { + "enabled": true, + "shot_rename_template": "{project[code]}_{_sequence_}_{_shot_}" + }, + "shot_hierarchy": { + "enabled": true, + "parents_path": "{project}/{folder}/{sequence}", + "parents": [ + { + "type": "Project", + "name": "project", + "value": "{project[name]}" + }, + { + "type": "Folder", + "name": "folder", + "value": "shots" + }, + { + "type": "Sequence", + "name": "sequence", + "value": "{_sequence_}" + } + ] + }, + "shot_add_tasks": {}, + "family_presets": [ + { + "family": "review", + "variant": "Reference", + "review": true, + "output_file_type": ".mp4" + }, + { + "family": "plate", + "variant": "", + "review": false, + "output_file_type": ".mov" + }, + { + "family": "audio", + "variant": "", + "review": false, + "output_file_type": ".wav" + } + ] + } + }, + "BatchMovieCreator": { + "default_variants": [ + "Main" + ], + "default_tasks": [ + "Compositing" + ], + "extensions": [ + ".mov" + ] + }, + "publish": { + "ValidateFrameRange": { + "enabled": true, + "optional": true, + "active": true + } + } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/unreal.json b/openpype/settings/defaults/project_settings/unreal.json index dad61cd1f0..391e2415a5 100644 --- a/openpype/settings/defaults/project_settings/unreal.json +++ b/openpype/settings/defaults/project_settings/unreal.json @@ -1,4 +1,6 @@ { + "level_sequences_for_layouts": false, + "delete_unmatched_assets": false, "project_setup": {
"dev_mode": true } diff --git a/openpype/settings/defaults/project_settings/webpublisher.json b/openpype/settings/defaults/project_settings/webpublisher.json index 77168c25e6..09c7d3ec94 100644 --- a/openpype/settings/defaults/project_settings/webpublisher.json +++ b/openpype/settings/defaults/project_settings/webpublisher.json @@ -1,6 +1,16 @@ { + "timeout_profiles": [ + { + "hosts": [ + "photoshop" + ], + "task_types": [], + "timeout": 600 + } + ], "publish": { "CollectPublishedFiles": { + "sync_next_version": false, "task_type_to_family": { "Animation": [ { diff --git a/openpype/settings/defaults/system_settings/applications.json b/openpype/settings/defaults/system_settings/applications.json index aaecef3494..a4db0dd327 100644 --- a/openpype/settings/defaults/system_settings/applications.json +++ b/openpype/settings/defaults/system_settings/applications.json @@ -12,6 +12,26 @@ "LC_ALL": "C" }, "variants": { + "2023": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Autodesk\\Maya2023\\bin\\maya.exe" + ], + "darwin": [], + "linux": [ + "/usr/autodesk/maya2023/bin/maya" + ] + }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, + "environment": { + "MAYA_VERSION": "2023" + } + }, "2022": { "use_python_2": false, "executables": { @@ -91,9 +111,6 @@ "environment": { "MAYA_VERSION": "2018" } - }, - "__dynamic_keys_labels__": { - "2022": "2022" } } }, @@ -175,6 +192,24 @@ ] }, "variants": { + "13-2": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Nuke13.2v1\\Nuke13.2.exe" + ], + "darwin": [], + "linux": [ + "/usr/local/Nuke13.2v1/Nuke13.2" + ] + }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, + "environment": {} + }, "13-0": { "use_python_2": false, "executables": { @@ -264,6 +299,7 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-2": "13.2", "13-0": "13.0", "12-2": "12.2", "12-0": "12.0", @@ -275,7 +311,7 @@ "nukex": { "enabled": true, "label": "Nuke X", - "icon": "{}/app_icons/nuke.png", + "icon": "{}/app_icons/nukex.png", "host_name": "nuke", "environment": { "NUKE_PATH": [ @@ -284,6 +320,30 @@ ] }, "variants": { + "13-2": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Nuke13.2v1\\Nuke13.2.exe" + ], + "darwin": [], + "linux": [ + "/usr/local/Nuke13.2v1/Nuke13.2" + ] + }, + "arguments": { + "windows": [ + "--nukex" + ], + "darwin": [ + "--nukex" + ], + "linux": [ + "--nukex" + ] + }, + "environment": {} + }, "13-0": { "use_python_2": false, "executables": { @@ -403,6 +463,7 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-2": "13.2", "13-0": "13.0", "12-2": "12.2", "12-0": "12.0", @@ -414,13 +475,37 @@ "nukestudio": { "enabled": true, "label": "Nuke Studio", - "icon": "{}/app_icons/nuke.png", + "icon": "{}/app_icons/nukestudio.png", "host_name": "hiero", "environment": { "WORKFILES_STARTUP": "0", "TAG_ASSETBUILD_STARTUP": "0" }, "variants": { + "13-2": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Nuke13.2v1\\Nuke13.2.exe" + ], + "darwin": [], + "linux": [ + "/usr/local/Nuke13.2v1/Nuke13.2" + ] + }, + "arguments": { + "windows": [ + "--studio" + ], + "darwin": [ + "--studio" + ], + "linux": [ + "--studio" + ] + }, + "environment": {} + }, "13-0": { "use_python_2": false, "executables": { @@ -538,6 +623,7 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-2": "13.2", "13-0": "13.0", "12-2": "12.2", "12-0": "12.0", @@ -556,6 +642,30 @@ "TAG_ASSETBUILD_STARTUP": "0" }, "variants": { 
+ "13-2": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Nuke13.2v1\\Nuke13.2.exe" + ], + "darwin": [], + "linux": [ + "/usr/local/Nuke13.2v1/Nuke13.2" + ] + }, + "arguments": { + "windows": [ + "--hiero" + ], + "darwin": [ + "--hiero" + ], + "linux": [ + "--hiero" + ] + }, + "environment": {} + }, "13-0": { "use_python_2": false, "executables": { @@ -675,6 +785,7 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-2": "13.2", "13-0": "13.0", "12-2": "12.2", "12-0": "12.0", @@ -689,30 +800,28 @@ "icon": "{}/app_icons/fusion.png", "host_name": "fusion", "environment": { - "FUSION_UTILITY_SCRIPTS_SOURCE_DIR": [], - "FUSION_UTILITY_SCRIPTS_DIR": { - "windows": "{PROGRAMDATA}/Blackmagic Design/Fusion/Scripts/Comp", - "darwin": "/Library/Application Support/Blackmagic Design/Fusion/Scripts/Comp", - "linux": "/opt/Fusion/Scripts/Comp" - }, - "PYTHON36": { + "FUSION_PYTHON3_HOME": { "windows": "{LOCALAPPDATA}/Programs/Python/Python36", "darwin": "~/Library/Python/3.6/bin", "linux": "/opt/Python/3.6/bin" - }, - "PYTHONPATH": [ - "{PYTHON36}/Lib/site-packages", - "{VIRTUAL_ENV}/Lib/site-packages", - "{PYTHONPATH}" - ], - "PATH": [ - "{PYTHON36}", - "{PYTHON36}/Scripts", - "{PATH}" - ], - "OPENPYPE_LOG_NO_COLORS": "Yes" + } }, "variants": { + "18": { + "executables": { + "windows": [ + "C:\\Program Files\\Blackmagic Design\\Fusion 18\\Fusion.exe" + ], + "darwin": [], + "linux": [] + }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, + "environment": {} + }, "17": { "executables": { "windows": [ @@ -767,41 +876,11 @@ "host_name": "resolve", "environment": { "RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR": [], - "RESOLVE_SCRIPT_API": { - "windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Support/Developer/Scripting", - "darwin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting", - "linux": "/opt/resolve/Developer/Scripting" - }, - "RESOLVE_SCRIPT_LIB": { - "windows": "C:/Program Files/Blackmagic Design/DaVinci Resolve/fusionscript.dll", - "darwin": "/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so", - "linux": "/opt/resolve/libs/Fusion/fusionscript.so" - }, - "RESOLVE_UTILITY_SCRIPTS_DIR": { - "windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp", - "darwin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp", - "linux": "/opt/resolve/Fusion/Scripts/Comp" - }, - "PYTHON36_RESOLVE": { + "RESOLVE_PYTHON3_HOME": { "windows": "{LOCALAPPDATA}/Programs/Python/Python36", "darwin": "~/Library/Python/3.6/bin", "linux": "/opt/Python/3.6/bin" - }, - "PYTHONPATH": [ - "{PYTHON36_RESOLVE}/Lib/site-packages", - "{VIRTUAL_ENV}/Lib/site-packages", - "{PYTHONPATH}", - "{RESOLVE_SCRIPT_API}/Modules", - "{PYTHONPATH}" - ], - "PATH": [ - "{PYTHON36_RESOLVE}", - "{PYTHON36_RESOLVE}/Scripts", - "{PATH}" - ], - "PRE_PYTHON_SCRIPT": "{OPENPYPE_REPOS_ROOT}/openpype/resolve/preload_console.py", - "OPENPYPE_LOG_NO_COLORS": "True", - "RESOLVE_DEV": "True" + } }, "variants": { "stable": { @@ -969,8 +1048,6 @@ }, "variants": { "21": { - "enabled": true, - "variant_label": "21", "executables": { "windows": [ "c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 21 Premium\\win64\\bin\\HarmonyPremium.exe" @@ -988,8 +1065,6 @@ "environment": {} }, "20": { - "enabled": true, - "variant_label": "20", "executables": { "windows": [ "c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 20 
Premium\\win64\\bin\\HarmonyPremium.exe" @@ -1007,8 +1082,6 @@ "environment": {} }, "17": { - "enabled": true, - "variant_label": "17", "executables": { "windows": [ "c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 17 Premium\\win64\\bin\\HarmonyPremium.exe" @@ -1138,11 +1211,9 @@ }, "variants": { "2020": { - "enabled": true, - "variant_label": "2020", "executables": { "windows": [ - "" + "C:\\Program Files\\Adobe\\Adobe After Effects 2020\\Support Files\\AfterFX.exe" ], "darwin": [], "linux": [] @@ -1155,8 +1226,6 @@ "environment": {} }, "2021": { - "enabled": true, - "variant_label": "2021", "executables": { "windows": [ "C:\\Program Files\\Adobe\\Adobe After Effects 2021\\Support Files\\AfterFX.exe" @@ -1172,8 +1241,6 @@ "environment": {} }, "2022": { - "enabled": true, - "variant_label": "2022", "executables": { "windows": [ "C:\\Program Files\\Adobe\\Adobe After Effects 2022\\Support Files\\AfterFX.exe" @@ -1226,9 +1293,19 @@ "host_name": "unreal", "environment": {}, "variants": { - "4-26": { + "4-27": { "use_python_2": false, "environment": {} + }, + "5-0": { + "use_python_2": false, + "environment": { + "UE_PYTHONPATH": "{PYTHONPATH}" + } + }, + "__dynamic_keys_labels__": { + "4-27": "4.27", + "5-0": "5.0" } } }, diff --git a/openpype/settings/defaults/system_settings/general.json b/openpype/settings/defaults/system_settings/general.json index a06947ba77..909ffc1ee4 100644 --- a/openpype/settings/defaults/system_settings/general.json +++ b/openpype/settings/defaults/system_settings/general.json @@ -2,11 +2,7 @@ "studio_name": "Studio name", "studio_code": "stu", "admin_password": "", - "environment": { - "__environment_keys__": { - "global": [] - } - }, + "environment": {}, "log_to_server": true, "disk_mapping": { "windows": [], diff --git a/openpype/settings/defaults/system_settings/modules.json b/openpype/settings/defaults/system_settings/modules.json index d74269922f..c84d23d3fc 100644 --- a/openpype/settings/defaults/system_settings/modules.json +++ b/openpype/settings/defaults/system_settings/modules.json @@ -26,13 +26,14 @@ "linux": [] }, "intent": { + "allow_empty_intent": true, + "empty_intent_label": "", "items": { - "-": "-", "wip": "WIP", "final": "Final", "test": "Test" }, - "default": "-" + "default": "" }, "custom_attributes": { "show": { @@ -59,13 +60,11 @@ "applications": { "write_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ], "read_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ] } }, @@ -73,25 +72,21 @@ "tools_env": { "write_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ], "read_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ] }, "avalon_mongo_id": { "write_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ], "read_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ] }, "fps": { @@ -137,6 +132,17 @@ } } }, + "kitsu": { + "enabled": false, + "server": "" + }, + "shotgrid": { + "enabled": false, + "leecher_manager_url": "http://127.0.0.1:3000", + "leecher_backend_url": "http://127.0.0.1:8090", + "filter_projects_by_login": true, + "shotgrid_settings": {} + }, "timers_manager": { "enabled": true, "auto_stop": true, diff --git a/openpype/settings/entities/__init__.py b/openpype/settings/entities/__init__.py index a173e2454f..b2cb2204f4 100644 --- a/openpype/settings/entities/__init__.py +++ b/openpype/settings/entities/__init__.py @@ -107,6 +107,7 @@ from .enum_entity import ( 
TaskTypeEnumEntity, DeadlineUrlEnumEntity, AnatomyTemplatesEnumEntity, + ShotgridUrlEnumEntity ) from .list_entity import ListEntity @@ -171,6 +172,7 @@ __all__ = ( "ToolsEnumEntity", "TaskTypeEnumEntity", "DeadlineUrlEnumEntity", + "ShotgridUrlEnumEntity", "AnatomyTemplatesEnumEntity", "ListEntity", diff --git a/openpype/settings/entities/base_entity.py b/openpype/settings/entities/base_entity.py index 741f13c49b..f28fefdf5a 100644 --- a/openpype/settings/entities/base_entity.py +++ b/openpype/settings/entities/base_entity.py @@ -15,7 +15,7 @@ from .exceptions import ( EntitySchemaError ) -from openpype.lib import PypeLogger +from openpype.lib import Logger @six.add_metaclass(ABCMeta) @@ -478,7 +478,7 @@ class BaseItemEntity(BaseEntity): def log(self): """Auto created logger for debugging or warnings.""" if self._log is None: - self._log = PypeLogger.get_logger(self.__class__.__name__) + self._log = Logger.get_logger(self.__class__.__name__) return self._log @abstractproperty diff --git a/openpype/settings/entities/enum_entity.py b/openpype/settings/entities/enum_entity.py index b6004a3feb..c07350ba07 100644 --- a/openpype/settings/entities/enum_entity.py +++ b/openpype/settings/entities/enum_entity.py @@ -1,10 +1,7 @@ import copy from .input_entities import InputEntity from .exceptions import EntitySchemaError -from .lib import ( - NOT_SET, - STRING_TYPE -) +from .lib import NOT_SET, STRING_TYPE class BaseEnumEntity(InputEntity): @@ -26,7 +23,7 @@ class BaseEnumEntity(InputEntity): for item in self.enum_items: key = tuple(item.keys())[0] if key in enum_keys: - reason = "Key \"{}\" is more than once in enum items.".format( + reason = 'Key "{}" is more than once in enum items.'.format( key ) raise EntitySchemaError(self, reason) @@ -34,7 +31,7 @@ class BaseEnumEntity(InputEntity): enum_keys.add(key) if not isinstance(key, STRING_TYPE): - reason = "Key \"{}\" has invalid type {}, expected {}.".format( + reason = 'Key "{}" has invalid type {}, expected {}.'.format( key, type(key), STRING_TYPE ) raise EntitySchemaError(self, reason) @@ -59,7 +56,7 @@ class BaseEnumEntity(InputEntity): for item in check_values: if item not in self.valid_keys: raise ValueError( - "{} Invalid value \"{}\". Expected one of: {}".format( + '{} Invalid value "{}". Expected one of: {}'.format( self.path, item, self.valid_keys ) ) @@ -84,7 +81,7 @@ class EnumEntity(BaseEnumEntity): self.valid_keys = set(all_keys) if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) value_on_not_set = [] if enum_default: if not isinstance(enum_default, list): @@ -109,7 +106,7 @@ class EnumEntity(BaseEnumEntity): self.value_on_not_set = key break - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -152,6 +149,7 @@ class HostsEnumEntity(BaseEnumEntity): Host name is not the same as application name. Host name defines implementation instead of application name. 
""" + schema_types = ["hosts-enum"] all_host_names = [ "3dsmax", @@ -170,6 +168,7 @@ class HostsEnumEntity(BaseEnumEntity): "tvpaint", "unreal", "standalonepublisher", + "traypublisher", "webpublisher" ] @@ -211,7 +210,7 @@ class HostsEnumEntity(BaseEnumEntity): self.valid_keys = valid_keys if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.value_on_not_set = [] else: for key in valid_keys: @@ -219,7 +218,7 @@ class HostsEnumEntity(BaseEnumEntity): self.value_on_not_set = key break - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -227,14 +226,10 @@ class HostsEnumEntity(BaseEnumEntity): def schema_validations(self): if self.hosts_filter: enum_len = len(self.enum_items) - if ( - enum_len == 0 - or (enum_len == 1 and self.use_empty_value) - ): - joined_filters = ", ".join([ - '"{}"'.format(item) - for item in self.hosts_filter - ]) + if enum_len == 0 or (enum_len == 1 and self.use_empty_value): + joined_filters = ", ".join( + ['"{}"'.format(item) for item in self.hosts_filter] + ) reason = ( "All host names were removed after applying" " host filters. {}" @@ -247,24 +242,25 @@ class HostsEnumEntity(BaseEnumEntity): invalid_filters.add(item) if invalid_filters: - joined_filters = ", ".join([ - '"{}"'.format(item) - for item in self.hosts_filter - ]) - expected_hosts = ", ".join([ - '"{}"'.format(item) - for item in self.all_host_names - ]) - self.log.warning(( - "Host filters containt invalid host names:" - " \"{}\" Expected values are {}" - ).format(joined_filters, expected_hosts)) + joined_filters = ", ".join( + ['"{}"'.format(item) for item in self.hosts_filter] + ) + expected_hosts = ", ".join( + ['"{}"'.format(item) for item in self.all_host_names] + ) + self.log.warning( + ( + "Host filters containt invalid host names:" + ' "{}" Expected values are {}' + ).format(joined_filters, expected_hosts) + ) super(HostsEnumEntity, self).schema_validations() class AppsEnumEntity(BaseEnumEntity): """Enum of applications for project anatomy attributes.""" + schema_types = ["apps-enum"] def _item_initialization(self): @@ -272,7 +268,7 @@ class AppsEnumEntity(BaseEnumEntity): self.value_on_not_set = [] self.enum_items = [] self.valid_keys = set() - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.placeholder = None def _get_enum_values(self): @@ -353,7 +349,7 @@ class ToolsEnumEntity(BaseEnumEntity): self.value_on_not_set = [] self.enum_items = [] self.valid_keys = set() - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.placeholder = None def _get_enum_values(self): @@ -410,10 +406,10 @@ class TaskTypeEnumEntity(BaseEnumEntity): def _item_initialization(self): self.multiselection = self.schema_data.get("multiselection", True) if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.value_on_not_set = [] else: - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) self.value_on_not_set = "" self.enum_items = [] @@ -508,7 +504,8 @@ class DeadlineUrlEnumEntity(BaseEnumEntity): enum_items_list = [] for server_name, url_entity in deadline_urls_entity.items(): enum_items_list.append( - {server_name: "{}: {}".format(server_name, url_entity.value)}) + {server_name: "{}: {}".format(server_name, url_entity.value)} + ) valid_keys.add(server_name) return enum_items_list, valid_keys @@ -531,6 +528,50 @@ class 
DeadlineUrlEnumEntity(BaseEnumEntity): self._current_value = tuple(self.valid_keys)[0] +class ShotgridUrlEnumEntity(BaseEnumEntity): + schema_types = ["shotgrid_url-enum"] + + def _item_initialization(self): + self.multiselection = False + + self.enum_items = [] + self.valid_keys = set() + + self.valid_value_types = (STRING_TYPE,) + self.value_on_not_set = "" + + # GUI attribute + self.placeholder = self.schema_data.get("placeholder") + + def _get_enum_values(self): + shotgrid_settings = self.get_entity_from_path( + "system_settings/modules/shotgrid/shotgrid_settings" + ) + + valid_keys = set() + enum_items_list = [] + for server_name, settings in shotgrid_settings.items(): + enum_items_list.append( + { + server_name: "{}: {}".format( + server_name, settings["shotgrid_url"].value + ) + } + ) + valid_keys.add(server_name) + return enum_items_list, valid_keys + + def set_override_state(self, *args, **kwargs): + super(ShotgridUrlEnumEntity, self).set_override_state(*args, **kwargs) + + self.enum_items, self.valid_keys = self._get_enum_values() + if not self.valid_keys: + self._current_value = "" + + elif self._current_value not in self.valid_keys: + self._current_value = tuple(self.valid_keys)[0] + + class AnatomyTemplatesEnumEntity(BaseEnumEntity): schema_types = ["anatomy-templates-enum"] diff --git a/openpype/settings/entities/schemas/projects_schema/schema_main.json b/openpype/settings/entities/schemas/projects_schema/schema_main.json index dbddd18c80..0b9fbf7470 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_main.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_main.json @@ -62,6 +62,14 @@ "type": "schema", "name": "schema_project_ftrack" }, + { + "type": "schema", + "name": "schema_project_shotgrid" + }, + { + "type": "schema", + "name": "schema_project_kitsu" + }, { "type": "schema", "name": "schema_project_deadline" @@ -82,6 +90,10 @@ "type": "schema", "name": "schema_project_nuke" }, + { + "type": "schema", + "name": "schema_project_fusion" + }, { "type": "schema", "name": "schema_project_hiero" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json b/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json index af09329a03..4c72ebda2f 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json @@ -12,6 +12,10 @@ "workfile_builder/builder_on_start", "workfile_builder/profiles" ] + }, + { + "type": "schema", + "name": "schema_blender_publish" } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json index ace404b47a..73664300aa 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json @@ -5,6 +5,69 @@ "label": "Flame", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "key": "project", + "type": "dict", + "label": "Project", + "collapsible": false, + "children": [ + { + "type": "form", + "children": [ + { + "type": "text", + "key": "colourPolicy", + "label": "Colour Policy (name or path)" + }, + { + "type": "text", + "key": "frameDepth", + "label": "Image Depth" + }, + { + "type": "text", + "key": "fieldDominance", + "label": "Field 
Dominance" + } + ] + } + ] + }, + { + "key": "profilesMapping", + "type": "dict", + "label": "Profile names mapping", + "collapsible": true, + "children": [ + { + "type": "list", + "key": "inputs", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "flameName", + "label": "Flame name" + }, + { + "type": "text", + "key": "ocioName", + "label": "OCIO name" + } + ] + } + } + ] + } + ] + }, { "type": "dict", "collapsible": true, @@ -123,6 +186,21 @@ "type": "number", "key": "handleEnd", "label": "Handle end (tail)" + }, + { + "type": "boolean", + "key": "includeHandles", + "label": "Enable handles including" + }, + { + "type": "boolean", + "key": "retimedHandles", + "label": "Enable retimed handles" + }, + { + "type": "boolean", + "key": "retimedFramerange", + "label": "Enable retimed shot frameranges" } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index 7db490b114..da414cc961 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -299,24 +299,6 @@ } ] }, - { - "type": "dict", - "key": "first_version_status", - "label": "Set status on first created version", - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "text", - "key": "status", - "label": "Status" - } - ] - }, { "type": "dict", "key": "next_task_update", @@ -369,6 +351,97 @@ "key": "name_sorting" } ] + }, + { + "type": "dict", + "key": "transfer_values_of_hierarchical_attributes", + "label": "Action to transfer hierarchical attribute values", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "role_list", + "label": "Roles", + "object_type": "text" + } + ] + }, + { + "key": "create_daily_review_session", + "label": "Create daily review session", + "type": "dict", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled" + }, + { + "type": "list", + "key": "role_list", + "label": "Roles", + "object_type": "text", + "use_label_wrap": true + }, + { + "type": "boolean", + "key": "cycle_enabled", + "label": "Run automatically every day" + }, + { + "type": "separator" + }, + { + "type": "list-strict", + "key": "cycle_hour_start", + "label": "Create daily review session at", + "tooltip": "This may take affect on next day", + "object_types": [ + { + "label": "H:", + "type": "number", + "minimum": 0, + "maximum": 23, + "decimal": 0 + }, { + "label": "M:", + "type": "number", + "minimum": 0, + "maximum": 59, + "decimal": 0 + }, { + "label": "S:", + "type": "number", + "minimum": 0, + "maximum": 59, + "decimal": 0 + } + ] + }, + { + "type": "label", + "label": "This can't be overriden per project and any change will take effect on the next day or on restart of event server." + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "review_session_template", + "label": "ReviewSession template", + "placeholder": "Default: {yy}{mm}{dd}" + }, + { + "type": "label", + "label": "Possible formatting keys in template:
- \"project_name\" - <Name of project>
- \"d\" - <Day of month number> in shortest possible way.
- \"dd\" - <Day of month number> with 2 digits.
- \"ddd\" - <Week day name> shortened week day. e.g.: `Mon`, ...
- \"dddd\" - <Week day name> full name of week day. e.g.: `Monday`, ...
- \"m\" - <Month number> in shortest possible way. e.g.: `1` if January
- \"mm\" - <Month number> with 2 digits.
- \"mmm\" - <Month name> shortened month name. e.g.: `Jan`, ...
- \"mmmm\" -<Month name> full month name. e.g.: `January`, ...
- \"yy\" - <Year number> shortened year. e.g.: `19`, `20`, ...
- \"yyyy\" - <Year number> full year. e.g.: `2019`, `2020`, ..." + } + ] } ] }, @@ -750,6 +823,44 @@ } ] }, + { + "type": "dict", + "key": "IntegrateHierarchyToFtrack", + "label": "Integrate Hierarchy to ftrack", + "is_group": true, + "collapsible": true, + "children": [ + { + "type": "label", + "label": "Set task status on new task creation. Ftrack's default status is used otherwise." + }, + { + "type": "list", + "key": "create_task_status_profiles", + "object_type": { + "type": "dict", + "children": [ + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "text", + "key": "status_name", + "label": "Status name" + } + ] + } + } + ] + }, { "type": "dict", "collapsible": true, @@ -765,7 +876,7 @@ }, { "type": "label", - "label": "Template may contain formatting keys intent, comment, host_name, app_name, app_label and published_paths." + "label": "Template may contain formatting keys intent, comment, host_name, app_name, app_label, published_paths and source." }, { "type": "text", @@ -839,10 +950,25 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateFtrackComponentOverwrite", + "label": "IntegrateFtrackComponentOverwrite", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "dict", "key": "IntegrateFtrackInstance", - "label": "IntegrateFtrackInstance", + "label": "Integrate Ftrack Instance", "is_group": true, "children": [ { @@ -895,6 +1021,82 @@ } ] } + }, + { + "key": "additional_metadata_keys", + "label": "Additional metadata keys on components", + "type": "enum", + "multiselection": true, + "enum_items": [ + {"openpype_version": "OpenPype version"}, + {"frame_start": "Frame start"}, + {"frame_end": "Frame end"}, + {"duration": "Duration"}, + {"width": "Resolution width"}, + {"height": "Resolution height"}, + {"fps": "FPS"}, + {"code": "Codec"} + ] + } + ] + }, + { + "type": "dict", + "key": "IntegrateFtrackFarmStatus", + "label": "Integrate Ftrack Farm Status", + "children": [ + { + "type": "label", + "label": "Change status of task when it's subset is submitted to farm" + }, + { + "type": "list", + "collapsible": true, + "key": "farm_status_profiles", + "label": "Farm status profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "hosts", + "label": "Host names", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "key": "subsets", + "label": "Subset names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "key": "status_name", + "label": "Status name", + "type": "text" + } + ] + } } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_fusion.json b/openpype/settings/entities/schemas/projects_schema/schema_project_fusion.json new file mode 100644 index 0000000000..8f98a8173f --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_fusion.json @@ -0,0 +1,38 @@ +{ + "type": "dict", + "collapsible": true, + "key": "fusion", + "label": "Fusion", + "is_file": true, + "children": [ + { + "key": "imageio", + 
"type": "dict", + "label": "Color Management (ImageIO)", + "collapsible": true, + "children": [ + { + "key": "ocio", + "type": "dict", + "label": "OpenColorIO (OCIO)", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Set OCIO variable for Fusion" + }, + { + "type": "path", + "key": "configFilePath", + "label": "OCIO Config File Path", + "multiplatform": true, + "multipath": true + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json b/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json index c049ce3084..311f742f81 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json @@ -5,6 +5,34 @@ "label": "Harmony", "is_file": true, "children": [ + { + "type": "dict", + "collapsible": true, + "key": "load", + "label": "Loader plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "ImageSequenceLoader", + "label": "Load Image Sequence", + "children": [ + { + "type": "list", + "key": "family", + "label": "Families", + "object_type": "text" + }, + { + "type": "list", + "key": "representations", + "label": "Representations", + "object_type": "text" + } + ] + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json index f717eff7dd..9e18522def 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json @@ -5,6 +5,116 @@ "label": "Hiero", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "collapsible": true, + "children": [ + { + "key": "workfile", + "type": "dict", + "label": "Workfile", + "collapsible": false, + "children": [ + { + "type": "form", + "children": [ + { + "type": "enum", + "key": "ocioConfigName", + "label": "OpenColorIO Config", + "enum_items": [ + { + "nuke-default": "nuke-default" + }, + { + "aces_1.0.3": "aces_1.0.3" + }, + { + "aces_1.1": "aces_1.1" + }, + { + "custom": "custom" + } + ] + }, + { + "type": "path", + "key": "ocioconfigpath", + "label": "Custom OCIO path", + "multiplatform": true, + "multipath": true + }, + { + "type": "text", + "key": "workingSpace", + "label": "Working Space" + }, + { + "type": "text", + "key": "sixteenBitLut", + "label": "16 Bit Files" + }, + { + "type": "text", + "key": "eightBitLut", + "label": "8 Bit Files" + }, + { + "type": "text", + "key": "floatLut", + "label": "Floating Point Files" + }, + { + "type": "text", + "key": "logLut", + "label": "Log Files" + }, + { + "type": "text", + "key": "viewerLut", + "label": "Viewer" + }, + { + "type": "text", + "key": "thumbnailLut", + "label": "Thumbnails" + } + ] + } + ] + }, + { + "key": "regexInputs", + "type": "dict", + "label": "Colorspace on Inputs by regex detection", + "collapsible": true, + "children": [ + { + "type": "list", + "key": "inputs", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "regex", + "label": "Regex" + }, + { + "type": "text", + "key": "colorspace", + "label": "Colorspace" + } + ] + } + } + ] + } + ] + }, { "type": "dict", "collapsible": true, @@ -206,6 +316,10 @@ { "type": "schema", 
"name": "schema_publish_gui_filter" + }, + { + "type": "schema", + "name": "schema_scriptsmenu" } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json b/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json index cad99dde22..808f154226 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json @@ -5,27 +5,17 @@ "label": "Houdini", "is_file": true, "children": [ + { + "type": "schema", + "name": "schema_houdini_scriptshelf" + }, { "type": "schema", "name": "schema_houdini_create" }, { - "type": "dict", - "collapsible": true, - "key": "publish", - "label": "Publish plugins", - "children": [ - { - "type": "schema_template", - "name": "template_publish_plugin", - "template_data": [ - { - "key": "ValidateContainers", - "label": "ValidateContainers" - } - ] - } - ] + "type": "schema", + "name": "schema_houdini_publish" } ] -} +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_kitsu.json b/openpype/settings/entities/schemas/projects_schema/schema_project_kitsu.json new file mode 100644 index 0000000000..fb47670e74 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_kitsu.json @@ -0,0 +1,61 @@ +{ + "type": "dict", + "key": "kitsu", + "label": "Kitsu", + "collapsible": true, + "is_file": true, + "children": [ + { + "type": "dict", + "key": "entities_naming_pattern", + "label": "Entities naming pattern", + "children": [ + { + "type": "text", + "key": "episode", + "label": "Episode:" + }, + { + "type": "text", + "key": "sequence", + "label": "Sequence:" + }, + { + "type": "text", + "key": "shot", + "label": "Shot:" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "label", + "label": "Integrator" + }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateKitsuNote", + "label": "Integrate Kitsu Note", + "children": [ + { + "type": "boolean", + "key": "set_status_note", + "label": "Set status on note" + }, + { + "type": "text", + "key": "note_status_shortname", + "label": "Note shortname" + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json index cc70516c72..b2d79797a3 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json @@ -5,6 +5,83 @@ "label": "Maya", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "collapsible": true, + "is_group": true, + "children": [ + { + "key": "colorManagementPreference_v2", + "type": "dict", + "label": "Color Management Preference v2 (Maya 2022+)", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Use Color Management Preference v2" + }, + { + "type": "path", + "key": "configFilePath", + "label": "OCIO Config File Path", + "multiplatform": true, + "multipath": true + }, + { + "type": "text", + "key": "renderSpace", + "label": "Rendering Space" + }, + { + "type": "text", + "key": "displayName", + "label": "Display" + }, + { + "type": "text", + "key": "viewName", + "label": "View" + } + ] + }, + { + "key": 
"colorManagementPreference", + "type": "dict", + "label": "Color Management Preference (legacy)", + "collapsible": true, + "children": [ + { + "type": "path", + "key": "configFilePath", + "label": "OCIO Config File Path", + "multiplatform": true, + "multipath": true + }, + { + "type": "text", + "key": "renderSpace", + "label": "Rendering Space" + }, + { + "type": "text", + "key": "viewTransform", + "label": "Viewer Transform" + } + ] + } + ] + }, + { + "type": "text", + "multiline" : true, + "use_label_wrap": true, + "key": "mel_workspace", + "label": "Maya MEL Workspace" + }, { "type": "dict-modifiable", "key": "ext_mapping", @@ -22,6 +99,12 @@ "label": "Maya Directory Mapping", "is_group": true, "children": [ + { + "type": "boolean", + "key": "use_env_var_as_root", + "label": "Use env var placeholder in referenced paths", + "docstring": "Use ${} placeholder instead of absolute value of a root in referenced filepaths." + }, { "type": "boolean", "key": "enabled", @@ -49,7 +132,11 @@ }, { "type": "schema", - "name": "schema_maya_scriptsmenu" + "name": "schema_scriptsmenu" + }, + { + "type": "schema", + "name": "schema_maya_render_settings" }, { "type": "schema", @@ -67,6 +154,10 @@ "type": "schema", "name": "schema_workfile_build" }, + { + "type": "schema", + "name": "schema_templated_workfile_build" + }, { "type": "schema", "name": "schema_publish_gui_filter" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json index bc572cbdc8..154eca254b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json @@ -46,6 +46,10 @@ } ] }, + { + "type": "schema", + "name": "schema_nuke_imageio" + }, { "type": "dict", "collapsible": true, @@ -79,6 +83,14 @@ } ] }, + { + "type": "schema", + "name": "schema_scriptsmenu" + }, + { + "type": "schema", + "name": "schema_nuke_scriptsgizmo" + }, { "type": "dict", "collapsible": true, @@ -300,6 +312,10 @@ "type": "schema_template", "name": "template_workfile_options" }, + { + "type": "schema", + "name": "schema_templated_workfile_build" + }, { "type": "schema", "name": "schema_publish_gui_filter" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json b/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json index badf94229b..b768db30ee 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json @@ -45,9 +45,15 @@ "label": "Set color for publishable layers, set its resulting family and template for subset name. 
\nCan create flatten image from published instances.(Applicable only for remote publishing!)" }, { - "type": "boolean", "key": "create_flatten_image", - "label": "Create flatten image" + "label": "Create flatten image", + "type": "enum", + "multiselection": false, + "enum_items": [ + { "flatten_with_images": "Flatten with images" }, + { "flatten_only": "Flatten only" }, + { "no": "No" } + ] }, { "type": "text", @@ -125,6 +131,35 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CollectReview", + "label": "Collect Review", + "children": [ + { + "type": "boolean", + "key": "publish", + "label": "Active" + } + ] + }, + { + "type": "dict", + "key": "CollectVersion", + "label": "Collect Version", + "children": [ + { + "type": "label", + "label": "Synchronize version for image and review instances by workfile version." + }, + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "schema_template", "name": "template_publish_plugin", @@ -186,6 +221,15 @@ "key": "make_image_sequence", "label": "Makes an image sequence instead of a flatten image" }, + { + "type": "number", + "key": "max_downscale_size", + "label": "Maximum size of sources for review", + "tooltip": "FFMpeg can only handle limited resolution for creation of review and/or thumbnail", + "minimum": 300, + "maximum": 16384, + "decimal": 0 + }, { "type": "dict", "collapsible": false, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json b/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json new file mode 100644 index 0000000000..4faeca89f3 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json @@ -0,0 +1,98 @@ +{ + "type": "dict", + "key": "shotgrid", + "label": "Shotgrid", + "collapsible": true, + "is_file": true, + "children": [ + { + "type": "number", + "key": "shotgrid_project_id", + "label": "Shotgrid project id" + }, + { + "type": "shotgrid_url-enum", + "key": "shotgrid_server", + "label": "Shotgrid Server" + }, + { + "type": "dict", + "key": "event", + "label": "Event Handler", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, + { + "type": "dict", + "key": "fields", + "label": "Fields Template", + "collapsible": true, + "children": [ + { + "type": "dict", + "key": "asset", + "label": "Asset", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "type", + "label": "Asset Type" + } + ] + }, + { + "type": "dict", + "key": "sequence", + "label": "Sequence", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "episode_link", + "label": "Episode link" + } + ] + }, + { + "type": "dict", + "key": "shot", + "label": "Shot", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "episode_link", + "label": "Episode link" + }, + { + "type": "text", + "key": "sequence_link", + "label": "Sequence link" + } + ] + }, + { + "type": "dict", + "key": "task", + "label": "Task", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "step", + "label": "Step link" + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_standalonepublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_standalonepublisher.json index 37fcaac69f..ae25007683 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_standalonepublisher.json +++ 
b/openpype/settings/entities/schemas/projects_schema/schema_project_standalonepublisher.json @@ -271,6 +271,11 @@ "label": "Collect Instance Hierarchy", "is_group": true, "children": [ + { + "type": "boolean", + "key": "shot_rename", + "label": "Shot Rename" + }, { "type": "text", "key": "shot_rename_template", @@ -289,7 +294,13 @@ "type": "dict", "key": "shot_add_hierarchy", "label": "Shot hierarchy", + "checkbox_key": "enabled", "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { "type": "text", "key": "parents_path", @@ -343,8 +354,8 @@ "type": "number", "key": "timeline_frame_start", "label": "Timeline start frame", - "default": 900000, - "minimum": 1, + "default": 90000, + "minimum": 0, "maximum": 10000000 }, { diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json index 55c1b7b7d7..faa5033d2a 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json @@ -67,6 +67,11 @@ "label": "Allow sequences", "type": "boolean" }, + { + "key": "allow_multiple_items", + "label": "Allow multiple items", + "type": "boolean" + }, { "type": "list", "key": "extensions", @@ -78,6 +83,252 @@ } ] } + }, + { + "type": "dict", + "collapsible": true, + "key": "editorial_creators", + "label": "Editorial creator plugins", + "use_label_wrap": true, + "collapsible_key": true, + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "editorial_simple", + "label": "Editorial simple creator", + "use_label_wrap": true, + "collapsible_key": true, + "children": [ + + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "splitter" + }, + { + "type": "collapsible-wrap", + "label": "Shot metadata creator", + "collapsible": true, + "collapsed": true, + "children": [ + { + "key": "clip_name_tokenizer", + "label": "Clip name tokenizer", + "type": "dict-modifiable", + "highlight_content": true, + "tooltip": "Using Regex expression to create tokens. \nThose can be used later in \"Shot rename\" creator \nor \"Shot hierarchy\". \n\nTokens should be decorated with \"_\" on each side", + "object_type": { + "type": "text" + } + }, + { + "type": "dict", + "key": "shot_rename", + "label": "Shot rename", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "shot_rename_template", + "label": "Shot rename template", + "tooltip":"Template only supports Anatomy keys and Tokens \nfrom \"Clip name tokenizer\"" + } + ] + }, + { + "type": "dict", + "key": "shot_hierarchy", + "label": "Shot hierarchy", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "parents_path", + "label": "Parents path template", + "tooltip": "Using keys from \"Token to parent convertor\" or tokens directly" + }, + { + "key": "parents", + "label": "Token to parent convertor", + "type": "list", + "highlight_content": true, + "tooltip": "The left side is key to be used in template. 
\nThe right is value build from Tokens comming from \n\"Clip name tokenizer\"", + "object_type": { + "type": "dict", + "children": [ + { + "type": "enum", + "key": "type", + "label": "Parent type", + "enum_items": [ + {"Project": "Project"}, + {"Folder": "Folder"}, + {"Episode": "Episode"}, + {"Sequence": "Sequence"} + ] + }, + { + "type": "text", + "key": "name", + "label": "Parent token name", + "tooltip": "Unique name used in \"Parent path template\"" + }, + { + "type": "text", + "key": "value", + "label": "Parent name value", + "tooltip": "Template where any text, Anatomy keys and Tokens could be used" + } + ] + } + } + ] + }, + { + "key": "shot_add_tasks", + "label": "Add tasks to shot", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "task-types-enum", + "key": "type", + "label": "Task type", + "multiselection": false + } + ] + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Shot's subset creator", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "family_presets", + "label": "Family presets", + "object_type": { + "type": "dict", + "children": [ + { + "type": "enum", + "key": "family", + "label": "Family", + "enum_items": [ + {"review": "review"}, + {"plate": "plate"}, + {"audio": "audio"} + ] + }, + { + "type": "text", + "key": "variant", + "label": "Variant", + "placeholder": "< Inherited >" + }, + { + "type": "boolean", + "key": "review", + "label": "Review", + "default": true + }, + { + "type": "enum", + "key": "output_file_type", + "label": "Integrating file type", + "enum_items": [ + {".mp4": "MP4"}, + {".mov": "MOV"}, + {".wav": "WAV"} + ] + } + ] + } + } + ] + } + ] + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "BatchMovieCreator", + "label": "Batch Movie Creator", + "collapsible_key": true, + "children": [ + { + "type": "label", + "label": "Allows to publish multiple video files in one go.
Name of matching asset is parsed from file names ('asset.mov', 'asset_v001.mov', 'my_asset_to_publish.mov')" + }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "list", + "key": "default_tasks", + "label": "Default tasks", + "object_type": { + "type": "text" + } + }, + { + "type": "list", + "key": "extensions", + "label": "Extensions", + "use_label_wrap": true, + "collapsible_key": true, + "collapsed": false, + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "schema_template", + "name": "template_validate_plugin", + "template_data": [ + { + "key": "ValidateFrameRange", + "label": "Validate frame range" + } + ] + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json b/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json index 4e197e9fc8..09e5791ac4 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json @@ -5,6 +5,16 @@ "label": "Unreal Engine", "is_file": true, "children": [ + { + "type": "boolean", + "key": "level_sequences_for_layouts", + "label": "Generate level sequences when loading layouts" + }, + { + "type": "boolean", + "key": "delete_unmatched_assets", + "label": "Delete assets that are not matched" + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json index b76a0fa844..a81a403bcb 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json @@ -5,6 +5,38 @@ "label": "Web Publisher", "is_file": true, "children": [ + { + "type": "list", + "collapsible": true, + "use_label_wrap": true, + "key": "timeout_profiles", + "label": "Timeout profiles", + "object_type": { + "type": "dict", + "children": [ + { + "key": "hosts", + "label": "Host names", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum", + "multiselection": true + }, + { + "type": "separator" + }, + { + "type": "number", + "key": "timeout", + "label": "Timeout (sec)" + } + ] + } + }, { "type": "dict", "collapsible": true, @@ -17,6 +49,19 @@ "key": "CollectPublishedFiles", "label": "Collect Published Files", "children": [ + { + "type": "label", + "label": "Select if all versions of published items should be kept same. (As max(published) + 1.)" + }, + { + "type": "boolean", + "key": "sync_next_version", + "label": "Sync next publish version" + }, + { + "type": "label", + "label": "Configure resulting family and tags on representation based on uploaded file and task.
E.g. '.png' is uploaded >> create instance of 'render' family
'Create review' in Tags >> mark representation to create review from." + }, { "type": "dict-modifiable", "collapsible": true, @@ -42,6 +87,9 @@ "label": "Extensions", "object_type": "text" }, + { + "type": "separator" + }, { "type": "list", "key": "families", @@ -52,9 +100,6 @@ "type": "schema", "name": "schema_representation_tags" }, - { - "type": "separator" - }, { "type": "text", "key": "result_family", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index a2a566da0e..3667c9d5d8 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -16,22 +16,26 @@ { "type": "number", "key": "frameStart", - "label": "Frame Start" + "label": "Frame Start", + "maximum": 999999999 }, { "type": "number", "key": "frameEnd", - "label": "Frame End" + "label": "Frame End", + "maximum": 999999999 }, { "type": "number", "key": "clipIn", - "label": "Clip In" + "label": "Clip In", + "maximum": 999999999 }, { "type": "number", "key": "clipOut", - "label": "Clip Out" + "label": "Clip Out", + "maximum": 999999999 }, { "type": "number", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json index ef8c907dda..93b6adae6b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json @@ -1,10 +1,14 @@ { "type": "dict", "key": "imageio", - "label": "Color Management and Output Formats", + "label": "Color Management and Output Formats (Deprecated)", "is_file": true, "is_group": true, "children": [ + { + "type": "label", + "label": "These settings are deprecated and have moved to: project_settings/{app}/imageio.
You can right click to copy each host's values and paste them to apply to each host as needed.
Changing these values here will not do anything." + }, { "key": "hiero", "type": "dict", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_blender_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_blender_publish.json new file mode 100644 index 0000000000..58428ad60a --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_blender_publish.json @@ -0,0 +1,113 @@ +{ + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "label", + "label": "Validators" + }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateCameraZeroKeyframe", + "label": "Validate Camera Zero Keyframe" + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Model", + "children": [ + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateMeshHasUvs", + "label": "Validate Mesh Has UVs" + }, + { + "key": "ValidateMeshNoNegativeScale", + "label": "Validate Mesh No Negative Scale" + }, + { + "key": "ValidateTransformZero", + "label": "Validate Transform Zero" + } + ] + } + ] + }, + { + "type": "splitter" + }, + { + "type": "label", + "label": "Extractors" + }, + { + "type": "dict", + "collapsible": true, + "key": "ExtractBlend", + "label": "Extract Blend", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + } + ] + }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ExtractFBX", + "label": "Extract FBX (model and rig)" + }, + { + "key": "ExtractABC", + "label": "Extract ABC (model and pointcache)" + }, + { + "key": "ExtractBlendAnimation", + "label": "Extract Animation as Blend" + }, + { + "key": "ExtractAnimationFBX", + "label": "Extract Animation as FBX" + }, + { + "key": "ExtractCamera", + "label": "Extract FBX Camera as FBX" + }, + { + "key": "ExtractLayout", + "label": "Extract Layout as JSON" + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index 061874e31c..742437fbde 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -18,6 +18,27 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "CollectAudio", + "label": "Collect Audio", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "key": "audio_subset_name", + "label": "Name of audio variant", + "type": "text", + "placeholder": "audioMain" + } + ] + }, { "type": "dict", "collapsible": true, @@ -126,8 +147,8 @@ "type": "dict", "collapsible": true, "checkbox_key": "enabled", - "key": "ExtractJpegEXR", - "label": "ExtractJpegEXR", + "key": "ExtractThumbnail", + "label": "ExtractThumbnail", "is_group": true, "children": [ { @@ -274,6 +295,29 @@ "label": "Subsets", "type": "list", "object_type": "text" + }, + { + "type": "separator" + }, + { + "key": "custom_tags", + 
"label": "Custom Tags", + "type": "list", + "object_type": "text" + }, + { + "type": "label", + "label": "Use output always / only if input is 1 frame image / only if has 2+ frames or is video" + }, + { + "type": "enum", + "key": "single_frame_filter", + "default": "everytime", + "enum_items": [ + {"everytime": "Always"}, + {"single_frame": "Only if input has 1 image frame"}, + {"multi_frame": "Only if input is video or sequence of frames"} + ] } ] }, @@ -319,6 +363,15 @@ "minimum": 0, "maximum": 100000 }, + { + "type": "label", + "label": "Rescale input when it's pixel aspect ratio is not 1. Usefull for anamorph reviews." + }, + { + "key": "scale_pixel_aspect", + "label": "Scale pixel aspect", + "type": "boolean" + }, { "type": "label", "label": "Background color is used only when input have transparency and Alpha is higher than 0." @@ -528,22 +581,28 @@ { "type": "dict", "collapsible": true, - "key": "IntegrateAssetNew", - "label": "IntegrateAssetNew", + "key": "PreIntegrateThumbnails", + "label": "Override Integrate Thumbnail Representations", "is_group": true, + "checkbox_key": "enabled", "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Explicitly set if Thumbnail representation should be integrated into DB.
If no matching profile set, existing state from Host implementation is kept." + }, { "type": "list", - "key": "template_name_profiles", - "label": "Template name profiles", + "key": "integrate_profiles", + "label": "Integrate profiles", "use_label_wrap": true, "object_type": { "type": "dict", "children": [ - { - "type": "label", - "label": "" - }, { "key": "families", "label": "Families", @@ -562,22 +621,37 @@ "type": "task-types-enum" }, { - "key": "tasks", + "key": "task_names", "label": "Task names", "type": "list", "object_type": "text" }, + { + "key": "subsets", + "label": "Subset names", + "type": "list", + "object_type": "text" + }, { "type": "separator" }, { - "type": "text", - "key": "template_name", - "label": "Template name" + "type": "boolean", + "key": "integrate_thumbnail", + "label": "Integrate thumbnail" } ] } - }, + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateSubsetGroup", + "label": "Integrate Subset Group", + "is_group": true, + "children": [ { "type": "list", "key": "subset_grouping_profiles", @@ -626,6 +700,142 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateAssetNew", + "label": "IntegrateAsset (Legacy)", + "is_group": true, + "children": [ + { + "type": "label", + "label": "NOTE: Subset grouping profiles settings were moved to Integrate Subset Group. Please move values there." + }, + { + "type": "list", + "key": "subset_grouping_profiles", + "label": "Subset grouping profiles (DEPRECATED)", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "tasks", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template", + "label": "Template" + } + ] + } + }, + { + "type": "label", + "label": "NOTE: Publish template profiles settings were moved to Tools/Publish/Template name profiles. Please move values there." 
+ }, + { + "type": "list", + "key": "template_name_profiles", + "label": "Template name profiles (DEPRECATED)", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "label", + "label": "" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "tasks", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template_name", + "label": "Template name" + } + ] + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateAsset", + "label": "Integrate Asset", + "is_group": true, + "children": [ + { + "type": "list", + "key": "skip_host_families", + "label": "Skip hosts and families", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "hosts-enum", + "key": "host", + "label": "Host" + }, + { + "type": "list", + "key": "families", + "label": "Families", + "object_type": "text" + } + ] + } + } + ] + }, { "type": "dict", "collapsible": true, @@ -655,10 +865,14 @@ "type": "list", "object_type": "text" }, + { + "type": "label", + "label": "NOTE: Hero publish template profiles settings were moved to Tools/Publish/Hero template name profiles. Please move values there." + }, { "type": "list", "key": "template_name_profiles", - "label": "Template name profiles", + "label": "Template name profiles (DEPRECATED)", "use_label_wrap": true, "object_type": { "type": "dict", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json index f8c9482e5f..962008d476 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json @@ -149,6 +149,11 @@ "type": "boolean", "key": "enabled", "label": "Enabled" + }, + { + "type": "boolean", + "key": "use_last_published_workfile", + "label": "Use last published workfile" } ] } @@ -238,6 +243,31 @@ } ] } + }, + { + "type": "list", + "key": "workfile_lock_profiles", + "label": "Workfile lock profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "hosts-enum", + "key": "host_name", + "label": "Hosts", + "multiselection": true + }, + { + "type": "splitter" + }, + { + "key": "enabled", + "label": "Enabled", + "type": "boolean" + } + ] + } } ] }, @@ -284,6 +314,102 @@ } } ] + }, + { + "type": "dict", + "key": "publish", + "label": "Publish", + "children": [ + { + "type": "label", + "label": "NOTE: For backwards compatibility can be value empty and in that case are used values from IntegrateAssetNew. This will change in future so please move all values here as soon as possible." 
+ }, + { + "type": "list", + "key": "template_name_profiles", + "label": "Template name profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template_name", + "label": "Template name" + } + ] + } + }, + { + "type": "list", + "key": "hero_template_name_profiles", + "label": "Hero template name profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template_name", + "label": "Template name", + "tooltip": "Name of template from Anatomy templates" + } + ] + } + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_publish.json new file mode 100644 index 0000000000..aa6eaf5164 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_publish.json @@ -0,0 +1,50 @@ +{ + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "ValidateWorkfilePaths", + "label": "Validate Workfile Paths", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "key": "node_types", + "label": "Node types", + "type": "list", + "object_type": "text" + }, + { + "key": "prohibited_vars", + "label": "Prohibited variables", + "type": "list", + "object_type": "text" + } + ] + }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateContainers", + "label": "ValidateContainers" + } + ] + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_scriptshelf.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_scriptshelf.json new file mode 100644 index 0000000000..bab9b604b4 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_scriptshelf.json @@ -0,0 +1,71 @@ +{ + "type": "list", + "key": "shelves", + "label": "Shelves Manager", + "is_group": true, + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "shelf_set_name", + "label": "Shelf Set Name" + }, + { + "type": "path", + "key": "shelf_set_source_path", + "label": "Shelf Set Path (optional)", + "multipath": false, + "multiplatform": true + }, + { + "type": "list", + "key": "shelf_definition", + "label": "Shelves", + "use_label_wrap": true, + "object_type": { + "type": 
"dict", + "children": [ + { + "type": "text", + "key": "shelf_name", + "label": "Shelf Name" + }, + { + "type": "list", + "key": "tools_list", + "label": "Tools", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "label", + "label": "Name" + }, + { + "type": "path", + "key": "script", + "label": "Script" + }, + { + "type": "path", + "key": "icon", + "label": "Icon" + }, + { + "type": "text", + "key": "help", + "label": "Help" + } + ] + } + } + ] + } + } + ] + } +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json index d6b81c8687..62c33f55fc 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json @@ -94,18 +94,6 @@ } ] }, - - { - "type": "dict", - "key": "PanZoom", - "children": [ - { - "type": "boolean", - "key": "pan_zoom", - "label": " Pan Zoom" - } - ] - }, { "type": "splitter" }, @@ -153,19 +141,6 @@ "decimal": 0, "minimum": 0, "maximum": 99999 - }, - { - "type": "number", - "key": "percent", - "label": "percent", - "decimal": 1, - "minimum": 0, - "maximum": 200 - }, - { - "type": "text", - "key": "mode", - "label": "Mode" } ] }, @@ -195,12 +170,46 @@ { "nolights": "No Lights"} ] }, + { + "type": "boolean", + "key": "displayTextures", + "label": "Display Textures" + }, { "type": "number", "key": "textureMaxResolution", "label": "Texture Clamp Resolution", "decimal": 0 }, + { + "type": "splitter" + }, + { + "type":"boolean", + "key": "renderDepthOfField", + "label": "Depth of Field" + }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "shadows", + "label": "Display Shadows" + }, + { + "type": "boolean", + "key": "twoSidedLighting", + "label": "Two Sided Lighting" + }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "lineAAEnable", + "label": "Enable Anti-Aliasing" + }, { "type": "number", "key": "multiSample", @@ -210,142 +219,239 @@ "maximum": 32 }, { - "type": "boolean", - "key": "shadows", - "label": "Display Shadows" - }, - { - "type": "boolean", - "key": "textures", - "label": "Display Textures" - }, - { - "type": "boolean", - "key": "twoSidedLighting", - "label": "Two Sided Lighting" + "type": "splitter" }, { "type": "boolean", "key": "ssaoEnable", "label": "Screen Space Ambient Occlusion" }, + { + "type": "number", + "key": "ssaoAmount", + "label": "SSAO Amount" + }, + { + "type": "number", + "key": "ssaoRadius", + "label": "SSAO Radius" + }, + { + "type": "number", + "key": "ssaoFilterRadius", + "label": "SSAO Filter Radius", + "decimal": 0, + "minimum": 1, + "maximum": 32 + }, + { + "type": "number", + "key": "ssaoSamples", + "label": "SSAO Samples", + "decimal": 0, + "minimum": 8, + "maximum": 32 + }, { "type": "splitter" }, + { + "type": "boolean", + "key": "fogging", + "label": "Enable Hardware Fog" + }, + { + "type": "enum", + "key": "hwFogFalloff", + "label": "Hardware Falloff", + "enum_items": [ + { "0": "Linear"}, + { "1": "Exponential"}, + { "2": "Exponential Squared"} + ] + }, + { + "type": "number", + "key": "hwFogDensity", + "label": "Fog Density", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogStart", + "label": "Fog Start" + }, + { + "type": "number", + "key": "hwFogEnd", + "label": "Fog End" + }, + { + "type": "number", + "key": "hwFogAlpha", 
+ "label": "Fog Alpha" + }, + { + "type": "number", + "key": "hwFogColorR", + "label": "Fog Color R", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogColorG", + "label": "Fog Color G", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogColorB", + "label": "Fog Color B", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "motionBlurEnable", + "label": "Enable Motion Blur" + }, + { + "type": "number", + "key": "motionBlurSampleCount", + "label": "Motion Blur Sample Count", + "decimal": 0, + "minimum": 8, + "maximum": 32 + }, + { + "type": "number", + "key": "motionBlurShutterOpenFraction", + "label": "Shutter Open Fraction", + "decimal": 3, + "minimum": 0.01, + "maximum": 32 + }, + { + "type": "splitter" + }, + { + "type": "label", + "label": "Show" + }, { "type": "boolean", "key": "cameras", - "label": "cameras" + "label": "Cameras" }, { "type": "boolean", "key": "clipGhosts", - "label": "clipGhosts" - }, - { - "type": "boolean", - "key": "controlVertices", - "label": "controlVertices" + "label": "Clip Ghosts" }, { "type": "boolean", "key": "deformers", - "label": "deformers" + "label": "Deformers" }, { "type": "boolean", "key": "dimensions", - "label": "dimensions" + "label": "Dimensions" }, { "type": "boolean", "key": "dynamicConstraints", - "label": "dynamicConstraints" + "label": "Dynamic Constraints" }, { "type": "boolean", "key": "dynamics", - "label": "dynamics" + "label": "Dynamics" }, { "type": "boolean", "key": "fluids", - "label": "fluids" + "label": "Fluids" }, { "type": "boolean", "key": "follicles", - "label": "follicles" + "label": "Follicles" }, { "type": "boolean", "key": "gpuCacheDisplayFilter", - "label": "gpuCacheDisplayFilter" + "label": "GPU Cache" }, { "type": "boolean", "key": "greasePencils", - "label": "greasePencils" + "label": "Grease Pencil" }, { "type": "boolean", "key": "grid", - "label": "grid" + "label": "Grid" }, { "type": "boolean", "key": "hairSystems", - "label": "hairSystems" + "label": "Hair Systems" }, { "type": "boolean", "key": "handles", - "label": "handles" + "label": "Handles" }, { "type": "boolean", - "key": "hud", - "label": "hud" - }, - { - "type": "boolean", - "key": "hulls", - "label": "hulls" + "key": "headsUpDisplay", + "label": "HUD" }, { "type": "boolean", "key": "ikHandles", - "label": "ikHandles" + "label": "IK Handles" }, { "type": "boolean", "key": "imagePlane", - "label": "imagePlane" + "label": "Image Planes" }, { "type": "boolean", "key": "joints", - "label": "joints" + "label": "Joints" }, { "type": "boolean", "key": "lights", - "label": "lights" + "label": "Lights" }, { "type": "boolean", "key": "locators", - "label": "locators" + "label": "Locators" }, { "type": "boolean", "key": "manipulators", - "label": "manipulators" + "label": "Manipulators" }, { "type": "boolean", "key": "motionTrails", - "label": "motionTrails" + "label": "Motion Trails" }, { "type": "boolean", @@ -362,50 +468,65 @@ "key": "nRigids", "label": "nRigids" }, + { + "type": "boolean", + "key": "controlVertices", + "label": "NURBS CVs" + }, { "type": "boolean", "key": "nurbsCurves", - "label": "nurbsCurves" + "label": "NURBS Curves" + }, + { + "type": "boolean", + "key": "hulls", + "label": "NURBS Hulls" }, { "type": "boolean", "key": "nurbsSurfaces", - "label": "nurbsSurfaces" + "label": "NURBS Surfaces" }, { "type": "boolean", "key": "particleInstancers", - "label": "particleInstancers" + "label": "Particle 
Instancers" }, { "type": "boolean", "key": "pivots", - "label": "pivots" + "label": "Pivots" }, { "type": "boolean", "key": "planes", - "label": "planes" + "label": "Planes" }, { "type": "boolean", "key": "pluginShapes", - "label": "pluginShapes" + "label": "Plugin Shapes" }, { "type": "boolean", "key": "polymeshes", - "label": "polymeshes" + "label": "Polygons" }, { "type": "boolean", "key": "strokes", - "label": "strokes" + "label": "Strokes" }, { "type": "boolean", "key": "subdivSurfaces", - "label": "subdivSurfaces" + "label": "Subdiv Surfaces" + }, + { + "type": "boolean", + "key": "textures", + "label": "Texture Placements" } ] }, @@ -418,47 +539,47 @@ { "type": "boolean", "key": "displayGateMask", - "label": "displayGateMask" + "label": "Display Gate Mask" }, { "type": "boolean", "key": "displayResolution", - "label": "displayResolution" + "label": "Display Resolution" }, { "type": "boolean", "key": "displayFilmGate", - "label": "displayFilmGate" + "label": "Display Film Gate" }, { "type": "boolean", "key": "displayFieldChart", - "label": "displayFieldChart" + "label": "Display Field Chart" }, { "type": "boolean", "key": "displaySafeAction", - "label": "displaySafeAction" + "label": "Display Safe Action" }, { "type": "boolean", "key": "displaySafeTitle", - "label": "displaySafeTitle" + "label": "Display Safe Title" }, { "type": "boolean", "key": "displayFilmPivot", - "label": "displayFilmPivot" + "label": "Display Film Pivot" }, { "type": "boolean", "key": "displayFilmOrigin", - "label": "displayFilmOrigin" + "label": "Display Film Origin" }, { "type": "number", "key": "overscan", - "label": "overscan", + "label": "Overscan", "decimal": 1, "minimum": 0, "maximum": 10 diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json index 6dc10ed2a5..bc6520474d 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json @@ -29,42 +29,9 @@ } ] }, - { - "type": "dict", - "collapsible": true, - "key": "CreateRender", - "label": "Create Render", - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "list", - "key": "defaults", - "label": "Default Subsets", - "object_type": "text" - }, - { - "key": "aov_separator", - "label": "AOV Separator character", - "type": "enum", - "multiselection": false, - "default": "underscore", - "enum_items": [ - {"dash": "- (dash)"}, - {"underscore": "_ (underscore)"}, - {"dot": ". 
(dot)"} - ] - }, - { - "type": "text", - "key": "default_render_image_folder", - "label": "Default render image folder" - } - ] + { + "type": "schema", + "name": "schema_maya_create_render" }, { "type": "dict", @@ -124,13 +91,131 @@ ] }, + { + "type": "dict", + "collapsible": true, + "key": "CreateMultiverseLook", + "label": "Create Multiverse Look", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "publish_mip_map", + "label": "Publish Mip Maps" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateAnimation", + "label": "Create Animation", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "boolean", + "key": "write_face_sets", + "label": "Write Face Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateModel", + "label": "Create Model", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "boolean", + "key": "write_face_sets", + "label": "Write Face Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreatePointCache", + "label": "Create Point Cache", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "boolean", + "key": "write_face_sets", + "label": "Write Face Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { "type": "schema_template", "name": "template_create_plugin", "template_data": [ { - "key": "CreateAnimation", - "label": "Create Animation" + "key": "CreateMultiverseUsd", + "label": "Create Multiverse USD" + }, + { + "key": "CreateMultiverseUsdComp", + "label": "Create Multiverse USD Composition" + }, + { + "key": "CreateMultiverseUsdOver", + "label": "Create Multiverse USD Override" }, { "key": "CreateAss", @@ -152,14 +237,6 @@ "key": "CreateMayaScene", "label": "Create Maya Scene" }, - { - "key": "CreateModel", - "label": "Create Model" - }, - { - "key": "CreatePointCache", - "label": "Create Cache" - }, { "key": "CreateRenderSetup", "label": "Create Render Setup" diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json new file mode 100644 index 0000000000..68ad7ad63d --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json @@ -0,0 +1,20 @@ +{ + "type": "dict", + "collapsible": true, + "key": "CreateRender", + "label": "Create Render", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] +} \ No newline at end of file diff --git 
a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index 2e5bc64e1c..ab8c6b885e 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -21,6 +21,20 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CollectFbxCamera", + "label": "Collect Camera for FBX export", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "splitter" }, @@ -48,13 +62,36 @@ } ] }, - { - "type": "schema_template", - "name": "template_publish_plugin", - "template_data": [ + { + "type": "dict", + "collapsible": true, + "key": "ValidateFrameRange", + "label": "Validate Frame Range", + "checkbox_key": "enabled", + "children": [ { - "key": "ValidateFrameRange", - "label": "Validate Frame Range" + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "type": "splitter" + }, + { + "key": "exclude_families", + "label": "Families", + "type": "list", + "object_type": "text" } ] }, @@ -70,6 +107,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "label", "label": "Shader name regex can use named capture group asset to validate against current asset name.

Example:
^.*(?P<asset>.+)_SHD

" @@ -122,6 +164,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "whitelist_native_plugins", @@ -209,6 +256,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "validate_mesh", @@ -295,6 +347,72 @@ } ] }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateCurrentRenderLayerIsRenderable", + "label": "Validate Current Render Layer Has Renderable Camera" + }, + { + "key": "ValidateRenderImageRule", + "label": "Validate Images File Rule (Workspace)" + }, + { + "key": "ValidateRenderNoDefaultCameras", + "label": "Validate No Default Cameras Renderable" + }, + { + "key": "ValidateRenderSingleCamera", + "label": "Validate Render Single Camera" + }, + { + "key": "ValidateRenderLayerAOVs", + "label": "Validate Render Passes / AOVs Are Registered" + }, + { + "key": "ValidateStepSize", + "label": "Validate Step Size" + }, + { + "key": "ValidateVRayDistributedRendering", + "label": "VRay Distributed Rendering" + }, + { + "key": "ValidateVrayReferencedAOVs", + "label": "VRay Referenced AOVs" + }, + { + "key": "ValidateVRayTranslatorEnabled", + "label": "VRay Translator Settings" + }, + { + "key": "ValidateVrayProxy", + "label": "VRay Proxy Settings" + }, + { + "key": "ValidateVrayProxyMembers", + "label": "VRay Proxy Members" + }, + { + "key": "ValidateYetiRenderScriptCallbacks", + "label": "Yeti Render Script Callbacks" + }, + { + "key": "ValidateYetiRigCacheState", + "label": "Yeti Rig Cache State" + }, + { + "key": "ValidateYetiRigInputShapesInInstance", + "label": "Yeti Rig Input Shapes In Instance" + }, + { + "key": "ValidateYetiRigSettings", + "label": "Yeti Rig Settings" + } + ] + }, { "type": "collapsible-wrap", "label": "Model", @@ -379,6 +497,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "label", "label": "Validates transform suffix based on the type of its children shapes." 
@@ -435,6 +558,14 @@ "key": "ValidateMeshNonManifold", "label": "ValidateMeshNonManifold" }, + { + "key": "ValidateMeshNoNegativeScale", + "label": "Validate Mesh No Negative Scale" + }, + { + "key": "ValidateMeshNonZeroEdgeLength", + "label": "Validate Mesh Edge Length Non Zero" + }, { "key": "ValidateMeshNormalsUnlocked", "label": "ValidateMeshNormalsUnlocked" @@ -488,6 +619,61 @@ { "key": "ValidateUniqueNames", "label": "ValidateUniqueNames" + }, + { + "key": "ValidateNoVRayMesh", + "label": "Validate No V-Ray Proxies (VRayMesh)" + }, + { + "key": "ValidateUnrealMeshTriangulated", + "label": "Validate if Mesh is Triangulated" + }, + { + "key": "ValidateAlembicVisibleOnly", + "label": "Validate Alembic visible node" + } + ] + }, + { + "type": "label", + "label": "Extractors" + }, + { + "type": "dict", + "collapsible": true, + "key": "ExtractAlembic", + "label": "Extract Alembic", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "ExtractObj", + "label": "Extract OBJ", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" } ] } @@ -512,6 +698,26 @@ { "key": "ValidateRigControllers", "label": "Validate Rig Controllers" + }, + { + "key": "ValidateAnimationContent", + "label": "Validate Animation Content" + }, + { + "key": "ValidateOutRelatedNodeIds", + "label": "Validate Animation Out Set Related Node Ids" + }, + { + "key": "ValidateRigControllersArnoldAttributes", + "label": "Validate Rig Controllers (Arnold Attributes)" + }, + { + "key": "ValidateSkeletalMeshHierarchy", + "label": "Validate Skeletal Mesh Top Node" + }, + { + "key": "ValidateSkinclusterDeformerSet", + "label": "Validate Skincluster Deformer Relationships" } ] }, @@ -528,6 +734,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "allow_history_only", @@ -550,9 +761,57 @@ "key": "ValidateAssemblyName", "label": "Validate Assembly Name" }, + { + "key": "ValidateAssemblyNamespaces", + "label": "Validate Assembly Namespaces" + }, + { + "key": "ValidateAssemblyModelTransforms", + "label": "Validate Assembly Model Transforms" + }, { "key": "ValidateAssRelativePaths", "label": "ValidateAssRelativePaths" + }, + { + "key": "ValidateInstancerContent", + "label": "Validate Instancer Content" + }, + { + "key": "ValidateInstancerFrameRanges", + "label": "Validate Instancer Cache Frame Ranges" + }, + { + "key": "ValidateNoDefaultCameras", + "label": "Validate No Default Cameras" + }, + { + "key": "ValidateUnrealUpAxis", + "label": "Validate Unreal Up-Axis check" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "ValidateCameraContents", + "label": "Validate Camera Content", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "validate_shapes", + "label": "Validate presence of shapes" } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json new file mode 100644 index 
0000000000..0cbb684fc6 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json @@ -0,0 +1,423 @@ +{ + "type": "dict", + "collapsible": true, + "key": "RenderSettings", + "label": "Render Settings", + "children": [ + { + "type": "boolean", + "key": "apply_render_settings", + "label": "Apply Render Settings on creation" + }, + { + "type": "text", + "key": "default_render_image_folder", + "label": "Default render image folder" + }, + { + "type": "boolean", + "key": "enable_all_lights", + "label": "Include all lights in Render Setup Layers by default" + }, + { + "key": "aov_separator", + "label": "AOV Separator character", + "type": "enum", + "multiselection": false, + "default": "underscore", + "enum_items": [ + {"dash": "- (dash)"}, + {"underscore": "_ (underscore)"}, + {"dot": ". (dot)"} + ] + }, + { + "key": "reset_current_frame", + "label": "Reset Current Frame", + "type": "boolean" + }, + { + "type": "dict", + "collapsible": true, + "key": "arnold_renderer", + "label": "Arnold Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"jpeg": "jpeg"}, + {"png": "png"}, + {"deepexr": "deep exr"}, + {"tif": "tif"}, + {"exr": "exr"}, + {"maya": "maya"}, + {"mtoa_shaders": "mtoa_shaders"} + ] + }, + { + "key": "multilayer_exr", + "label": "Multilayer (exr)", + "type": "boolean" + }, + { + "key": "tiled", + "label": "Tiled (tif, exr)", + "type": "boolean" + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< empty >"}, + {"ID": "ID"}, + {"N": "N"}, + {"P": "P"}, + {"Pref": "Pref"}, + {"RGBA": "RGBA"}, + {"Z": "Z"}, + {"albedo": "albedo"}, + {"background": "background"}, + {"coat": "coat"}, + {"coat_albedo": "coat_albedo"}, + {"coat_direct": "coat_direct"}, + {"coat_indirect": "coat_indirect"}, + {"cputime": "cputime"}, + {"crypto_asset": "crypto_asset"}, + {"crypto_material": "cypto_material"}, + {"crypto_object": "crypto_object"}, + {"diffuse": "diffuse"}, + {"diffuse_albedo": "diffuse_albedo"}, + {"diffuse_direct": "diffuse_direct"}, + {"diffuse_indirect": "diffuse_indirect"}, + {"direct": "direct"}, + {"emission": "emission"}, + {"highlight": "highlight"}, + {"indirect": "indirect"}, + {"motionvector": "motionvector"}, + {"opacity": "opacity"}, + {"raycount": "raycount"}, + {"rim_light": "rim_light"}, + {"shadow": "shadow"}, + {"shadow_diff": "shadow_diff"}, + {"shadow_mask": "shadow_mask"}, + {"shadow_matte": "shadow_matte"}, + {"sheen": "sheen"}, + {"sheen_albedo": "sheen_albedo"}, + {"sheen_direct": "sheen_direct"}, + {"sheen_indirect": "sheen_indirect"}, + {"specular": "specular"}, + {"specular_albedo": "specular_albedo"}, + {"specular_direct": "specular_direct"}, + {"specular_indirect": "specular_indirect"}, + {"sss": "sss"}, + {"sss_albedo": "sss_albedo"}, + {"sss_direct": "sss_direct"}, + {"sss_indirect": "sss_indirect"}, + {"transmission": "transmission"}, + {"transmission_albedo": "transmission_albedo"}, + {"transmission_direct": "transmission_direct"}, + {"transmission_indirect": "transmission_indirect"}, + {"volume": "volume"}, + {"volume_Z": "volume_Z"}, + {"volume_albedo": "volume_albedo"}, + {"volume_direct": "volume_direct"}, + {"volume_indirect": "volume_indirect"}, + {"volume_opacity": 
"volume_opacity"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like defaultArnoldRenderOptions.AASamples = 4" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "vray_renderer", + "label": "V-Ray Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "engine", + "label": "Production Engine", + "type": "enum", + "multiselection": false, + "defaults": "1", + "enum_items": [ + {"1": "V-Ray"}, + {"2": "V-Ray GPU"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"png": "png"}, + {"jpg": "jpg"}, + {"vrimg": "vrimg"}, + {"hdr": "hdr"}, + {"exr": "exr"}, + {"exr (multichannel)": "exr (multichannel)"}, + {"exr (deep)": "exr (deep)"}, + {"tga": "tga"}, + {"bmp": "bmp"}, + {"sgi": "sgi"} + ] + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< empty >"}, + {"atmosphereChannel": "atmosphere"}, + {"backgroundChannel": "background"}, + {"bumpNormalsChannel": "bumpnormals"}, + {"causticsChannel": "caustics"}, + {"coatFilterChannel": "coat_filter"}, + {"coatGlossinessChannel": "coatGloss"}, + {"coatReflectionChannel": "coat_reflection"}, + {"vrayCoatChannel": "coat_specular"}, + {"CoverageChannel": "coverage"}, + {"cryptomatteChannel": "cryptomatte"}, + {"customColor": "custom_color"}, + {"drBucketChannel": "DR"}, + {"denoiserChannel": "denoiser"}, + {"diffuseChannel": "diffuse"}, + {"ExtraTexElement": "extraTex"}, + {"giChannel": "GI"}, + {"LightMixElement": "None"}, + {"lightingChannel": "lighting"}, + {"LightingAnalysisChannel": "LightingAnalysis"}, + {"materialIDChannel": "materialID"}, + {"MaterialSelectElement": "materialSelect"}, + {"matteShadowChannel": "matteShadow"}, + {"MultiMatteElement": "multimatte"}, + {"multimatteIDChannel": "multimatteID"}, + {"normalsChannel": "normals"}, + {"nodeIDChannel": "objectId"}, + {"objectSelectChannel": "objectSelect"}, + {"rawCoatFilterChannel": "raw_coat_filter"}, + {"rawCoatReflectionChannel": "raw_coat_reflection"}, + {"rawDiffuseFilterChannel": "rawDiffuseFilter"}, + {"rawGiChannel": "rawGI"}, + {"rawLightChannel": "rawLight"}, + {"rawReflectionChannel": "rawReflection"}, + {"rawReflectionFilterChannel": "rawReflectionFilter"}, + {"rawRefractionChannel": "rawRefraction"}, + {"rawRefractionFilterChannel": "rawRefractionFilter"}, + {"rawShadowChannel": "rawShadow"}, + {"rawSheenFilterChannel": "raw_sheen_filter"}, + {"rawSheenReflectionChannel": "raw_sheen_reflection"}, + {"rawTotalLightChannel": "rawTotalLight"}, + {"reflectIORChannel": "reflIOR"}, + {"reflectChannel": "reflect"}, + {"reflectionFilterChannel": "reflectionFilter"}, + {"reflectGlossinessChannel": "reflGloss"}, + {"refractChannel": "refract"}, + {"refractionFilterChannel": "refractionFilter"}, + {"refractGlossinessChannel": "refrGloss"}, + {"renderIDChannel": "renderId"}, + {"FastSSS2Channel": "SSS"}, + {"sampleRateChannel": "sampleRate"}, + {"samplerInfo": "samplerInfo"}, + {"selfIllumChannel": "selfIllum"}, + {"shadowChannel": "shadow"}, + {"sheenFilterChannel": "sheen_filter"}, + {"sheenGlossinessChannel": "sheenGloss"}, + 
{"sheenReflectionChannel": "sheen_reflection"}, + {"vraySheenChannel": "sheen_specular"}, + {"specularChannel": "specular"}, + {"Toon": "Toon"}, + {"toonLightingChannel": "toonLighting"}, + {"toonSpecularChannel": "toonSpecular"}, + {"totalLightChannel": "totalLight"}, + {"unclampedColorChannel": "unclampedColor"}, + {"VRScansPaintMaskChannel": "VRScansPaintMask"}, + {"VRScansZoneMaskChannel": "VRScansZoneMask"}, + {"velocityChannel": "velocity"}, + {"zdepthChannel": "zDepth"}, + {"LightSelectElement": "lightselect"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like vraySettings.aaFilterSize = 1.5" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "redshift_renderer", + "label": "Redshift Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "primary_gi_engine", + "label": "Primary GI Engine", + "type": "enum", + "multiselection": false, + "defaults": "0", + "enum_items": [ + {"0": "None"}, + {"1": "Photon Map"}, + {"2": "Irradiance Cache"}, + {"3": "Brute Force"} + ] + }, + { + "key": "secondary_gi_engine", + "label": "Secondary GI Engine", + "type": "enum", + "multiselection": false, + "defaults": "0", + "enum_items": [ + {"0": "None"}, + {"1": "Photon Map"}, + {"2": "Irradiance Cache"}, + {"3": "Brute Force"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"iff": "Maya IFF"}, + {"exr": "OpenEXR"}, + {"tif": "TIFF"}, + {"png": "PNG"}, + {"tga": "Targa"}, + {"jpg": "JPEG"} + ] + }, + { + "key": "multilayer_exr", + "label": "Multilayer (exr)", + "type": "boolean" + }, + { + "key": "force_combine", + "label": "Force combine beauty and AOVs", + "type": "boolean" + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< none >"}, + {"AO": "Ambient Occlusion"}, + {"Background": "Background"}, + {"Beauty": "Beauty"}, + {"BumpNormals": "Bump Normals"}, + {"Caustics": "Caustics"}, + {"CausticsRaw": "Caustics Raw"}, + {"Cryptomatte": "Cryptomatte"}, + {"Custom": "Custom"}, + {"Z": "Depth"}, + {"DiffuseFilter": "Diffuse Filter"}, + {"DiffuseLighting": "Diffuse Lighting"}, + {"DiffuseLightingRaw": "Diffuse Lighting Raw"}, + {"Emission": "Emission"}, + {"GI": "Global Illumination"}, + {"GIRaw": "Global Illumination Raw"}, + {"Matte": "Matte"}, + {"MotionVectors": "Ambient Occlusion"}, + {"N": "Normals"}, + {"ID": "ObjectID"}, + {"ObjectBumpNormal": "Object-Space Bump Normals"}, + {"ObjectPosition": "Object-Space Positions"}, + {"PuzzleMatte": "Puzzle Matte"}, + {"Reflections": "Reflections"}, + {"ReflectionsFilter": "Reflections Filter"}, + {"ReflectionsRaw": "Reflections Raw"}, + {"Refractions": "Refractions"}, + {"RefractionsFilter": "Refractions Filter"}, + {"RefractionsRaw": "Refractions Filter"}, + {"Shadows": "Shadows"}, + {"SpecularLighting": "Specular Lighting"}, + {"SSS": "Sub Surface Scatter"}, + {"SSSRaw": "Sub Surface Scatter Raw"}, + {"TotalDiffuseLightingRaw": "Total Diffuse Lighting Raw"}, + {"TotalTransLightingRaw": "Total Translucency Filter"}, + {"TransTint": "Translucency Filter"}, + {"TransGIRaw": "Translucency 
Lighting Raw"}, + {"VolumeFogEmission": "Volume Fog Emission"}, + {"VolumeFogTint": "Volume Fog Tint"}, + {"VolumeLighting": "Volume Lighting"}, + {"P": "World Position"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like redshiftOptions.reflectionMaxTraceDepth = 3" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_imageio.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_imageio.json new file mode 100644 index 0000000000..52db853ef6 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_imageio.json @@ -0,0 +1,254 @@ +{ + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "checkbox_key": "enabled", + "collapsible": true, + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "key": "viewer", + "type": "dict", + "label": "Viewer", + "collapsible": false, + "children": [ + { + "type": "text", + "key": "viewerProcess", + "label": "Viewer Process" + } + ] + }, + { + "key": "baking", + "type": "dict", + "label": "Extract-review baking profile", + "collapsible": false, + "children": [ + { + "type": "text", + "key": "viewerProcess", + "label": "Viewer Process" + } + ] + }, + { + "key": "workfile", + "type": "dict", + "label": "Workfile", + "collapsible": false, + "children": [ + { + "type": "form", + "children": [ + { + "type": "enum", + "key": "colorManagement", + "label": "color management", + "enum_items": [ + { + "Nuke": "Nuke" + }, + { + "OCIO": "OCIO" + } + ] + }, + { + "type": "enum", + "key": "OCIO_config", + "label": "OpenColorIO Config", + "enum_items": [ + { + "nuke-default": "nuke-default" + }, + { + "spi-vfx": "spi-vfx" + }, + { + "spi-anim": "spi-anim" + }, + { + "aces_0.1.1": "aces_0.1.1" + }, + { + "aces_0.7.1": "aces_0.7.1" + }, + { + "aces_1.0.1": "aces_1.0.1" + }, + { + "aces_1.0.3": "aces_1.0.3" + }, + { + "aces_1.1": "aces_1.1" + }, + { + "aces_1.2": "aces_1.2" + }, + { + "custom": "custom" + } + ] + }, + { + "type": "path", + "key": "customOCIOConfigPath", + "label": "Custom OCIO config path", + "multiplatform": true, + "multipath": true + }, + { + "type": "text", + "key": "workingSpaceLUT", + "label": "Working Space" + }, + { + "type": "text", + "key": "monitorLut", + "label": "monitor" + }, + { + "type": "text", + "key": "int8Lut", + "label": "8-bit files" + }, + { + "type": "text", + "key": "int16Lut", + "label": "16-bit files" + }, + { + "type": "text", + "key": "logLut", + "label": "log files" + }, + { + "type": "text", + "key": "floatLut", + "label": "float files" + } + ] + } + ] + }, + { + "key": "nodes", + "type": "dict", + "label": "Nodes", + "collapsible": true, + "children": [ + { + "key": "requiredNodes", + "type": "list", + "label": "Plugin required", + "object_type": { + "type": "dict", + "children": [ + { + "type": "list", + "key": "plugins", + "label": "Used in plugins", + "object_type": { + "type": "text", + "key": "pluginClass" + } + }, + { + "type": "text", + "key": "nukeNodeClass", + "label": "Nuke Node Class" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Knobs", + "key": "knobs" + } + ] + } + + ] + } + }, + { + "type": "splitter" + }, + { + 
"type": "list", + "key": "overrideNodes", + "label": "Plugin's node overrides", + "object_type": { + "type": "dict", + "children": [ + { + "type": "list", + "key": "plugins", + "label": "Used in plugins", + "object_type": { + "type": "text", + "key": "pluginClass" + } + }, + { + "type": "text", + "key": "nukeNodeClass", + "label": "Nuke Node Class" + }, + { + "key": "subsets", + "label": "Subsets", + "type": "list", + "object_type": "text" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Knobs overrides", + "key": "knobs" + } + ] + } + ] + } + } + ] + }, + { + "key": "regexInputs", + "type": "dict", + "label": "Colorspace on Inputs by regex detection", + "collapsible": true, + "children": [ + { + "type": "list", + "key": "inputs", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "regex", + "label": "Regex" + }, + { + "type": "text", + "key": "colorspace", + "label": "Colorspace" + } + ] + } + } + ] + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json index 5bd8337e4c..805424c632 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json @@ -11,10 +11,52 @@ { "key": "LoadImage", "label": "Image Loader" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "LoadClip", + "label": "Clip Loader", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" }, { - "key": "LoadClip", - "label": "Clip Loader" + "type": "list", + "key": "_representations", + "label": "Representations", + "object_type": "text" + }, + { + "type": "text", + "key": "node_name_template", + "label": "Node name template" + }, + { + "type": "splitter" + }, + { + "type": "dict", + "collapsible": false, + "key": "options_defaults", + "label": "Loader option defaults", + "children": [ + { + "type": "boolean", + "key": "start_at_workfile", + "label": "Start at worfile beggining" + }, + { + "type": "boolean", + "key": "add_retime", + "label": "Add retime" + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json index 94b52bba13..c91d3c0e3d 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json @@ -41,6 +41,9 @@ }, { "render": "render" + }, + { + "write": "write" } ] } @@ -58,8 +61,8 @@ "name": "template_publish_plugin", "template_data": [ { - "key": "ValidateInstanceInContext", - "label": "Validate Instance In Context" + "key": "ValidateCorrectAssetName", + "label": "Validate Correct Asset name" } ] }, @@ -132,9 +135,31 @@ "label": "Enabled" }, { - "type": "raw-json", - "key": "nodes", - "label": "Nodes" + "type": "boolean", + "key": "use_rendered", + "label": "Use rendered images" + }, + { + "type": "boolean", + "key": "bake_viewer_process", + "label": "Bake viewer process" + }, + { + "type": "boolean", + "key": "bake_viewer_input_process", + "label": "Bake viewer input process" + }, + { + "type": "collapsible-wrap", + "label": "Nodes", + "collapsible": true, + "children": [ + { + "type": "raw-json", + "key": "nodes", + 
"label": "Nodes" + } + ] } ] }, @@ -271,8 +296,8 @@ "label": "Write node file type" }, { - "key": "add_tags", - "label": "Add additional tags to representations", + "key": "add_custom_tags", + "label": "Add custom tags", "type": "list", "object_type": "text" } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_scriptsgizmo.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_scriptsgizmo.json new file mode 100644 index 0000000000..abe14970c5 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_scriptsgizmo.json @@ -0,0 +1,124 @@ +{ + "type": "list", + "key": "gizmo", + "label": "Gizmo Menu", + "is_group": true, + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "toolbar_menu_name", + "label": "Toolbar Menu Name" + }, + { + "type": "path", + "key": "gizmo_source_dir", + "label": "Gizmo directory path", + "multipath": true, + "multiplatform": true + }, + { + "type": "collapsible-wrap", + "label": "Options", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "path", + "key": "toolbar_icon_path", + "label": "Toolbar Icon Path", + "multipath": false, + "multiplatform": true + }, + { + "type": "splitter" + }, + { + "type": "list", + "key": "gizmo_definition", + "label": "Gizmo definitions", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "gizmo_toolbar_path", + "label": "Gizmo Menu Path" + }, + { + "type": "list", + "key": "sub_gizmo_list", + "label": "Sub Gizmo List", + "use_label_wrap": true, + "object_type": { + "type": "dict-conditional", + "enum_key": "sourcetype", + "enum_label": "Type of usage", + "enum_children": [ + { + "key": "python", + "label": "Python", + "children": [ + { + "type": "text", + "key": "title", + "label": "Title" + }, + { + "type": "text", + "key": "command", + "label": "Python command" + }, + { + "type": "text", + "key": "shortcut", + "label": "Hotkey" + } + ] + }, + { + "key": "file", + "label": "File", + "children": [ + { + "type": "text", + "key": "title", + "label": "Title" + }, + { + "type": "text", + "key": "file_name", + "label": "Gizmo file name" + }, + { + "type": "text", + "key": "shortcut", + "label": "Hotkey" + } + ] + }, + { + "key": "separator", + "label": "Separator", + "children": [ + { + "type": "text", + "key": "gizmo_toolbar_path", + "label": "Toolbar path" + } + ] + } + ] + } + } + ] + } + } + ] + } + ] + } +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json index 484fbf9d07..a4b28f47bc 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json @@ -13,6 +13,9 @@ { "ftrackreview": "Add review to Ftrack" }, + { + "shotgridreview": "Add review to Shotgrid" + }, { "delete": "Delete output" }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_scriptsmenu.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_scriptsmenu.json similarity index 100% rename from openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_scriptsmenu.json rename to openpype/settings/entities/schemas/projects_schema/schemas/schema_scriptsmenu.json diff --git 
a/openpype/settings/entities/schemas/projects_schema/schemas/schema_templated_workfile_build.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_templated_workfile_build.json new file mode 100644 index 0000000000..99a29beb27 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_templated_workfile_build.json @@ -0,0 +1,35 @@ +{ + "type": "dict", + "collapsible": true, + "key": "templated_workfile_build", + "label": "Templated Workfile Build Settings", + "children": [ + { + "type": "list", + "key": "profiles", + "label": "Profiles", + "object_type": { + "type": "dict", + "children": [ + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "key": "path", + "label": "Path to template", + "type": "text", + "object_type": "text" + } + ] + } + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_validate_plugin.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_validate_plugin.json new file mode 100644 index 0000000000..b57cad6719 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/template_validate_plugin.json @@ -0,0 +1,26 @@ +[ + { + "type": "dict", + "collapsible": true, + "key": "{key}", + "label": "{label}", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + } + ] + } +] diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json index 334c9aa235..b92a2edf85 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json @@ -20,31 +20,21 @@ "type": "raw-json" }, { - "type": "dict", + "type": "dict-modifiable", "key": "variants", - "children": [ - { - "type": "schema_template", - "name": "template_host_variant", - "template_data": [ - { - "app_variant_label": "2020", - "app_variant": "2020", - "variant_skip_paths": ["use_python_2"] - }, - { - "app_variant_label": "2021", - "app_variant": "2021", - "variant_skip_paths": ["use_python_2"] - }, - { - "app_variant_label": "2022", - "app_variant": "2022", - "variant_skip_paths": ["use_python_2"] - } - ] - } - ] + "collapsible_key": true, + "use_label_wrap": false, + "object_type": { + "type": "dict", + "collapsible": true, + "children": [ + { + "type": "schema_template", + "name": "template_host_variant_items", + "skip_paths": ["use_python_2"] + } + ] + } } ] } diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json index 69ce7735e8..d5d041d0c2 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json @@ -20,31 +20,21 @@ "type": "raw-json" }, { - "type": "dict", + "type": "dict-modifiable", "key": "variants", - "children": [ - { - "type": "schema_template", - "name": "template_host_variant", - "template_data": [ - { - "app_variant_label": "21", - "app_variant": "21", - 
"variant_skip_paths": ["use_python_2"] - }, - { - "app_variant_label": "20", - "app_variant": "20", - "variant_skip_paths": ["use_python_2"] - }, - { - "app_variant_label": "17", - "app_variant": "17", - "variant_skip_paths": ["use_python_2"] - } - ] - } - ] + "collapsible_key": true, + "use_label_wrap": false, + "object_type": { + "type": "dict", + "collapsible": true, + "children": [ + { + "type": "schema_template", + "name": "template_host_variant_items", + "skip_paths": ["use_python_2"] + } + ] + } } ] } diff --git a/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json b/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json index 654ddf2938..7c5774415c 100644 --- a/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json +++ b/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json @@ -50,8 +50,15 @@ "is_group": true, "children": [ { - "type": "label", - "label": "Intent" + "type": "boolean", + "key": "allow_empty_intent", + "label": "Allow empty intent" + }, + { + "type": "text", + "key": "empty_intent_label", + "label": "Empty item label", + "placeholder": "< Not set >" }, { "type": "dict-modifiable", @@ -64,7 +71,8 @@ { "key": "default", "type": "text", - "label": "Default Intent" + "label": "Default Intent", + "placeholder": "< First available >" }, { "type": "separator" diff --git a/openpype/settings/entities/schemas/system_schema/module_settings/schema_kitsu.json b/openpype/settings/entities/schemas/system_schema/module_settings/schema_kitsu.json new file mode 100644 index 0000000000..15a2ccc58d --- /dev/null +++ b/openpype/settings/entities/schemas/system_schema/module_settings/schema_kitsu.json @@ -0,0 +1,23 @@ +{ + "type": "dict", + "key": "kitsu", + "label": "Kitsu", + "collapsible": true, + "require_restart": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "server", + "label": "Server" + }, + { + "type": "splitter" + } + ] +} diff --git a/openpype/settings/entities/schemas/system_schema/schema_modules.json b/openpype/settings/entities/schemas/system_schema/schema_modules.json index 52595914ed..952b38040c 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_modules.json +++ b/openpype/settings/entities/schemas/system_schema/schema_modules.json @@ -44,6 +44,64 @@ "type": "schema", "name": "schema_ftrack" }, + { + "type": "schema", + "name": "schema_kitsu" + }, + { + "type": "dict", + "key": "shotgrid", + "label": "Shotgrid", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "leecher_manager_url", + "label": "Shotgrid Leecher Manager URL" + }, + { + "type": "text", + "key": "leecher_backend_url", + "label": "Shotgrid Leecher Backend URL" + }, + { + "type": "boolean", + "key": "filter_projects_by_login", + "label": "Filter projects by SG login" + }, + { + "type": "dict-modifiable", + "key": "shotgrid_settings", + "label": "Shotgrid Servers", + "object_type": { + "type": "dict", + "children": [ + { + "key": "shotgrid_url", + "label": "Server URL", + "type": "text" + }, + { + "key": "shotgrid_script_name", + "label": "Script Name", + "type": "text" + }, + { + "key": "shotgrid_script_key", + "label": "Script api key", + "type": "text" + } + ] + } + } + ] + }, { "type": "dict", "key": "timers_manager", diff --git 
a/openpype/settings/handlers.py b/openpype/settings/handlers.py index c99fc6080b..def8c16ea7 100644 --- a/openpype/settings/handlers.py +++ b/openpype/settings/handlers.py @@ -7,6 +7,8 @@ from abc import ABCMeta, abstractmethod import six import openpype.version +from openpype.client.mongo import OpenPypeMongoConnection +from openpype.client.entities import get_project_connection, get_project from .constants import ( GLOBAL_SETTINGS_KEY, @@ -20,6 +22,164 @@ from .constants import ( ) +class SettingsStateInfo: + """Helper state information about some settings state. + + Is used to hold information about last saved and last opened UI. Keep + information about the time when that happened and on which machine under + which user and on which openpype version. + + To create currrent machine and time information use 'create_new' method. + """ + + timestamp_format = "%Y-%m-%d %H:%M:%S.%f" + + def __init__( + self, + openpype_version, + settings_type, + project_name, + timestamp, + hostname, + hostip, + username, + system_name, + local_id + ): + self.openpype_version = openpype_version + self.settings_type = settings_type + self.project_name = project_name + + timestamp_obj = None + if timestamp: + timestamp_obj = datetime.datetime.strptime( + timestamp, self.timestamp_format + ) + self.timestamp = timestamp + self.timestamp_obj = timestamp_obj + self.hostname = hostname + self.hostip = hostip + self.username = username + self.system_name = system_name + self.local_id = local_id + + def copy(self): + return self.from_data(self.to_data()) + + @classmethod + def create_new( + cls, openpype_version, settings_type=None, project_name=None + ): + """Create information about this machine for current time.""" + + from openpype.lib.pype_info import get_workstation_info + + now = datetime.datetime.now() + workstation_info = get_workstation_info() + + return cls( + openpype_version, + settings_type, + project_name, + now.strftime(cls.timestamp_format), + workstation_info["hostname"], + workstation_info["hostip"], + workstation_info["username"], + workstation_info["system_name"], + workstation_info["local_id"] + ) + + @classmethod + def from_data(cls, data): + """Create object from data.""" + + return cls( + data["openpype_version"], + data["settings_type"], + data["project_name"], + data["timestamp"], + data["hostname"], + data["hostip"], + data["username"], + data["system_name"], + data["local_id"] + ) + + def to_data(self): + data = self.to_document_data() + data.update({ + "openpype_version": self.openpype_version, + "settings_type": self.settings_type, + "project_name": self.project_name + }) + return data + + @classmethod + def create_new_empty(cls, openpype_version, settings_type=None): + return cls( + openpype_version, + settings_type, + None, + None, + None, + None, + None, + None, + None + ) + + @classmethod + def from_document(cls, openpype_version, settings_type, document): + document = document or {} + project_name = document.get("project_name") + last_saved_info = document.get("last_saved_info") + if last_saved_info: + copy_last_saved_info = copy.deepcopy(last_saved_info) + copy_last_saved_info.update({ + "openpype_version": openpype_version, + "settings_type": settings_type, + "project_name": project_name, + }) + return cls.from_data(copy_last_saved_info) + return cls( + openpype_version, + settings_type, + project_name, + None, + None, + None, + None, + None, + None + ) + + def to_document_data(self): + return { + "timestamp": self.timestamp, + "hostname": self.hostname, + "hostip": 
self.hostip, + "username": self.username, + "system_name": self.system_name, + "local_id": self.local_id, + } + + def __eq__(self, other): + if not isinstance(other, SettingsStateInfo): + return False + + if other.timestamp_obj != self.timestamp_obj: + return False + + return ( + self.openpype_version == other.openpype_version + and self.hostname == other.hostname + and self.hostip == other.hostip + and self.username == other.username + and self.system_name == other.system_name + and self.local_id == other.local_id + ) + + @six.add_metaclass(ABCMeta) class SettingsHandler: @abstractmethod @@ -224,7 +384,7 @@ class SettingsHandler: """OpenPype versions that have any studio project anatomy overrides. Returns: - list: OpenPype versions strings. + List[str]: OpenPype versions strings. """ pass @@ -235,7 +395,7 @@ class SettingsHandler: """OpenPype versions that have any studio project settings overrides. Returns: - list: OpenPype versions strings. + List[str]: OpenPype versions strings. """ pass @@ -249,8 +409,87 @@ class SettingsHandler: project_name(str): Name of project. Returns: - list: OpenPype versions strings. + List[str]: OpenPype versions strings. """ + + pass + + @abstractmethod + def get_system_last_saved_info(self): + """State of last system settings overrides at the moment when called. + + This method must provide most recent data so using cached data is not + the way. + + Returns: + SettingsStateInfo: Information about system settings overrides. + """ + + pass + + @abstractmethod + def get_project_last_saved_info(self, project_name): + """State of last project settings overrides at the moment when called. + + This method must provide most recent data so using cached data is not + the way. + + Args: + project_name (Union[None, str]): Project name for which state + should be returned. + + Returns: + SettingsStateInfo: Information about project settings overrides. + """ + + pass + + # UI related calls + @abstractmethod + def get_last_opened_info(self): + """Get information about last opened UI. + + Last opened UI is empty if there is noone who would have opened UI at + the moment when called. + + Returns: + Union[None, SettingsStateInfo]: Information about machine who had + opened Settings UI. + """ + + pass + + @abstractmethod + def opened_settings_ui(self): + """Callback called when settings UI is opened. + + Information about this machine must be available when + 'get_last_opened_info' is called from anywhere until + 'closed_settings_ui' is called again. + + Returns: + SettingsStateInfo: Object representing information about this + machine. Must be passed to 'closed_settings_ui' when finished. + """ + + pass + + @abstractmethod + def closed_settings_ui(self, info_obj): + """Callback called when settings UI is closed. + + From the moment this method is called the information about this + machine is removed and no more available when 'get_last_opened_info' + is called. + + Callback should validate if this machine is still stored as opened ui + before changing any value. + + Args: + info_obj (SettingsStateInfo): Object created when + 'opened_settings_ui' was called. 
+ """ + pass @@ -283,19 +522,22 @@ class CacheValues: self.data = None self.creation_time = None self.version = None + self.last_saved_info = None def data_copy(self): if not self.data: return {} return copy.deepcopy(self.data) - def update_data(self, data, version=None): + def update_data(self, data, version): self.data = data self.creation_time = datetime.datetime.now() - if version is not None: - self.version = version + self.version = version - def update_from_document(self, document, version=None): + def update_last_saved_info(self, last_saved_info): + self.last_saved_info = last_saved_info + + def update_from_document(self, document, version): data = {} if document: if "data" in document: @@ -304,9 +546,9 @@ class CacheValues: value = document["value"] if value: data = json.loads(value) + self.data = data - if version is not None: - self.version = version + self.version = version def to_json_string(self): return json.dumps(self.data or {}) @@ -318,6 +560,9 @@ class CacheValues: delta = (datetime.datetime.now() - self.creation_time).seconds return delta > self.cache_lifetime + def set_outdated(self): + self.create_time = None + class MongoSettingsHandler(SettingsHandler): """Settings handler that use mongo for storing and loading of settings.""" @@ -337,9 +582,6 @@ class MongoSettingsHandler(SettingsHandler): def __init__(self): # Get mongo connection - from openpype.lib import OpenPypeMongoConnection - from openpype.pipeline import AvalonMongoDB - settings_collection = OpenPypeMongoConnection.get_mongo_client() self._anatomy_keys = None @@ -362,7 +604,6 @@ class MongoSettingsHandler(SettingsHandler): self.collection_name = collection_name self.collection = settings_collection[database_name][collection_name] - self.avalon_db = AvalonMongoDB() self.system_settings_cache = CacheValues() self.project_settings_cache = collections.defaultdict(CacheValues) @@ -511,6 +752,14 @@ class MongoSettingsHandler(SettingsHandler): # Update cache self.system_settings_cache.update_data(data, self._current_version) + last_saved_info = SettingsStateInfo.create_new( + self._current_version, + SYSTEM_SETTINGS_KEY + ) + self.system_settings_cache.update_last_saved_info( + last_saved_info + ) + # Get copy of just updated cache system_settings_data = self.system_settings_cache.data_copy() @@ -519,20 +768,29 @@ class MongoSettingsHandler(SettingsHandler): system_settings_data ) - # Store system settings - self.collection.replace_one( + system_settings_doc = self.collection.find_one( { "type": self._system_settings_key, "version": self._current_version }, - { - "type": self._system_settings_key, - "data": system_settings_data, - "version": self._current_version - }, - upsert=True + {"_id": True} ) + # Store system settings + new_system_settings_doc = { + "type": self._system_settings_key, + "version": self._current_version, + "data": system_settings_data, + "last_saved_info": last_saved_info.to_document_data() + } + if not system_settings_doc: + self.collection.insert_one(new_system_settings_doc) + else: + self.collection.update_one( + {"_id": system_settings_doc["_id"]}, + {"$set": new_system_settings_doc} + ) + # Store global settings self.collection.replace_one( { @@ -564,8 +822,19 @@ class MongoSettingsHandler(SettingsHandler): data_cache = self.project_settings_cache[project_name] data_cache.update_data(overrides, self._current_version) + last_saved_info = SettingsStateInfo.create_new( + self._current_version, + PROJECT_SETTINGS_KEY, + project_name + ) + + data_cache.update_last_saved_info(last_saved_info) 
+ self._save_project_data( - project_name, self._project_settings_key, data_cache + project_name, + self._project_settings_key, + data_cache, + last_saved_info ) def save_project_anatomy(self, project_name, anatomy_data): @@ -583,8 +852,16 @@ class MongoSettingsHandler(SettingsHandler): self._save_project_anatomy_data(project_name, data_cache) else: + last_saved_info = SettingsStateInfo.create_new( + self._current_version, + PROJECT_ANATOMY_KEY, + project_name + ) self._save_project_data( - project_name, self._project_anatomy_key, data_cache + project_name, + self._project_anatomy_key, + data_cache, + last_saved_info ) @classmethod @@ -607,16 +884,14 @@ class MongoSettingsHandler(SettingsHandler): new_data = data_cache.data_copy() # Prepare avalon project document - collection = self.avalon_db.database[project_name] - project_doc = collection.find_one({ - "type": "project" - }) + project_doc = get_project(project_name) if not project_doc: raise ValueError(( "Project document of project \"{}\" does not exists." " Create project first." ).format(project_name)) + collection = get_project_connection(project_name) # Project's data update_dict_data = {} project_doc_data = project_doc.get("data") or {} @@ -667,28 +942,39 @@ class MongoSettingsHandler(SettingsHandler): {"$set": update_dict} ) - def _save_project_data(self, project_name, doc_type, data_cache): + def _save_project_data( + self, project_name, doc_type, data_cache, last_saved_info + ): is_default = bool(project_name is None) - replace_filter = { + query_filter = { "type": doc_type, "is_default": is_default, "version": self._current_version } - replace_data = { + + new_project_settings_doc = { "type": doc_type, "data": data_cache.data, "is_default": is_default, - "version": self._current_version + "version": self._current_version, + "last_saved_info": last_saved_info.to_data() } - if not is_default: - replace_filter["project_name"] = project_name - replace_data["project_name"] = project_name - self.collection.replace_one( - replace_filter, - replace_data, - upsert=True + if not is_default: + query_filter["project_name"] = project_name + new_project_settings_doc["project_name"] = project_name + + project_settings_doc = self.collection.find_one( + query_filter, + {"_id": True} ) + if project_settings_doc: + self.collection.update_one( + {"_id": project_settings_doc["_id"]}, + {"$set": new_project_settings_doc} + ) + else: + self.collection.insert_one(new_project_settings_doc) def _get_versions_order_doc(self, projection=None): # TODO cache @@ -1015,19 +1301,11 @@ class MongoSettingsHandler(SettingsHandler): globals_document = self.collection.find_one({ "type": GLOBAL_SETTINGS_KEY }) - document = ( - self._get_studio_system_settings_overrides_for_version() + document, version = self._get_system_settings_overrides_doc() + + last_saved_info = SettingsStateInfo.from_document( + version, SYSTEM_SETTINGS_KEY, document ) - if document is None: - document = self._find_closest_system_settings() - - version = None - if document: - if document["type"] == self._system_settings_key: - version = document["version"] - else: - version = LEGACY_SETTINGS_VERSION - merged_document = self._apply_global_settings( document, globals_document ) @@ -1035,6 +1313,9 @@ class MongoSettingsHandler(SettingsHandler): self.system_settings_cache.update_from_document( merged_document, version ) + self.system_settings_cache.update_last_saved_info( + last_saved_info + ) cache = self.system_settings_cache data = cache.data_copy() @@ -1042,24 +1323,43 @@ class 
MongoSettingsHandler(SettingsHandler): return data, cache.version return data + def _get_system_settings_overrides_doc(self): + document = ( + self._get_studio_system_settings_overrides_for_version() + ) + if document is None: + document = self._find_closest_system_settings() + + version = None + if document: + if document["type"] == self._system_settings_key: + version = document["version"] + else: + version = LEGACY_SETTINGS_VERSION + + return document, version + + def get_system_last_saved_info(self): + # Make sure settings are recaches + self.system_settings_cache.set_outdated() + self.get_studio_system_settings_overrides(False) + + return self.system_settings_cache.last_saved_info.copy() + def _get_project_settings_overrides(self, project_name, return_version): if self.project_settings_cache[project_name].is_outdated: - document = self._get_project_settings_overrides_for_version( + document, version = self._get_project_settings_overrides_doc( project_name ) - if document is None: - document = self._find_closest_project_settings(project_name) - - version = None - if document: - if document["type"] == self._project_settings_key: - version = document["version"] - else: - version = LEGACY_SETTINGS_VERSION - self.project_settings_cache[project_name].update_from_document( document, version ) + last_saved_info = SettingsStateInfo.from_document( + version, PROJECT_SETTINGS_KEY, document + ) + self.project_settings_cache[project_name].update_last_saved_info( + last_saved_info + ) cache = self.project_settings_cache[project_name] data = cache.data_copy() @@ -1067,6 +1367,29 @@ class MongoSettingsHandler(SettingsHandler): return data, cache.version return data + def _get_project_settings_overrides_doc(self, project_name): + document = self._get_project_settings_overrides_for_version( + project_name + ) + if document is None: + document = self._find_closest_project_settings(project_name) + + version = None + if document: + if document["type"] == self._project_settings_key: + version = document["version"] + else: + version = LEGACY_SETTINGS_VERSION + + return document, version + + def get_project_last_saved_info(self, project_name): + # Make sure settings are recaches + self.project_settings_cache[project_name].set_outdated() + self._get_project_settings_overrides(project_name, False) + + return self.project_settings_cache[project_name].last_saved_info.copy() + def get_studio_project_settings_overrides(self, return_version): """Studio overrides of default project settings.""" return self._get_project_settings_overrides(None, return_version) @@ -1144,9 +1467,9 @@ class MongoSettingsHandler(SettingsHandler): self.project_anatomy_cache[project_name].update_from_document( document, version ) + else: - collection = self.avalon_db.database[project_name] - project_doc = collection.find_one({"type": "project"}) + project_doc = get_project(project_name) self.project_anatomy_cache[project_name].update_data( self.project_doc_to_anatomy_data(project_doc), self._current_version @@ -1364,6 +1687,64 @@ class MongoSettingsHandler(SettingsHandler): return output return self._sort_versions(output) + def get_last_opened_info(self): + doc = self.collection.find_one({ + "type": "last_opened_settings_ui", + "version": self._current_version + }) or {} + info_data = doc.get("info") + if not info_data: + return None + + # Fill not available information + info_data["openpype_version"] = self._current_version + info_data["settings_type"] = None + info_data["project_name"] = None + return 
SettingsStateInfo.from_data(info_data) + + def opened_settings_ui(self): + doc_filter = { + "type": "last_opened_settings_ui", + "version": self._current_version + } + + opened_info = SettingsStateInfo.create_new(self._current_version) + new_doc_data = copy.deepcopy(doc_filter) + new_doc_data["info"] = opened_info.to_document_data() + + doc = self.collection.find_one( + doc_filter, + {"_id": True} + ) + if doc: + self.collection.update_one( + {"_id": doc["_id"]}, + {"$set": new_doc_data} + ) + else: + self.collection.insert_one(new_doc_data) + return opened_info + + def closed_settings_ui(self, info_obj): + doc_filter = { + "type": "last_opened_settings_ui", + "version": self._current_version + } + doc = self.collection.find_one(doc_filter) or {} + info_data = doc.get("info") + if not info_data: + return + + info_data["openpype_version"] = self._current_version + info_data["settings_type"] = None + info_data["project_name"] = None + current_info = SettingsStateInfo.from_data(info_data) + if current_info == info_obj: + self.collection.update_one( + {"_id": doc["_id"]}, + {"$set": {"info": None}} + ) + class MongoLocalSettingsHandler(LocalSettingsHandler): """Settings handler that use mongo for store and load local settings. @@ -1410,7 +1791,7 @@ class MongoLocalSettingsHandler(LocalSettingsHandler): """ data = data or {} - self.local_settings_cache.update_data(data) + self.local_settings_cache.update_data(data, None) self.collection.replace_one( { @@ -1433,6 +1814,6 @@ class MongoLocalSettingsHandler(LocalSettingsHandler): "site_id": self.local_site_id }) - self.local_settings_cache.update_from_document(document) + self.local_settings_cache.update_from_document(document, None) return self.local_settings_cache.data_copy() diff --git a/openpype/settings/lib.py b/openpype/settings/lib.py index 6df41112c8..288c587d03 100644 --- a/openpype/settings/lib.py +++ b/openpype/settings/lib.py @@ -91,6 +91,31 @@ def calculate_changes(old_value, new_value): return changes +@require_handler +def get_system_last_saved_info(): + return _SETTINGS_HANDLER.get_system_last_saved_info() + + +@require_handler +def get_project_last_saved_info(project_name): + return _SETTINGS_HANDLER.get_project_last_saved_info(project_name) + + +@require_handler +def get_last_opened_info(): + return _SETTINGS_HANDLER.get_last_opened_info() + + +@require_handler +def opened_settings_ui(): + return _SETTINGS_HANDLER.opened_settings_ui() + + +@require_handler +def closed_settings_ui(info_obj): + return _SETTINGS_HANDLER.closed_settings_ui(info_obj) + + @require_handler def save_studio_settings(data): """Save studio overrides of system settings. @@ -113,8 +138,7 @@ def save_studio_settings(data): SaveWarningExc: If any module raises the exception. """ # Notify Pype modules - from openpype.modules import ModulesManager - from openpype_interfaces import ISettingsChangeListener + from openpype.modules import ModulesManager, ISettingsChangeListener old_data = get_system_settings() default_values = get_default_settings()[SYSTEM_SETTINGS_KEY] @@ -161,8 +185,7 @@ def save_project_settings(project_name, overrides): SaveWarningExc: If any module raises the exception. 
""" # Notify Pype modules - from openpype.modules import ModulesManager - from openpype_interfaces import ISettingsChangeListener + from openpype.modules import ModulesManager, ISettingsChangeListener default_values = get_default_settings()[PROJECT_SETTINGS_KEY] if project_name: @@ -223,8 +246,7 @@ def save_project_anatomy(project_name, anatomy_data): SaveWarningExc: If any module raises the exception. """ # Notify Pype modules - from openpype.modules import ModulesManager - from openpype_interfaces import ISettingsChangeListener + from openpype.modules import ModulesManager, ISettingsChangeListener default_values = get_default_settings()[PROJECT_ANATOMY_KEY] if project_name: diff --git a/openpype/style/__init__.py b/openpype/style/__init__.py index b2a1a4ce6c..473fb42bb5 100644 --- a/openpype/style/__init__.py +++ b/openpype/style/__init__.py @@ -1,4 +1,5 @@ import os +import copy import json import collections import six @@ -19,6 +20,9 @@ class _Cache: disabled_entity_icon_color = None deprecated_entity_font_color = None + colors_data = None + objected_colors = None + def get_style_image_path(image_name): # All filenames are lowered @@ -46,8 +50,11 @@ def _get_colors_raw_data(): def get_colors_data(): """Only color data from stylesheet data.""" - data = _get_colors_raw_data() - return data.get("color") or {} + if _Cache.colors_data is None: + data = _get_colors_raw_data() + color_data = data.get("color") or {} + _Cache.colors_data = color_data + return copy.deepcopy(_Cache.colors_data) def _convert_color_values_to_objects(value): @@ -75,17 +82,38 @@ def _convert_color_values_to_objects(value): return parse_color(value) -def get_objected_colors(): +def get_objected_colors(*keys): """Colors parsed from stylesheet data into color definitions. + You can pass multiple arguments to get a key from the data dict's colors. + Because this functions returns a deep copy of the cached data this allows + a much smaller dataset to be copied and thus result in a faster function. + It is however a micro-optimization in the area of 0.001s and smaller. + + For example: + >>> get_colors_data() # copy of full colors dict + >>> get_colors_data("font") + >>> get_colors_data("loader", "asset-view") + + Args: + *keys: Each key argument will return a key nested deeper in the + objected colors data. + Returns: - dict: Parsed color objects by keys in data. + Any: Parsed color objects by keys in data. """ - colors_data = get_colors_data() - output = {} - for key, value in colors_data.items(): - output[key] = _convert_color_values_to_objects(value) - return output + if _Cache.objected_colors is None: + colors_data = get_colors_data() + output = {} + for key, value in colors_data.items(): + output[key] = _convert_color_values_to_objects(value) + + _Cache.objected_colors = output + + output = _Cache.objected_colors + for key in keys: + output = output[key] + return copy.deepcopy(output) def _load_stylesheet(): diff --git a/openpype/style/color_defs.py b/openpype/style/color_defs.py index 0f4e145ca0..f1eab38c24 100644 --- a/openpype/style/color_defs.py +++ b/openpype/style/color_defs.py @@ -296,7 +296,7 @@ class HSLColor: if "%" in sat_str: sat = float(sat_str.rstrip("%")) / 100 else: - sat = float(sat) + sat = float(sat_str) if "%" in light_str: light = float(light_str.rstrip("%")) / 100 @@ -337,8 +337,8 @@ class HSLAColor: as float (0-1 range). 
Examples: - "hsl(27, 0.7, 0.3)" - "hsl(27, 70%, 30%)" + "hsla(27, 0.7, 0.3, 0.5)" + "hsla(27, 70%, 30%, 0.5)" """ def __init__(self, value): modified_color = value.lower().strip() @@ -350,7 +350,7 @@ class HSLAColor: if "%" in sat_str: sat = float(sat_str.rstrip("%")) / 100 else: - sat = float(sat) + sat = float(sat_str) if "%" in light_str: light = float(light_str.rstrip("%")) / 100 diff --git a/openpype/style/data.json b/openpype/style/data.json index 15d9472e3e..404ca6944c 100644 --- a/openpype/style/data.json +++ b/openpype/style/data.json @@ -20,14 +20,14 @@ "color": { "font": "#D3D8DE", "font-hover": "#F0F2F5", - "font-disabled": "#99A3B2", + "font-disabled": "#5b6779", "font-view-selection": "#ffffff", "font-view-hover": "#F0F2F5", "bg": "#2C313A", "bg-inputs": "#21252B", "bg-buttons": "#434a56", - "bg-button-hover": "rgba(168, 175, 189, 0.3)", + "bg-button-hover": "rgb(81, 86, 97)", "bg-inputs-disabled": "#2C313A", "bg-buttons-disabled": "#434a56", @@ -64,7 +64,9 @@ "overlay-messages": { "close-btn": "#D3D8DE", "bg-success": "#458056", - "bg-success-hover": "#55a066" + "bg-success-hover": "#55a066", + "bg-error": "#AD2E2E", + "bg-error-hover": "#C93636" }, "tab-widget": { "bg": "#21252B", @@ -89,8 +91,10 @@ }, "publisher": { "error": "#AA5050", + "crash": "#FF6432", "success": "#458056", "warning": "#ffc671", + "tab-bg": "#16191d", "list-view-group": { "bg": "#434a56", "bg-hover": "rgba(168, 175, 189, 0.3)", diff --git a/openpype/style/style.css b/openpype/style/style.css index d76d833be1..a7a48cdb9d 100644 --- a/openpype/style/style.css +++ b/openpype/style/style.css @@ -688,22 +688,23 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { } /* Messages overlay */ -#OverlayMessageWidget { +OverlayMessageWidget { border-radius: 0.2em; - background: {color:bg-buttons}; -} - -#OverlayMessageWidget:hover { - background: {color:bg-button-hover}; -} -#OverlayMessageWidget { background: {color:overlay-messages:bg-success}; } -#OverlayMessageWidget:hover { + +OverlayMessageWidget:hover { background: {color:overlay-messages:bg-success-hover}; } -#OverlayMessageWidget QWidget { +OverlayMessageWidget[type="error"] { + background: {color:overlay-messages:bg-error}; +} +OverlayMessageWidget[type="error"]:hover { + background: {color:overlay-messages:bg-error-hover}; +} + +OverlayMessageWidget QWidget { background: transparent; } @@ -856,6 +857,53 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { } /* New Create/Publish UI */ +PublisherTabsWidget { + background: {color:publisher:tab-bg}; +} + +PublisherTabBtn { + border-radius: 0px; + background: {color:bg-inputs}; + font-size: 9pt; + font-weight: regular; + padding: 0.5em 1em 0.5em 1em; +} + +PublisherTabBtn:disabled { + background: {color:bg-inputs}; +} + +PublisherTabBtn:hover { + background: {color:bg-buttons}; +} + +PublisherTabBtn[active="1"] { + background: {color:bg}; +} +PublisherTabBtn[active="1"]:hover { + background: {color:bg}; +} + +PixmapButton{ + border: 0px solid transparent; + border-radius: 0.2em; + background: {color:bg-buttons}; +} +PixmapButton:hover { + background: {color:bg-button-hover}; +} +PixmapButton:disabled { + background: {color:bg-buttons-disabled}; +} + +#ThumbnailPixmapHoverButton { + font-size: 11pt; + background: {color:bg-view}; +} +#ThumbnailPixmapHoverButton:hover { + background: {color:bg-button-hover}; +} + #CreatorDetailedDescription { padding-left: 5px; padding-right: 5px; @@ -865,18 +913,16 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { } 
#CreateDialogHelpButton { - background: rgba(255, 255, 255, 31); + background: {color:bg-buttons}; border-top-left-radius: 0.2em; border-bottom-left-radius: 0.2em; border-top-right-radius: 0; border-bottom-right-radius: 0; - font-size: 10pt; font-weight: bold; - padding: 0px; } #CreateDialogHelpButton:hover { - background: rgba(255, 255, 255, 63); + background: {color:bg-button-hover}; } #CreateDialogHelpButton QWidget { background: transparent; @@ -885,11 +931,11 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { #PublishLogConsole { font-family: "Noto Sans Mono"; } -VariantInputsWidget QLineEdit { +#VariantInputsWidget QLineEdit { border-bottom-right-radius: 0px; border-top-right-radius: 0px; } -VariantInputsWidget QToolButton { +#VariantInputsWidget QToolButton { border-bottom-left-radius: 0px; border-top-left-radius: 0px; padding-top: 0.5em; @@ -944,38 +990,26 @@ VariantInputsWidget QToolButton { color: {color:publisher:error}; } -#PublishFrame { - background: rgba(0, 0, 0, 127); -} -#PublishFrame[state="1"] { - background: rgb(22, 25, 29); -} -#PublishFrame[state="2"] { - background: {color:bg}; -} - #PublishInfoFrame { background: {color:bg}; - border: 2px solid black; border-radius: 0.3em; } - -#PublishInfoFrame[state="-1"] { - background: rgb(194, 226, 236); -} - #PublishInfoFrame[state="0"] { - background: {color:publisher:error}; + background: {color:publisher:success}; } #PublishInfoFrame[state="1"] { - background: {color:publisher:success}; + background: {color:publisher:crash}; } #PublishInfoFrame[state="2"] { background: {color:publisher:warning}; } +#PublishInfoFrame[state="3"], #PublishInfoFrame[state="4"] { + background: rgb(194, 226, 236); +} + #PublishInfoFrame QLabel { color: black; font-style: bold; @@ -989,6 +1023,11 @@ VariantInputsWidget QToolButton { font-size: 13pt; } +ValidationArtistMessage QLabel { + font-size: 20pt; + font-weight: bold; +} + #ValidationActionButton { border-radius: 0.2em; padding: 4px 6px 4px 6px; @@ -1005,17 +1044,16 @@ VariantInputsWidget QToolButton { } #ValidationErrorTitleFrame { - background: {color:bg-inputs}; - border-left: 4px solid transparent; + border-radius: 0.2em; + background: {color:bg-buttons}; } #ValidationErrorTitleFrame:hover { - border-left-color: {color:border}; + background: {color:bg-buttons-hover}; } #ValidationErrorTitleFrame[selected="1"] { - background: {color:bg}; - border-left-color: {palette:blue-light}; + background: {color:bg-view-selection}; } #ValidationErrorInstanceList { @@ -1068,7 +1106,7 @@ VariantInputsWidget QToolButton { border-color: {color:publisher:error}; } -#PublishProgressBar[state="0"]::chunk { +#PublishProgressBar[state="1"]::chunk, #PublishProgressBar[state="4"]::chunk { background: {color:bg-buttons}; } @@ -1088,6 +1126,10 @@ VariantInputsWidget QToolButton { background: transparent; } +CreateNextPageOverlay { + font-size: 32pt; +} + /* Settings - NOT USED YET - we need to define font family for settings UI */ @@ -1418,3 +1460,6 @@ InViewButton, InViewButton:disabled { InViewButton:hover { background: rgba(255, 255, 255, 37); } +SupportLabel { + color: {color:font-disabled}; +} diff --git a/openpype/tests/test_lib_restructuralization.py b/openpype/tests/test_lib_restructuralization.py index 94080e550d..c8952e5a1c 100644 --- a/openpype/tests/test_lib_restructuralization.py +++ b/openpype/tests/test_lib_restructuralization.py @@ -21,9 +21,7 @@ def test_backward_compatibility(printer): from openpype.lib import is_latest from openpype.lib import any_outdated from openpype.lib 
import get_asset - from openpype.lib import get_hierarchy from openpype.lib import get_linked_assets - from openpype.lib import get_latest_version from openpype.lib import get_ffprobe_streams from openpype.hosts.fusion.lib import switch_item diff --git a/openpype/tools/assetlinks/widgets.py b/openpype/tools/assetlinks/widgets.py index 22e8848a60..1b168e542c 100644 --- a/openpype/tools/assetlinks/widgets.py +++ b/openpype/tools/assetlinks/widgets.py @@ -1,10 +1,16 @@ +import collections +from openpype.client import ( + get_versions, + get_subsets, + get_assets, + get_output_link_versions, +) from Qt import QtWidgets class SimpleLinkView(QtWidgets.QWidget): - - def __init__(self, dbcon, parent=None): + def __init__(self, dbcon, parent): super(SimpleLinkView, self).__init__(parent=parent) self.dbcon = dbcon @@ -24,6 +30,11 @@ class SimpleLinkView(QtWidgets.QWidget): self._in_view = in_view self._out_view = out_view + self._version_doc_to_process = None + + @property + def project_name(self): + return self.dbcon.current_project() def clear(self): self._in_view.clear() @@ -31,60 +42,114 @@ class SimpleLinkView(QtWidgets.QWidget): def set_version(self, version_doc): self.clear() - if not version_doc or not self.isVisible(): - return + self._version_doc_to_process = version_doc + if version_doc and self.isVisible(): + self._fill_values() - # inputs - # + def showEvent(self, event): + super(SimpleLinkView, self).showEvent(event) + self._fill_values() + + def _fill_values(self): + if self._version_doc_to_process is None: + return + version_doc = self._version_doc_to_process + self._version_doc_to_process = None + self._fill_inputs(version_doc) + self._fill_outputs(version_doc) + + def _fill_inputs(self, version_doc): + version_ids = set() for link in version_doc["data"].get("inputLinks", []): # Backwards compatibility for "input" key used as "id" if "id" not in link: link_id = link["input"] else: link_id = link["id"] - version = self.dbcon.find_one( - {"_id": link_id, "type": "version"}, - projection={"name": 1, "parent": 1} - ) - if not version: - continue - subset = self.dbcon.find_one( - {"_id": version["parent"], "type": "subset"}, - projection={"name": 1, "parent": 1} - ) - if not subset: - continue - asset = self.dbcon.find_one( - {"_id": subset["parent"], "type": "asset"}, - projection={"name": 1} - ) + version_ids.add(link_id) - self._in_view.addItem("{asset} {subset} v{version:0>3}".format( - asset=asset["name"], - subset=subset["name"], - version=version["name"], + version_docs = list(get_versions( + self.project_name, + version_ids=version_ids, + fields=["name", "parent"] + )) + + versions_by_subset_id = collections.defaultdict(list) + for version_doc in version_docs: + subset_id = version_doc["parent"] + versions_by_subset_id[subset_id].append(version_doc) + + subset_docs = [] + if versions_by_subset_id: + subset_docs = list(get_subsets( + self.project_name, + subset_ids=versions_by_subset_id.keys(), + fields=["_id", "name", "parent"] )) - # outputs - # - outputs = self.dbcon.find( - {"type": "version", "data.inputLinks.input": version_doc["_id"]}, - projection={"name": 1, "parent": 1} - ) - for version in outputs or []: - subset = self.dbcon.find_one( - {"_id": version["parent"], "type": "subset"}, - projection={"name": 1, "parent": 1} - ) - if not subset: - continue - asset = self.dbcon.find_one( - {"_id": subset["parent"], "type": "asset"}, - projection={"name": 1} - ) + asset_docs = [] + subsets_by_asset_id = collections.defaultdict(list) + if subset_docs: + for subset_doc in 
subset_docs: + asset_id = subset_doc["parent"] + subsets_by_asset_id[asset_id].append(subset_doc) - self._out_view.addItem("{asset} {subset} v{version:0>3}".format( - asset=asset["name"], - subset=subset["name"], - version=version["name"], + asset_docs = list(get_assets( + self.project_name, + asset_ids=subsets_by_asset_id.keys(), + fields=["_id", "name"] )) + + for asset_doc in asset_docs: + asset_id = asset_doc["_id"] + for subset_doc in subsets_by_asset_id[asset_id]: + subset_id = subset_doc["_id"] + for version_doc in versions_by_subset_id[subset_id]: + self._in_view.addItem("{} {} v{:0>3}".format( + asset_doc["name"], + subset_doc["name"], + version_doc["name"], + )) + + def _fill_outputs(self, version_doc): + version_docs = list(get_output_link_versions( + self.project_name, + version_doc["_id"], + fields=["name", "parent"] + )) + versions_by_subset_id = collections.defaultdict(list) + for version_doc in version_docs: + subset_id = version_doc["parent"] + versions_by_subset_id[subset_id].append(version_doc) + + subset_docs = [] + if versions_by_subset_id: + subset_docs = list(get_subsets( + self.project_name, + subset_ids=versions_by_subset_id.keys(), + fields=["_id", "name", "parent"] + )) + + asset_docs = [] + subsets_by_asset_id = collections.defaultdict(list) + if subset_docs: + for subset_doc in subset_docs: + asset_id = subset_doc["parent"] + subsets_by_asset_id[asset_id].append(subset_doc) + + asset_docs = list(get_assets( + self.project_name, + asset_ids=subsets_by_asset_id.keys(), + fields=["_id", "name"] + )) + + for asset_doc in asset_docs: + asset_id = asset_doc["_id"] + for subset_doc in subsets_by_asset_id[asset_id]: + subset_id = subset_doc["_id"] + for version_doc in versions_by_subset_id[subset_id]: + self._out_view.addItem("{} {} v{:0>3}".format( + asset_doc["name"], + subset_doc["name"], + version_doc["name"], + )) diff --git a/openpype/widgets/attribute_defs/__init__.py b/openpype/tools/attribute_defs/__init__.py similarity index 65% rename from openpype/widgets/attribute_defs/__init__.py rename to openpype/tools/attribute_defs/__init__.py index ce6b80109e..f991fdec3d 100644 --- a/openpype/widgets/attribute_defs/__init__.py +++ b/openpype/tools/attribute_defs/__init__.py @@ -3,8 +3,14 @@ from .widgets import ( AttributeDefinitionsWidget, ) +from .dialog import ( + AttributeDefinitionsDialog, +) + __all__ = ( "create_widget_for_attr_def", "AttributeDefinitionsWidget", + + "AttributeDefinitionsDialog", ) diff --git a/openpype/tools/attribute_defs/dialog.py b/openpype/tools/attribute_defs/dialog.py new file mode 100644 index 0000000000..69923d54e5 --- /dev/null +++ b/openpype/tools/attribute_defs/dialog.py @@ -0,0 +1,33 @@ +from Qt import QtWidgets + +from .widgets import AttributeDefinitionsWidget + + +class AttributeDefinitionsDialog(QtWidgets.QDialog): + def __init__(self, attr_defs, parent=None): + super(AttributeDefinitionsDialog, self).__init__(parent) + + attrs_widget = AttributeDefinitionsWidget(attr_defs, self) + + btns_widget = QtWidgets.QWidget(self) + ok_btn = QtWidgets.QPushButton("OK", btns_widget) + cancel_btn = QtWidgets.QPushButton("Cancel", btns_widget) + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.setContentsMargins(0, 0, 0, 0) + btns_layout.addStretch(1) + btns_layout.addWidget(ok_btn, 0) + btns_layout.addWidget(cancel_btn, 0) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.addWidget(attrs_widget, 0) + main_layout.addStretch(1) + main_layout.addWidget(btns_widget, 0) + + ok_btn.clicked.connect(self.accept) + 
cancel_btn.clicked.connect(self.reject) + + self._attrs_widget = attrs_widget + + def get_values(self): + return self._attrs_widget.current_value() diff --git a/openpype/widgets/attribute_defs/files_widget.py b/openpype/tools/attribute_defs/files_widget.py similarity index 62% rename from openpype/widgets/attribute_defs/files_widget.py rename to openpype/tools/attribute_defs/files_widget.py index 23cf8342b1..738e50ba07 100644 --- a/openpype/widgets/attribute_defs/files_widget.py +++ b/openpype/tools/attribute_defs/files_widget.py @@ -1,6 +1,7 @@ import os import collections import uuid +import json from Qt import QtWidgets, QtCore, QtGui @@ -26,26 +27,174 @@ IS_SEQUENCE_ROLE = QtCore.Qt.UserRole + 7 EXT_ROLE = QtCore.Qt.UserRole + 8 +def convert_bytes_to_json(bytes_value): + if isinstance(bytes_value, QtCore.QByteArray): + # Raw data are already QByteArray and we don't have to load them + encoded_data = bytes_value + else: + encoded_data = QtCore.QByteArray.fromRawData(bytes_value) + stream = QtCore.QDataStream(encoded_data, QtCore.QIODevice.ReadOnly) + text = stream.readQString() + try: + return json.loads(text) + except Exception: + return None + + +def convert_data_to_bytes(data): + bytes_value = QtCore.QByteArray() + stream = QtCore.QDataStream(bytes_value, QtCore.QIODevice.WriteOnly) + stream.writeQString(json.dumps(data)) + return bytes_value + + +class SupportLabel(QtWidgets.QLabel): + pass + + class DropEmpty(QtWidgets.QWidget): - _drop_enabled_text = "Drag & Drop\n(drop files here)" + _empty_extensions = "Any file" - def __init__(self, parent): + def __init__(self, single_item, allow_sequences, extensions_label, parent): super(DropEmpty, self).__init__(parent) - label_widget = QtWidgets.QLabel(self._drop_enabled_text, self) - label_widget.setAlignment(QtCore.Qt.AlignCenter) - label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + drop_label_widget = QtWidgets.QLabel("Drag & Drop files here", self) - layout = QtWidgets.QHBoxLayout(self) + items_label_widget = SupportLabel(self) + items_label_widget.setWordWrap(True) + + layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) - layout.addSpacing(10) + layout.addSpacing(20) layout.addWidget( - label_widget, - alignment=QtCore.Qt.AlignCenter + drop_label_widget, 0, alignment=QtCore.Qt.AlignCenter + ) + layout.addSpacing(30) + layout.addStretch(1) + layout.addWidget( + items_label_widget, 0, alignment=QtCore.Qt.AlignCenter ) layout.addSpacing(10) - self._label_widget = label_widget + for widget in ( + drop_label_widget, + items_label_widget, + ): + widget.setAlignment(QtCore.Qt.AlignCenter) + widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + update_size_timer = QtCore.QTimer() + update_size_timer.setInterval(10) + update_size_timer.setSingleShot(True) + + update_size_timer.timeout.connect(self._on_update_size_timer) + + self._update_size_timer = update_size_timer + + if extensions_label and not extensions_label.startswith(" "): + extensions_label = " " + extensions_label + + self._single_item = single_item + self._extensions_label = extensions_label + self._allow_sequences = allow_sequences + self._allowed_extensions = set() + self._allow_folders = None + + self._drop_label_widget = drop_label_widget + self._items_label_widget = items_label_widget + + self.set_allow_folders(False) + + def set_extensions(self, extensions): + if extensions: + extensions = { + ext.replace(".", "") + for ext in extensions + } + if extensions == self._allowed_extensions: + return + self._allowed_extensions = extensions 
+ + self._update_items_label() + + def set_allow_folders(self, allowed): + if self._allow_folders == allowed: + return + + self._allow_folders = allowed + self._update_items_label() + + def _update_items_label(self): + allowed_items = [] + if self._allow_folders: + allowed_items.append("folder") + + if self._allowed_extensions: + allowed_items.append("file") + if self._allow_sequences: + allowed_items.append("sequence") + + if not self._single_item: + allowed_items = [item + "s" for item in allowed_items] + + if not allowed_items: + self._drop_label_widget.setVisible(False) + self._items_label_widget.setText( + "It is not allowed to add anything here!" + ) + return + + self._drop_label_widget.setVisible(True) + items_label = "Multiple " + if self._single_item: + items_label = "Single " + + if len(allowed_items) == 1: + extensions_label = allowed_items[0] + elif len(allowed_items) == 2: + extensions_label = " or ".join(allowed_items) + else: + last_item = allowed_items.pop(-1) + new_last_item = " or ".join(last_item, allowed_items.pop(-1)) + allowed_items.append(new_last_item) + extensions_label = ", ".join(allowed_items) + + allowed_items_label = extensions_label + + items_label += allowed_items_label + label_tooltip = None + if self._allowed_extensions: + items_label += " of\n{}".format( + ", ".join(sorted(self._allowed_extensions)) + ) + + if self._extensions_label: + label_tooltip = items_label + items_label = self._extensions_label + + if self._items_label_widget.text() == items_label: + return + + self._items_label_widget.setToolTip(label_tooltip) + self._items_label_widget.setText(items_label) + self._update_size_timer.start() + + def resizeEvent(self, event): + super(DropEmpty, self).resizeEvent(event) + self._update_size_timer.start() + + def _on_update_size_timer(self): + """Recalculate height of label with extensions. + + Dynamic QLabel with word wrap does not handle properly it's sizeHint + calculations on show. This way it is recalculated. It is good practice + to trigger this method with small offset using '_update_size_timer'. + """ + + width = self._items_label_widget.width() + height = self._items_label_widget.heightForWidth(width) + self._items_label_widget.setMinimumHeight(height) + self._items_label_widget.updateGeometry() def paintEvent(self, event): super(DropEmpty, self).paintEvent(event) @@ -78,6 +227,7 @@ class FilesModel(QtGui.QStandardItemModel): def __init__(self, single_item, allow_sequences): super(FilesModel, self).__init__() + self._id = str(uuid.uuid4()) self._single_item = single_item self._multivalue = False self._allow_sequences = allow_sequences @@ -87,6 +237,41 @@ class FilesModel(QtGui.QStandardItemModel): self._filenames_by_dirpath = collections.defaultdict(set) self._items_by_dirpath = collections.defaultdict(list) + self.rowsAboutToBeRemoved.connect(self._on_about_to_be_removed) + self.rowsInserted.connect(self._on_insert) + + @property + def id(self): + return self._id + + def _on_about_to_be_removed(self, parent_index, start, end): + """Make sure that removed items are removed from items mapping. + + Connected with '_on_insert'. When user drag item and drop it to same + view the item is actually removed and creted again but it happens in + inner calls of Qt. 
+ """ + + for row in range(start, end + 1): + index = self.index(row, 0, parent_index) + item_id = index.data(ITEM_ID_ROLE) + if item_id is not None: + self._items_by_id.pop(item_id, None) + + def _on_insert(self, parent_index, start, end): + """Make sure new added items are stored in items mapping. + + Connected to '_on_about_to_be_removed'. Some items are not created + using '_create_item' but are recreated using Qt. So the item is not in + mapping and if it would it would not lead to same item pointer. + """ + + for row in range(start, end + 1): + index = self.index(start, end, parent_index) + item_id = index.data(ITEM_ID_ROLE) + if item_id not in self._items_by_id: + self._items_by_id[item_id] = self.item(row) + def set_multivalue(self, multivalue): """Disable filtering.""" @@ -98,6 +283,15 @@ class FilesModel(QtGui.QStandardItemModel): if not items: return + if self._multivalue: + _items = [] + for item in items: + if isinstance(item, (tuple, list, set)): + _items.extend(item) + else: + _items.append(item) + items = _items + file_items = FileDefItem.from_value(items, self._allow_sequences) if not file_items: return @@ -155,12 +349,78 @@ class FilesModel(QtGui.QStandardItemModel): item.setData(file_item.filenames, FILENAMES_ROLE) item.setData(file_item.directory, DIRPATH_ROLE) item.setData(icon_pixmap, ITEM_ICON_ROLE) - item.setData(file_item.ext, EXT_ROLE) + item.setData(file_item.lower_ext, EXT_ROLE) item.setData(file_item.is_dir, IS_DIR_ROLE) item.setData(file_item.is_sequence, IS_SEQUENCE_ROLE) return item_id, item + def mimeData(self, indexes): + item_ids = [ + index.data(ITEM_ID_ROLE) + for index in indexes + ] + + item_ids_data = convert_data_to_bytes(item_ids) + mime_data = super(FilesModel, self).mimeData(indexes) + mime_data.setData("files_widget/internal_move", item_ids_data) + + file_items = [] + for item_id in item_ids: + file_item = self.get_file_item_by_id(item_id) + if file_item: + file_items.append(file_item.to_dict()) + + full_item_data = convert_data_to_bytes({ + "items": file_items, + "id": self._id + }) + mime_data.setData("files_widget/full_data", full_item_data) + return mime_data + + def dropMimeData(self, mime_data, action, row, col, index): + item_ids = convert_bytes_to_json( + mime_data.data("files_widget/internal_move") + ) + if item_ids is None: + return False + + # Find matching item after which will be items moved + # - store item before moved items are removed + root = self.invisibleRootItem() + if row >= 0: + src_item = self.item(row) + else: + src_item_id = index.data(ITEM_ID_ROLE) + src_item = self._items_by_id.get(src_item_id) + + src_row = None + if src_item: + src_row = src_item.row() + + # Take out items that should be moved + items = [] + for item_id in item_ids: + item = self._items_by_id.get(item_id) + if item: + self.takeRow(item.row()) + items.append(item) + + # Skip if there are not items that can be moved + if not items: + return False + + # Calculate row where items should be inserted + row_count = root.rowCount() + if src_row is None: + src_row = row_count + + if src_row > row_count: + src_row = row_count + + root.insertRow(src_row, items) + return True + class FilesProxyModel(QtCore.QSortFilterProxyModel): def __init__(self, *args, **kwargs): @@ -188,7 +448,12 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel): def set_allowed_extensions(self, extensions=None): if extensions is not None: - extensions = set(extensions) + _extensions = set() + for ext in set(extensions): + if not ext.startswith("."): + ext = ".{}".format(ext) + 
_extensions.add(ext.lower()) + extensions = _extensions if self._allowed_extensions != extensions: self._allowed_extensions = extensions @@ -198,7 +463,7 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel): for filepath in filepaths: if os.path.isfile(filepath): _, ext = os.path.splitext(filepath) - if ext in self._allowed_extensions: + if ext.lower() in self._allowed_extensions: return True elif self._allow_folders: @@ -210,7 +475,7 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel): for filepath in filepaths: if os.path.isfile(filepath): _, ext = os.path.splitext(filepath) - if ext in self._allowed_extensions: + if ext.lower() in self._allowed_extensions: filtered_paths.append(filepath) elif self._allow_folders: @@ -339,6 +604,9 @@ class FilesView(QtWidgets.QListView): QtWidgets.QAbstractItemView.ExtendedSelection ) self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + self.setAcceptDrops(True) + self.setDragEnabled(True) + self.setDragDropMode(self.InternalMove) remove_btn = InViewButton(self) pix_enabled = paint_image_with_color( @@ -356,6 +624,7 @@ class FilesView(QtWidgets.QListView): self.customContextMenuRequested.connect(self._on_context_menu_request) self._remove_btn = remove_btn + self._multivalue = False def setSelectionModel(self, *args, **kwargs): """Catch selection model set to register signal callback. @@ -370,8 +639,16 @@ class FilesView(QtWidgets.QListView): def set_multivalue(self, multivalue): """Disable remove button on multivalue.""" + self._multivalue = multivalue self._remove_btn.setVisible(not multivalue) + def update_remove_btn_visibility(self): + model = self.model() + visible = False + if not self._multivalue and model: + visible = model.rowCount() > 0 + self._remove_btn.setVisible(visible) + def has_selected_item_ids(self): """Is any index selected.""" for index in self.selectionModel().selectedIndexes(): @@ -435,28 +712,32 @@ class FilesView(QtWidgets.QListView): def showEvent(self, event): super(FilesView, self).showEvent(event) self._update_remove_btn() + self.update_remove_btn_visibility() class FilesWidget(QtWidgets.QFrame): value_changed = QtCore.Signal() - def __init__(self, single_item, allow_sequences, parent): + def __init__(self, single_item, allow_sequences, extensions_label, parent): super(FilesWidget, self).__init__(parent) self.setAcceptDrops(True) - empty_widget = DropEmpty(self) + empty_widget = DropEmpty( + single_item, allow_sequences, extensions_label, self + ) files_model = FilesModel(single_item, allow_sequences) files_proxy_model = FilesProxyModel() files_proxy_model.setSourceModel(files_model) files_view = FilesView(self) files_view.setModel(files_proxy_model) - files_view.setVisible(False) - layout = QtWidgets.QHBoxLayout(self) + layout = QtWidgets.QStackedLayout(self) layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(empty_widget, 1) - layout.addWidget(files_view, 1) + layout.setStackingMode(layout.StackAll) + layout.addWidget(empty_widget) + layout.addWidget(files_view) + layout.setCurrentWidget(empty_widget) files_proxy_model.rowsInserted.connect(self._on_rows_inserted) files_proxy_model.rowsRemoved.connect(self._on_rows_removed) @@ -464,6 +745,7 @@ class FilesWidget(QtWidgets.QFrame): files_view.context_menu_requested.connect( self._on_context_menu_requested ) + self._in_set_value = False self._single_item = single_item self._multivalue = False @@ -475,13 +757,16 @@ class FilesWidget(QtWidgets.QFrame): self._widgets_by_id = {} + self._layout = layout + def _set_multivalue(self, multivalue): - if self._multivalue == 
multivalue: + if self._multivalue is multivalue: return self._multivalue = multivalue self._files_view.set_multivalue(multivalue) self._files_model.set_multivalue(multivalue) self._files_proxy_model.set_multivalue(multivalue) + self.setEnabled(not multivalue) def set_value(self, value, multivalue): self._in_set_value = True @@ -519,6 +804,8 @@ class FilesWidget(QtWidgets.QFrame): def set_filters(self, folders_allowed, exts_filter): self._files_proxy_model.set_allow_folders(folders_allowed) self._files_proxy_model.set_allowed_extensions(exts_filter) + self._empty_widget.set_extensions(exts_filter) + self._empty_widget.set_allow_folders(folders_allowed) def _on_rows_inserted(self, parent_index, start_row, end_row): for row in range(start_row, end_row + 1): @@ -546,11 +833,11 @@ class FilesWidget(QtWidgets.QFrame): ) self._widgets_by_id[item_id] = widget - self._files_proxy_model.sort(0) - if not self._in_set_value: self.value_changed.emit() + self._update_visibility() + def _on_rows_removed(self, parent_index, start_row, end_row): available_item_ids = set() for row in range(self._files_proxy_model.rowCount()): @@ -570,6 +857,7 @@ class FilesWidget(QtWidgets.QFrame): if not self._in_set_value: self.value_changed.emit() + self._update_visibility() def _on_split_request(self): if self._multivalue: @@ -613,29 +901,6 @@ class FilesWidget(QtWidgets.QFrame): menu.popup(pos) - def sizeHint(self): - # Get size hints of widget and visible widgets - result = super(FilesWidget, self).sizeHint() - if not self._files_view.isVisible(): - not_visible_hint = self._files_view.sizeHint() - else: - not_visible_hint = self._empty_widget.sizeHint() - - # Get margins of this widget - margins = self.layout().contentsMargins() - - # Change size hint based on result of maximum size hint of widgets - result.setWidth(max( - result.width(), - not_visible_hint.width() + margins.left() + margins.right() - )) - result.setHeight(max( - result.height(), - not_visible_hint.height() + margins.top() + margins.bottom() - )) - - return result - def dragEnterEvent(self, event): if self._multivalue: return @@ -652,12 +917,21 @@ class FilesWidget(QtWidgets.QFrame): event.setDropAction(QtCore.Qt.CopyAction) event.accept() + full_data_value = mime_data.data("files_widget/full_data") + if self._handle_full_data_drag(full_data_value): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + def dragLeaveEvent(self, event): event.accept() def dropEvent(self, event): + if self._multivalue: + return + mime_data = event.mimeData() - if not self._multivalue and mime_data.hasUrls(): + if mime_data.hasUrls(): + event.accept() filepaths = [] for url in mime_data.urls(): filepath = url.toLocalFile() @@ -668,17 +942,70 @@ class FilesWidget(QtWidgets.QFrame): filepaths = self._files_proxy_model.filter_valid_files(filepaths) if filepaths: self._add_filepaths(filepaths) - event.accept() + + if self._handle_full_data_drop( + mime_data.data("files_widget/full_data") + ): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + super(FilesWidget, self).dropEvent(event) + + def _handle_full_data_drag(self, value): + if value is None: + return False + + full_data = convert_bytes_to_json(value) + if full_data is None: + return False + + if full_data["id"] == self._files_model.id: + return False + return True + + def _handle_full_data_drop(self, value): + if value is None: + return False + + full_data = convert_bytes_to_json(value) + if full_data is None: + return False + + if full_data["id"] == self._files_model.id: + return False + + for 
item in full_data["items"]: + filepaths = [ + os.path.join(item["directory"], filename) + for filename in item["filenames"] + ] + filepaths = self._files_proxy_model.filter_valid_files(filepaths) + if filepaths: + self._add_filepaths(filepaths) + + if self._copy_modifiers_enabled(): + return False + return True + + def _copy_modifiers_enabled(self): + if ( + QtWidgets.QApplication.keyboardModifiers() + & QtCore.Qt.ControlModifier + ): + return True + return False def _add_filepaths(self, filepaths): self._files_model.add_filepaths(filepaths) - self._update_visibility() def _remove_item_by_ids(self, item_ids): self._files_model.remove_item_by_ids(item_ids) - self._update_visibility() def _update_visibility(self): files_exists = self._files_proxy_model.rowCount() > 0 - self._files_view.setVisible(files_exists) - self._empty_widget.setVisible(not files_exists) + if files_exists: + current_widget = self._files_view + else: + current_widget = self._empty_widget + self._layout.setCurrentWidget(current_widget) + self._files_view.update_remove_btn_visibility() diff --git a/openpype/widgets/attribute_defs/widgets.py b/openpype/tools/attribute_defs/widgets.py similarity index 92% rename from openpype/widgets/attribute_defs/widgets.py rename to openpype/tools/attribute_defs/widgets.py index b6493b80a8..dc697b08a6 100644 --- a/openpype/widgets/attribute_defs/widgets.py +++ b/openpype/tools/attribute_defs/widgets.py @@ -15,6 +15,7 @@ from openpype.lib.attribute_definitions import ( UISeparatorDef, UILabelDef ) +from openpype.tools.utils import CustomTextComboBox from openpype.widgets.nice_checkbox import NiceCheckbox from .files_widget import FilesWidget @@ -107,10 +108,12 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget): row = 0 for attr_def in attr_defs: - if attr_def.key in self._current_keys: - raise KeyError("Duplicated key \"{}\"".format(attr_def.key)) + if not isinstance(attr_def, UIDef): + if attr_def.key in self._current_keys: + raise KeyError( + "Duplicated key \"{}\"".format(attr_def.key)) - self._current_keys.add(attr_def.key) + self._current_keys.add(attr_def.key) widget = create_widget_for_attr_def(attr_def, self) expand_cols = 2 @@ -369,8 +372,12 @@ class BoolAttrWidget(_BaseAttrDefWidget): class EnumAttrWidget(_BaseAttrDefWidget): + def __init__(self, *args, **kwargs): + self._multivalue = False + super(EnumAttrWidget, self).__init__(*args, **kwargs) + def _ui_init(self): - input_widget = QtWidgets.QComboBox(self) + input_widget = CustomTextComboBox(self) combo_delegate = QtWidgets.QStyledItemDelegate(input_widget) input_widget.setItemDelegate(combo_delegate) @@ -394,6 +401,9 @@ class EnumAttrWidget(_BaseAttrDefWidget): def _on_value_change(self): new_value = self.current_value() + if self._multivalue: + self._multivalue = False + self._input_widget.set_custom_text(None) self.value_changed.emit(new_value, self.attr_def.id) def current_value(self): @@ -401,14 +411,23 @@ class EnumAttrWidget(_BaseAttrDefWidget): return self._input_widget.itemData(idx) def set_value(self, value, multivalue=False): + if multivalue: + set_value = set(value) + if len(set_value) == 1: + multivalue = False + value = tuple(set_value)[0] + if not multivalue: idx = self._input_widget.findData(value) cur_idx = self._input_widget.currentIndex() if idx != cur_idx and idx >= 0: self._input_widget.setCurrentIndex(idx) - else: - self._input_widget.lineEdit().setText("Multiselection") + custom_text = None + if multivalue: + custom_text = "< Multiselection >" + self._input_widget.set_custom_text(custom_text) + 
self._multivalue = multivalue class UnknownAttrWidget(_BaseAttrDefWidget): @@ -443,7 +462,10 @@ class UnknownAttrWidget(_BaseAttrDefWidget): class FileAttrWidget(_BaseAttrDefWidget): def _ui_init(self): input_widget = FilesWidget( - self.attr_def.single_item, self.attr_def.allow_sequences, self + self.attr_def.single_item, + self.attr_def.allow_sequences, + self.attr_def.extensions_label, + self ) if self.attr_def.tooltip: diff --git a/openpype/tools/creator/window.py b/openpype/tools/creator/window.py index e0c329fb78..e2396ed29e 100644 --- a/openpype/tools/creator/window.py +++ b/openpype/tools/creator/window.py @@ -4,8 +4,9 @@ import re from Qt import QtWidgets, QtCore +from openpype.client import get_asset_by_name, get_subsets from openpype import style -from openpype.api import get_current_project_settings +from openpype.settings import get_current_project_settings from openpype.tools.utils.lib import qt_app_context from openpype.pipeline import legacy_io from openpype.pipeline.create import ( @@ -215,12 +216,12 @@ class CreatorWindow(QtWidgets.QDialog): self._set_valid_state(False) return + project_name = legacy_io.active_project() asset_doc = None if creator_plugin: # Get the asset from the database which match with the name - asset_doc = legacy_io.find_one( - {"name": asset_name, "type": "asset"}, - projection={"_id": 1} + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["_id"] ) # Get plugin @@ -235,7 +236,6 @@ class CreatorWindow(QtWidgets.QDialog): self._set_valid_state(False) return - project_name = legacy_io.Session["AVALON_PROJECT"] asset_id = asset_doc["_id"] task_name = legacy_io.Session["AVALON_TASK"] @@ -269,14 +269,13 @@ class CreatorWindow(QtWidgets.QDialog): self._subset_name_input.setText(subset_name) # Get all subsets of the current asset - subset_docs = legacy_io.find( - { - "type": "subset", - "parent": asset_id - }, - {"name": 1} + subset_docs = get_subsets( + project_name, asset_ids=[asset_id], fields=["name"] ) - existing_subset_names = set(subset_docs.distinct("name")) + existing_subset_names = { + subset_doc["name"] + for subset_doc in subset_docs + } existing_subset_names_low = set( _name.lower() for _name in existing_subset_names diff --git a/openpype/tools/experimental_tools/tools_def.py b/openpype/tools/experimental_tools/tools_def.py index fa2971dc1d..d3a1caa60e 100644 --- a/openpype/tools/experimental_tools/tools_def.py +++ b/openpype/tools/experimental_tools/tools_def.py @@ -164,9 +164,9 @@ class ExperimentalTools: def _show_publisher(self): if self._publisher_tool is None: - from openpype.tools import publisher + from openpype.tools.publisher.window import PublisherWindow - self._publisher_tool = publisher.PublisherWindow( + self._publisher_tool = PublisherWindow( parent=self._parent_widget ) diff --git a/openpype/tools/launcher/actions.py b/openpype/tools/launcher/actions.py index 546bda1c34..34d06f72cc 100644 --- a/openpype/tools/launcher/actions.py +++ b/openpype/tools/launcher/actions.py @@ -4,8 +4,9 @@ from Qt import QtWidgets, QtGui from openpype import PLUGINS_DIR from openpype import style -from openpype.api import Logger, resources +from openpype import resources from openpype.lib import ( + Logger, ApplictionExecutableNotFound, ApplicationLaunchFailed ) diff --git a/openpype/tools/launcher/lib.py b/openpype/tools/launcher/lib.py index c1392b7b8f..68e57c6b92 100644 --- a/openpype/tools/launcher/lib.py +++ b/openpype/tools/launcher/lib.py @@ -1,7 +1,7 @@ import os from Qt import QtGui import qtawesome -from openpype.api import 
resources +from openpype import resources ICON_CACHE = {} NOT_FOUND = type("NotFound", (object, ), {}) diff --git a/openpype/tools/launcher/models.py b/openpype/tools/launcher/models.py index 13567e7916..6e3b531018 100644 --- a/openpype/tools/launcher/models.py +++ b/openpype/tools/launcher/models.py @@ -9,6 +9,11 @@ import appdirs from Qt import QtCore, QtGui import qtawesome +from openpype.client import ( + get_projects, + get_project, + get_assets, +) from openpype.lib import JSONSettingRegistry from openpype.lib.applications import ( CUSTOM_LAUNCH_APP_GROUPS, @@ -81,13 +86,11 @@ class ActionModel(QtGui.QStandardItemModel): def get_application_actions(self): actions = [] - if not self.dbcon.Session.get("AVALON_PROJECT"): + if not self.dbcon.current_project(): return actions - project_doc = self.dbcon.find_one( - {"type": "project"}, - {"config.apps": True} - ) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name, fields=["config.apps"]) if not project_doc: return actions @@ -278,18 +281,25 @@ class ActionModel(QtGui.QStandardItemModel): if not action_item: return - action = action_item.data(ACTION_ROLE) - actual_data = self._prepare_compare_data(action) + actions = action_item.data(ACTION_ROLE) + if not isinstance(actions, list): + actions = [actions] + + action_actions_data = [ + self._prepare_compare_data(action) + for action in actions + ] stored = self.launcher_registry.get_item("force_not_open_workfile") - if is_checked: - stored.append(actual_data) - else: - final_values = [] - for config in stored: - if config != actual_data: - final_values.append(config) - stored = final_values + for actual_data in action_actions_data: + if is_checked: + stored.append(actual_data) + else: + final_values = [] + for config in stored: + if config != actual_data: + final_values.append(config) + stored = final_values self.launcher_registry.set_item("force_not_open_workfile", stored) self.launcher_registry._get_item.cache_clear() @@ -326,21 +336,24 @@ class ActionModel(QtGui.QStandardItemModel): item (QStandardItem) stored (list) of dict """ - action = item.data(ACTION_ROLE) - if not self.is_application_action(action): + + actions = item.data(ACTION_ROLE) + if not isinstance(actions, list): + actions = [actions] + + if not self.is_application_action(actions[0]): return False - actual_data = self._prepare_compare_data(action) + action_actions_data = [ + self._prepare_compare_data(action) + for action in actions + ] for config in stored: - if config == actual_data: + if config in action_actions_data: return True - return False def _prepare_compare_data(self, action): - if isinstance(action, list) and action: - action = action[0] - compare_data = {} if action and action.label: compare_data = { @@ -448,7 +461,7 @@ class LauncherModel(QtCore.QObject): @property def project_name(self): """Current project name.""" - return self._dbcon.Session.get("AVALON_PROJECT") + return self._dbcon.current_project() @property def refreshing_assets(self): @@ -525,7 +538,7 @@ class LauncherModel(QtCore.QObject): current_project = self.project_name project_names = set() project_docs_by_name = {} - for project_doc in self._dbcon.projects(only_active=True): + for project_doc in get_projects(): project_name = project_doc["name"] project_names.add(project_name) project_docs_by_name[project_name] = project_doc @@ -649,9 +662,8 @@ class LauncherModel(QtCore.QObject): self._asset_refresh_thread = None def _refresh_assets(self): - asset_docs = list(self._dbcon.find( - {"type": "asset"}, - 
self._asset_projection + asset_docs = list(get_assets( + self._last_project_name, fields=self._asset_projection.keys() )) if not self._refreshing_assets: return diff --git a/openpype/tools/launcher/widgets.py b/openpype/tools/launcher/widgets.py index 62599664fe..774ceb659d 100644 --- a/openpype/tools/launcher/widgets.py +++ b/openpype/tools/launcher/widgets.py @@ -312,11 +312,12 @@ class ActionBar(QtWidgets.QWidget): is_group = index.data(GROUP_ROLE) is_variant_group = index.data(VARIANT_GROUP_ROLE) + force_not_open_workfile = index.data(FORCE_NOT_OPEN_WORKFILE_ROLE) if not is_group and not is_variant_group: action = index.data(ACTION_ROLE) # Change data of application action if issubclass(action, ApplicationAction): - if index.data(FORCE_NOT_OPEN_WORKFILE_ROLE): + if force_not_open_workfile: action.data["start_last_workfile"] = False else: action.data.pop("start_last_workfile", None) @@ -385,10 +386,18 @@ class ActionBar(QtWidgets.QWidget): menu.addMenu(sub_menu) result = menu.exec_(QtGui.QCursor.pos()) - if result: - action = actions_mapping[result] - self._start_animation(index) - self.action_clicked.emit(action) + if not result: + return + + action = actions_mapping[result] + if issubclass(action, ApplicationAction): + if force_not_open_workfile: + action.data["start_last_workfile"] = False + else: + action.data.pop("start_last_workfile", None) + + self._start_animation(index) + self.action_clicked.emit(action) class ActionHistory(QtWidgets.QPushButton): diff --git a/openpype/tools/launcher/window.py b/openpype/tools/launcher/window.py index dab6949613..a9eaa932bb 100644 --- a/openpype/tools/launcher/window.py +++ b/openpype/tools/launcher/window.py @@ -4,7 +4,7 @@ import logging from Qt import QtWidgets, QtCore, QtGui from openpype import style -from openpype.api import resources +from openpype import resources from openpype.pipeline import AvalonMongoDB import qtawesome diff --git a/openpype/tools/libraryloader/app.py b/openpype/tools/libraryloader/app.py index 7fda6bd6f9..d2af1b7151 100644 --- a/openpype/tools/libraryloader/app.py +++ b/openpype/tools/libraryloader/app.py @@ -3,6 +3,7 @@ import sys from Qt import QtWidgets, QtCore, QtGui from openpype import style +from openpype.client import get_projects, get_project from openpype.pipeline import AvalonMongoDB from openpype.tools.utils import lib as tools_lib from openpype.tools.loader.widgets import ( @@ -238,7 +239,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): def get_filtered_projects(self): projects = list() - for project in self.dbcon.projects(): + for project in get_projects(fields=["name", "data.library_project"]): is_library = project.get("data", {}).get("library_project", False) if ( (is_library and self.show_libraries) or @@ -303,14 +304,26 @@ class LibraryLoaderWindow(QtWidgets.QDialog): families = self._subsets_widget.get_subsets_families() self._families_filter_view.set_enabled_families(families) - def set_context(self, context, refresh=True): - self.echo("Setting context: {}".format(context)) - lib.schedule( - lambda: self._set_context(context, refresh=refresh), - 50, channel="mongo" - ) - # ------------------------------ + def set_context(self, context, refresh=True): + """Set the selection in the interface using a context. + The context must contain `asset` data by name. + + Args: + context (dict): The context to apply. 
+ Returns: + None + """ + + asset_name = context.get("asset", None) + if asset_name is None: + return + + if refresh: + self._refresh_assets() + + self._assets_widget.select_asset_by_name(asset_name) + def _on_family_filter_change(self, families): self._subsets_widget.set_family_filters(families) @@ -323,10 +336,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): """Load assets from database""" if self.current_project is not None: # Ensure a project is loaded - project_doc = self.dbcon.find_one( - {"type": "project"}, - {"type": 1} - ) + project_doc = get_project(self.current_project, fields=["_id"]) assert project_doc, "This is a bug" self._families_filter_view.set_enabled_families(set()) @@ -371,7 +381,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): # Clear the version information on asset change self._version_info_widget.set_version(None) - self._thumbnail_widget.set_thumbnail(asset_ids) + self._thumbnail_widget.set_thumbnail("asset", asset_ids) self.data["state"]["assetIds"] = asset_ids @@ -426,34 +436,17 @@ class LibraryLoaderWindow(QtWidgets.QDialog): version_doc["_id"] for version_doc in version_docs ] + src_type = "version" if not thumbnail_src_ids: + src_type = "asset" thumbnail_src_ids = self._assets_widget.get_selected_asset_ids() - self._thumbnail_widget.set_thumbnail(thumbnail_src_ids) + self._thumbnail_widget.set_thumbnail(src_type, thumbnail_src_ids) version_ids = [doc["_id"] for doc in version_docs or []] if self._repres_widget: self._repres_widget.set_version_ids(version_ids) - def _set_context(self, context, refresh=True): - """Set the selection in the interface using a context. - The context must contain `asset` data by name. - - Args: - context (dict): The context to apply. - Returns: - None - """ - - asset_name = context.get("asset", None) - if asset_name is None: - return - - if refresh: - self._refresh_assets() - - self._assets_widget.select_asset_by_name(asset_name) - def _on_message_timeout(self): self._message_label.setText("") diff --git a/openpype/tools/loader/app.py b/openpype/tools/loader/app.py index bb589c199d..1917f23c60 100644 --- a/openpype/tools/loader/app.py +++ b/openpype/tools/loader/app.py @@ -3,6 +3,7 @@ import traceback from Qt import QtWidgets, QtCore +from openpype.client import get_projects, get_project from openpype import style from openpype.lib import register_event_callback from openpype.pipeline import ( @@ -39,7 +40,7 @@ class LoaderWindow(QtWidgets.QDialog): def __init__(self, parent=None): super(LoaderWindow, self).__init__(parent) title = "Asset Loader 2.1" - project_name = legacy_io.Session.get("AVALON_PROJECT") + project_name = legacy_io.active_project() if project_name: title += " - {}".format(project_name) self.setWindowTitle(title) @@ -274,8 +275,9 @@ class LoaderWindow(QtWidgets.QDialog): """Load assets from database""" # Ensure a project is loaded - project = legacy_io.find_one({"type": "project"}, {"type": 1}) - assert project, "Project was not found! This is a bug" + project_name = legacy_io.active_project() + project_doc = get_project(project_name, fields=["_id"]) + assert project_doc, "Project was not found! 
This is a bug" self._assets_widget.refresh() self._assets_widget.setFocus() @@ -314,7 +316,7 @@ class LoaderWindow(QtWidgets.QDialog): ) # Clear the version information on asset change - self._thumbnail_widget.set_thumbnail(asset_ids) + self._thumbnail_widget.set_thumbnail("asset", asset_ids) self._version_info_widget.set_version(None) self.data["state"]["assetIds"] = asset_ids @@ -371,10 +373,12 @@ class LoaderWindow(QtWidgets.QDialog): version_doc["_id"] for version_doc in version_docs ] + source_type = "version" if not thumbnail_src_ids: + source_type = "asset" thumbnail_src_ids = self._assets_widget.get_selected_asset_ids() - self._thumbnail_widget.set_thumbnail(thumbnail_src_ids) + self._thumbnail_widget.set_thumbnail(source_type, thumbnail_src_ids) if self._repres_widget is not None: version_ids = [doc["_id"] for doc in version_docs] @@ -576,8 +580,7 @@ def show(debug=False, parent=None, use_context=False): legacy_io.install() any_project = next( - project for project in legacy_io.projects() - if project.get("active", True) is not False + project for project in get_projects(fields=["name"]) ) legacy_io.Session["AVALON_PROJECT"] = any_project["name"] diff --git a/openpype/tools/loader/delegates.py b/openpype/tools/loader/delegates.py new file mode 100644 index 0000000000..e6663d48f1 --- /dev/null +++ b/openpype/tools/loader/delegates.py @@ -0,0 +1,28 @@ +from Qt import QtWidgets, QtGui, QtCore + + +class LoadedInSceneDelegate(QtWidgets.QStyledItemDelegate): + """Delegate for Loaded in Scene state columns. + + Shows "yes" or "no" for True or False values + Colorizes green or dark grey based on True or False values + + """ + + def __init__(self, *args, **kwargs): + super(LoadedInSceneDelegate, self).__init__(*args, **kwargs) + self._colors = { + True: QtGui.QColor(80, 170, 80), + False: QtGui.QColor(90, 90, 90) + } + + def displayText(self, value, locale): + return "yes" if value else "no" + + def initStyleOption(self, option, index): + super(LoadedInSceneDelegate, self).initStyleOption(option, index) + + # Colorize based on value + value = index.data(QtCore.Qt.DisplayRole) + color = self._colors[bool(value)] + option.palette.setBrush(QtGui.QPalette.Text, color) diff --git a/openpype/tools/loader/lib.py b/openpype/tools/loader/lib.py index 28e94237ec..78a25d8d85 100644 --- a/openpype/tools/loader/lib.py +++ b/openpype/tools/loader/lib.py @@ -2,6 +2,8 @@ import inspect from Qt import QtGui import qtawesome +from openpype.lib.attribute_definitions import AbtractAttrDef +from openpype.tools.attribute_defs import AttributeDefinitionsDialog from openpype.tools.utils.widgets import ( OptionalAction, OptionDialog @@ -34,21 +36,30 @@ def get_options(action, loader, parent, repre_contexts): None when dialog was closed or cancelled, in all other cases {} if no options """ + # Pop option dialog options = {} loader_options = loader.get_options(repre_contexts) - if getattr(action, "optioned", False) and loader_options: + if not getattr(action, "optioned", False) or not loader_options: + return options + + if isinstance(loader_options[0], AbtractAttrDef): + qargparse_options = False + dialog = AttributeDefinitionsDialog(loader_options, parent) + else: + qargparse_options = True dialog = OptionDialog(parent) - dialog.setWindowTitle(action.label + " Options") dialog.create(loader_options) - if not dialog.exec_(): - return None + dialog.setWindowTitle(action.label + " Options") - # Get option - options = dialog.parse() + if not dialog.exec_(): + return None - return options + # Get option + if 
qargparse_options: + return dialog.parse() + return dialog.get_values() def add_representation_loaders_to_menu(loaders, menu, repre_contexts): diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py index 8cb8f30013..77a8669c46 100644 --- a/openpype/tools/loader/model.py +++ b/openpype/tools/loader/model.py @@ -1,12 +1,23 @@ import copy import re import math +import time from uuid import uuid4 from Qt import QtCore, QtGui import qtawesome +from openpype.client import ( + get_assets, + get_subsets, + get_last_versions, + get_versions, + get_hero_versions, + get_version_by_name, + get_representations +) from openpype.pipeline import ( + registered_host, HeroVersionType, schema, ) @@ -14,6 +25,7 @@ from openpype.pipeline import ( from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, Item from openpype.tools.utils import lib +from openpype.host import ILoadHost from openpype.modules import ModulesManager from openpype.tools.utils.constants import ( @@ -38,6 +50,14 @@ def is_filtering_recursible(): class BaseRepresentationModel(object): """Methods for SyncServer useful in multiple models""" + # Cheap & hackish way how to avoid refreshing of whole sync server module + # on each selection change + _last_project = None + _modules_manager = None + _last_project_cache = 0 + _last_manager_cache = 0 + _max_project_cache_time = 30 + _max_manager_cache_time = 60 def reset_sync_server(self, project_name=None): """Sets/Resets sync server vars after every change (refresh.)""" @@ -47,28 +67,53 @@ class BaseRepresentationModel(object): remote_site = remote_provider = None if not project_name: - project_name = self.dbcon.Session["AVALON_PROJECT"] + project_name = self.dbcon.active_project() else: self.dbcon.Session["AVALON_PROJECT"] = project_name - if project_name: - manager = ModulesManager() - sync_server = manager.modules_by_name["sync_server"] + if not project_name: + self.repre_icons = repre_icons + self.sync_server = sync_server + self.active_site = active_site + self.active_provider = active_provider + self.remote_site = remote_site + self.remote_provider = remote_provider + return - if project_name in sync_server.get_enabled_projects(): - active_site = sync_server.get_active_site(project_name) - active_provider = sync_server.get_provider_for_site( - project_name, active_site) - if active_site == 'studio': # for studio use explicit icon - active_provider = 'studio' + now_time = time.time() + project_cache_diff = now_time - self._last_project_cache + if project_cache_diff > self._max_project_cache_time: + self._last_project = None - remote_site = sync_server.get_remote_site(project_name) - remote_provider = sync_server.get_provider_for_site( - project_name, remote_site) - if remote_site == 'studio': # for studio use explicit icon - remote_provider = 'studio' + if project_name == self._last_project: + return - repre_icons = lib.get_repre_icons() + self._last_project = project_name + self._last_project_cache = now_time + + manager_cache_diff = now_time - self._last_manager_cache + if manager_cache_diff > self._max_manager_cache_time: + self._modules_manager = None + + if self._modules_manager is None: + self._modules_manager = ModulesManager() + self._last_manager_cache = now_time + + sync_server = self._modules_manager.modules_by_name["sync_server"] + if sync_server.is_project_enabled(project_name, single=True): + active_site = sync_server.get_active_site(project_name) + active_provider = sync_server.get_provider_for_site( + 
project_name, active_site) + if active_site == 'studio': # for studio use explicit icon + active_provider = 'studio' + + remote_site = sync_server.get_remote_site(project_name) + remote_provider = sync_server.get_provider_for_site( + project_name, remote_site) + if remote_site == 'studio': # for studio use explicit icon + remote_provider = 'studio' + + repre_icons = lib.get_repre_icons() self.repre_icons = repre_icons self.sync_server = sync_server @@ -93,6 +138,7 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): "duration", "handles", "step", + "loaded_in_scene", "repre_info" ] @@ -107,6 +153,7 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): "duration": "Duration", "handles": "Handles", "step": "Step", + "loaded_in_scene": "In scene", "repre_info": "Availability" } @@ -163,9 +210,6 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): if subset_doc_projection: self.subset_doc_projection = subset_doc_projection - self.asset_doc_projection = asset_doc_projection - self.subset_doc_projection = subset_doc_projection - self.repre_icons = {} self.sync_server = None self.active_site = self.active_provider = None @@ -191,8 +235,14 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): self._doc_fetching_stop = False self._doc_payload = {} - self.doc_fetched.connect(self.on_doc_fetched) + self._host = registered_host() + self._loaded_representation_ids = set() + # Refresh loaded scene containers only every 3 seconds at most + self._host_loaded_refresh_timeout = 3 + self._host_loaded_refresh_time = 0 + + self.doc_fetched.connect(self._on_doc_fetched) self.refresh() def get_item_by_id(self, item_id): @@ -210,7 +260,7 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): def set_grouping(self, state): self._grouping = state - self.on_doc_fetched() + self._on_doc_fetched() def get_subsets_families(self): return self._doc_payload.get("subset_families") or set() @@ -220,57 +270,63 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): # because it also updates the information in other columns if index.column() == self.columns_index["version"]: item = index.internalPointer() - parent = item["_id"] + subset_id = item["_id"] if isinstance(value, HeroVersionType): - versions = list(self.dbcon.find({ - "type": {"$in": ["version", "hero_version"]}, - "parent": parent - }, sort=[("name", -1)])) - - version = None - last_version = None - for __version in versions: - if __version["type"] == "hero_version": - version = __version - elif last_version is None: - last_version = __version - - if version is not None and last_version is not None: - break - - _version = None - for __version in versions: - if __version["_id"] == version["version_id"]: - _version = __version - break - - version["data"] = _version["data"] - version["name"] = _version["name"] - version["is_from_latest"] = ( - last_version["_id"] == _version["_id"] - ) + version_doc = self._get_hero_version(subset_id) else: - version = self.dbcon.find_one({ - "name": value, - "type": "version", - "parent": parent - }) + project_name = self.dbcon.active_project() + version_doc = get_version_by_name( + project_name, value, subset_id + ) # update availability on active site when version changes - if self.sync_server.enabled and version: - query = self._repre_per_version_pipeline([version["_id"]], - self.active_site, - self.remote_site) - docs = list(self.dbcon.aggregate(query)) - if docs: - repre = docs.pop() - version["data"].update(self._get_repre_dict(repre)) + if self.sync_server.enabled and version_doc: + repres_info = 
list( + self.sync_server.get_repre_info_for_versions( + project_name, + [version_doc["_id"]], + self.active_site, + self.remote_site + ) + ) + if repres_info: + version_doc["data"].update( + self._get_repre_dict(repres_info[0])) - self.set_version(index, version) + self.set_version(index, version_doc) return super(SubsetsModel, self).setData(index, value, role) + def _get_hero_version(self, subset_id): + project_name = self.dbcon.active_project() + version_docs = get_versions( + project_name, subset_ids=[subset_id], hero=True + ) + standard_versions = [] + hero_version_doc = None + for version_doc in version_docs: + if version_doc["type"] == "hero_version": + hero_version_doc = version_doc + continue + standard_versions.append(version_doc) + + src_version_id = hero_version_doc["version_id"] + src_version = None + is_from_latest = True + for version_doc in reversed(sorted( + standard_versions, key=lambda item: item["name"] + )): + if version_doc["_id"] == src_version_id: + src_version = version_doc + break + is_from_latest = False + + hero_version_doc["data"] = src_version["data"] + hero_version_doc["name"] = src_version["name"] + hero_version_doc["is_from_latest"] = is_from_latest + return hero_version_doc + def set_version(self, index, version): """Update the version data of the given index. @@ -357,26 +413,25 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): item["repre_info"] = repre_info def _fetch(self): - asset_docs = self.dbcon.find( - { - "type": "asset", - "_id": {"$in": self._asset_ids} - }, - self.asset_doc_projection + project_name = self.dbcon.active_project() + asset_docs = get_assets( + project_name, + asset_ids=self._asset_ids, + fields=self.asset_doc_projection.keys() ) + asset_docs_by_id = { asset_doc["_id"]: asset_doc for asset_doc in asset_docs } subset_docs_by_id = {} - subset_docs = self.dbcon.find( - { - "type": "subset", - "parent": {"$in": self._asset_ids} - }, - self.subset_doc_projection + subset_docs = get_subsets( + project_name, + asset_ids=self._asset_ids, + fields=self.subset_doc_projection.keys() ) + subset_families = set() for subset_doc in subset_docs: if self._doc_fetching_stop: @@ -389,37 +444,13 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): subset_docs_by_id[subset_doc["_id"]] = subset_doc subset_ids = list(subset_docs_by_id.keys()) - _pipeline = [ - # Find all versions of those subsets - {"$match": { - "type": "version", - "parent": {"$in": subset_ids} - }}, - # Sorting versions all together - {"$sort": {"name": 1}}, - # Group them by "parent", but only take the last - {"$group": { - "_id": "$parent", - "_version_id": {"$last": "$_id"}, - "name": {"$last": "$name"}, - "type": {"$last": "$type"}, - "data": {"$last": "$data"}, - "locations": {"$last": "$locations"}, - "schema": {"$last": "$schema"} - }} - ] - last_versions_by_subset_id = dict() - for doc in self.dbcon.aggregate(_pipeline): - if self._doc_fetching_stop: - return - doc["parent"] = doc["_id"] - doc["_id"] = doc.pop("_version_id") - last_versions_by_subset_id[doc["parent"]] = doc + last_versions_by_subset_id = get_last_versions( + project_name, + subset_ids, + fields=["_id", "parent", "name", "type", "data", "schema"] + ) - hero_versions = self.dbcon.find({ - "type": "hero_version", - "parent": {"$in": subset_ids} - }) + hero_versions = get_hero_versions(project_name, subset_ids=subset_ids) missing_versions = [] for hero_version in hero_versions: version_id = hero_version["version_id"] @@ -428,10 +459,9 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): 
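# Illustrative sketch (not one of the diff hunks above): the loader model now goes
# through the `openpype.client` query helpers instead of raw pymongo calls on `dbcon`.
# A minimal before/after for fetching a subset's versions, assuming `project_name`
# and `subset_id` are already known, mirroring the replaced calls:
#
#     # before
#     version_docs = dbcon.find({"type": "version", "parent": subset_id})
#     # after
#     version_docs = get_versions(project_name, subset_ids=[subset_id])
#
# The helpers take the project name explicitly; in the hunks above it is resolved
# with `self.dbcon.active_project()` rather than read from the session dict.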
missing_versions_by_id = {} if missing_versions: - missing_version_docs = self.dbcon.find({ - "type": "version", - "_id": {"$in": missing_versions} - }) + missing_version_docs = get_versions( + project_name, version_ids=missing_versions + ) missing_versions_by_id = { missing_version_doc["_id"]: missing_version_doc for missing_version_doc in missing_version_docs @@ -454,32 +484,58 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): last_versions_by_subset_id[subset_id] = hero_version + # Check loaded subsets + loaded_subset_ids = set() + ids = self._loaded_representation_ids + if ids: + if self._doc_fetching_stop: + return + + # Get subset ids from loaded representations in workfile + # todo: optimize with aggregation query to distinct subset id + representations = get_representations(project_name, + representation_ids=ids, + fields=["parent"]) + version_ids = set(repre["parent"] for repre in representations) + versions = get_versions(project_name, + version_ids=version_ids, + fields=["parent"]) + loaded_subset_ids = set(version["parent"] for version in versions) + + if self._doc_fetching_stop: + return + + repre_info_by_version_id = {} + if self.sync_server.enabled: + versions_by_id = {} + for _subset_id, doc in last_versions_by_subset_id.items(): + versions_by_id[doc["_id"]] = doc + + repres_info = self.sync_server.get_repre_info_for_versions( + project_name, + list(versions_by_id.keys()), + self.active_site, + self.remote_site + ) + for repre_info in repres_info: + if self._doc_fetching_stop: + return + + version_id = repre_info["_id"] + doc = versions_by_id[version_id] + doc["active_provider"] = self.active_provider + doc["remote_provider"] = self.remote_provider + repre_info_by_version_id[version_id] = repre_info + self._doc_payload = { "asset_docs_by_id": asset_docs_by_id, "subset_docs_by_id": subset_docs_by_id, "subset_families": subset_families, - "last_versions_by_subset_id": last_versions_by_subset_id + "last_versions_by_subset_id": last_versions_by_subset_id, + "repre_info_by_version_id": repre_info_by_version_id, + "subsets_loaded_by_id": loaded_subset_ids } - if self.sync_server.enabled: - version_ids = set() - for _subset_id, doc in last_versions_by_subset_id.items(): - version_ids.add(doc["_id"]) - - query = self._repre_per_version_pipeline(list(version_ids), - self.active_site, - self.remote_site) - - repre_info = {} - for doc in self.dbcon.aggregate(query): - if self._doc_fetching_stop: - return - doc["active_provider"] = self.active_provider - doc["remote_provider"] = self.remote_provider - repre_info[doc["_id"]] = doc - - self._doc_payload["repre_info_by_version_id"] = repre_info - self.doc_fetched.emit() def fetch_subset_and_version(self): @@ -509,9 +565,23 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): self.doc_fetched.emit() return + # Collect scene container representations to compare loaded state + # This runs in the main thread because it involves the host DCC + if self._host: + time_since_refresh = time.time() - self._host_loaded_refresh_time + if time_since_refresh > self._host_loaded_refresh_timeout: + if isinstance(self._host, ILoadHost): + containers = self._host.get_containers() + else: + containers = self._host.ls() + + repre_ids = {con.get("representation") for con in containers} + self._loaded_representation_ids = repre_ids + self._host_loaded_refresh_time = time.time() + self.fetch_subset_and_version() - def on_doc_fetched(self): + def _on_doc_fetched(self): self.clear() self._items_by_id = {} self.beginResetModel() @@ -530,6 +600,10 @@ 
class SubsetsModel(TreeModel, BaseRepresentationModel): "repre_info_by_version_id" ) + subsets_loaded_by_id = self._doc_payload.get( + "subsets_loaded_by_id" + ) + if ( asset_docs_by_id is None or subset_docs_by_id is None @@ -544,7 +618,8 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): asset_docs_by_id, subset_docs_by_id, last_versions_by_subset_id, - repre_info_by_version_id + repre_info_by_version_id, + subsets_loaded_by_id ) self.endResetModel() self.refreshed.emit(True) @@ -572,8 +647,12 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): return merge_group def _fill_subset_items( - self, asset_docs_by_id, subset_docs_by_id, last_versions_by_subset_id, - repre_info_by_version_id + self, + asset_docs_by_id, + subset_docs_by_id, + last_versions_by_subset_id, + repre_info_by_version_id, + subsets_loaded_by_id ): _groups_tuple = self.groups_config.split_subsets_for_groups( subset_docs_by_id.values(), self._grouping @@ -597,6 +676,35 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): "index": self.index(group_item.row(), 0) } + def _add_subset_item(subset_doc, parent_item, parent_index): + last_version = last_versions_by_subset_id.get( + subset_doc["_id"] + ) + # do not show subset without version + if not last_version: + return + + data = copy.deepcopy(subset_doc) + data["subset"] = subset_doc["name"] + + asset_id = subset_doc["parent"] + data["asset"] = asset_docs_by_id[asset_id]["name"] + + data["last_version"] = last_version + data["loaded_in_scene"] = subset_doc["_id"] in subsets_loaded_by_id + + # Sync server data + data.update( + self._get_last_repre_info(repre_info_by_version_id, + last_version["_id"])) + + item = Item() + item.update(data) + self.add_child(item, parent_item) + + index = self.index(item.row(), 0, parent_index) + self.set_version(index, last_version) + subset_counter = 0 for group_name, subset_docs_by_name in subset_docs_by_group.items(): parent_item = group_item_by_name[group_name]["item"] @@ -619,31 +727,9 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): _parent_index = parent_index for subset_doc in subset_docs: - asset_id = subset_doc["parent"] - - data = copy.deepcopy(subset_doc) - data["subset"] = subset_name - data["asset"] = asset_docs_by_id[asset_id]["name"] - - last_version = last_versions_by_subset_id.get( - subset_doc["_id"] - ) - data["last_version"] = last_version - - # do not show subset without version - if not last_version: - continue - - data.update( - self._get_last_repre_info(repre_info_by_version_id, - last_version["_id"])) - - item = Item() - item.update(data) - self.add_child(item, _parent_item) - - index = self.index(item.row(), 0, _parent_index) - self.set_version(index, last_version) + _add_subset_item(subset_doc, + parent_item=_parent_item, + parent_index=_parent_index) for subset_name in sorted(subset_docs_without_group.keys()): subset_docs = subset_docs_without_group[subset_name] @@ -658,31 +744,9 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): subset_counter += 1 for subset_doc in subset_docs: - asset_id = subset_doc["parent"] - - data = copy.deepcopy(subset_doc) - data["subset"] = subset_name - data["asset"] = asset_docs_by_id[asset_id]["name"] - - last_version = last_versions_by_subset_id.get( - subset_doc["_id"] - ) - data["last_version"] = last_version - - # do not show subset without version - if not last_version: - continue - - data.update( - self._get_last_repre_info(repre_info_by_version_id, - last_version["_id"])) - - item = Item() - item.update(data) - self.add_child(item, 
parent_item) - - index = self.index(item.row(), 0, parent_index) - self.set_version(index, last_version) + _add_subset_item(subset_doc, + parent_item=parent_item, + parent_index=parent_index) def data(self, index, role): if not index.isValid(): @@ -810,83 +874,6 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): return data - def _repre_per_version_pipeline(self, version_ids, - active_site, remote_site): - query = [ - {"$match": {"parent": {"$in": version_ids}, - "type": "representation", - "files.sites.name": {"$exists": 1}}}, - {"$unwind": "$files"}, - {'$addFields': { - 'order_local': { - '$filter': { - 'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', active_site]} - } - } - }}, - {'$addFields': { - 'order_remote': { - '$filter': { - 'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', remote_site]} - } - } - }}, - {'$addFields': { - 'progress_local': {"$arrayElemAt": [{ - '$cond': [ - {'$size': "$order_local.progress"}, - "$order_local.progress", - # if exists created_dt count is as available - {'$cond': [ - {'$size': "$order_local.created_dt"}, - [1], - [0] - ]} - ]}, - 0 - ]} - }}, - {'$addFields': { - 'progress_remote': {"$arrayElemAt": [{ - '$cond': [ - {'$size': "$order_remote.progress"}, - "$order_remote.progress", - # if exists created_dt count is as available - {'$cond': [ - {'$size': "$order_remote.created_dt"}, - [1], - [0] - ]} - ]}, - 0 - ]} - }}, - {'$group': { # first group by repre - '_id': '$_id', - 'parent': {'$first': '$parent'}, - 'avail_ratio_local': { - '$first': { - '$divide': [{'$sum': "$progress_local"}, {'$sum': 1}] - } - }, - 'avail_ratio_remote': { - '$first': { - '$divide': [{'$sum': "$progress_remote"}, {'$sum': 1}] - } - } - }}, - {'$group': { # second group by parent, eg version_id - '_id': '$parent', - 'repre_count': {'$sum': 1}, # total representations - # fully available representation for site - 'avail_repre_local': {'$sum': "$avail_ratio_local"}, - 'avail_repre_remote': {'$sum': "$avail_ratio_remote"}, - }}, - ] - return query - class GroupMemberFilterProxyModel(QtCore.QSortFilterProxyModel): """Provide the feature of filtering group by the acceptance of members @@ -1002,7 +989,6 @@ class RepresentationSortProxyModel(GroupMemberFilterProxyModel): class RepresentationModel(TreeModel, BaseRepresentationModel): - doc_fetched = QtCore.Signal() refreshed = QtCore.Signal(bool) @@ -1028,33 +1014,43 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): "remote_site": "Remote" } - def __init__(self, dbcon, header, version_ids): + repre_projection = { + "_id": 1, + "name": 1, + "context.subset": 1, + "context.asset": 1, + "context.version": 1, + "context.representation": 1, + 'files.sites': 1 + } + + def __init__(self, dbcon, header): super(RepresentationModel, self).__init__() self.dbcon = dbcon self._data = [] self._header = header - self.version_ids = version_ids + self._version_ids = [] manager = ModulesManager() sync_server = active_site = remote_site = None active_provider = remote_provider = None - project = dbcon.Session["AVALON_PROJECT"] - if project: + project_name = dbcon.current_project() + if project_name: sync_server = manager.modules_by_name["sync_server"] - active_site = sync_server.get_active_site(project) - remote_site = sync_server.get_remote_site(project) + active_site = sync_server.get_active_site(project_name) + remote_site = sync_server.get_remote_site(project_name) # TODO refactor - active_provider = \ - sync_server.get_provider_for_site(project, - active_site) + active_provider = 
sync_server.get_provider_for_site( + project_name, active_site + ) if active_site == 'studio': active_provider = 'studio' - remote_provider = \ - sync_server.get_provider_for_site(project, - remote_site) + remote_provider = sync_server.get_provider_for_site( + project_name, remote_site + ) if remote_site == 'studio': remote_provider = 'studio' @@ -1065,7 +1061,7 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): self.remote_site = remote_site self.remote_provider = remote_provider - self.doc_fetched.connect(self.on_doc_fetched) + self.doc_fetched.connect(self._on_doc_fetched) self._docs = {} self._icons = lib.get_repre_icons() @@ -1076,7 +1072,7 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): self._items_by_id = {} def set_version_ids(self, version_ids): - self.version_ids = version_ids + self._version_ids = version_ids self.refresh() def data(self, index, role): @@ -1093,8 +1089,7 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): if index.column() == self.Columns.index("name"): if item.get("isMerged"): return item["icon"] - else: - return self._icons["repre"] + return self._icons["repre"] active_index = self.Columns.index("active_site") remote_index = self.Columns.index("remote_site") @@ -1110,12 +1105,12 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): # site added, sync in progress progress_str = "not avail." if progress >= 0: - # progress == 0 for isMerged is unavailable if progress == 0 and item.get("isMerged"): progress_str = "not avail." else: - progress_str = "{}% {}".format(int(progress * 100), - label) + progress_str = "{}% {}".format( + int(progress * 100), label + ) return progress_str @@ -1145,7 +1140,7 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): return super(RepresentationModel, self).data(index, role) - def on_doc_fetched(self): + def _on_doc_fetched(self): self.clear() self.beginResetModel() subsets = set() @@ -1155,10 +1150,9 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): group = None self._items_by_id = {} for doc in self._docs: - if len(self.version_ids) > 1: + if len(self._version_ids) > 1: group = repre_groups.get(doc["name"]) if not group: - group_item = Item() item_id = str(uuid4()) group_item.update({ @@ -1179,9 +1173,9 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): repre_groups_items[doc["name"]] = 0 group = group_item - progress = lib.get_progress_for_repre(doc, - self.active_site, - self.remote_site) + progress = lib.get_progress_for_repre( + doc, self.active_site, self.remote_site + ) active_site_icon = self._icons.get(self.active_provider) remote_site_icon = self._icons.get(self.remote_provider) @@ -1214,9 +1208,9 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): 'remote_site_progress': progress[self.remote_site] } if group: - group = self._sum_group_progress(doc["name"], group, - current_progress, - repre_groups_items) + group = self._sum_group_progress( + doc["name"], group, current_progress, repre_groups_items + ) self.add_child(item, group) @@ -1235,47 +1229,39 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): return self._items_by_id.get(item_id) def refresh(self): - docs = [] - session_project = self.dbcon.Session['AVALON_PROJECT'] - if not session_project: + project_name = self.dbcon.current_project() + if not project_name: return - if self.version_ids: + repre_docs = [] + if self._version_ids: # Simple find here for now, expected to receive lower number of # representations and logic could be in 
Python - docs = list(self.dbcon.find( - {"type": "representation", "parent": {"$in": self.version_ids}, - "files.sites.name": {"$exists": 1}}, self.projection())) - self._docs = docs + repre_docs = list(get_representations( + project_name, + version_ids=self._version_ids, + fields=self.repre_projection.keys() + )) + + self._docs = repre_docs self.doc_fetched.emit() - @classmethod - def projection(cls): - return { - "_id": 1, - "name": 1, - "context.subset": 1, - "context.asset": 1, - "context.version": 1, - "context.representation": 1, - 'files.sites': 1 - } + def _sum_group_progress( + self, repre_name, group, current_item_progress, repre_groups_items + ): + """Update final group progress - def _sum_group_progress(self, repre_name, group, current_item_progress, - repre_groups_items): - """ - Update final group progress - Called after every item in group is added + Called after every item in group is added - Args: - repre_name(string) - group(dict): info about group of selected items - current_item_progress(dict): {'active_site_progress': XX, - 'remote_site_progress': YY} - repre_groups_items(dict) - Returns: - (dict): updated group info + Args: + repre_name(string) + group(dict): info about group of selected items + current_item_progress(dict): {'active_site_progress': XX, + 'remote_site_progress': YY} + repre_groups_items(dict) + Returns: + (dict): updated group info """ repre_groups_items[repre_name] += 1 diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py index 42fb62b632..826c7110da 100644 --- a/openpype/tools/loader/widgets.py +++ b/openpype/tools/loader/widgets.py @@ -7,8 +7,18 @@ import collections from Qt import QtWidgets, QtCore, QtGui -from openpype.api import Anatomy -from openpype.pipeline import HeroVersionType +from openpype.client import ( + get_subset_families, + get_subset_by_id, + get_subsets, + get_version_by_id, + get_versions, + get_representations, + get_thumbnail_id_from_source, + get_thumbnail, +) +from openpype.client.operations import OperationsSession, REMOVED_VALUE +from openpype.pipeline import HeroVersionType, Anatomy from openpype.pipeline.thumbnail import get_thumbnail_binary from openpype.pipeline.load import ( discover_loader_plugins, @@ -48,6 +58,7 @@ from .model import ( ITEM_ID_ROLE ) from . 
import lib +from .delegates import LoadedInSceneDelegate from openpype.tools.utils.constants import ( LOCAL_PROVIDER_ROLE, @@ -159,6 +170,7 @@ class SubsetWidget(QtWidgets.QWidget): ("duration", 60), ("handles", 55), ("step", 10), + ("loaded_in_scene", 25), ("repre_info", 65) ) @@ -224,6 +236,10 @@ class SubsetWidget(QtWidgets.QWidget): column = model.Columns.index("repre_info") view.setItemDelegateForColumn(column, avail_delegate) + loaded_in_scene_delegate = LoadedInSceneDelegate(view) + column = model.Columns.index("loaded_in_scene") + view.setItemDelegateForColumn(column, loaded_in_scene_delegate) + layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.addLayout(top_bar_layout) @@ -237,8 +253,7 @@ class SubsetWidget(QtWidgets.QWidget): self.model = model self.view = view - actual_project = dbcon.Session["AVALON_PROJECT"] - self.on_project_change(actual_project) + self.on_project_change(dbcon.current_project()) view.customContextMenuRequested.connect(self.on_context_menu) @@ -302,33 +317,23 @@ class SubsetWidget(QtWidgets.QWidget): item["version_document"] ) - subset_docs = list(self.dbcon.find( - { - "_id": {"$in": list(version_docs_by_subset_id.keys())}, - "type": "subset" - }, - { - "schema": 1, - "data.families": 1 - } + project_name = self.dbcon.active_project() + subset_docs = list(get_subsets( + project_name, + subset_ids=version_docs_by_subset_id.keys(), + fields=["schema", "data.families"] )) subset_docs_by_id = { subset_doc["_id"]: subset_doc for subset_doc in subset_docs } version_ids = list(version_docs_by_id.keys()) - repre_docs = self.dbcon.find( - # Query all representations for selected versions at once - { - "type": "representation", - "parent": {"$in": version_ids} - }, - # Query only name and parent from representation - { - "name": 1, - "parent": 1 - } + repre_docs = get_representations( + project_name, + version_ids=version_ids, + fields=["name", "parent"] ) + repre_docs_by_version_id = { version_id: [] for version_id in version_ids @@ -356,9 +361,10 @@ class SubsetWidget(QtWidgets.QWidget): enabled = False if project_name: self.model.reset_sync_server(project_name) - if self.model.sync_server: - enabled_proj = self.model.sync_server.get_enabled_projects() - enabled = project_name in enabled_proj + sync_server = self.model.sync_server + if sync_server: + enabled = sync_server.is_project_enabled(project_name, + single=True) lib.change_visibility(self.model, self.view, "repre_info", enabled) @@ -435,7 +441,8 @@ class SubsetWidget(QtWidgets.QWidget): # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. 
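# Illustrative sketch of how the new "In scene" column is populated (condensed from
# the model and widget hunks above; the names match the patch): the model asks the
# host for its containers, collects their representation ids, and maps them back to
# subset ids, which `LoadedInSceneDelegate` then renders as "yes"/"no":
#
#     containers = (host.get_containers() if isinstance(host, ILoadHost)
#                   else host.ls())
#     repre_ids = {con.get("representation") for con in containers}
#     repres = get_representations(project_name, representation_ids=repre_ids,
#                                  fields=["parent"])
#     version_ids = {repre["parent"] for repre in repres}
#     versions = get_versions(project_name, version_ids=version_ids,
#                             fields=["parent"])
#     loaded_subset_ids = {version["parent"] for version in versions}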
- available_loaders = discover_loader_plugins() + project_name = self.dbcon.active_project() + available_loaders = discover_loader_plugins(project_name) if self.tool_name: available_loaders = lib.remove_tool_name_from_loaders( available_loaders, self.tool_name @@ -508,7 +515,7 @@ class SubsetWidget(QtWidgets.QWidget): if not one_item_selected: # Filter loaders from first subset by intersected combinations for repre, loader in first_loaders: - if (repre["name"], loader) not in found_combinations: + if (repre["name"].lower(), loader) not in found_combinations: continue loaders.append((repre, loader)) @@ -566,28 +573,43 @@ class SubsetWidget(QtWidgets.QWidget): # same representation available # Trigger - repre_ids = [] + project_name = self.dbcon.active_project() + subset_name_by_version_id = dict() for item in items: - representation = self.dbcon.find_one( - { - "type": "representation", - "name": representation_name, - "parent": item["version_document"]["_id"] - }, - {"_id": 1} - ) - if not representation: - self.echo("Subset '{}' has no representation '{}'".format( - item["subset"], representation_name - )) - continue - repre_ids.append(representation["_id"]) + version_id = item["version_document"]["_id"] + subset_name_by_version_id[version_id] = item["subset"] + + version_ids = set(subset_name_by_version_id.keys()) + repre_docs = get_representations( + project_name, + representation_names=[representation_name], + version_ids=version_ids, + fields=["_id", "parent"] + ) + + repre_ids = [] + for repre_doc in repre_docs: + repre_ids.append(repre_doc["_id"]) + + # keep only version ids without representation with that name + version_id = repre_doc["parent"] + version_ids.discard(version_id) + + if version_ids: + # report versions that didn't have valid representation + joined_subset_names = ", ".join([ + '"{}"'.format(subset_name_by_version_id[version_id]) + for version_id in version_ids + ]) + self.echo("Subsets {} don't have representation '{}'".format( + joined_subset_names, representation_name + )) # get contexts only for selected menu option repre_contexts = get_repres_contexts(repre_ids, self.dbcon) - options = lib.get_options(action, loader, self, - list(repre_contexts.values())) - + options = lib.get_options( + action, loader, self, list(repre_contexts.values()) + ) error_info = _load_representations_by_loader( loader, repre_contexts, options=options ) @@ -599,26 +621,30 @@ class SubsetWidget(QtWidgets.QWidget): box.show() def group_subsets(self, name, asset_ids, items): - field = "data.subsetGroup" + subset_ids = { + item["_id"] + for item in items + if item.get("_id") + } + if not subset_ids: + return if name: - update = {"$set": {field: name}} self.echo("Group subsets to '%s'.." 
% name) else: - update = {"$unset": {field: ""}} self.echo("Ungroup subsets..") - subsets = list() - for item in items: - subsets.append(item["subset"]) + project_name = self.dbcon.active_project() + op_session = OperationsSession() + for subset_id in subset_ids: + op_session.update_entity( + project_name, + "subset", + subset_id, + {"data.subsetGroup": name or REMOVED_VALUE} + ) - for asset_id in asset_ids: - filtr = { - "type": "subset", - "parent": asset_id, - "name": {"$in": subsets}, - } - self.dbcon.update_many(filtr, update) + op_session.commit() def echo(self, message): print(message) @@ -661,27 +687,21 @@ class VersionTextEdit(QtWidgets.QTextEdit): print("Querying..") + project_name = self.dbcon.active_project() if not version_doc: - version_doc = self.dbcon.find_one({ - "_id": version_id, - "type": {"$in": ["version", "hero_version"]} - }) + version_doc = get_version_by_id(project_name, version_id) assert version_doc, "Not a valid version id" if version_doc["type"] == "hero_version": - _version_doc = self.dbcon.find_one({ - "_id": version_doc["version_id"], - "type": "version" - }) + _version_doc = get_version_by_id( + project_name, version_doc["version_id"] + ) version_doc["data"] = _version_doc["data"] version_doc["name"] = HeroVersionType( _version_doc["name"] ) - subset = self.dbcon.find_one({ - "_id": version_doc["parent"], - "type": "subset" - }) + subset = get_subset_by_id(project_name, version_doc["parent"]) assert subset, "No valid subset parent for version" # Define readable creation timestamp @@ -752,7 +772,7 @@ class VersionTextEdit(QtWidgets.QTextEdit): if not source: return - project_name = self.dbcon.Session["AVALON_PROJECT"] + project_name = self.dbcon.current_project() if self._anatomy is None or self._anatomy.project_name != project_name: self._anatomy = Anatomy(project_name) @@ -833,24 +853,19 @@ class ThumbnailWidget(QtWidgets.QLabel): QtCore.Qt.SmoothTransformation ) - def set_thumbnail(self, doc_id=None): - if not doc_id: + def set_thumbnail(self, src_type, doc_ids): + if not doc_ids: self.set_pixmap() return - if isinstance(doc_id, (list, tuple)): - if len(doc_id) < 1: - self.set_pixmap() - return - doc_id = doc_id[0] + src_id = doc_ids[0] - doc = self.dbcon.find_one( - {"_id": doc_id}, - {"data.thumbnail_id"} + project_name = self.dbcon.active_project() + thumbnail_id = get_thumbnail_id_from_source( + project_name, + src_type, + src_id, ) - thumbnail_id = None - if doc: - thumbnail_id = doc.get("data", {}).get("thumbnail_id") if thumbnail_id == self.current_thumb_id: if self.current_thumbnail is None: self.set_pixmap() @@ -861,9 +876,7 @@ class ThumbnailWidget(QtWidgets.QLabel): self.set_pixmap() return - thumbnail_ent = self.dbcon.find_one( - {"type": "thumbnail", "_id": thumbnail_id} - ) + thumbnail_ent = get_thumbnail(project_name, thumbnail_id) if not thumbnail_ent: return @@ -917,21 +930,9 @@ class FamilyModel(QtGui.QStandardItemModel): def refresh(self): families = set() - if self.dbcon.Session.get("AVALON_PROJECT"): - result = list(self.dbcon.aggregate([ - {"$match": { - "type": "subset" - }}, - {"$project": { - "family": {"$arrayElemAt": ["$data.families", 0]} - }}, - {"$group": { - "_id": "family_group", - "families": {"$addToSet": "$family"} - }} - ])) - if result: - families = set(result[0]["families"]) + project_name = self.dbcon.current_project() + if project_name: + families = get_subset_families(project_name) root_item = self.invisibleRootItem() @@ -1176,7 +1177,7 @@ class RepresentationWidget(QtWidgets.QWidget): headers = [item[0] for item 
in self.default_widths] - model = RepresentationModel(self.dbcon, headers, []) + model = RepresentationModel(self.dbcon, headers) proxy_model = RepresentationSortProxyModel(self) proxy_model.setSourceModel(model) @@ -1213,8 +1214,8 @@ class RepresentationWidget(QtWidgets.QWidget): self.proxy_model = proxy_model self.sync_server_enabled = False - actual_project = dbcon.Session["AVALON_PROJECT"] - self.on_project_change(actual_project) + + self.on_project_change(dbcon.current_project()) self.model.refresh() @@ -1228,9 +1229,10 @@ class RepresentationWidget(QtWidgets.QWidget): enabled = False if project_name: self.model.reset_sync_server(project_name) - if self.model.sync_server: - enabled_proj = self.model.sync_server.get_enabled_projects() - enabled = project_name in enabled_proj + sync_server = self.model.sync_server + if sync_server: + enabled = sync_server.is_project_enabled(project_name, + single=True) self.sync_server_enabled = enabled lib.change_visibility(self.model, self.tree_view, @@ -1243,23 +1245,22 @@ class RepresentationWidget(QtWidgets.QWidget): for item in items: repre_ids.append(item["_id"]) - repre_docs = list(self.dbcon.find( - { - "type": "representation", - "_id": {"$in": repre_ids} - }, - { - "name": 1, - "parent": 1 - } + project_name = self.dbcon.active_project() + repre_docs = list(get_representations( + project_name, + representation_ids=repre_ids, + fields=["name", "parent"] )) + version_ids = [ repre_doc["parent"] for repre_doc in repre_docs ] - version_docs = self.dbcon.find({ - "_id": {"$in": version_ids} - }) + version_docs = get_versions( + project_name, + version_ids=version_ids, + hero=True + ) version_docs_by_id = {} version_docs_by_subset_id = collections.defaultdict(list) @@ -1269,15 +1270,10 @@ class RepresentationWidget(QtWidgets.QWidget): version_docs_by_id[version_id] = version_doc version_docs_by_subset_id[subset_id].append(version_doc) - subset_docs = list(self.dbcon.find( - { - "_id": {"$in": list(version_docs_by_subset_id.keys())}, - "type": "subset" - }, - { - "schema": 1, - "data.families": 1 - } + subset_docs = list(get_subsets( + project_name, + subset_ids=version_docs_by_subset_id.keys(), + fields=["schema", "data.families"] )) subset_docs_by_id = { subset_doc["_id"]: subset_doc @@ -1351,7 +1347,8 @@ class RepresentationWidget(QtWidgets.QWidget): selected_side = self._get_selected_side(point_index, rows) # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. 
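# Illustrative sketch of the reworked grouping flow (condensed from the
# `group_subsets` hunk above): instead of `update_many` with `$set`/`$unset`,
# each selected subset is updated through an OperationsSession, and clearing the
# group is expressed with the REMOVED_VALUE sentinel:
#
#     op_session = OperationsSession()
#     for subset_id in subset_ids:
#         op_session.update_entity(
#             project_name, "subset", subset_id,
#             {"data.subsetGroup": name or REMOVED_VALUE}
#         )
#     op_session.commit()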
- available_loaders = discover_loader_plugins() + project_name = self.dbcon.active_project() + available_loaders = discover_loader_plugins(project_name) filtered_loaders = [] for loader in available_loaders: @@ -1446,13 +1443,12 @@ class RepresentationWidget(QtWidgets.QWidget): self._process_action(items, menu, point) def _process_action(self, items, menu, point): - """ - Show the context action menu and process selected + """Show the context action menu and process selected - Args: - items(dict): menu items - menu(OptionalMenu) - point(PointIndex) + Args: + items(dict): menu items + menu(OptionalMenu) + point(PointIndex) """ global_point = self.tree_view.mapToGlobal(point) action = menu.exec_(global_point) @@ -1468,21 +1464,23 @@ class RepresentationWidget(QtWidgets.QWidget): data_by_repre_id = {} selected_side = action_representation.get("selected_side") + is_sync_loader = tools_lib.is_sync_loader(loader) for item in items: - if tools_lib.is_sync_loader(loader): - site_name = "{}_site_name".format(selected_side) - data = { - "_id": item.get("_id"), - "site_name": item.get(site_name), - "project_name": self.dbcon.Session["AVALON_PROJECT"] - } + item_id = item.get("_id") + repre_ids.append(item_id) + if not is_sync_loader: + continue - if not data["site_name"]: - continue + site_name = "{}_site_name".format(selected_side) + data_site_name = item.get(site_name) + if not data_site_name: + continue - data_by_repre_id[data["_id"]] = data - - repre_ids.append(item.get("_id")) + data_by_repre_id[item_id] = { + "_id": item_id, + "site_name": data_site_name, + "project_name": self.dbcon.active_project() + } repre_contexts = get_repres_contexts(repre_ids, self.dbcon) options = lib.get_options(action, loader, self, @@ -1564,6 +1562,11 @@ def _load_representations_by_loader(loader, repre_contexts, return for repre_context in repre_contexts.values(): + version_doc = repre_context["version"] + if version_doc["type"] == "hero_version": + version_name = "Hero" + else: + version_name = version_doc.get("name") try: if data_by_repre_id: _id = repre_context["representation"]["_id"] @@ -1581,7 +1584,7 @@ def _load_representations_by_loader(loader, repre_contexts, None, repre_context["representation"]["name"], repre_context["subset"]["name"], - repre_context["version"]["name"] + version_name )) except Exception as exc: @@ -1594,7 +1597,7 @@ def _load_representations_by_loader(loader, repre_contexts, formatted_traceback, repre_context["representation"]["name"], repre_context["subset"]["name"], - repre_context["version"]["name"] + version_name )) return error_info diff --git a/openpype/tools/mayalookassigner/app.py b/openpype/tools/mayalookassigner/app.py index 1b6cad77a8..5665acea42 100644 --- a/openpype/tools/mayalookassigner/app.py +++ b/openpype/tools/mayalookassigner/app.py @@ -4,6 +4,7 @@ import logging from Qt import QtWidgets, QtCore +from openpype.client import get_last_version_by_subset_id from openpype import style from openpype.pipeline import legacy_io from openpype.tools.utils.lib import qt_app_context @@ -211,6 +212,7 @@ class MayaLookAssignerWindow(QtWidgets.QWidget): selection = self.assign_selected.isChecked() asset_nodes = self.asset_outliner.get_nodes(selection=selection) + project_name = legacy_io.active_project() start = time.time() for i, (asset, item) in enumerate(asset_nodes.items()): @@ -222,23 +224,20 @@ class MayaLookAssignerWindow(QtWidgets.QWidget): assign_look = next((subset for subset in item["looks"] if subset["name"] in looks), None) if not assign_look: - self.echo("{} No 
matching selected " - "look for {}".format(prefix, asset)) + self.echo( + "{} No matching selected look for {}".format(prefix, asset) + ) continue # Get the latest version of this asset's look subset - version = legacy_io.find_one( - { - "type": "version", - "parent": assign_look["_id"] - }, - sort=[("name", -1)] + version = get_last_version_by_subset_id( + project_name, assign_look["_id"], fields=["_id"] ) subset_name = assign_look["name"] - self.echo("{} Assigning {} to {}\t".format(prefix, - subset_name, - asset)) + self.echo("{} Assigning {} to {}\t".format( + prefix, subset_name, asset + )) nodes = item["nodes"] if cmds.pluginInfo('vrayformaya', query=True, loaded=True): diff --git a/openpype/tools/mayalookassigner/commands.py b/openpype/tools/mayalookassigner/commands.py index d41d8ca5a2..2e7a51efde 100644 --- a/openpype/tools/mayalookassigner/commands.py +++ b/openpype/tools/mayalookassigner/commands.py @@ -2,9 +2,9 @@ from collections import defaultdict import logging import os -from bson.objectid import ObjectId import maya.cmds as cmds +from openpype.client import get_asset_by_id from openpype.pipeline import ( legacy_io, remove_container, @@ -159,11 +159,9 @@ def create_items_from_nodes(nodes): log.warning("No id hashes") return asset_view_items + project_name = legacy_io.active_project() for _id, id_nodes in id_hashes.items(): - asset = legacy_io.find_one( - {"_id": ObjectId(_id)}, - projection={"name": True} - ) + asset = get_asset_by_id(project_name, _id, fields=["name"]) # Skip if asset id is not found if not asset: @@ -180,10 +178,12 @@ def create_items_from_nodes(nodes): namespace = get_namespace_from_node(node) namespaces.add(namespace) - asset_view_items.append({"label": asset["name"], - "asset": asset, - "looks": looks, - "namespaces": namespaces}) + asset_view_items.append({ + "label": asset["name"], + "asset": asset, + "looks": looks, + "namespaces": namespaces + }) return asset_view_items diff --git a/openpype/tools/mayalookassigner/vray_proxies.py b/openpype/tools/mayalookassigner/vray_proxies.py index 3523b24bf3..889396e555 100644 --- a/openpype/tools/mayalookassigner/vray_proxies.py +++ b/openpype/tools/mayalookassigner/vray_proxies.py @@ -6,11 +6,14 @@ import logging import json import six -from bson.objectid import ObjectId import alembic.Abc from maya import cmds +from openpype.client import ( + get_representation_by_name, + get_last_version_by_subset_name, +) from openpype.pipeline import ( legacy_io, load_container, @@ -155,13 +158,12 @@ def get_look_relationships(version_id): Returns: dict: Dictionary of relations. - """ - json_representation = legacy_io.find_one({ - "type": "representation", - "parent": version_id, - "name": "json" - }) + + project_name = legacy_io.active_project() + json_representation = get_representation_by_name( + project_name, representation_name="json", version_id=version_id + ) # Load relationships shader_relation = get_representation_path(json_representation) @@ -184,12 +186,12 @@ def load_look(version_id): list of shader nodes. """ + + project_name = legacy_io.active_project() # Get representations of shader file and relationships - look_representation = legacy_io.find_one({ - "type": "representation", - "parent": version_id, - "name": "ma" - }) + look_representation = get_representation_by_name( + project_name, representation_name="ma", version_id=version_id + ) # See if representation is already loaded, if so reuse it. 
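# Illustrative note on the look-assigner queries above: the latest look version is
# now resolved with `get_last_version_by_subset_id` (replacing the find_one sorted
# by name descending), and the look's "json"/"ma" representations are fetched with
# `get_representation_by_name`, e.g. (assuming a valid `version_id`):
#
#     json_repre = get_representation_by_name(
#         project_name, representation_name="json", version_id=version_id
#     )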
host = registered_host() @@ -220,42 +222,6 @@ def load_look(version_id): return shader_nodes -def get_latest_version(asset_id, subset): - # type: (str, str) -> dict - """Get latest version of subset. - - Args: - asset_id (str): Asset ID - subset (str): Subset name. - - Returns: - Latest version - - Throws: - RuntimeError: When subset or version doesn't exist. - - """ - subset = legacy_io.find_one({ - "name": subset, - "parent": ObjectId(asset_id), - "type": "subset" - }) - if not subset: - raise RuntimeError("Subset does not exist: %s" % subset) - - version = legacy_io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - if not version: - raise RuntimeError("Version does not exist.") - - return version - - def vrayproxy_assign_look(vrayproxy, subset="lookDefault"): # type: (str, str) -> None """Assign look to vray proxy. @@ -281,13 +247,20 @@ def vrayproxy_assign_look(vrayproxy, subset="lookDefault"): asset_id = node_id.split(":", 1)[0] node_ids_by_asset_id[asset_id].add(node_id) + project_name = legacy_io.active_project() for asset_id, node_ids in node_ids_by_asset_id.items(): # Get latest look version - try: - version = get_latest_version(asset_id, subset=subset) - except RuntimeError as exc: - print(exc) + version = get_last_version_by_subset_name( + project_name, + subset_name=subset, + asset_id=asset_id, + fields=["_id"] + ) + if not version: + print("Didn't find last version for subset name {}".format( + subset + )) continue relationships = get_look_relationships(version["_id"]) diff --git a/openpype/tools/project_manager/project_manager/delegates.py b/openpype/tools/project_manager/project_manager/delegates.py index 31487ff132..b066bbb159 100644 --- a/openpype/tools/project_manager/project_manager/delegates.py +++ b/openpype/tools/project_manager/project_manager/delegates.py @@ -205,3 +205,9 @@ class ToolsDelegate(QtWidgets.QStyledItemDelegate): def setModelData(self, editor, model, index): model.setData(index, editor.value(), QtCore.Qt.EditRole) + + def displayText(self, value, locale): + if value: + return ", ".join(value) + else: + return diff --git a/openpype/tools/project_manager/project_manager/model.py b/openpype/tools/project_manager/project_manager/model.py index 871704e13c..6f40140e5e 100644 --- a/openpype/tools/project_manager/project_manager/model.py +++ b/openpype/tools/project_manager/project_manager/model.py @@ -7,10 +7,14 @@ from pymongo import UpdateOne, DeleteOne from Qt import QtCore, QtGui -from openpype.lib import ( - CURRENT_DOC_SCHEMAS, - PypeLogger, +from openpype.client import ( + get_projects, + get_project, + get_assets, + get_asset_ids_with_subsets, ) +from openpype.client.operations import CURRENT_ASSET_DOC_SCHEMA +from openpype.lib import Logger from .constants import ( IDENTIFIER_ROLE, @@ -49,12 +53,8 @@ class ProjectModel(QtGui.QStandardItemModel): self._items_by_name[None] = none_project new_project_items.append(none_project) - project_docs = self.dbcon.projects( - projection={"name": 1}, - only_active=True - ) project_names = set() - for project_doc in project_docs: + for project_doc in get_projects(fields=["name"]): project_name = project_doc.get("name") if not project_name: continue @@ -201,7 +201,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): @property def log(self): if self._log is None: - self._log = PypeLogger.get_logger("ProjectManagerModel") + self._log = Logger.get_logger("ProjectManagerModel") return self._log @property @@ -255,10 +255,11 @@ class HierarchyModel(QtCore.QAbstractItemModel): 
return # Find project'd document - project_doc = self.dbcon.database[project_name].find_one( - {"type": "project"}, - ProjectItem.query_projection + project_doc = get_project( + project_name, + fields=list(ProjectItem.query_projection.keys()) ) + # Skip if project document does not exist # - this shouldn't happen using only UI elements if not project_doc: @@ -269,9 +270,8 @@ class HierarchyModel(QtCore.QAbstractItemModel): self.add_item(project_item) # Query all assets of the project - asset_docs = self.dbcon.database[project_name].find( - {"type": "asset"}, - AssetItem.query_projection + asset_docs = get_assets( + project_name, fields=AssetItem.query_projection.keys() ) asset_docs_by_id = { asset_doc["_id"]: asset_doc @@ -282,31 +282,16 @@ class HierarchyModel(QtCore.QAbstractItemModel): # if asset item can be modified (name and hierarchy change) # - the same must be applied to all it's parents asset_ids = list(asset_docs_by_id.keys()) - result = [] + asset_ids_with_subsets = [] if asset_ids: - result = self.dbcon.database[project_name].aggregate([ - { - "$match": { - "type": "subset", - "parent": {"$in": asset_ids} - } - }, - { - "$group": { - "_id": "$parent", - "count": {"$sum": 1} - } - } - ]) + asset_ids_with_subsets = get_asset_ids_with_subsets( + project_name, asset_ids=asset_ids + ) asset_modifiable = { - asset_id: True + asset_id: asset_id not in asset_ids_with_subsets for asset_id in asset_docs_by_id.keys() } - for item in result: - asset_id = item["_id"] - count = item["count"] - asset_modifiable[asset_id] = count < 1 # Store assets by their visual parent to be able create their hierarchy asset_docs_by_parent_id = collections.defaultdict(list) @@ -1472,12 +1457,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): mimedata.setData("application/copy_task", encoded_data) return mimedata - def paste_mime_data(self, index, mime_data): - if not index.isValid(): - return - - item_id = index.data(IDENTIFIER_ROLE) - item = self._items_by_id[item_id] + def _paste_mime_data(self, item, mime_data): if not isinstance(item, (AssetItem, TaskItem)): return @@ -1511,6 +1491,25 @@ class HierarchyModel(QtCore.QAbstractItemModel): task_item = TaskItem(task_data, True) self.add_item(task_item, parent) + def paste(self, indexes, mime_data): + + # Get the selected Assets uniquely + items = set() + for index in indexes: + if not index.isValid(): + return + item_id = index.data(IDENTIFIER_ROLE) + item = self._items_by_id[item_id] + + # Do not copy into the Task Item so get parent Asset instead + if isinstance(item, TaskItem): + item = item.parent() + + items.add(item) + + for item in items: + self._paste_mime_data(item, mime_data) + class BaseItem: """Base item for HierarchyModel. 
@@ -1960,7 +1959,7 @@ class AssetItem(BaseItem): } schema_name = ( self._origin_asset_doc.get("schema") - or CURRENT_DOC_SCHEMAS["asset"] + or CURRENT_ASSET_DOC_SCHEMA ) doc = { diff --git a/openpype/tools/project_manager/project_manager/view.py b/openpype/tools/project_manager/project_manager/view.py index 74f5a06b71..8d1fe54e83 100644 --- a/openpype/tools/project_manager/project_manager/view.py +++ b/openpype/tools/project_manager/project_manager/view.py @@ -3,6 +3,7 @@ from queue import Queue from Qt import QtWidgets, QtCore, QtGui +from openpype.client import get_project from .delegates import ( NumberDelegate, NameDelegate, @@ -27,7 +28,7 @@ class NameDef: class NumberDef: def __init__(self, minimum=None, maximum=None, decimals=None): self.minimum = 0 if minimum is None else minimum - self.maximum = 999999 if maximum is None else maximum + self.maximum = 999999999 if maximum is None else maximum self.decimals = 0 if decimals is None else decimals @@ -47,12 +48,8 @@ class ProjectDocCache: def set_project(self, project_name): self.project_doc = None - if not project_name: - return - - self.project_doc = self.dbcon.database[project_name].find_one( - {"type": "project"} - ) + if project_name: + self.project_doc = get_project(project_name) class ToolsCache: @@ -139,6 +136,7 @@ class HierarchyView(QtWidgets.QTreeView): self.setAlternatingRowColors(True) self.setSelectionMode(HierarchyView.ExtendedSelection) self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + self.setEditTriggers(HierarchyView.AllEditTriggers) column_delegates = {} column_key_to_index = {} @@ -195,13 +193,13 @@ class HierarchyView(QtWidgets.QTreeView): for idx, width in widths_by_idx.items(): self.setColumnWidth(idx, width) - def set_project(self, project_name): + def set_project(self, project_name, force=False): # Trigger helpers first self._project_doc_cache.set_project(project_name) self._tools_cache.refresh() # Trigger update of model after all data for delegates are filled - self._source_model.set_project(project_name) + self._source_model.set_project(project_name, force) def _on_project_reset(self): self.header_init() @@ -301,16 +299,6 @@ class HierarchyView(QtWidgets.QTreeView): def rowsInserted(self, parent_index, start, end): super(HierarchyView, self).rowsInserted(parent_index, start, end) - for row in range(start, end + 1): - for key, column in self._column_key_to_index.items(): - if key not in self.persistent_columns: - continue - col_index = self._source_model.index(row, column, parent_index) - if bool( - self._source_model.flags(col_index) - & QtCore.Qt.ItemIsEditable - ): - self.openPersistentEditor(col_index) # Expand parent on insert if not self.isExpanded(parent_index): @@ -365,20 +353,24 @@ class HierarchyView(QtWidgets.QTreeView): event.accept() def _copy_items(self, indexes=None): + clipboard = QtWidgets.QApplication.clipboard() try: if indexes is None: indexes = self.selectedIndexes() mime_data = self._source_model.copy_mime_data(indexes) - QtWidgets.QApplication.clipboard().setMimeData(mime_data) + clipboard.setMimeData(mime_data) self._show_message("Tasks copied") except ValueError as exc: + # Change clipboard to contain empty data + empty_mime_data = QtCore.QMimeData() + clipboard.setMimeData(empty_mime_data) self._show_message(str(exc)) def _paste_items(self): - index = self.currentIndex() mime_data = QtWidgets.QApplication.clipboard().mimeData() - self._source_model.paste_mime_data(index, mime_data) + rows = self.selectionModel().selectedRows() + self._source_model.paste(rows, mime_data) def 
_delete_items(self, indexes=None): if indexes is None: @@ -386,7 +378,7 @@ class HierarchyView(QtWidgets.QTreeView): self._source_model.delete_indexes(indexes) def _on_ctrl_shift_enter_pressed(self): - self._add_task_and_edit() + self.add_task_and_edit() def add_asset(self, parent_index=None): if parent_index is None: @@ -428,9 +420,9 @@ class HierarchyView(QtWidgets.QTreeView): self.edit(new_index) def _add_task_action(self): - self._add_task_and_edit() + self.add_task_and_edit() - def _add_task_and_edit(self): + def add_task_and_edit(self): new_index = self.add_task() if new_index is None: return diff --git a/openpype/tools/project_manager/project_manager/widgets.py b/openpype/tools/project_manager/project_manager/widgets.py index dc75b30bd7..4bc968347a 100644 --- a/openpype/tools/project_manager/project_manager/widgets.py +++ b/openpype/tools/project_manager/project_manager/widgets.py @@ -1,13 +1,13 @@ import re +from openpype.client import get_projects, create_project from .constants import ( NAME_ALLOWED_SYMBOLS, NAME_REGEX ) -from openpype.lib import ( - create_project, +from openpype.client.operations import ( PROJECT_NAME_ALLOWED_SYMBOLS, - PROJECT_NAME_REGEX + PROJECT_NAME_REGEX, ) from openpype.style import load_stylesheet from openpype.pipeline import AvalonMongoDB @@ -265,22 +265,16 @@ class CreateProjectDialog(QtWidgets.QDialog): project_name = self.project_name_input.text() project_code = self.project_code_input.text() library_project = self.library_project_input.isChecked() - create_project(project_name, project_code, library_project, self.dbcon) + create_project(project_name, project_code, library_project) self.done(1) def _get_existing_projects(self): project_names = set() project_codes = set() - for project_name in self.dbcon.database.collection_names(): - # Each collection will have exactly one project document - project_doc = self.dbcon.database[project_name].find_one( - {"type": "project"}, - {"name": 1, "data.code": 1} - ) - if not project_doc: - continue - + for project_doc in get_projects( + inactive=True, fields=["name", "data.code"] + ): project_name = project_doc.get("name") if not project_name: continue diff --git a/openpype/tools/project_manager/project_manager/window.py b/openpype/tools/project_manager/project_manager/window.py index c281479d4f..3b2dea8ca3 100644 --- a/openpype/tools/project_manager/project_manager/window.py +++ b/openpype/tools/project_manager/project_manager/window.py @@ -1,5 +1,12 @@ from Qt import QtWidgets, QtCore, QtGui +from openpype import resources +from openpype.style import load_stylesheet +from openpype.widgets import PasswordDialog +from openpype.lib import is_admin_password_required, Logger +from openpype.pipeline import AvalonMongoDB +from openpype.pipeline.project_folders import create_project_folders + from . import ( ProjectModel, ProjectProxyFilter, @@ -13,17 +20,6 @@ from . 
import ( ) from .widgets import ConfirmProjectDeletion from .style import ResourceCache -from openpype.style import load_stylesheet -from openpype.lib import is_admin_password_required -from openpype.widgets import PasswordDialog -from openpype.pipeline import AvalonMongoDB - -from openpype import resources -from openpype.api import ( - get_project_basic_paths, - create_project_folders, - Logger -) class ProjectManagerWindow(QtWidgets.QWidget): @@ -184,14 +180,14 @@ class ProjectManagerWindow(QtWidgets.QWidget): self.resize(1200, 600) self.setStyleSheet(load_stylesheet()) - def _set_project(self, project_name=None): + def _set_project(self, project_name=None, force=False): self._create_folders_btn.setEnabled(project_name is not None) self._remove_projects_btn.setEnabled(project_name is not None) self._add_asset_btn.setEnabled(project_name is not None) self._add_task_btn.setEnabled(project_name is not None) self._save_btn.setEnabled(project_name is not None) self._project_proxy_model.set_filter_default(project_name is not None) - self.hierarchy_view.set_project(project_name) + self.hierarchy_view.set_project(project_name, force) def _current_project(self): row = self._project_combobox.currentIndex() @@ -229,11 +225,11 @@ class ProjectManagerWindow(QtWidgets.QWidget): self._project_combobox.setCurrentIndex(row) selected_project = self._current_project() - self._set_project(selected_project) + self._set_project(selected_project, True) def _on_project_change(self): selected_project = self._current_project() - self._set_project(selected_project) + self._set_project(selected_project, False) def _on_project_refresh(self): self.refresh_projects() @@ -245,7 +241,7 @@ class ProjectManagerWindow(QtWidgets.QWidget): self.hierarchy_view.add_asset() def _on_add_task(self): - self.hierarchy_view.add_task() + self.hierarchy_view.add_task_and_edit() def _on_create_folders(self): project_name = self._current_project() @@ -259,12 +255,8 @@ class ProjectManagerWindow(QtWidgets.QWidget): qm.Yes | qm.No) if ans == qm.Yes: try: - # Get paths based on presets - basic_paths = get_project_basic_paths(project_name) - if not basic_paths: - pass # Invoking OpenPype API to create the project folders - create_project_folders(basic_paths, project_name) + create_project_folders(project_name) except Exception as exc: self.log.warning( "Cannot create starting folders: {}".format(exc), diff --git a/openpype/tools/publisher/__init__.py b/openpype/tools/publisher/__init__.py index a7b597eece..e69de29bb2 100644 --- a/openpype/tools/publisher/__init__.py +++ b/openpype/tools/publisher/__init__.py @@ -1,7 +0,0 @@ -from .app import show -from .window import PublisherWindow - -__all__ = ( - "show", - "PublisherWindow" -) diff --git a/openpype/tools/publisher/constants.py b/openpype/tools/publisher/constants.py index dc44aade45..74337ea1ab 100644 --- a/openpype/tools/publisher/constants.py +++ b/openpype/tools/publisher/constants.py @@ -3,6 +3,10 @@ from Qt import QtCore # ID of context item in instance view CONTEXT_ID = "context" CONTEXT_LABEL = "Options" +# Not showed anywhere - used as identifier +CONTEXT_GROUP = "__ContextGroup__" + +CONVERTOR_ITEM_GROUP = "Incompatible subsets" # Allowed symbols for subset name (and variant) # - characters, numbers, unsercore and dash @@ -16,7 +20,10 @@ INSTANCE_ID_ROLE = QtCore.Qt.UserRole + 1 SORT_VALUE_ROLE = QtCore.Qt.UserRole + 2 IS_GROUP_ROLE = QtCore.Qt.UserRole + 3 CREATOR_IDENTIFIER_ROLE = QtCore.Qt.UserRole + 4 -FAMILY_ROLE = QtCore.Qt.UserRole + 5 +CREATOR_THUMBNAIL_ENABLED_ROLE = 
QtCore.Qt.UserRole + 5 +FAMILY_ROLE = QtCore.Qt.UserRole + 6 +GROUP_ROLE = QtCore.Qt.UserRole + 7 +CONVERTER_IDENTIFIER_ROLE = QtCore.Qt.UserRole + 8 __all__ = ( diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 2973d6a5bb..615f3eb8d9 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -1,32 +1,56 @@ import os import copy -import inspect import logging import traceback import collections +import uuid +import tempfile +import shutil +from abc import ABCMeta, abstractmethod, abstractproperty -import weakref -try: - from weakref import WeakMethod -except Exception: - from openpype.lib.python_2_comp import WeakMethod - +import six import pyblish.api +from openpype.client import ( + get_assets, + get_asset_by_id, + get_subsets, +) +from openpype.lib.events import EventSystem +from openpype.lib.attribute_definitions import ( + serialize_attr_defs, + deserialize_attr_defs, +) from openpype.pipeline import ( PublishValidationError, + KnownPublishError, registered_host, + legacy_io, + get_process_id, +) +from openpype.pipeline.create import ( + CreateContext, + AutoCreator, + HiddenCreator, + Creator, +) +from openpype.pipeline.create.context import ( + CreatorsOperationFailed, + ConvertorsOperationFailed, ) -from openpype.pipeline.create import CreateContext - -from Qt import QtCore # Define constant for plugin orders offset PLUGIN_ORDER_OFFSET = 0.5 +class CardMessageTypes: + standard = None + error = "error" + + class MainThreadItem: """Callback with args and kwargs.""" + def __init__(self, callback, *args, **kwargs): self.callback = callback self.args = args @@ -36,64 +60,9 @@ class MainThreadItem: self.callback(*self.args, **self.kwargs) -class MainThreadProcess(QtCore.QObject): - """Qt based main thread process executor. - - Has timer which controls each 50ms if there is new item to process. - - This approach gives ability to update UI meanwhile plugin is in progress. 
- """ - - count_timeout = 2 - - def __init__(self): - super(MainThreadProcess, self).__init__() - self._items_to_process = collections.deque() - - timer = QtCore.QTimer() - timer.setInterval(0) - - timer.timeout.connect(self._execute) - - self._timer = timer - self._switch_counter = self.count_timeout - - def process(self, func, *args, **kwargs): - item = MainThreadItem(func, *args, **kwargs) - self.add_item(item) - - def add_item(self, item): - self._items_to_process.append(item) - - def _execute(self): - if not self._items_to_process: - return - - if self._switch_counter > 0: - self._switch_counter -= 1 - return - - self._switch_counter = self.count_timeout - - item = self._items_to_process.popleft() - item.process() - - def start(self): - if not self._timer.isActive(): - self._timer.start() - - def stop(self): - if self._timer.isActive(): - self._timer.stop() - - def clear(self): - if self._timer.isActive(): - self._timer.stop() - self._items_to_process = collections.deque() - - class AssetDocsCache: """Cache asset documents for creation part.""" + projection = { "_id": True, "name": True, @@ -104,44 +73,100 @@ class AssetDocsCache: def __init__(self, controller): self._controller = controller self._asset_docs = None + self._asset_docs_hierarchy = None self._task_names_by_asset_name = {} - - @property - def dbcon(self): - return self._controller.dbcon + self._asset_docs_by_name = {} + self._full_asset_docs_by_name = {} def reset(self): self._asset_docs = None + self._asset_docs_hierarchy = None self._task_names_by_asset_name = {} + self._asset_docs_by_name = {} + self._full_asset_docs_by_name = {} def _query(self): - if self._asset_docs is None: - asset_docs = list(self.dbcon.find( - {"type": "asset"}, - self.projection - )) - task_names_by_asset_name = {} - for asset_doc in asset_docs: - asset_name = asset_doc["name"] - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_names_by_asset_name[asset_name] = list(asset_tasks.keys()) - self._asset_docs = asset_docs - self._task_names_by_asset_name = task_names_by_asset_name + if self._asset_docs is not None: + return + + project_name = self._controller.project_name + asset_docs = list(get_assets( + project_name, fields=self.projection.keys() + )) + asset_docs_by_name = {} + task_names_by_asset_name = {} + for asset_doc in asset_docs: + if "data" not in asset_doc: + asset_doc["data"] = {"tasks": {}, "visualParent": None} + elif "tasks" not in asset_doc["data"]: + asset_doc["data"]["tasks"] = {} + + asset_name = asset_doc["name"] + asset_tasks = asset_doc["data"]["tasks"] + task_names_by_asset_name[asset_name] = list(asset_tasks.keys()) + asset_docs_by_name[asset_name] = asset_doc + + self._asset_docs = asset_docs + self._asset_docs_by_name = asset_docs_by_name + self._task_names_by_asset_name = task_names_by_asset_name def get_asset_docs(self): self._query() return copy.deepcopy(self._asset_docs) + def get_asset_hierarchy(self): + """Prepare asset documents into hierarchy. + + Convert ObjectId to string. Asset id is not used during whole + process of publisher but asset name is used rather. + + Returns: + Dict[Union[str, None]: Any]: Mapping of parent id to it's children. + Top level assets have parent id 'None'. 
+ """ + + if self._asset_docs_hierarchy is None: + _queue = collections.deque(self.get_asset_docs()) + + output = collections.defaultdict(list) + while _queue: + asset_doc = _queue.popleft() + asset_doc["_id"] = str(asset_doc["_id"]) + parent_id = asset_doc["data"]["visualParent"] + if parent_id is not None: + parent_id = str(parent_id) + asset_doc["data"]["visualParent"] = parent_id + output[parent_id].append(asset_doc) + self._asset_docs_hierarchy = output + return copy.deepcopy(self._asset_docs_hierarchy) + def get_task_names_by_asset_name(self): self._query() return copy.deepcopy(self._task_names_by_asset_name) + def get_asset_by_name(self, asset_name): + self._query() + asset_doc = self._asset_docs_by_name.get(asset_name) + if asset_doc is None: + return None + return copy.deepcopy(asset_doc) + + def get_full_asset_by_name(self, asset_name): + self._query() + if asset_name not in self._full_asset_docs_by_name: + asset_doc = self._asset_docs_by_name.get(asset_name) + project_name = self._controller.project_name + full_asset_doc = get_asset_by_id(project_name, asset_doc["_id"]) + self._full_asset_docs_by_name[asset_name] = full_asset_doc + return copy.deepcopy(self._full_asset_docs_by_name[asset_name]) + class PublishReport: """Report for single publishing process. Report keeps current state of publishing and currently processed plugin. """ + def __init__(self, controller): self.controller = controller self._publish_discover_result = None @@ -153,15 +178,20 @@ class PublishReport: self._all_instances_by_id = {} self._current_context = None - def reset(self, context, publish_discover_result=None): + def reset(self, context, create_context): """Reset report and clear all data.""" - self._publish_discover_result = publish_discover_result + + self._publish_discover_result = create_context.publish_discover_result self._plugin_data = [] self._plugin_data_with_plugin = [] self._current_plugin_data = {} self._all_instances_by_id = {} self._current_context = context + for plugin in create_context.publish_plugins_mismatch_targets: + plugin_data = self._add_plugin_data_item(plugin) + plugin_data["skipped"] = True + def add_plugin_iter(self, plugin, context): """Add report about single iteration of plugin.""" for instance in context: @@ -204,6 +234,7 @@ class PublishReport: "name": plugin.__name__, "label": label, "order": plugin.order, + "targets": list(plugin.targets), "instances_data": [], "actions_data": [], "skipped": False, @@ -216,13 +247,15 @@ class PublishReport: def add_result(self, result): """Handle result of one plugin and it's instance.""" + instance = result["instance"] instance_id = None if instance is not None: instance_id = instance.id self._current_plugin_data["instances_data"].append({ "id": instance_id, - "logs": self._extract_instance_log_items(result) + "logs": self._extract_instance_log_items(result), + "process_time": result["duration"] }) def add_action_result(self, action, result): @@ -272,12 +305,17 @@ class PublishReport: "plugins_data": plugins_data, "instances": instances_details, "context": self._extract_context_data(self._current_context), - "crashed_file_paths": crashed_file_paths + "crashed_file_paths": crashed_file_paths, + "id": str(uuid.uuid4()), + "report_version": "1.0.0" } def _extract_context_data(self, context): + context_label = "Context" + if context is not None: + context_label = context.data.get("label") return { - "label": context.data.get("label") + "label": context_label } def _extract_instance_data(self, instance, exists): @@ -344,7 +382,1204 @@ 
class PublishReport: return output -class PublisherController: +class PublishPluginsProxy: + """Wrapper around publish plugin. + + Prepare mapping for publish plugins and actions. Also can create + serializable data for plugin actions so UI don't have to have access to + them. + + This object is created in process where publishing is actually running. + + Notes: + Actions have id but single action can be used on multiple plugins so + to run an action is needed combination of plugin and action. + + Args: + plugins [List[pyblish.api.Plugin]]: Discovered plugins that will be + processed. + """ + + def __init__(self, plugins): + plugins_by_id = {} + actions_by_id = {} + action_ids_by_plugin_id = {} + for plugin in plugins: + plugin_id = plugin.id + plugins_by_id[plugin_id] = plugin + + action_ids = set() + action_ids_by_plugin_id[plugin_id] = action_ids + + actions = getattr(plugin, "actions", None) or [] + for action in actions: + action_id = action.id + action_ids.add(action_id) + actions_by_id[action_id] = action + + self._plugins_by_id = plugins_by_id + self._actions_by_id = actions_by_id + self._action_ids_by_plugin_id = action_ids_by_plugin_id + + def get_action(self, action_id): + return self._actions_by_id[action_id] + + def get_plugin(self, plugin_id): + return self._plugins_by_id[plugin_id] + + def get_plugin_id(self, plugin): + """Get id of plugin based on plugin object. + + It's used for validation errors report. + + Args: + plugin (pyblish.api.Plugin): Publish plugin for which id should be + returned. + + Returns: + str: Plugin id. + """ + + return plugin.id + + def get_plugin_action_items(self, plugin_id): + """Get plugin action items for plugin by it's id. + + Args: + plugin_id (str): Publish plugin id. + + Returns: + List[PublishPluginActionItem]: Items with information about publish + plugin actions. + """ + + return [ + self._create_action_item(self._actions_by_id[action_id], plugin_id) + for action_id in self._action_ids_by_plugin_id[plugin_id] + ] + + def _create_action_item(self, action, plugin_id): + label = action.label or action.__name__ + icon = getattr(action, "icon", None) + return PublishPluginActionItem( + action.id, + plugin_id, + action.active, + action.on, + label, + icon + ) + + +class PublishPluginActionItem: + """Representation of publish plugin action. + + Data driven object which is used as proxy for controller and UI. + + Args: + action_id (str): Action id. + plugin_id (str): Plugin id. + active (bool): Action is active. + on_filter (str): Actions have 'on' attribte which define when can be + action triggered (e.g. 'all', 'failed', ...). + label (str): Action's label. + icon (Union[str, None]) Action's icon. + """ + + def __init__(self, action_id, plugin_id, active, on_filter, label, icon): + self.action_id = action_id + self.plugin_id = plugin_id + self.active = active + self.on_filter = on_filter + self.label = label + self.icon = icon + + def to_data(self): + """Serialize object to dictionary. + + Returns: + Dict[str, Union[str,bool,None]]: Serialized object. + """ + + return { + "action_id": self.action_id, + "plugin_id": self.plugin_id, + "active": self.active, + "on_filter": self.on_filter, + "label": self.label, + "icon": self.icon + } + + @classmethod + def from_data(cls, data): + """Create object from data. + + Args: + data (Dict[str, Union[str,bool,None]]): Data used to recreate + object. + + Returns: + PublishPluginActionItem: Object created using data. 
+ """ + + return cls(**data) + + +class ValidationErrorItem: + """Data driven validation error item. + + Prepared data container with information about validation error and it's + source plugin. + + Can be converted to raw data and recreated should be used for controller + and UI connection. + + Args: + instance_id (str): Id of pyblish instance to which is validation error + connected. + instance_label (str): Prepared instance label. + plugin_id (str): Id of pyblish Plugin which triggered the validation + error. Id is generated using 'PublishPluginsProxy'. + """ + + def __init__( + self, + instance_id, + instance_label, + plugin_id, + context_validation, + title, + description, + detail, + ): + self.instance_id = instance_id + self.instance_label = instance_label + self.plugin_id = plugin_id + self.context_validation = context_validation + self.title = title + self.description = description + self.detail = detail + + def to_data(self): + """Serialize object to dictionary. + + Returns: + Dict[str, Union[str, bool, None]]: Serialized object data. + """ + + return { + "instance_id": self.instance_id, + "instance_label": self.instance_label, + "plugin_id": self.plugin_id, + "context_validation": self.context_validation, + "title": self.title, + "description": self.description, + "detail": self.detail, + } + + @classmethod + def from_result(cls, plugin_id, error, instance): + """Create new object based on resukt from controller. + + Returns: + ValidationErrorItem: New object with filled data. + """ + + instance_label = None + instance_id = None + if instance is not None: + instance_label = ( + instance.data.get("label") or instance.data.get("name") + ) + instance_id = instance.id + + return cls( + instance_id, + instance_label, + plugin_id, + instance is None, + error.title, + error.description, + error.detail, + ) + + @classmethod + def from_data(cls, data): + return cls(**data) + + +class PublishValidationErrorsReport: + """Publish validation errors report that can be parsed to raw data. + + Args: + error_items (List[ValidationErrorItem]): List of validation errors. + plugin_action_items (Dict[str, PublishPluginActionItem]): Action items + by plugin id. + """ + + def __init__(self, error_items, plugin_action_items): + self._error_items = error_items + self._plugin_action_items = plugin_action_items + + def __iter__(self): + for item in self._error_items: + yield item + + def group_items_by_title(self): + """Group errors by plugin and their titles. + + Items are grouped by plugin and title -> same title from different + plugin is different item. Items are ordered by plugin order. + + Returns: + List[Dict[str, Any]]: List where each item title, instance + information related to title and possible plugin actions. 
+ """ + + ordered_plugin_ids = [] + error_items_by_plugin_id = collections.defaultdict(list) + for error_item in self._error_items: + plugin_id = error_item.plugin_id + if plugin_id not in ordered_plugin_ids: + ordered_plugin_ids.append(plugin_id) + error_items_by_plugin_id[plugin_id].append(error_item) + + grouped_error_items = [] + for plugin_id in ordered_plugin_ids: + plugin_action_items = self._plugin_action_items[plugin_id] + error_items = error_items_by_plugin_id[plugin_id] + + titles = [] + error_items_by_title = collections.defaultdict(list) + for error_item in error_items: + title = error_item.title + if title not in titles: + titles.append(error_item.title) + error_items_by_title[title].append(error_item) + + for title in titles: + grouped_error_items.append({ + "plugin_action_items": list(plugin_action_items), + "error_items": error_items_by_title[title], + "title": title + }) + return grouped_error_items + + def to_data(self): + """Serialize object to dictionary. + + Returns: + Dict[str, Any]: Serialized data. + """ + + error_items = [ + item.to_data() + for item in self._error_items + ] + + plugin_action_items = { + plugin_id: [ + action_item.to_data() + for action_item in action_items + ] + for plugin_id, action_items in self._plugin_action_items.items() + } + + return { + "error_items": error_items, + "plugin_action_items": plugin_action_items + } + + @classmethod + def from_data(cls, data): + """Recreate object from data. + + Args: + data (dict[str, Any]): Data to recreate object. Can be created + using 'to_data' method. + + Returns: + PublishValidationErrorsReport: New object based on data. + """ + + error_items = [ + ValidationErrorItem.from_data(error_item) + for error_item in data["error_items"] + ] + plugin_action_items = [ + PublishPluginActionItem.from_data(action_item) + for action_item in data["plugin_action_items"] + ] + return cls(error_items, plugin_action_items) + + +class PublishValidationErrors: + """Object to keep track about validation errors by plugin.""" + + def __init__(self): + self._plugins_proxy = None + self._error_items = [] + self._plugin_action_items = {} + + def __bool__(self): + return self.has_errors + + @property + def has_errors(self): + """At least one error was added.""" + + return bool(self._error_items) + + def reset(self, plugins_proxy): + """Reset object to default state. + + Args: + plugins_proxy (PublishPluginsProxy): Proxy which store plugins, + actions by ids and create mapping of action ids by plugin ids. + """ + + self._plugins_proxy = plugins_proxy + self._error_items = [] + self._plugin_action_items = {} + + def create_report(self): + """Create report based on currently existing errors. + + Returns: + PublishValidationErrorsReport: Validation error report with all + error information and publish plugin action items. + """ + + return PublishValidationErrorsReport( + self._error_items, self._plugin_action_items + ) + + def add_error(self, plugin, error, instance): + """Add error from pyblish result. + + Args: + plugin (pyblish.api.Plugin): Plugin which triggered error. + error (ValidationException): Validation error. + instance (Union[pyblish.api.Instance, None]): Instance on which was + error raised or None if was raised on context. 
+ """ + + # Make sure the cached report is cleared + plugin_id = self._plugins_proxy.get_plugin_id(plugin) + self._error_items.append( + ValidationErrorItem.from_result(plugin_id, error, instance) + ) + if plugin_id in self._plugin_action_items: + return + + plugin_actions = self._plugins_proxy.get_plugin_action_items( + plugin_id + ) + self._plugin_action_items[plugin_id] = plugin_actions + + +class CreatorType: + def __init__(self, name): + self.name = name + + def __str__(self): + return self.name + + def __eq__(self, other): + return self.name == str(other) + + +class CreatorTypes: + base = CreatorType("base") + auto = CreatorType("auto") + hidden = CreatorType("hidden") + artist = CreatorType("artist") + + @classmethod + def from_str(cls, value): + for creator_type in ( + cls.base, + cls.auto, + cls.hidden, + cls.artist + ): + if value == creator_type: + return creator_type + raise ValueError("Unknown type \"{}\"".format(str(value))) + + +class CreatorItem: + """Wrapper around Creator plugin. + + Object can be serialized and recreated. + """ + + def __init__( + self, + identifier, + creator_type, + family, + label, + group_label, + icon, + instance_attributes_defs, + description, + detailed_description, + default_variant, + default_variants, + create_allow_context_change, + create_allow_thumbnail, + pre_create_attributes_defs + ): + self.identifier = identifier + self.creator_type = creator_type + self.family = family + self.label = label + self.group_label = group_label + self.icon = icon + self.description = description + self.detailed_description = detailed_description + self.default_variant = default_variant + self.default_variants = default_variants + self.create_allow_context_change = create_allow_context_change + self.create_allow_thumbnail = create_allow_thumbnail + self.instance_attributes_defs = instance_attributes_defs + self.pre_create_attributes_defs = pre_create_attributes_defs + + def get_instance_attr_defs(self): + return self.instance_attributes_defs + + def get_group_label(self): + return self.group_label + + @classmethod + def from_creator(cls, creator): + if isinstance(creator, AutoCreator): + creator_type = CreatorTypes.auto + elif isinstance(creator, HiddenCreator): + creator_type = CreatorTypes.hidden + elif isinstance(creator, Creator): + creator_type = CreatorTypes.artist + else: + creator_type = CreatorTypes.base + + description = None + detail_description = None + default_variant = None + default_variants = None + pre_create_attr_defs = None + create_allow_context_change = None + create_allow_thumbnail = None + if creator_type is CreatorTypes.artist: + description = creator.get_description() + detail_description = creator.get_detail_description() + default_variant = creator.get_default_variant() + default_variants = creator.get_default_variants() + pre_create_attr_defs = creator.get_pre_create_attr_defs() + create_allow_context_change = creator.create_allow_context_change + create_allow_thumbnail = creator.create_allow_thumbnail + + identifier = creator.identifier + return cls( + identifier, + creator_type, + creator.family, + creator.label or identifier, + creator.get_group_label(), + creator.get_icon(), + creator.get_instance_attr_defs(), + description, + detail_description, + default_variant, + default_variants, + create_allow_context_change, + create_allow_thumbnail, + pre_create_attr_defs + ) + + def to_data(self): + instance_attributes_defs = None + if self.instance_attributes_defs is not None: + instance_attributes_defs = serialize_attr_defs( + 
self.instance_attributes_defs
+            )
+
+        pre_create_attributes_defs = None
+        if self.pre_create_attributes_defs is not None:
+            pre_create_attributes_defs = serialize_attr_defs(
+                self.pre_create_attributes_defs
+            )
+
+        return {
+            "identifier": self.identifier,
+            "creator_type": str(self.creator_type),
+            "family": self.family,
+            "label": self.label,
+            "group_label": self.group_label,
+            "icon": self.icon,
+            "description": self.description,
+            "detailed_description": self.detailed_description,
+            "default_variant": self.default_variant,
+            "default_variants": self.default_variants,
+            "create_allow_context_change": self.create_allow_context_change,
+            "create_allow_thumbnail": self.create_allow_thumbnail,
+            "instance_attributes_defs": instance_attributes_defs,
+            "pre_create_attributes_defs": pre_create_attributes_defs,
+        }
+
+    @classmethod
+    def from_data(cls, data):
+        instance_attributes_defs = data["instance_attributes_defs"]
+        if instance_attributes_defs is not None:
+            data["instance_attributes_defs"] = deserialize_attr_defs(
+                instance_attributes_defs
+            )
+
+        pre_create_attributes_defs = data["pre_create_attributes_defs"]
+        if pre_create_attributes_defs is not None:
+            data["pre_create_attributes_defs"] = deserialize_attr_defs(
+                pre_create_attributes_defs
+            )
+
+        data["creator_type"] = CreatorTypes.from_str(data["creator_type"])
+        return cls(**data)
+
+
+@six.add_metaclass(ABCMeta)
+class AbstractPublisherController(object):
+    """Publisher tool controller.
+
+    Define what must be implemented to be able to use Publisher functionality.
+
+    Goal is to have a "data driven" controller that can be used to control
+    a UI running in a different process. That leads to some disadvantages:
+    the UI can't access objects directly, only through wrappers that can be
+    serialized.
+    """
+
+    @abstractproperty
+    def log(self):
+        """Controller's logger object.
+
+        Returns:
+            logging.Logger: Logger object that can be used for logging.
+        """
+
+        pass
+
+    @abstractproperty
+    def event_system(self):
+        """Inner event system for publisher controller."""
+
+        pass
+
+    @abstractproperty
+    def project_name(self):
+        """Current context project name.
+
+        Returns:
+            str: Name of project.
+        """
+
+        pass
+
+    @abstractproperty
+    def current_asset_name(self):
+        """Current context asset name.
+
+        Returns:
+            Union[str, None]: Name of asset.
+        """
+
+        pass
+
+    @abstractproperty
+    def current_task_name(self):
+        """Current context task name.
+
+        Returns:
+            Union[str, None]: Name of task.
+        """
+
+        pass
+
+    @abstractproperty
+    def host_is_valid(self):
+        """Host is valid for creation part.
+
+        Host must have implemented certain functionality to be able to
+        create in Publisher tool.
+
+        Returns:
+            bool: Host can handle creation of instances.
+        """
+
+        pass
+
+    @abstractproperty
+    def instances(self):
+        """Collected/created instances.
+
+        Returns:
+            List[CreatedInstance]: List of created instances.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_context_title(self):
+        """Get context title for artist shown at the top of main window.
+
+        Returns:
+            Union[str, None]: Context title for window or None. In case of
+                None a warning is displayed (not nice for artists).
+        """
+
+        pass
+
+    @abstractmethod
+    def get_asset_docs(self):
+        pass
+
+    @abstractmethod
+    def get_asset_hierarchy(self):
+        pass
+
+    @abstractmethod
+    def get_task_names_by_asset_names(self, asset_names):
+        pass
+
+    @abstractmethod
+    def get_existing_subset_names(self, asset_name):
+        pass
+
+    @abstractmethod
+    def reset(self):
+        """Reset whole controller.
+
+        This should reset create context, publish context and all variables
+        that are related to it.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_creator_attribute_definitions(self, instances):
+        pass
+
+    @abstractmethod
+    def get_publish_attribute_definitions(self, instances, include_context):
+        pass
+
+    @abstractmethod
+    def get_creator_icon(self, identifier):
+        """Receive creator's icon by identifier.
+
+        Args:
+            identifier (str): Creator's identifier.
+
+        Returns:
+            Union[str, None]: Creator's icon string.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_subset_name(
+        self,
+        creator_identifier,
+        variant,
+        task_name,
+        asset_name,
+        instance_id=None
+    ):
+        """Get subset name based on passed data.
+
+        Args:
+            creator_identifier (str): Identifier of creator which should be
+                responsible for subset name creation.
+            variant (str): Variant value from user's input.
+            task_name (str): Name of task for which is instance created.
+            asset_name (str): Name of asset for which is instance created.
+            instance_id (Union[str, None]): Existing instance id when subset
+                name is updated.
+        """
+
+        pass
+
+    @abstractmethod
+    def create(
+        self, creator_identifier, subset_name, instance_data, options
+    ):
+        """Trigger creation by creator identifier.
+
+        Should also trigger refresh of instances.
+
+        Args:
+            creator_identifier (str): Identifier of Creator plugin.
+            subset_name (str): Calculated subset name.
+            instance_data (Dict[str, Any]): Base instance data with variant,
+                asset name and task name.
+            options (Dict[str, Any]): Data from pre-create attributes.
+        """
+
+        pass
+
+    @abstractmethod
+    def save_changes(self):
+        """Save changes in create context."""
+
+        pass
+
+    @abstractmethod
+    def remove_instances(self, instance_ids):
+        """Remove list of instances from create context."""
+        # TODO expect instance ids
+
+        pass
+
+    @abstractproperty
+    def publish_has_finished(self):
+        """Has publishing finished.
+
+        Returns:
+            bool: If publishing finished and all plugins were iterated.
+        """
+
+        pass
+
+    @abstractproperty
+    def publish_is_running(self):
+        """Publishing is running right now.
+
+        Returns:
+            bool: If publishing is in progress.
+        """
+
+        pass
+
+    @abstractproperty
+    def publish_has_validated(self):
+        """Publish validation passed.
+
+        Returns:
+            bool: If publishing passed last possible validation order.
+        """
+
+        pass
+
+    @abstractproperty
+    def publish_has_crashed(self):
+        """Publishing crashed for any reason.
+
+        Returns:
+            bool: Publishing crashed.
+        """
+
+        pass
+
+    @abstractproperty
+    def publish_has_validation_errors(self):
+        """At least one validation error happened during validation.
+
+        Returns:
+            bool: Validation error was raised during validation.
+        """
+
+        pass
+
+    @abstractproperty
+    def publish_max_progress(self):
+        """Get maximum possible progress number.
+
+        Returns:
+            int: Number that can be used as 100% of publish progress bar.
+        """
+
+        pass
+
+    @abstractproperty
+    def publish_progress(self):
+        """Current progress number.
+
+        Returns:
+            int: Current progress value from 0 to 'publish_max_progress'.
+        """
+
+        pass
+
+    @abstractproperty
+    def publish_error_msg(self):
+        """Current error message which caused publishing to fail.
+
+        Returns:
+            Union[str, None]: Message which will be shown to artist or
+                None.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_publish_report(self):
+        pass
+
+    @abstractmethod
+    def get_validation_errors(self):
+        pass
+
+    @abstractmethod
+    def publish(self):
+        """Trigger publishing without any order limitations."""
+
+        pass
+
+    @abstractmethod
+    def validate(self):
+        """Trigger publishing which will stop after validation order."""
+
+        pass
+
+    @abstractmethod
+    def stop_publish(self):
+        """Stop publishing; can also be used to pause publishing.
+
+        Pause of publishing is possible only if all plugins successfully
+        finished.
+        """
+
+        pass
+
+    @abstractmethod
+    def run_action(self, plugin_id, action_id):
+        """Trigger pyblish action on a plugin.
+
+        Args:
+            plugin_id (str): Id of publish plugin.
+            action_id (str): Id of publish action.
+        """
+
+        pass
+
+    @abstractproperty
+    def convertor_items(self):
+        pass
+
+    @abstractmethod
+    def trigger_convertor_items(self, convertor_identifiers):
+        pass
+
+    @abstractmethod
+    def get_thumbnail_paths_for_instances(self, instance_ids):
+        pass
+
+    @abstractmethod
+    def set_thumbnail_paths_for_instances(self, thumbnail_path_mapping):
+        pass
+
+    @abstractmethod
+    def set_comment(self, comment):
+        """Set comment on pyblish context.
+
+        Set "comment" key on current pyblish.api.Context data.
+
+        Args:
+            comment (str): Artist's comment.
+        """
+
+        pass
+
+    @abstractmethod
+    def emit_card_message(
+        self, message, message_type=CardMessageTypes.standard
+    ):
+        """Emit a card message which can have a lifetime.
+
+        This is for UI purposes. Method can be extended to more arguments
+        in future e.g. different message timeout or type (color).
+
+        Args:
+            message (str): Message that will be shown.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_thumbnail_temp_dir_path(self):
+        """Return path to directory where thumbnails can be temporarily
+        stored.
+
+        Returns:
+            str: Path to a directory.
+        """
+
+        pass
+
+    @abstractmethod
+    def clear_thumbnail_temp_dir_path(self):
+        """Remove content of thumbnail temp directory."""
+
+        pass
+
+
+class BasePublisherController(AbstractPublisherController):
+    """Implement common logic for controllers.
+
+    Implement event system, logger and common attributes. Attribute changes
+    trigger events so anyone can listen to their topics.
+
+    Prepare implementation for creator items. A controller only has to fill
+    them in by implementing '_collect_creator_items'.
+
+    All prepared implementation is based on calling super '__init__'.
+    """
+
+    def __init__(self):
+        self._log = None
+        self._event_system = None
+
+        # Host is valid for creation
+        self._host_is_valid = False
+
+        # Any other exception that happened during publishing
+        self._publish_error_msg = None
+        # Publishing is in progress
+        self._publish_is_running = False
+        # Publishing is over validation order
+        self._publish_has_validated = False
+
+        self._publish_has_validation_errors = False
+        self._publish_has_crashed = False
+        # All publish plugins are processed
+        self._publish_has_finished = False
+        self._publish_max_progress = 0
+        self._publish_progress = 0
+
+        # Controller must call '_collect_creator_items' to fill the value
+        self._creator_items = None
+
+    @property
+    def log(self):
+        """Controller's logger object.
+
+        Returns:
+            logging.Logger: Logger object that can be used for logging.
+        """
+
+        if self._log is None:
+            self._log = logging.getLogger(self.__class__.__name__)
+        return self._log
+
+    @property
+    def event_system(self):
+        """Inner event system for publisher controller.
+
+        Is used for communication with UI. Event system is autocreated.
+ + Known topics: + "show.detailed.help" - Detailed help requested (UI related). + "show.card.message" - Show card message request (UI related). + "instances.refresh.finished" - Instances are refreshed. + "plugins.refresh.finished" - Plugins refreshed. + "publish.reset.finished" - Publish context reset finished. + "controller.reset.finished" - Controller reset finished. + "publish.process.started" - Publishing started. Can be started from + paused state. + "publish.process.stopped" - Publishing stopped/paused process. + "publish.process.plugin.changed" - Plugin state has changed. + "publish.process.instance.changed" - Instance state has changed. + "publish.has_validated.changed" - Attr 'publish_has_validated' + changed. + "publish.is_running.changed" - Attr 'publish_is_running' changed. + "publish.has_crashed.changed" - Attr 'publish_has_crashed' changed. + "publish.publish_error.changed" - Attr 'publish_error' + "publish.has_validation_errors.changed" - Attr + 'has_validation_errors' changed. + "publish.max_progress.changed" - Attr 'publish_max_progress' + changed. + "publish.progress.changed" - Attr 'publish_progress' changed. + "publish.host_is_valid.changed" - Attr 'host_is_valid' changed. + "publish.finished.changed" - Attr 'publish_has_finished' changed. + + Returns: + EventSystem: Event system which can trigger callbacks for topics. + """ + + if self._event_system is None: + self._event_system = EventSystem() + return self._event_system + + def _emit_event(self, topic, data=None): + if data is None: + data = {} + self.event_system.emit(topic, data, "controller") + + def _get_host_is_valid(self): + return self._host_is_valid + + def _set_host_is_valid(self, value): + if self._host_is_valid != value: + self._host_is_valid = value + self._emit_event("publish.host_is_valid.changed", {"value": value}) + + def _get_publish_has_finished(self): + return self._publish_has_finished + + def _set_publish_has_finished(self, value): + if self._publish_has_finished != value: + self._publish_has_finished = value + self._emit_event("publish.finished.changed", {"value": value}) + + def _get_publish_is_running(self): + return self._publish_is_running + + def _set_publish_is_running(self, value): + if self._publish_is_running != value: + self._publish_is_running = value + self._emit_event("publish.is_running.changed", {"value": value}) + + def _get_publish_has_validated(self): + return self._publish_has_validated + + def _set_publish_has_validated(self, value): + if self._publish_has_validated != value: + self._publish_has_validated = value + self._emit_event("publish.has_validated.changed", {"value": value}) + + def _get_publish_has_crashed(self): + return self._publish_has_crashed + + def _set_publish_has_crashed(self, value): + if self._publish_has_crashed != value: + self._publish_has_crashed = value + self._emit_event("publish.has_crashed.changed", {"value": value}) + + def _get_publish_has_validation_errors(self): + return self._publish_has_validation_errors + + def _set_publish_has_validation_errors(self, value): + if self._publish_has_validation_errors != value: + self._publish_has_validation_errors = value + self._emit_event( + "publish.has_validation_errors.changed", + {"value": value} + ) + + def _get_publish_max_progress(self): + return self._publish_max_progress + + def _set_publish_max_progress(self, value): + if self._publish_max_progress != value: + self._publish_max_progress = value + self._emit_event("publish.max_progress.changed", {"value": value}) + + def 
_get_publish_progress(self): + return self._publish_progress + + def _set_publish_progress(self, value): + if self._publish_progress != value: + self._publish_progress = value + self._emit_event("publish.progress.changed", {"value": value}) + + def _get_publish_error_msg(self): + return self._publish_error_msg + + def _set_publish_error_msg(self, value): + if self._publish_error_msg != value: + self._publish_error_msg = value + self._emit_event("publish.publish_error.changed", {"value": value}) + + host_is_valid = property( + _get_host_is_valid, _set_host_is_valid + ) + publish_has_finished = property( + _get_publish_has_finished, _set_publish_has_finished + ) + publish_is_running = property( + _get_publish_is_running, _set_publish_is_running + ) + publish_has_validated = property( + _get_publish_has_validated, _set_publish_has_validated + ) + publish_has_crashed = property( + _get_publish_has_crashed, _set_publish_has_crashed + ) + publish_has_validation_errors = property( + _get_publish_has_validation_errors, _set_publish_has_validation_errors + ) + publish_max_progress = property( + _get_publish_max_progress, _set_publish_max_progress + ) + publish_progress = property( + _get_publish_progress, _set_publish_progress + ) + publish_error_msg = property( + _get_publish_error_msg, _set_publish_error_msg + ) + + def _reset_attributes(self): + """Reset most of attributes that can be reset.""" + + # Reset creator items + self._creator_items = None + + self.publish_is_running = False + self.publish_has_validated = False + self.publish_has_crashed = False + self.publish_has_validation_errors = False + self.publish_has_finished = False + + self.publish_error_msg = None + self.publish_progress = 0 + + @property + def creator_items(self): + """Creators that can be shown in create dialog.""" + if self._creator_items is None: + self._creator_items = self._collect_creator_items() + return self._creator_items + + @abstractmethod + def _collect_creator_items(self): + """Receive CreatorItems to work with. + + Returns: + Dict[str, CreatorItem]: Creator items by their identifier. + """ + + pass + + def get_creator_icon(self, identifier): + """Function to receive icon for creator identifier. + + Args: + str: Creator's identifier for which should be icon returned. + """ + + creator_item = self.creator_items.get(identifier) + if creator_item is not None: + return creator_item.icon + return None + + def get_thumbnail_temp_dir_path(self): + """Return path to directory where thumbnails can be temporary stored. + + Returns: + str: Path to a directory. + """ + + return os.path.join( + tempfile.gettempdir(), + "publisher_thumbnails", + get_process_id() + ) + + def clear_thumbnail_temp_dir_path(self): + """Remove content of thumbnail temp directory.""" + + dirpath = self.get_thumbnail_temp_dir_path() + if os.path.exists(dirpath): + shutil.rmtree(dirpath) + + +class PublisherController(BasePublisherController): """Middleware between UI, CreateContext and publish Context. Handle both creation and publishing parts. @@ -353,35 +1588,30 @@ class PublisherController: dbcon (AvalonMongoDB): Connection to mongo with context. headless (bool): Headless publishing. ATM not implemented or used. 
""" - def __init__(self, dbcon=None, headless=False): - self.log = logging.getLogger("PublisherController") - self.host = registered_host() - self.headless = headless - self.create_context = CreateContext( - self.host, dbcon, headless=headless, reset=False + _log = None + + def __init__(self, dbcon=None, headless=False): + super(PublisherController, self).__init__() + + self._host = registered_host() + self._headless = headless + + self._create_context = CreateContext( + self._host, dbcon, headless=headless, reset=False ) + self._publish_plugins_proxy = None + # pyblish.api.Context self._publish_context = None # Pyblish report self._publish_report = PublishReport(self) # Store exceptions of validation error - self._publish_validation_errors = [] - # Currently processing plugin errors - self._publish_current_plugin_validation_errors = None - # Any other exception that happened during publishing - self._publish_error = None - # Publishing is in progress - self._publish_is_running = False - # Publishing is over validation order - self._publish_validated = False + self._publish_validation_errors = PublishValidationErrors() + # Publishing should stop at validation stage self._publish_up_validation = False - # All publish plugins are processed - self._publish_finished = False - self._publish_max_progress = 0 - self._publish_progress = 0 # This information is not much important for controller but for widget # which can change (and set) the comment. self._publish_comment_is_set = False @@ -393,23 +1623,9 @@ class PublisherController: pyblish.api.ValidatorOrder + PLUGIN_ORDER_OFFSET ) - # Qt based main thread processor - self._main_thread_processor = MainThreadProcess() # Plugin iterator self._main_thread_iter = None - # Variables where callbacks are stored - self._instances_refresh_callback_refs = set() - self._plugins_refresh_callback_refs = set() - - self._publish_reset_callback_refs = set() - self._publish_started_callback_refs = set() - self._publish_validated_callback_refs = set() - self._publish_stopped_callback_refs = set() - - self._publish_instance_changed_callback_refs = set() - self._publish_plugin_changed_callback_refs = set() - # State flags to prevent executing method which is already in progress self._resetting_plugins = False self._resetting_instances = False @@ -419,105 +1635,74 @@ class PublisherController: @property def project_name(self): - """Current project context.""" - return self.dbcon.Session["AVALON_PROJECT"] + """Current project context defined by host. + + Returns: + str: Project name. + """ + + if not hasattr(self._host, "get_current_context"): + return legacy_io.active_project() + + return self._host.get_current_context()["project_name"] @property - def dbcon(self): - """Pointer to AvalonMongoDB in creator context.""" - return self.create_context.dbcon + def current_asset_name(self): + """Current context asset name defined by host. + + Returns: + Union[str, None]: Asset name or None if asset is not set. + """ + + if not hasattr(self._host, "get_current_context"): + return legacy_io.Session["AVALON_ASSET"] + + return self._host.get_current_context()["asset_name"] + + @property + def current_task_name(self): + """Current context task name defined by host. + + Returns: + Union[str, None]: Task name or None if task is not set. 
+ """ + + if not hasattr(self._host, "get_current_context"): + return legacy_io.Session["AVALON_TASK"] + + return self._host.get_current_context()["task_name"] @property def instances(self): """Current instances in create context.""" - return self.create_context.instances + return self._create_context.instances_by_id @property - def creators(self): + def convertor_items(self): + return self._create_context.convertor_items_by_id + + @property + def _creators(self): """All creators loaded in create context.""" - return self.create_context.creators + + return self._create_context.creators @property - def manual_creators(self): - """Creators that can be shown in create dialog.""" - return self.create_context.manual_creators - - @property - def host_is_valid(self): - """Host is valid for creation.""" - return self.create_context.host_is_valid - - @property - def publish_plugins(self): + def _publish_plugins(self): """Publish plugins.""" - return self.create_context.publish_plugins - - @property - def plugins_with_defs(self): - """Publish plugins with possible attribute definitions.""" - return self.create_context.plugins_with_defs - - def _create_reference(self, callback): - if inspect.ismethod(callback): - ref = WeakMethod(callback) - elif callable(callback): - ref = weakref.ref(callback) - else: - raise TypeError("Expected function or method got {}".format( - str(type(callback)) - )) - return ref - - def add_instances_refresh_callback(self, callback): - """Callbacks triggered on instances refresh.""" - ref = self._create_reference(callback) - self._instances_refresh_callback_refs.add(ref) - - def add_plugins_refresh_callback(self, callback): - """Callbacks triggered on plugins refresh.""" - ref = self._create_reference(callback) - self._plugins_refresh_callback_refs.add(ref) + return self._create_context.publish_plugins # --- Publish specific callbacks --- - def add_publish_reset_callback(self, callback): - """Callbacks triggered on publishing reset.""" - ref = self._create_reference(callback) - self._publish_reset_callback_refs.add(ref) - - def add_publish_started_callback(self, callback): - """Callbacks triggered on publishing start.""" - ref = self._create_reference(callback) - self._publish_started_callback_refs.add(ref) - - def add_publish_validated_callback(self, callback): - """Callbacks triggered on passing last possible validation order.""" - ref = self._create_reference(callback) - self._publish_validated_callback_refs.add(ref) - - def add_instance_change_callback(self, callback): - """Callbacks triggered before next publish instance process.""" - ref = self._create_reference(callback) - self._publish_instance_changed_callback_refs.add(ref) - - def add_plugin_change_callback(self, callback): - """Callbacks triggered before next plugin processing.""" - ref = self._create_reference(callback) - self._publish_plugin_changed_callback_refs.add(ref) - - def add_publish_stopped_callback(self, callback): - """Callbacks triggered on publishing stop (any reason).""" - ref = self._create_reference(callback) - self._publish_stopped_callback_refs.add(ref) - def get_asset_docs(self): """Get asset documents from cache for whole project.""" return self._asset_docs_cache.get_asset_docs() def get_context_title(self): """Get context title for artist shown at the top of main window.""" + context_title = None - if hasattr(self.host, "get_context_title"): - context_title = self.host.get_context_title() + if hasattr(self._host, "get_context_title"): + context_title = self._host.get_context_title() if 
context_title is None: context_title = os.environ.get("AVALON_APP_NAME") @@ -528,14 +1713,8 @@ class PublisherController: def get_asset_hierarchy(self): """Prepare asset documents into hierarchy.""" - _queue = collections.deque(self.get_asset_docs()) - output = collections.defaultdict(list) - while _queue: - asset_doc = _queue.popleft() - parent_id = asset_doc["data"]["visualParent"] - output[parent_id].append(asset_doc) - return output + return self._asset_docs_cache.get_asset_hierarchy() def get_task_names_by_asset_names(self, asset_names): """Prepare task names by asset name.""" @@ -549,33 +1728,45 @@ class PublisherController: ) return result - def _trigger_callbacks(self, callbacks, *args, **kwargs): - """Helper method to trigger callbacks stored by their rerence.""" - # Trigger reset callbacks - to_remove = set() - for ref in callbacks: - callback = ref() - if callback: - callback(*args, **kwargs) - else: - to_remove.add(ref) + def get_existing_subset_names(self, asset_name): + project_name = self.project_name + asset_doc = self._asset_docs_cache.get_asset_by_name(asset_name) + if not asset_doc: + return None - for ref in to_remove: - callbacks.remove(ref) + asset_id = asset_doc["_id"] + subset_docs = get_subsets( + project_name, asset_ids=[asset_id], fields=["name"] + ) + return { + subset_doc["name"] + for subset_doc in subset_docs + } def reset(self): """Reset everything related to creation and publishing.""" - # Stop publishing self.stop_publish() + self.host_is_valid = self._create_context.host_is_valid + + self._create_context.reset_preparation() + # Reset avalon context - self.create_context.reset_avalon_context() + self._create_context.reset_avalon_context() + + self._asset_docs_cache.reset() self._reset_plugins() # Publish part must be reset after plugins self._reset_publish() self._reset_instances() + self._create_context.reset_finalization() + + self._emit_event("controller.reset.finished") + + self.emit_card_message("Refreshed..") + def _reset_plugins(self): """Reset to initial state.""" if self._resetting_plugins: @@ -583,11 +1774,17 @@ class PublisherController: self._resetting_plugins = True - self.create_context.reset_plugins() + self._create_context.reset_plugins() self._resetting_plugins = False - self._trigger_callbacks(self._plugins_refresh_callback_refs) + self._emit_event("plugins.refresh.finished") + + def _collect_creator_items(self): + return { + identifier: CreatorItem.from_creator(creator) + for identifier, creator in self._create_context.creators.items() + } def _reset_instances(self): """Reset create instances.""" @@ -596,26 +1793,94 @@ class PublisherController: self._resetting_instances = True - self.create_context.reset_context_data() - with self.create_context.bulk_instances_collection(): - self.create_context.reset_instances() - self.create_context.execute_autocreators() + self._create_context.reset_context_data() + with self._create_context.bulk_instances_collection(): + try: + self._create_context.reset_instances() + except CreatorsOperationFailed as exc: + self._emit_event( + "instances.collection.failed", + { + "title": "Instance collection failed", + "failed_info": exc.failed_info + } + ) + + try: + self._create_context.find_convertor_items() + except ConvertorsOperationFailed as exc: + self._emit_event( + "convertors.find.failed", + { + "title": "Collection of unsupported subset failed", + "failed_info": exc.failed_info + } + ) + + try: + self._create_context.execute_autocreators() + + except CreatorsOperationFailed as exc: + self._emit_event( 
+ "instances.create.failed", + { + "title": "AutoCreation failed", + "failed_info": exc.failed_info + } + ) self._resetting_instances = False - self._trigger_callbacks(self._instances_refresh_callback_refs) + self._on_create_instance_change() + + def get_thumbnail_paths_for_instances(self, instance_ids): + thumbnail_paths_by_instance_id = ( + self._create_context.thumbnail_paths_by_instance_id + ) + return { + instance_id: thumbnail_paths_by_instance_id.get(instance_id) + for instance_id in instance_ids + } + + def set_thumbnail_paths_for_instances(self, thumbnail_path_mapping): + thumbnail_paths_by_instance_id = ( + self._create_context.thumbnail_paths_by_instance_id + ) + for instance_id, thumbnail_path in thumbnail_path_mapping.items(): + thumbnail_paths_by_instance_id[instance_id] = thumbnail_path + + self._emit_event( + "instance.thumbnail.changed", + { + "mapping": thumbnail_path_mapping + } + ) + + def emit_card_message( + self, message, message_type=CardMessageTypes.standard + ): + self._emit_event( + "show.card.message", + { + "message": message, + "message_type": message_type + } + ) def get_creator_attribute_definitions(self, instances): """Collect creator attribute definitions for multuple instances. Args: - instances(list): List of created instances for + instances(List[CreatedInstance]): List of created instances for which should be attribute definitions returned. """ + output = [] _attr_defs = {} for instance in instances: - for attr_def in instance.creator_attribute_defs: + creator_identifier = instance.creator_identifier + creator_item = self.creator_items[creator_identifier] + for attr_def in creator_item.instance_attributes_defs: found_idx = None for idx, _attr_def in _attr_defs.items(): if attr_def == _attr_def: @@ -643,9 +1908,10 @@ class PublisherController: which should be attribute definitions returned. include_context(bool): Add context specific attribute definitions. """ + _tmp_items = [] if include_context: - _tmp_items.append(self.create_context) + _tmp_items.append(self._create_context) for instance in instances: _tmp_items.append(instance) @@ -675,7 +1941,7 @@ class PublisherController: attr_values.append((item, value)) output = [] - for plugin in self.plugins_with_defs: + for plugin in self._create_context.plugins_with_defs: plugin_name = plugin.__name__ if plugin_name not in all_defs_by_plugin_name: continue @@ -686,86 +1952,147 @@ class PublisherController: )) return output - def get_icon_for_family(self, family): - """TODO rename to get creator icon.""" - creator = self.creators.get(family) - if creator is not None: - return creator.get_icon() - return None + def get_subset_name( + self, + creator_identifier, + variant, + task_name, + asset_name, + instance_id=None + ): + """Get subset name based on passed data. + + Args: + creator_identifier (str): Identifier of creator which should be + responsible for subset name creation. + variant (str): Variant value from user's input. + task_name (str): Name of task for which is instance created. + asset_name (str): Name of asset for which is instance created. + instance_id (Union[str, None]): Existing instance id when subset + name is updated. 
+ """ + + creator = self._creators[creator_identifier] + project_name = self.project_name + asset_doc = self._asset_docs_cache.get_full_asset_by_name(asset_name) + instance = None + if instance_id: + instance = self.instances[instance_id] + + return creator.get_subset_name( + variant, task_name, asset_doc, project_name, instance=instance + ) + + def trigger_convertor_items(self, convertor_identifiers): + self.save_changes() + + success = True + try: + self._create_context.run_convertors(convertor_identifiers) + + except ConvertorsOperationFailed as exc: + success = False + self._emit_event( + "convertors.convert.failed", + { + "title": "Conversion failed", + "failed_info": exc.failed_info + } + ) + + if success: + self.emit_card_message("Conversion finished") + else: + self.emit_card_message("Conversion failed", CardMessageTypes.error) + + self.reset() def create( self, creator_identifier, subset_name, instance_data, options ): """Trigger creation and refresh of instances in UI.""" - creator = self.creators[creator_identifier] - creator.create(subset_name, instance_data, options) - self._trigger_callbacks(self._instances_refresh_callback_refs) + success = True + try: + self._create_context.create( + creator_identifier, subset_name, instance_data, options + ) + except CreatorsOperationFailed as exc: + success = False + self._emit_event( + "instances.create.failed", + { + "title": "Creation failed", + "failed_info": exc.failed_info + } + ) + + self._on_create_instance_change() + return success def save_changes(self): """Save changes happened during creation.""" - if self.create_context.host_is_valid: - self.create_context.save_changes() + if not self._create_context.host_is_valid: + return - def remove_instances(self, instances): - """""" + try: + self._create_context.save_changes() + + except CreatorsOperationFailed as exc: + self._emit_event( + "instances.save.failed", + { + "title": "Instances save failed", + "failed_info": exc.failed_info + } + ) + + def remove_instances(self, instance_ids): + """Remove instances based on instance ids. + + Args: + instance_ids (List[str]): List of instance ids to remove. + """ # QUESTION Expect that instances are really removed? In that case save # reset is not required and save changes too. 
self.save_changes() - self.create_context.remove_instances(instances) + self._remove_instances_from_context(instance_ids) - self._trigger_callbacks(self._instances_refresh_callback_refs) + self._on_create_instance_change() - # --- Publish specific implementations --- - @property - def publish_has_finished(self): - return self._publish_finished + def _remove_instances_from_context(self, instance_ids): + instances_by_id = self._create_context.instances_by_id + instances = [ + instances_by_id[instance_id] + for instance_id in instance_ids + ] + try: + self._create_context.remove_instances(instances) + except CreatorsOperationFailed as exc: + self._emit_event( + "instances.remove.failed", + { + "title": "Instance removement failed", + "failed_info": exc.failed_info + } + ) - @property - def publish_is_running(self): - return self._publish_is_running - - @property - def publish_has_validated(self): - return self._publish_validated - - @property - def publish_has_crashed(self): - return bool(self._publish_error) - - @property - def publish_has_validation_errors(self): - return bool(self._publish_validation_errors) - - @property - def publish_max_progress(self): - return self._publish_max_progress - - @property - def publish_progress(self): - return self._publish_progress - - @property - def publish_comment_is_set(self): - return self._publish_comment_is_set - - def get_publish_crash_error(self): - return self._publish_error + def _on_create_instance_change(self): + self._emit_event("instances.refresh.finished") def get_publish_report(self): - return self._publish_report.get_report(self.publish_plugins) + return self._publish_report.get_report(self._publish_plugins) def get_validation_errors(self): - return self._publish_validation_errors + return self._publish_validation_errors.create_report() def _reset_publish(self): - self._publish_is_running = False - self._publish_validated = False + self._reset_attributes() + self._publish_up_validation = False - self._publish_finished = False self._publish_comment_is_set = False - self._main_thread_processor.clear() + self._main_thread_iter = self._publish_iterator() self._publish_context = pyblish.api.Context() # Make sure "comment" is set on publish context @@ -774,24 +2101,30 @@ class PublisherController: # - must not be used for changing CreatedInstances during publishing! # QUESTION # - pop the key after first collector using it would be safest option? - self._publish_context.data["create_context"] = self.create_context + self._publish_context.data["create_context"] = self._create_context - self._publish_report.reset( - self._publish_context, - self.create_context.publish_discover_result + self._publish_plugins_proxy = PublishPluginsProxy( + self._publish_plugins ) - self._publish_validation_errors = [] - self._publish_current_plugin_validation_errors = None - self._publish_error = None - self._publish_max_progress = len(self.publish_plugins) - self._publish_progress = 0 + self._publish_report.reset(self._publish_context, self._create_context) + self._publish_validation_errors.reset(self._publish_plugins_proxy) - self._trigger_callbacks(self._publish_reset_callback_refs) + self.publish_max_progress = len(self._publish_plugins) + + self._emit_event("publish.reset.finished") def set_comment(self, comment): - self._publish_context.data["comment"] = comment - self._publish_comment_is_set = True + """Set comment from ui to pyblish context. 
+ + This should be called always before publishing is started but should + happen only once on first publish start thus variable + '_publish_comment_is_set' is used to keep track about the information. + """ + + if not self._publish_comment_is_set: + self._publish_context.data["comment"] = comment + self._publish_comment_is_set = True def publish(self): """Run publishing.""" @@ -800,37 +2133,42 @@ class PublisherController: def validate(self): """Run publishing and stop after Validation.""" - if self._publish_validated: + if self.publish_has_validated: return self._publish_up_validation = True self._start_publish() def _start_publish(self): """Start or continue in publishing.""" - if self._publish_is_running: + if self.publish_is_running: return # Make sure changes are saved self.save_changes() - self._publish_is_running = True - self._trigger_callbacks(self._publish_started_callback_refs) - self._main_thread_processor.start() + self.publish_is_running = True + + self._emit_event("publish.process.started") + self._publish_next_process() def _stop_publish(self): """Stop or pause publishing.""" - self._publish_is_running = False - self._main_thread_processor.stop() - self._trigger_callbacks(self._publish_stopped_callback_refs) + self.publish_is_running = False + + self._emit_event("publish.process.stopped") def stop_publish(self): """Stop publishing process (any reason).""" - if self._publish_is_running: + + if self.publish_is_running: self._stop_publish() - def run_action(self, plugin, action): + def run_action(self, plugin_id, action_id): # TODO handle result in UI + plugin = self._publish_plugins_proxy.get_plugin(plugin_id) + action = self._publish_plugins_proxy.get_action(action_id) + result = pyblish.plugin.process( plugin, self._publish_context, None, action.id ) @@ -844,21 +2182,24 @@ class PublisherController: # There are validation errors and validation is passed # - can't do any progree if ( - self._publish_validated - and self._publish_validation_errors + self.publish_has_validated + and self.publish_has_validation_errors ): item = MainThreadItem(self.stop_publish) # Any unexpected error happened # - everything should stop - elif self._publish_error: + elif self.publish_has_crashed: item = MainThreadItem(self.stop_publish) # Everything is ok so try to get new processing item else: item = next(self._main_thread_iter) - self._main_thread_processor.add_item(item) + self._process_main_thread_item(item) + + def _process_main_thread_item(self, item): + item() def _publish_iterator(self): """Main logic center of publishing. @@ -873,32 +2214,24 @@ class PublisherController: QUESTION: Does validate button still make sense? """ - for idx, plugin in enumerate(self.publish_plugins): + for idx, plugin in enumerate(self._publish_plugins): self._publish_progress = idx - # Reset current plugin validations error - self._publish_current_plugin_validation_errors = None - # Check if plugin is over validation order - if not self._publish_validated: - self._publish_validated = ( + if not self.publish_has_validated: + self.publish_has_validated = ( plugin.order >= self._validation_order ) - # Trigger callbacks when validation stage is passed - if self._publish_validated: - self._trigger_callbacks( - self._publish_validated_callback_refs - ) # Stop if plugin is over validation order and process # should process up to validation. 
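As an aside, a stripped-down sketch of the generator-plus-MainThreadItem arrangement used by '_publish_iterator' may make the control flow easier to follow: every step is wrapped in a deferred callable and the controller pulls one item per pass, which is what lets publishing stop cleanly between plugins (validation stop, crash, or user request). All names below are illustrative, not the real classes.

class Step:
    """Illustrative counterpart of MainThreadItem: a deferred call."""

    def __init__(self, func, *args):
        self._func = func
        self._args = args

    def process(self):
        return self._func(*self._args)

def publish_steps(plugins, instances, should_stop):
    # Yield one deferred step at a time; the caller decides when to pull
    # the next one, so stopping between plugins is just "stop pulling".
    for plugin in plugins:
        if should_stop():
            return
        for instance in instances:
            yield Step(plugin, instance)

# Driving loop (the controller does this one item per "tick"):
# for step in publish_steps([print], ["instanceA", "instanceB"], lambda: False):
#     step.process()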
- if self._publish_up_validation and self._publish_validated: + if self._publish_up_validation and self.publish_has_validated: yield MainThreadItem(self.stop_publish) # Stop if validation is over and validation errors happened if ( - self._publish_validated - and self._publish_validation_errors + self.publish_has_validated + and self.publish_has_validation_errors ): yield MainThreadItem(self.stop_publish) @@ -906,9 +2239,14 @@ class PublisherController: self._publish_report.add_plugin_iter(plugin, self._publish_context) # Trigger callback that new plugin is going to be processed - self._trigger_callbacks( - self._publish_plugin_changed_callback_refs, plugin + plugin_label = plugin.__name__ + if hasattr(plugin, "label") and plugin.label: + plugin_label = plugin.label + self._emit_event( + "publish.process.plugin.changed", + {"plugin_label": plugin_label} ) + # Plugin is instance plugin if plugin.__instanceEnabled__: instances = pyblish.logic.instances_by_plugin( @@ -922,11 +2260,15 @@ class PublisherController: if instance.data.get("publish") is False: continue - self._trigger_callbacks( - self._publish_instance_changed_callback_refs, - self._publish_context, - instance + instance_label = ( + instance.data.get("label") + or instance.data["name"] ) + self._emit_event( + "publish.process.instance.changed", + {"instance_label": instance_label} + ) + yield MainThreadItem( self._process_and_continue, plugin, instance ) @@ -938,10 +2280,14 @@ class PublisherController: [plugin], families ) if plugins: - self._trigger_callbacks( - self._publish_instance_changed_callback_refs, - self._publish_context, - None + instance_label = ( + self._publish_context.data.get("label") + or self._publish_context.data.get("name") + or "Context" + ) + self._emit_event( + "publish.process.instance.changed", + {"instance_label": instance_label} ) yield MainThreadItem( self._process_and_continue, plugin, None @@ -950,24 +2296,17 @@ class PublisherController: self._publish_report.set_plugin_skipped() # Cleanup of publishing process - self._publish_finished = True - self._publish_progress = self._publish_max_progress + self.publish_has_finished = True + self.publish_progress = self.publish_max_progress yield MainThreadItem(self.stop_publish) def _add_validation_error(self, result): - if self._publish_current_plugin_validation_errors is None: - self._publish_current_plugin_validation_errors = { - "plugin": result["plugin"], - "errors": [] - } - self._publish_validation_errors.append( - self._publish_current_plugin_validation_errors - ) - - self._publish_current_plugin_validation_errors["errors"].append({ - "exception": result["error"], - "instance": result["instance"] - }) + self.publish_has_validation_errors = True + self._publish_validation_errors.add_error( + result["plugin"], + result["error"], + result["instance"] + ) def _process_and_continue(self, plugin, instance): result = pyblish.plugin.process( @@ -980,18 +2319,23 @@ class PublisherController: if exception: if ( isinstance(exception, PublishValidationError) - and not self._publish_validated + and not self.publish_has_validated ): self._add_validation_error(result) else: - self._publish_error = exception + if isinstance(exception, KnownPublishError): + msg = str(exception) + else: + msg = ( + "Something went wrong. Send report" + " to your supervisor or OpenPype." 
+ ) + self.publish_error_msg = msg + self.publish_has_crashed = True self._publish_next_process() - def reset_project_data_cache(self): - self._asset_docs_cache.reset() - def collect_families_from_instances(instances, only_active=False): """Collect all families for passed publish instances. diff --git a/openpype/tools/publisher/control_qt.py b/openpype/tools/publisher/control_qt.py new file mode 100644 index 0000000000..8b5856f234 --- /dev/null +++ b/openpype/tools/publisher/control_qt.py @@ -0,0 +1,451 @@ +import collections +from abc import abstractmethod, abstractproperty + +from Qt import QtCore + +from openpype.lib.events import Event +from openpype.pipeline.create import CreatedInstance + +from .control import ( + MainThreadItem, + PublisherController, + BasePublisherController, +) + + +class MainThreadProcess(QtCore.QObject): + """Qt based main thread process executor. + + Has timer which controls each 50ms if there is new item to process. + + This approach gives ability to update UI meanwhile plugin is in progress. + """ + + count_timeout = 2 + + def __init__(self): + super(MainThreadProcess, self).__init__() + self._items_to_process = collections.deque() + + timer = QtCore.QTimer() + timer.setInterval(0) + + timer.timeout.connect(self._execute) + + self._timer = timer + self._switch_counter = self.count_timeout + + def process(self, func, *args, **kwargs): + item = MainThreadItem(func, *args, **kwargs) + self.add_item(item) + + def add_item(self, item): + self._items_to_process.append(item) + + def _execute(self): + if not self._items_to_process: + return + + if self._switch_counter > 0: + self._switch_counter -= 1 + return + + self._switch_counter = self.count_timeout + + item = self._items_to_process.popleft() + item.process() + + def start(self): + if not self._timer.isActive(): + self._timer.start() + + def stop(self): + if self._timer.isActive(): + self._timer.stop() + + def clear(self): + if self._timer.isActive(): + self._timer.stop() + self._items_to_process = collections.deque() + + +class QtPublisherController(PublisherController): + def __init__(self, *args, **kwargs): + self._main_thread_processor = MainThreadProcess() + + super(QtPublisherController, self).__init__(*args, **kwargs) + + self.event_system.add_callback( + "publish.process.started", self._qt_on_publish_start + ) + self.event_system.add_callback( + "publish.process.stopped", self._qt_on_publish_stop + ) + + def _reset_publish(self): + super(QtPublisherController, self)._reset_publish() + self._main_thread_processor.clear() + + def _process_main_thread_item(self, item): + self._main_thread_processor.add_item(item) + + def _qt_on_publish_start(self): + self._main_thread_processor.start() + + def _qt_on_publish_stop(self): + self._main_thread_processor.stop() + + +class QtRemotePublishController(BasePublisherController): + """Abstract Remote controller for Qt UI. + + This controller should be used in process where UI is running and should + listen and ask for data on a client side. + + All objects that are used during UI processing should be able to convert + on client side to json serializable data and then recreated here. Keep in + mind that all changes made here should be send back to client controller + before critical actions. + + ATM Was not tested and will require some changes. All code written here is + based on theoretical idea how it could work. 
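A standalone sketch of the deferred-queue idea that MainThreadProcess (defined earlier in this file) builds on: queue callables and let a zero-interval QTimer drain them from the Qt event loop, so the UI can repaint between publish steps. Only Qt is used; the class name is made up.

import collections
from Qt import QtCore

class DeferredQueue(QtCore.QObject):
    def __init__(self):
        super(DeferredQueue, self).__init__()
        self._queue = collections.deque()
        self._timer = QtCore.QTimer()
        self._timer.setInterval(0)
        self._timer.timeout.connect(self._process_one)

    def add(self, func):
        self._queue.append(func)

    def start(self):
        self._timer.start()

    def _process_one(self):
        # Run a single queued callable per timer tick; between ticks the
        # event loop gets a chance to process paint and input events.
        if self._queue:
            self._queue.popleft()()
        else:
            self._timer.stop()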
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self._created_instances = {} + self._thumbnail_paths_by_instance_id = None + + def _reset_attributes(self): + super()._reset_attributes() + self._thumbnail_paths_by_instance_id = None + + @abstractmethod + def _get_serialized_instances(self): + """Receive serialized instances from client process. + + Returns: + List[Dict[str, Any]]: Serialized instances. + """ + + pass + + def _on_create_instance_change(self): + serialized_instances = self._get_serialized_instances() + + created_instances = {} + for serialized_data in serialized_instances: + item = CreatedInstance.deserialize_on_remote( + serialized_data, + self._creator_items + ) + created_instances[item.id] = item + + self._created_instances = created_instances + self._emit_event("instances.refresh.finished") + + def remote_events_handler(self, event_data): + event = Event.from_data(event_data) + + # Topics that cause "replication" of controller changes + if event.topic == "publish.max_progress.changed": + self.publish_max_progress = event["value"] + return + + if event.topic == "publish.progress.changed": + self.publish_progress = event["value"] + return + + if event.topic == "publish.has_validated.changed": + self.publish_has_validated = event["value"] + return + + if event.topic == "publish.is_running.changed": + self.publish_is_running = event["value"] + return + + if event.topic == "publish.publish_error.changed": + self.publish_error_msg = event["value"] + return + + if event.topic == "publish.has_crashed.changed": + self.publish_has_crashed = event["value"] + return + + if event.topic == "publish.has_validation_errors.changed": + self.publish_has_validation_errors = event["value"] + return + + if event.topic == "publish.finished.changed": + self.publish_has_finished = event["value"] + return + + if event.topic == "publish.host_is_valid.changed": + self.host_is_valid = event["value"] + return + + # Don't skip because UI want know about it too + if event.topic == "instance.thumbnail.changed": + for instance_id, path in event["mapping"].items(): + self.thumbnail_paths_by_instance_id[instance_id] = path + + # Topics that can be just passed by because are not affecting + # controller itself + # - "show.card.message" + # - "show.detailed.help" + # - "publish.reset.finished" + # - "instances.refresh.finished" + # - "plugins.refresh.finished" + # - "controller.reset.finished" + # - "publish.process.started" + # - "publish.process.stopped" + # - "publish.process.plugin.changed" + # - "publish.process.instance.changed" + self.event_system.emit_event(event) + + @abstractproperty + def project_name(self): + """Current context project name from client. + + Returns: + str: Name of project. + """ + + pass + + @abstractproperty + def current_asset_name(self): + """Current context asset name from client. + + Returns: + Union[str, None]: Name of asset. + """ + + pass + + @abstractproperty + def current_task_name(self): + """Current context task name from client. + + Returns: + Union[str, None]: Name of task. + """ + + pass + + @property + def instances(self): + """Collected/created instances. + + Returns: + List[CreatedInstance]: List of created instances. + """ + + return self._created_instances + + def get_context_title(self): + """Get context title for artist shown at the top of main window. + + Returns: + Union[str, None]: Context title for window or None. In case of None + a warning is displayed (not nice for artists). 
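A hypothetical sketch of the "replication" these topics imply: on the client side, assigning a controller attribute is expected to emit a '<name>.changed' event carrying the new value, and the handler above simply mirrors that value onto the UI-side controller. Nothing below is OpenPype API.

class ReplicatedValue:
    """Attribute holder that reports changes through an emit callable."""

    def __init__(self, topic, emit):
        self._topic = topic
        self._emit = emit
        self._value = None

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        if value != self._value:
            self._value = value
            self._emit(self._topic, {"value": value})

# progress = ReplicatedValue("publish.progress.changed", event_system.emit)
# progress.value = 5  -> emits {"value": 5}, which the UI controller mirrors.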
+ """ + + pass + + def get_asset_docs(self): + pass + + def get_asset_hierarchy(self): + pass + + def get_task_names_by_asset_names(self, asset_names): + pass + + def get_existing_subset_names(self, asset_name): + pass + + @property + def thumbnail_paths_by_instance_id(self): + if self._thumbnail_paths_by_instance_id is None: + self._thumbnail_paths_by_instance_id = ( + self._collect_thumbnail_paths_by_instance_id() + ) + return self._thumbnail_paths_by_instance_id + + def get_thumbnail_path_for_instance(self, instance_id): + return self.thumbnail_paths_by_instance_id.get(instance_id) + + def set_thumbnail_path_for_instance(self, instance_id, thumbnail_path): + self._set_thumbnail_path_on_context(self, instance_id, thumbnail_path) + + @abstractmethod + def _collect_thumbnail_paths_by_instance_id(self): + """Collect thumbnail paths by instance id in remote controller. + + These should be collected from 'CreatedContext' there. + + Returns: + Dict[str, str]: Mapping of thumbnail path by instance id. + """ + + pass + + @abstractmethod + def _set_thumbnail_path_on_context(self, instance_id, thumbnail_path): + """Send change of thumbnail path in remote controller. + + That should trigger event 'instance.thumbnail.changed' which is + captured and handled in default implementation in this class. + """ + + pass + + @abstractmethod + def get_subset_name( + self, + creator_identifier, + variant, + task_name, + asset_name, + instance_id=None + ): + """Get subset name based on passed data. + + Args: + creator_identifier (str): Identifier of creator which should be + responsible for subset name creation. + variant (str): Variant value from user's input. + task_name (str): Name of task for which is instance created. + asset_name (str): Name of asset for which is instance created. + instance_id (Union[str, None]): Existing instance id when subset + name is updated. + """ + + pass + + @abstractmethod + def create( + self, creator_identifier, subset_name, instance_data, options + ): + """Trigger creation by creator identifier. + + Should also trigger refresh of instanes. + + Args: + creator_identifier (str): Identifier of Creator plugin. + subset_name (str): Calculated subset name. + instance_data (Dict[str, Any]): Base instance data with variant, + asset name and task name. + options (Dict[str, Any]): Data from pre-create attributes. + """ + + pass + + def _get_instance_changes_for_client(self): + """Preimplemented method to receive instance changes for client.""" + + created_instance_changes = {} + for instance_id, instance in self._created_instances.items(): + created_instance_changes[instance_id] = ( + instance.remote_changes() + ) + return created_instance_changes + + @abstractmethod + def _send_instance_changes_to_client(self): + instance_changes = self._get_instance_changes_for_client() + # Implement to send 'instance_changes' value to client + + @abstractmethod + def save_changes(self): + """Save changes happened during creation.""" + + self._send_instance_changes_to_client() + + @abstractmethod + def remove_instances(self, instance_ids): + """Remove list of instances from create context.""" + # TODO add Args: + + pass + + @abstractmethod + def get_publish_report(self): + pass + + @abstractmethod + def get_validation_errors(self): + pass + + @abstractmethod + def reset(self): + """Reset whole controller. + + This should reset create context, publish context and all variables + that are related to it. 
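A minimal sketch of the payload '_get_instance_changes_for_client' assembles and '_send_instance_changes_to_client' is meant to ship: per-instance changes keyed by instance id, serialized once before any critical action. It assumes 'remote_changes()' returns JSON-serializable data, as the class docstring suggests; the transport itself is left to the concrete implementation.

import json

def serialize_instance_changes(created_instances):
    # created_instances: {instance_id: CreatedInstance} as kept by this class.
    payload = {
        instance_id: instance.remote_changes()
        for instance_id, instance in created_instances.items()
    }
    return json.dumps(payload)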
+ """ + + self._send_instance_changes_to_client() + pass + + @abstractmethod + def publish(self): + """Trigger publishing without any order limitations.""" + + self._send_instance_changes_to_client() + pass + + @abstractmethod + def validate(self): + """Trigger publishing which will stop after validation order.""" + + self._send_instance_changes_to_client() + pass + + @abstractmethod + def stop_publish(self): + """Stop publishing can be also used to pause publishing. + + Pause of publishing is possible only if all plugins successfully + finished. + """ + + pass + + @abstractmethod + def run_action(self, plugin_id, action_id): + """Trigger pyblish action on a plugin. + + Args: + plugin_id (str): Id of publish plugin. + action_id (str): Id of publish action. + """ + + pass + + @abstractmethod + def set_comment(self, comment): + """Set comment on pyblish context. + + Set "comment" key on current pyblish.api.Context data. + + Args: + comment (str): Artist's comment. + """ + + pass + + @abstractmethod + def emit_card_message(self, message): + """Emit a card message which can have a lifetime. + + This is for UI purposes. Method can be extended to more arguments + in future e.g. different message timeout or type (color). + + Args: + message (str): Message that will be showed. + """ + + pass diff --git a/openpype/tools/publisher/publish_report_viewer/__init__.py b/openpype/tools/publisher/publish_report_viewer/__init__.py index ce1cc3729c..bf77a6d30b 100644 --- a/openpype/tools/publisher/publish_report_viewer/__init__.py +++ b/openpype/tools/publisher/publish_report_viewer/__init__.py @@ -1,3 +1,5 @@ +from Qt import QtWidgets + from .report_items import ( PublishReport ) @@ -16,4 +18,13 @@ __all__ = ( "PublishReportViewerWidget", "PublishReportViewerWindow", + + "main", ) + + +def main(): + app = QtWidgets.QApplication([]) + window = PublishReportViewerWindow() + window.show() + return app.exec_() diff --git a/openpype/tools/publisher/publish_report_viewer/model.py b/openpype/tools/publisher/publish_report_viewer/model.py index a88129a358..704feeb4bd 100644 --- a/openpype/tools/publisher/publish_report_viewer/model.py +++ b/openpype/tools/publisher/publish_report_viewer/model.py @@ -3,6 +3,7 @@ from Qt import QtCore, QtGui import pyblish.api +from openpype.tools.utils.lib import html_escape from .constants import ( ITEM_ID_ROLE, ITEM_IS_GROUP_ROLE, @@ -45,7 +46,8 @@ class InstancesModel(QtGui.QStandardItemModel): all_removed = True for instance_item in instance_items: item = QtGui.QStandardItem(instance_item.label) - item.setData(instance_item.label, ITEM_LABEL_ROLE) + instance_label = html_escape(instance_item.label) + item.setData(instance_label, ITEM_LABEL_ROLE) item.setData(instance_item.errored, ITEM_ERRORED_ROLE) item.setData(instance_item.id, ITEM_ID_ROLE) item.setData(instance_item.removed, INSTANCE_REMOVED_ROLE) diff --git a/openpype/tools/publisher/publish_report_viewer/report_items.py b/openpype/tools/publisher/publish_report_viewer/report_items.py index b47d14da25..206f999bac 100644 --- a/openpype/tools/publisher/publish_report_viewer/report_items.py +++ b/openpype/tools/publisher/publish_report_viewer/report_items.py @@ -79,14 +79,12 @@ class PublishReport: context_data = data["context"] context_data["name"] = "context" - context_data["label"] = context_data["label"] or "Context" + context_data["label"] = context_data.get("label") or "Context" logs = [] plugins_items_by_id = {} - plugins_id_order = [] for plugin_data in data["plugins_data"]: item = PluginItem(plugin_data) - 
plugins_id_order.append(item.id) plugins_items_by_id[item.id] = item for instance_data_item in plugin_data["instances_data"]: instance_id = instance_data_item["id"] @@ -95,6 +93,14 @@ class PublishReport: copy.deepcopy(log_item_data), item.id, instance_id ) logs.append(log_item) + sorted_plugins = sorted( + plugins_items_by_id.values(), + key=lambda item: item.order + ) + plugins_id_order = [ + plugin_item.id + for plugin_item in sorted_plugins + ] logs_by_instance_id = collections.defaultdict(list) for log_item in logs: diff --git a/openpype/tools/publisher/publish_report_viewer/widgets.py b/openpype/tools/publisher/publish_report_viewer/widgets.py index fd226ea0e4..0d35ac3512 100644 --- a/openpype/tools/publisher/publish_report_viewer/widgets.py +++ b/openpype/tools/publisher/publish_report_viewer/widgets.py @@ -1,3 +1,4 @@ +from math import ceil from Qt import QtWidgets, QtCore, QtGui from openpype.widgets.nice_checkbox import NiceCheckbox @@ -26,6 +27,9 @@ class PluginLoadReportModel(QtGui.QStandardItemModel): parent = self.invisibleRootItem() parent.removeRows(0, parent.rowCount()) + if report is None: + return + new_items = [] new_items_by_filepath = {} for filepath in report.crashed_plugin_paths.keys(): @@ -137,13 +141,85 @@ class PluginLoadReportWidget(QtWidgets.QWidget): self._model.set_report(report) +class ZoomPlainText(QtWidgets.QPlainTextEdit): + min_point_size = 1.0 + max_point_size = 200.0 + + def __init__(self, *args, **kwargs): + super(ZoomPlainText, self).__init__(*args, **kwargs) + + anim_timer = QtCore.QTimer() + anim_timer.setInterval(20) + + anim_timer.timeout.connect(self._scaling_callback) + + self._anim_timer = anim_timer + self._scheduled_scalings = 0 + self._point_size = None + + def wheelEvent(self, event): + modifiers = QtWidgets.QApplication.keyboardModifiers() + if modifiers != QtCore.Qt.ControlModifier: + super(ZoomPlainText, self).wheelEvent(event) + return + + degrees = float(event.delta()) / 8 + steps = int(ceil(degrees / 5)) + self._scheduled_scalings += steps + if (self._scheduled_scalings * steps < 0): + self._scheduled_scalings = steps + + self._anim_timer.start() + + def _scaling_callback(self): + if self._scheduled_scalings == 0: + self._anim_timer.stop() + return + + factor = 1.0 + (self._scheduled_scalings / 300) + font = self.font() + + if self._point_size is None: + point_size = font.pointSizeF() + else: + point_size = self._point_size + + point_size *= factor + min_hit = False + max_hit = False + if point_size < self.min_point_size: + point_size = self.min_point_size + min_hit = True + elif point_size > self.max_point_size: + point_size = self.max_point_size + max_hit = True + + self._point_size = point_size + + font.setPointSizeF(point_size) + # Using 'self.setFont(font)' would not be propagated when stylesheets + # are applied on this widget + self.setStyleSheet("font-size: {}pt".format(font.pointSize())) + + if ( + (max_hit and self._scheduled_scalings > 0) + or (min_hit and self._scheduled_scalings < 0) + ): + self._scheduled_scalings = 0 + + elif self._scheduled_scalings > 0: + self._scheduled_scalings -= 1 + else: + self._scheduled_scalings += 1 + + class DetailsWidget(QtWidgets.QWidget): def __init__(self, parent): super(DetailsWidget, self).__init__(parent) - output_widget = QtWidgets.QPlainTextEdit(self) - output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) + output_widget = ZoomPlainText(self) output_widget.setObjectName("PublishLogConsole") + output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) 
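A self-contained sketch of the zoom arithmetic ZoomPlainText (above) uses for Ctrl+wheel: the wheel delta, reported by Qt in 1/8 degree units, becomes a number of scale steps, and each animation tick multiplies the font point size by a small factor clamped to the widget's min/max. Function names are illustrative.

from math import ceil

def wheel_steps(delta):
    # Qt reports wheel movement in 1/8 of a degree; 15 degrees is one notch.
    degrees = delta / 8.0
    return int(ceil(degrees / 5.0))

def scaled_point_size(point_size, scheduled_scalings,
                      minimum=1.0, maximum=200.0):
    factor = 1.0 + (scheduled_scalings / 300.0)
    return max(minimum, min(maximum, point_size * factor))

# One notch up: wheel_steps(120) == 3; the timer then applies
# scaled_point_size repeatedly while the scheduled step count runs down.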
layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) @@ -268,7 +344,7 @@ class DetailsPopup(QtWidgets.QDialog): self.closed.emit() -class PublishReportViewerWidget(QtWidgets.QWidget): +class PublishReportViewerWidget(QtWidgets.QFrame): def __init__(self, parent=None): super(PublishReportViewerWidget, self).__init__(parent) diff --git a/openpype/tools/publisher/publish_report_viewer/window.py b/openpype/tools/publisher/publish_report_viewer/window.py index 678884677c..646ae69e7f 100644 --- a/openpype/tools/publisher/publish_report_viewer/window.py +++ b/openpype/tools/publisher/publish_report_viewer/window.py @@ -1,11 +1,12 @@ import os import json import six +import uuid + import appdirs from Qt import QtWidgets, QtCore, QtGui from openpype import style -from openpype.lib import JSONSettingRegistry from openpype.resources import get_openpype_icon_filepath from openpype.tools import resources from openpype.tools.utils import ( @@ -23,38 +24,198 @@ else: from report_items import PublishReport -FILEPATH_ROLE = QtCore.Qt.UserRole + 1 -MODIFIED_ROLE = QtCore.Qt.UserRole + 2 +ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 -class PublisherReportRegistry(JSONSettingRegistry): - """Class handling storing publish report tool. - - Attributes: - vendor (str): Name used for path construction. - product (str): Additional name used for path construction. +def get_reports_dir(): + """Root directory where publish reports are stored for next session. + Returns: + str: Path to directory where reports are stored. """ + report_dir = os.path.join( + appdirs.user_data_dir("openpype", "pypeclub"), + "publish_report_viewer" + ) + if not os.path.exists(report_dir): + os.makedirs(report_dir) + return report_dir + + +class PublishReportItem: + """Report item representing one file in report directory.""" + + def __init__(self, content): + item_id = content.get("id") + changed = False + if not item_id: + item_id = str(uuid.uuid4()) + changed = True + content["id"] = item_id + + if not content.get("report_version"): + changed = True + content["report_version"] = "0.0.1" + + report_path = os.path.join(get_reports_dir(), item_id) + file_modified = None + if os.path.exists(report_path): + file_modified = os.path.getmtime(report_path) + self.content = content + self.report_path = report_path + self.file_modified = file_modified + self._loaded_label = content.get("label") + self._changed = changed + self.publish_report = PublishReport(content) + + @property + def version(self): + return self.content["report_version"] + + @property + def id(self): + return self.content["id"] + + def get_label(self): + return self.content.get("label") or "Unfilled label" + + def set_label(self, label): + if not label: + self.content.pop("label", None) + self.content["label"] = label + + label = property(get_label, set_label) + + def save(self): + save = False + if ( + self._changed + or self._loaded_label != self.label + or not os.path.exists(self.report_path) + or self.file_modified != os.path.getmtime(self.report_path) + ): + save = True + + if not save: + return + + with open(self.report_path, "w") as stream: + json.dump(self.content, stream) + + self._loaded_label = self.content.get("label") + self._changed = False + self.file_modified = os.path.getmtime(self.report_path) + + @classmethod + def from_filepath(cls, filepath): + if not os.path.exists(filepath): + return None + + try: + with open(filepath, "r") as stream: + content = json.load(stream) + + return cls(content) + except Exception: + return None + + def 
remove_file(self): + if os.path.exists(self.report_path): + os.remove(self.report_path) + + def update_file_content(self): + if not os.path.exists(self.report_path): + return + + file_modified = os.path.getmtime(self.report_path) + if file_modified == self.file_modified: + return + + with open(self.report_path, "r") as stream: + content = json.load(self.content, stream) + + item_id = content.get("id") + version = content.get("report_version") + if not item_id: + item_id = str(uuid.uuid4()) + content["id"] = item_id + + if not version: + version = "0.0.1" + content["report_version"] = version + + self.content = content + self.file_modified = file_modified + + +class PublisherReportHandler: + """Class handling storing publish report tool.""" + def __init__(self): - self.vendor = "pypeclub" - self.product = "openpype" - name = "publish_report_viewer" - path = appdirs.user_data_dir(self.product, self.vendor) - super(PublisherReportRegistry, self).__init__(name, path) + self._reports = None + self._reports_by_id = {} + + def reset(self): + self._reports = None + self._reports_by_id = {} + + def list_reports(self): + if self._reports is not None: + return self._reports + + reports = [] + reports_by_id = {} + report_dir = get_reports_dir() + for filename in os.listdir(report_dir): + ext = os.path.splitext(filename)[-1] + if ext == ".json": + continue + filepath = os.path.join(report_dir, filename) + item = PublishReportItem.from_filepath(filepath) + reports.append(item) + reports_by_id[item.id] = item + + self._reports = reports + self._reports_by_id = reports_by_id + return reports + + def remove_report_items(self, item_id): + item = self._reports_by_id.get(item_id) + if item: + try: + item.remove_file() + self._reports_by_id.get(item_id) + except Exception: + pass -class LoadedFilesMopdel(QtGui.QStandardItemModel): +class LoadedFilesModel(QtGui.QStandardItemModel): def __init__(self, *args, **kwargs): - super(LoadedFilesMopdel, self).__init__(*args, **kwargs) - self.setColumnCount(2) - self._items_by_filepath = {} - self._reports_by_filepath = {} + super(LoadedFilesModel, self).__init__(*args, **kwargs) - self._registry = PublisherReportRegistry() + self._items_by_id = {} + self._report_items_by_id = {} + + self._handler = PublisherReportHandler() self._loading_registry = False - self._load_registry() + + def refresh(self): + self._handler.reset() + self._items_by_id = {} + self._report_items_by_id = {} + + new_items = [] + for report_item in self._handler.list_reports(): + item = self._create_item(report_item) + self._report_items_by_id[report_item.id] = report_item + self._items_by_id[report_item.id] = item + new_items.append(item) + + if new_items: + root_item = self.invisibleRootItem() + root_item.appendRows(new_items) def headerData(self, section, orientation, role): if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole): @@ -63,22 +224,7 @@ class LoadedFilesMopdel(QtGui.QStandardItemModel): if section == 1: return "Modified" return "" - super(LoadedFilesMopdel, self).headerData(section, orientation, role) - - def _load_registry(self): - self._loading_registry = True - try: - filepaths = self._registry.get_item("filepaths") - self.add_filepaths(filepaths) - except ValueError: - pass - self._loading_registry = False - - def _store_registry(self): - if self._loading_registry: - return - filepaths = list(self._items_by_filepath.keys()) - self._registry.set_item("filepaths", filepaths) + super(LoadedFilesModel, self).headerData(section, orientation, role) def data(self, index, role=None): if 
role is None: @@ -88,17 +234,28 @@ class LoadedFilesMopdel(QtGui.QStandardItemModel): if col != 0: index = self.index(index.row(), 0, index.parent()) - if role == QtCore.Qt.ToolTipRole: - if col == 0: - role = FILEPATH_ROLE - elif col == 1: - return "File modified" + return super(LoadedFilesModel, self).data(index, role) + + def setData(self, index, value, role): + if role == QtCore.Qt.EditRole: + item_id = index.data(ITEM_ID_ROLE) + report_item = self._report_items_by_id.get(item_id) + if report_item is not None: + report_item.label = value + report_item.save() + value = report_item.label + + return super(LoadedFilesModel, self).setData(index, value, role) + + def _create_item(self, report_item): + if report_item.id in self._items_by_id: return None - elif role == QtCore.Qt.DisplayRole: - if col == 1: - role = MODIFIED_ROLE - return super(LoadedFilesMopdel, self).data(index, role) + item = QtGui.QStandardItem(report_item.label) + item.setColumnCount(self.columnCount()) + item.setData(report_item.id, ITEM_ID_ROLE) + + return item def add_filepaths(self, filepaths): if not filepaths: @@ -110,9 +267,6 @@ class LoadedFilesMopdel(QtGui.QStandardItemModel): filtered_paths = [] for filepath in filepaths: normalized_path = os.path.normpath(filepath) - if normalized_path in self._items_by_filepath: - continue - if ( os.path.exists(normalized_path) and normalized_path not in filtered_paths @@ -127,54 +281,46 @@ class LoadedFilesMopdel(QtGui.QStandardItemModel): try: with open(normalized_path, "r") as stream: data = json.load(stream) - report = PublishReport(data) + report_item = PublishReportItem(data) except Exception: # TODO handle errors continue - modified = os.path.getmtime(normalized_path) - item = QtGui.QStandardItem(os.path.basename(normalized_path)) - item.setColumnCount(self.columnCount()) - item.setData(normalized_path, FILEPATH_ROLE) - item.setData(modified, MODIFIED_ROLE) + label = data.get("label") + if not label: + report_item.label = ( + os.path.splitext(os.path.basename(filepath))[0] + ) + + item = self._create_item(report_item) + if item is None: + continue + new_items.append(item) - self._items_by_filepath[normalized_path] = item - self._reports_by_filepath[normalized_path] = report + report_item.save() + self._items_by_id[report_item.id] = item + self._report_items_by_id[report_item.id] = report_item - if not new_items: + if new_items: + root_item = self.invisibleRootItem() + root_item.appendRows(new_items) + + def remove_item_by_id(self, item_id): + report_item = self._report_items_by_id.get(item_id) + if not report_item: return + self._handler.remove_report_items(item_id) + item = self._items_by_id.get(item_id) + parent = self.invisibleRootItem() - parent.appendRows(new_items) + parent.removeRow(item.row()) - self._store_registry() - - def remove_filepaths(self, filepaths): - if not filepaths: - return - - if isinstance(filepaths, six.string_types): - filepaths = [filepaths] - - filtered_paths = [] - for filepath in filepaths: - normalized_path = os.path.normpath(filepath) - if normalized_path in self._items_by_filepath: - filtered_paths.append(normalized_path) - - if not filtered_paths: - return - - parent = self.invisibleRootItem() - for filepath in filtered_paths: - self._reports_by_filepath.pop(normalized_path) - item = self._items_by_filepath.pop(filepath) - parent.removeRow(item.row()) - - self._store_registry() - - def get_report_by_filepath(self, filepath): - return self._reports_by_filepath.get(filepath) + def get_report_by_id(self, item_id): + report_item = 
self._report_items_by_id.get(item_id) + if report_item: + return report_item.publish_report + return None class LoadedFilesView(QtWidgets.QTreeView): @@ -182,11 +328,13 @@ class LoadedFilesView(QtWidgets.QTreeView): def __init__(self, *args, **kwargs): super(LoadedFilesView, self).__init__(*args, **kwargs) - self.setEditTriggers(self.NoEditTriggers) + self.setEditTriggers( + self.EditKeyPressed | self.SelectedClicked | self.DoubleClicked + ) self.setIndentation(0) self.setAlternatingRowColors(True) - model = LoadedFilesMopdel() + model = LoadedFilesModel() self.setModel(model) time_delegate = PrettyTimeDelegate() @@ -219,6 +367,7 @@ class LoadedFilesView(QtWidgets.QTreeView): def _on_rows_inserted(self): header = self.header() header.resizeSections(header.ResizeToContents) + self._update_remove_btn() def resizeEvent(self, event): super(LoadedFilesView, self).resizeEvent(event) @@ -226,9 +375,10 @@ class LoadedFilesView(QtWidgets.QTreeView): def showEvent(self, event): super(LoadedFilesView, self).showEvent(event) - self._update_remove_btn() + self._model.refresh() header = self.header() header.resizeSections(header.ResizeToContents) + self._update_remove_btn() def _on_selection_change(self): self.selection_changed.emit() @@ -237,14 +387,14 @@ class LoadedFilesView(QtWidgets.QTreeView): self._model.add_filepaths(filepaths) self._fill_selection() - def remove_filepaths(self, filepaths): - self._model.remove_filepaths(filepaths) + def remove_item_by_id(self, item_id): + self._model.remove_item_by_id(item_id) self._fill_selection() def _on_remove_clicked(self): index = self.currentIndex() - filepath = index.data(FILEPATH_ROLE) - self.remove_filepaths(filepath) + item_id = index.data(ITEM_ID_ROLE) + self.remove_item_by_id(item_id) def _fill_selection(self): index = self.currentIndex() @@ -257,8 +407,8 @@ class LoadedFilesView(QtWidgets.QTreeView): def get_current_report(self): index = self.currentIndex() - filepath = index.data(FILEPATH_ROLE) - return self._model.get_report_by_filepath(filepath) + item_id = index.data(ITEM_ID_ROLE) + return self._model.get_report_by_id(item_id) class LoadedFilesWidget(QtWidgets.QWidget): diff --git a/openpype/tools/publisher/widgets/__init__.py b/openpype/tools/publisher/widgets/__init__.py index 55afc349ff..042985b007 100644 --- a/openpype/tools/publisher/widgets/__init__.py +++ b/openpype/tools/publisher/widgets/__init__.py @@ -3,35 +3,21 @@ from .icons import ( get_pixmap, get_icon ) -from .border_label_widget import ( - BorderedLabelWidget -) from .widgets import ( - SubsetAttributesWidget, - StopBtn, ResetBtn, ValidateBtn, PublishBtn, - - CreateInstanceBtn, - RemoveInstanceBtn, - ChangeViewBtn + CreateNextPageOverlay, ) -from .publish_widget import ( - PublishFrame -) -from .create_dialog import ( - CreateDialog -) - -from .card_view_widgets import ( - InstanceCardView -) - -from .list_view_widgets import ( - InstanceListView +from .help_widget import ( + HelpButton, + HelpDialog, ) +from .publish_frame import PublishFrame +from .tabs_widget import PublisherTabsWidget +from .overview_widget import OverviewWidget +from .validations_widget import ValidationsWidget __all__ = ( @@ -39,22 +25,18 @@ __all__ = ( "get_pixmap", "get_icon", - "SubsetAttributesWidget", - "BorderedLabelWidget", - "StopBtn", "ResetBtn", "ValidateBtn", "PublishBtn", + "CreateNextPageOverlay", - "CreateInstanceBtn", - "RemoveInstanceBtn", - "ChangeViewBtn", + "HelpButton", + "HelpDialog", "PublishFrame", - "CreateDialog", - - "InstanceCardView", - "InstanceListView", + 
"PublisherTabsWidget", + "OverviewWidget", + "ValidationsWidget", ) diff --git a/openpype/tools/publisher/widgets/assets_widget.py b/openpype/tools/publisher/widgets/assets_widget.py index 46fdcc6526..996c9029d4 100644 --- a/openpype/tools/publisher/widgets/assets_widget.py +++ b/openpype/tools/publisher/widgets/assets_widget.py @@ -1,6 +1,7 @@ import collections from Qt import QtWidgets, QtCore, QtGui + from openpype.tools.utils import ( PlaceholderLineEdit, RecursiveSortFilterProxyModel, @@ -13,18 +14,17 @@ from openpype.tools.utils.assets_widget import ( ) -class CreateDialogAssetsWidget(SingleSelectAssetsWidget): +class CreateWidgetAssetsWidget(SingleSelectAssetsWidget): current_context_required = QtCore.Signal() header_height_changed = QtCore.Signal(int) def __init__(self, controller, parent): self._controller = controller - super(CreateDialogAssetsWidget, self).__init__(None, parent) + super(CreateWidgetAssetsWidget, self).__init__(None, parent) self.set_refresh_btn_visibility(False) self.set_current_asset_btn_visibility(False) - self._current_asset_name = None self._last_selection = None self._enabled = None @@ -42,11 +42,11 @@ class CreateDialogAssetsWidget(SingleSelectAssetsWidget): self.header_height_changed.emit(height) def resizeEvent(self, event): - super(CreateDialogAssetsWidget, self).resizeEvent(event) + super(CreateWidgetAssetsWidget, self).resizeEvent(event) self._check_header_height() def showEvent(self, event): - super(CreateDialogAssetsWidget, self).showEvent(event) + super(CreateWidgetAssetsWidget, self).showEvent(event) self._check_header_height() def _on_current_asset_click(self): @@ -63,19 +63,19 @@ class CreateDialogAssetsWidget(SingleSelectAssetsWidget): self.select_asset(self._last_selection) def _select_indexes(self, *args, **kwargs): - super(CreateDialogAssetsWidget, self)._select_indexes(*args, **kwargs) + super(CreateWidgetAssetsWidget, self)._select_indexes(*args, **kwargs) if self._enabled: return self._last_selection = self.get_selected_asset_id() self._clear_selection() - def set_current_asset_name(self, asset_name): - self._current_asset_name = asset_name + def update_current_asset(self): # Hide set current asset if there is no one - self.set_current_asset_btn_visibility(asset_name is not None) + asset_name = self._get_current_session_asset() + self.set_current_asset_btn_visibility(bool(asset_name)) def _get_current_session_asset(self): - return self._current_asset_name + return self._controller.current_asset_name def _create_source_model(self): return AssetsHierarchyModel(self._controller) @@ -164,6 +164,16 @@ class AssetsHierarchyModel(QtGui.QStandardItemModel): return item_name in self._items_by_name +class AssetDialogView(QtWidgets.QTreeView): + double_clicked = QtCore.Signal(QtCore.QModelIndex) + + def mouseDoubleClickEvent(self, event): + index = self.indexAt(event.pos()) + if index.isValid(): + self.double_clicked.emit(index) + event.accept() + + class AssetsDialog(QtWidgets.QDialog): """Dialog to select asset for a context of instance.""" @@ -179,7 +189,7 @@ class AssetsDialog(QtWidgets.QDialog): filter_input = PlaceholderLineEdit(self) filter_input.setPlaceholderText("Filter assets..") - asset_view = QtWidgets.QTreeView(self) + asset_view = AssetDialogView(self) asset_view.setModel(proxy_model) asset_view.setHeaderHidden(True) asset_view.setFrameShape(QtWidgets.QFrame.NoFrame) @@ -201,6 +211,7 @@ class AssetsDialog(QtWidgets.QDialog): layout.addWidget(asset_view, 1) layout.addLayout(btns_layout, 0) + 
asset_view.double_clicked.connect(self._on_ok_clicked) filter_input.textChanged.connect(self._on_filter_change) ok_btn.clicked.connect(self._on_ok_clicked) cancel_btn.clicked.connect(self._on_cancel_clicked) @@ -275,7 +286,7 @@ class AssetsDialog(QtWidgets.QDialog): index = self._asset_view.currentIndex() asset_name = None if index.isValid(): - asset_name = index.data(QtCore.Qt.DisplayRole) + asset_name = index.data(ASSET_NAME_ROLE) self._selected_asset = asset_name self.done(1) diff --git a/openpype/tools/publisher/widgets/border_label_widget.py b/openpype/tools/publisher/widgets/border_label_widget.py index 696a9050b8..8e09dd817e 100644 --- a/openpype/tools/publisher/widgets/border_label_widget.py +++ b/openpype/tools/publisher/widgets/border_label_widget.py @@ -158,8 +158,7 @@ class BorderedLabelWidget(QtWidgets.QFrame): """ def __init__(self, label, parent): super(BorderedLabelWidget, self).__init__(parent) - colors_data = get_objected_colors() - color_value = colors_data.get("border") + color_value = get_objected_colors("border") color = None if color_value: color = color_value.get_qcolor() diff --git a/openpype/tools/publisher/widgets/card_view_widgets.py b/openpype/tools/publisher/widgets/card_view_widgets.py index 086cd5c59c..9fd2bf0824 100644 --- a/openpype/tools/publisher/widgets/card_view_widgets.py +++ b/openpype/tools/publisher/widgets/card_view_widgets.py @@ -28,6 +28,7 @@ from Qt import QtWidgets, QtCore from openpype.widgets.nice_checkbox import NiceCheckbox from openpype.tools.utils import BaseClickableFrame +from openpype.tools.utils.lib import html_escape from .widgets import ( AbstractInstanceView, ContextWarningLabel, @@ -36,18 +37,34 @@ from .widgets import ( ) from ..constants import ( CONTEXT_ID, - CONTEXT_LABEL + CONTEXT_LABEL, + CONTEXT_GROUP, + CONVERTOR_ITEM_GROUP, ) -class GroupWidget(QtWidgets.QWidget): - """Widget wrapping instances under group.""" - selected = QtCore.Signal(str, str) - active_changed = QtCore.Signal() +class SelectionType: + def __init__(self, name): + self.name = name + + def __eq__(self, other): + if isinstance(other, SelectionType): + other = other.name + return self.name == other + + +class SelectionTypes: + clear = SelectionType("clear") + extend = SelectionType("extend") + extend_to = SelectionType("extend_to") + + +class BaseGroupWidget(QtWidgets.QWidget): + selected = QtCore.Signal(str, str, SelectionType) removed_selected = QtCore.Signal() - def __init__(self, group_name, group_icons, parent): - super(GroupWidget, self).__init__(parent) + def __init__(self, group_name, parent): + super(BaseGroupWidget, self).__init__(parent) label_widget = QtWidgets.QLabel(group_name, self) @@ -68,29 +85,144 @@ class GroupWidget(QtWidgets.QWidget): layout.addLayout(label_layout, 0) self._group = group_name - self._group_icons = group_icons self._widgets_by_id = {} + self._ordered_item_ids = [] self._label_widget = label_widget self._content_layout = layout - def get_widget_by_instance_id(self, instance_id): + @property + def group_name(self): + """Group which widget represent. + + Returns: + str: Name of group. + """ + + return self._group + + def get_widget_by_item_id(self, item_id): """Get instance widget by it's id.""" - return self._widgets_by_id.get(instance_id) + + return self._widgets_by_id.get(item_id) + + def get_selected_item_ids(self): + """Selected instance ids. + + Returns: + Set[str]: Instance ids that are selected. 
+ """ + + return { + instance_id + for instance_id, widget in self._widgets_by_id.items() + if widget.is_selected + } + + def get_selected_widgets(self): + """Access to widgets marked as selected. + + Returns: + List[InstanceCardWidget]: Instance widgets that are selected. + """ + + return [ + widget + for instance_id, widget in self._widgets_by_id.items() + if widget.is_selected + ] + + def get_ordered_widgets(self): + """Get instance ids in order as are shown in ui. + + Returns: + List[str]: Instance ids. + """ + + return [ + self._widgets_by_id[instance_id] + for instance_id in self._ordered_item_ids + ] + + def _remove_all_except(self, item_ids): + item_ids = set(item_ids) + # Remove instance widgets that are not in passed instances + for item_id in tuple(self._widgets_by_id.keys()): + if item_id in item_ids: + continue + + widget = self._widgets_by_id.pop(item_id) + if widget.is_selected: + self.removed_selected.emit() + + widget.setVisible(False) + self._content_layout.removeWidget(widget) + widget.deleteLater() + + def _update_ordered_item_ids(self): + ordered_item_ids = [] + for idx in range(self._content_layout.count()): + if idx > 0: + item = self._content_layout.itemAt(idx) + widget = item.widget() + if widget is not None: + ordered_item_ids.append(widget.id) + + self._ordered_item_ids = ordered_item_ids + + def _on_widget_selection(self, instance_id, group_id, selection_type): + self.selected.emit(instance_id, group_id, selection_type) + + +class ConvertorItemsGroupWidget(BaseGroupWidget): + def update_items(self, items_by_id): + items_by_label = collections.defaultdict(list) + for item in items_by_id.values(): + items_by_label[item.label].append(item) + + # Remove instance widgets that are not in passed instances + self._remove_all_except(items_by_id.keys()) + + # Sort instances by subset name + sorted_labels = list(sorted(items_by_label.keys())) + + # Add new instances to widget + widget_idx = 1 + for label in sorted_labels: + for item in items_by_label[label]: + if item.id in self._widgets_by_id: + widget = self._widgets_by_id[item.id] + widget.update_item(item) + else: + widget = ConvertorItemCardWidget(item, self) + widget.selected.connect(self._on_widget_selection) + self._widgets_by_id[item.id] = widget + self._content_layout.insertWidget(widget_idx, widget) + widget_idx += 1 + + self._update_ordered_item_ids() + + +class InstanceGroupWidget(BaseGroupWidget): + """Widget wrapping instances under group.""" + + active_changed = QtCore.Signal() + + def __init__(self, group_icons, *args, **kwargs): + super(InstanceGroupWidget, self).__init__(*args, **kwargs) + + self._group_icons = group_icons + + def update_icons(self, group_icons): + self._group_icons = group_icons def update_instance_values(self): """Trigger update on instance widgets.""" + for widget in self._widgets_by_id.values(): widget.update_instance_values() - def confirm_remove_instance_id(self, instance_id): - """Delete widget by instance id.""" - widget = self._widgets_by_id.pop(instance_id) - widget.setVisible(False) - self._content_layout.removeWidget(widget) - widget.deleteLater() - def update_instances(self, instances): """Update instances for the group. @@ -98,6 +230,7 @@ class GroupWidget(QtWidgets.QWidget): instances(list): List of instances in CreateContext. 
""" + # Store instances by id and by subset name instances_by_id = {} instances_by_subset_name = collections.defaultdict(list) @@ -107,20 +240,11 @@ class GroupWidget(QtWidgets.QWidget): instances_by_subset_name[subset_name].append(instance) # Remove instance widgets that are not in passed instances - for instance_id in tuple(self._widgets_by_id.keys()): - if instance_id in instances_by_id: - continue - - widget = self._widgets_by_id.pop(instance_id) - if widget.is_selected: - self.removed_selected.emit() - - widget.setVisible(False) - self._content_layout.removeWidget(widget) - widget.deleteLater() + self._remove_all_except(instances_by_id.keys()) # Sort instances by subset name sorted_subset_names = list(sorted(instances_by_subset_name.keys())) + # Add new instances to widget widget_idx = 1 for subset_names in sorted_subset_names: @@ -133,16 +257,19 @@ class GroupWidget(QtWidgets.QWidget): widget = InstanceCardWidget( instance, group_icon, self ) - widget.selected.connect(self.selected) + widget.selected.connect(self._on_widget_selection) widget.active_changed.connect(self.active_changed) self._widgets_by_id[instance.id] = widget self._content_layout.insertWidget(widget_idx, widget) widget_idx += 1 + self._update_ordered_item_ids() + class CardWidget(BaseClickableFrame): """Clickable card used as bigger button.""" - selected = QtCore.Signal(str, str) + + selected = QtCore.Signal(str, str, SelectionType) # Group identifier of card # - this must be set because if send when mouse is released with card id _group_identifier = None @@ -154,6 +281,12 @@ class CardWidget(BaseClickableFrame): self._selected = False self._id = None + @property + def id(self): + """Id of card.""" + + return self._id + @property def is_selected(self): """Is card selected.""" @@ -170,7 +303,16 @@ class CardWidget(BaseClickableFrame): def _mouse_release_callback(self): """Trigger selected signal.""" - self.selected.emit(self._id, self._group_identifier) + + modifiers = QtWidgets.QApplication.keyboardModifiers() + selection_type = SelectionTypes.clear + if bool(modifiers & QtCore.Qt.ShiftModifier): + selection_type = SelectionTypes.extend_to + + elif bool(modifiers & QtCore.Qt.ControlModifier): + selection_type = SelectionTypes.extend + + self.selected.emit(self._id, self._group_identifier, selection_type) class ContextCardWidget(CardWidget): @@ -178,11 +320,12 @@ class ContextCardWidget(CardWidget): Is not visually under group widget and is always at the top of card view. """ + def __init__(self, parent): super(ContextCardWidget, self).__init__(parent) self._id = CONTEXT_ID - self._group_identifier = "" + self._group_identifier = CONTEXT_GROUP icon_widget = PublishPixmapLabel(None, self) icon_widget.setObjectName("FamilyIconLabel") @@ -202,15 +345,50 @@ class ContextCardWidget(CardWidget): self._label_widget = label_widget +class ConvertorItemCardWidget(CardWidget): + """Card for global context. + + Is not visually under group widget and is always at the top of card view. 
+ """ + + def __init__(self, item, parent): + super(ConvertorItemCardWidget, self).__init__(parent) + + self._id = item.id + self.identifier = item.identifier + self._group_identifier = CONVERTOR_ITEM_GROUP + + icon_widget = IconValuePixmapLabel("fa.magic", self) + icon_widget.setObjectName("FamilyIconLabel") + + label_widget = QtWidgets.QLabel(item.label, self) + + icon_layout = QtWidgets.QHBoxLayout() + icon_layout.setContentsMargins(10, 5, 5, 5) + icon_layout.addWidget(icon_widget) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 5, 10, 5) + layout.addLayout(icon_layout, 0) + layout.addWidget(label_widget, 1) + + self._icon_widget = icon_widget + self._label_widget = label_widget + + def update_instance_values(self): + pass + + class InstanceCardWidget(CardWidget): """Card widget representing instance.""" + active_changed = QtCore.Signal() def __init__(self, instance, group_icon, parent): super(InstanceCardWidget, self).__init__(parent) self._id = instance.id - self._group_identifier = instance.creator_label + self._group_identifier = instance.group_label self._group_icon = group_icon self.instance = instance @@ -303,13 +481,14 @@ class InstanceCardWidget(CardWidget): self._last_variant = variant self._last_subset_name = subset_name # Make `variant` bold - found_parts = set(re.findall(variant, subset_name, re.IGNORECASE)) + label = html_escape(self.instance.label) + found_parts = set(re.findall(variant, label, re.IGNORECASE)) if found_parts: for part in found_parts: replacement = "{}".format(part) - subset_name = subset_name.replace(part, replacement) + label = label.replace(part, replacement) - self._label_widget.setText(subset_name) + self._label_widget.setText(label) # HTML text will cause that label start catch mouse clicks # - disabling with changing interaction flag self._label_widget.setTextInteractionFlags( @@ -345,10 +524,11 @@ class InstanceCardView(AbstractInstanceView): Wrapper of all widgets in card view. 
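A sketch of the label treatment InstanceCardWidget applies above: escape the label first so arbitrary subset names cannot inject markup into the rich-text QLabel, then wrap the variant matches in bold tags. Python's html.escape stands in here for openpype.tools.utils.lib.html_escape.

import re
from html import escape  # stand-in for openpype.tools.utils.lib.html_escape

def bold_variant(label, variant):
    escaped = escape(label)
    parts = set(re.findall(re.escape(variant), escaped, re.IGNORECASE))
    for part in parts:
        escaped = escaped.replace(part, "<b>{}</b>".format(part))
    return escaped

# bold_variant("modelMain", "main") -> "model<b>Main</b>"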
""" + def __init__(self, controller, parent): super(InstanceCardView, self).__init__(parent) - self.controller = controller + self._controller = controller scroll_area = QtWidgets.QScrollArea(self) scroll_area.setWidgetResizable(True) @@ -375,11 +555,13 @@ class InstanceCardView(AbstractInstanceView): self._content_layout = content_layout self._content_widget = content_widget - self._widgets_by_group = {} self._context_widget = None + self._convertor_items_group = None + self._widgets_by_group = {} + self._ordered_groups = [] - self._selected_group = None - self._selected_instance_id = None + self._explicitly_selected_instance_ids = [] + self._explicitly_selected_groups = [] self.setSizePolicy( QtWidgets.QSizePolicy.Minimum, @@ -399,43 +581,49 @@ class InstanceCardView(AbstractInstanceView): result.setWidth(width) return result - def _get_selected_widget(self): - if self._selected_instance_id == CONTEXT_ID: - return self._context_widget + def _get_selected_widgets(self): + output = [] + if ( + self._context_widget is not None + and self._context_widget.is_selected + ): + output.append(self._context_widget) - group_widget = self._widgets_by_group.get( - self._selected_group - ) - if group_widget is not None: - widget = group_widget.get_widget_by_instance_id( - self._selected_instance_id - ) - if widget is not None: - return widget + if self._convertor_items_group is not None: + output.extend(self._convertor_items_group.get_selected_widgets()) - return None + for group_widget in self._widgets_by_group.values(): + for widget in group_widget.get_selected_widgets(): + output.append(widget) + return output + + def _get_selected_instance_ids(self): + output = [] + if ( + self._context_widget is not None + and self._context_widget.is_selected + ): + output.append(CONTEXT_ID) + + if self._convertor_items_group is not None: + output.extend(self._convertor_items_group.get_selected_item_ids()) + + for group_widget in self._widgets_by_group.values(): + output.extend(group_widget.get_selected_item_ids()) + return output def refresh(self): """Refresh instances in view based on CreatedContext.""" - # Create context item if is not already existing - # - this must be as first thing to do as context item should be at the - # top - if self._context_widget is None: - widget = ContextCardWidget(self._content_widget) - widget.selected.connect(self._on_widget_selection) - self._context_widget = widget + self._make_sure_context_widget_exists() - self.selection_changed.emit() - self._content_layout.insertWidget(0, widget) - - self.select_item(CONTEXT_ID, None) + self._update_convertor_items_group() # Prepare instances by group and identifiers by group instances_by_group = collections.defaultdict(list) identifiers_by_group = collections.defaultdict(set) - for instance in self.controller.instances: - group_name = instance.creator_label + for instance in self._controller.instances.values(): + group_name = instance.group_label instances_by_group[group_name].append(instance) identifiers_by_group[group_name].add( instance.creator_identifier @@ -446,35 +634,38 @@ class InstanceCardView(AbstractInstanceView): if group_name in instances_by_group: continue - if group_name == self._selected_group: - self._on_remove_selected() widget = self._widgets_by_group.pop(group_name) widget.setVisible(False) self._content_layout.removeWidget(widget) widget.deleteLater() + if group_name in self._explicitly_selected_groups: + self._explicitly_selected_groups.remove(group_name) + # Sort groups sorted_group_names = 
list(sorted(instances_by_group.keys())) + # Keep track of widget indexes # - we start with 1 because Context item as at the top widget_idx = 1 + if self._convertor_items_group is not None: + widget_idx += 1 + for group_name in sorted_group_names: + group_icons = { + idenfier: self._controller.get_creator_icon(idenfier) + for idenfier in identifiers_by_group[group_name] + } if group_name in self._widgets_by_group: group_widget = self._widgets_by_group[group_name] - else: - group_icons = { - idenfier: self.controller.get_icon_for_family(idenfier) - for idenfier in identifiers_by_group[group_name] - } + group_widget.update_icons(group_icons) - group_widget = GroupWidget( - group_name, group_icons, self._content_widget + else: + group_widget = InstanceGroupWidget( + group_icons, group_name, self._content_widget ) group_widget.active_changed.connect(self._on_active_changed) group_widget.selected.connect(self._on_widget_selection) - group_widget.removed_selected.connect( - self._on_remove_selected - ) self._content_layout.insertWidget(widget_idx, group_widget) self._widgets_by_group[group_name] = group_widget @@ -483,6 +674,56 @@ class InstanceCardView(AbstractInstanceView): instances_by_group[group_name] ) + self._update_ordered_group_nameS() + + def _update_ordered_group_nameS(self): + ordered_group_names = [CONTEXT_GROUP] + for idx in range(self._content_layout.count()): + if idx > 0: + item = self._content_layout.itemAt(idx) + group_widget = item.widget() + if group_widget is not None: + ordered_group_names.append(group_widget.group_name) + + self._ordered_groups = ordered_group_names + + def _make_sure_context_widget_exists(self): + # Create context item if is not already existing + # - this must be as first thing to do as context item should be at the + # top + if self._context_widget is not None: + return + + widget = ContextCardWidget(self._content_widget) + widget.selected.connect(self._on_widget_selection) + + self._context_widget = widget + + self.selection_changed.emit() + self._content_layout.insertWidget(0, widget) + + def _update_convertor_items_group(self): + convertor_items = self._controller.convertor_items + if not convertor_items and self._convertor_items_group is None: + return + + if not convertor_items: + self._convertor_items_group.setVisible(False) + self._content_layout.removeWidget(self._convertor_items_group) + self._convertor_items_group.deleteLater() + self._convertor_items_group = None + return + + if self._convertor_items_group is None: + group_widget = ConvertorItemsGroupWidget( + CONVERTOR_ITEM_GROUP, self._content_widget + ) + group_widget.selected.connect(self._on_widget_selection) + self._content_layout.insertWidget(1, group_widget) + self._convertor_items_group = group_widget + + self._convertor_items_group.update_items(convertor_items) + def refresh_instance_states(self): """Trigger update of instances on group widgets.""" for widget in self._widgets_by_group.values(): @@ -491,10 +732,7 @@ class InstanceCardView(AbstractInstanceView): def _on_active_changed(self): self.active_changed.emit() - def _on_widget_selection(self, instance_id, group_name): - self.select_item(instance_id, group_name) - - def select_item(self, instance_id, group_name): + def _on_widget_selection(self, instance_id, group_name, selection_type): """Select specific item by instance id. 
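        The `selection_type` decides how the click is applied:
        `SelectionTypes.clear` replaces the current selection,
        `SelectionTypes.extend` toggles the single item and
        `SelectionTypes.extend_to` extends the selection up to the item
        (Shift+click behaviour).
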
Pass `CONTEXT_ID` as instance id and empty string as group to select @@ -502,38 +740,353 @@ class InstanceCardView(AbstractInstanceView): """ if instance_id == CONTEXT_ID: new_widget = self._context_widget + else: - group_widget = self._widgets_by_group[group_name] - new_widget = group_widget.get_widget_by_instance_id(instance_id) + if group_name == CONVERTOR_ITEM_GROUP: + group_widget = self._convertor_items_group + else: + group_widget = self._widgets_by_group[group_name] + new_widget = group_widget.get_widget_by_item_id(instance_id) - selected_widget = self._get_selected_widget() - if new_widget is selected_widget: - return - - if selected_widget is not None: - selected_widget.set_selected(False) - - self._selected_instance_id = instance_id - self._selected_group = group_name - if new_widget is not None: - new_widget.set_selected(True) + if selection_type is SelectionTypes.clear: + self._select_item_clear(instance_id, group_name, new_widget) + elif selection_type is SelectionTypes.extend: + self._select_item_extend(instance_id, group_name, new_widget) + elif selection_type is SelectionTypes.extend_to: + self._select_item_extend_to(instance_id, group_name, new_widget) self.selection_changed.emit() - def _on_remove_selected(self): - selected_widget = self._get_selected_widget() - if selected_widget is None: - self._on_widget_selection(CONTEXT_ID, None) + def _select_item_clear(self, instance_id, group_name, new_widget): + """Select specific item by instance id and clear previous selection. + + Pass `CONTEXT_ID` as instance id and empty string as group to select + global context item. + """ + + selected_widgets = self._get_selected_widgets() + for widget in selected_widgets: + if widget.id != instance_id: + widget.set_selected(False) + + self._explicitly_selected_groups = [group_name] + self._explicitly_selected_instance_ids = [instance_id] + + if new_widget is not None: + new_widget.set_selected(True) + + def _select_item_extend(self, instance_id, group_name, new_widget): + """Add/Remove single item to/from current selection. + + If item is already selected the selection is removed. + """ + + self._explicitly_selected_instance_ids = ( + self._get_selected_instance_ids() + ) + if new_widget.is_selected: + self._explicitly_selected_instance_ids.remove(instance_id) + new_widget.set_selected(False) + remove_group = False + if instance_id == CONTEXT_ID: + remove_group = True + else: + if group_name == CONVERTOR_ITEM_GROUP: + group_widget = self._convertor_items_group + else: + group_widget = self._widgets_by_group[group_name] + if not group_widget.get_selected_widgets(): + remove_group = True + + if remove_group: + self._explicitly_selected_groups.remove(group_name) + return + + self._explicitly_selected_instance_ids.append(instance_id) + if group_name in self._explicitly_selected_groups: + self._explicitly_selected_groups.remove(group_name) + self._explicitly_selected_groups.append(group_name) + new_widget.set_selected(True) + + def _select_item_extend_to(self, instance_id, group_name, new_widget): + """Extend selected items to specific instance id. + + This method is handling Shift+click selection of widgets. Selection + is not stored to explicit selection items. That's because user can + shift select again and it should use last explicit selected item as + source item for selection. + + Items selected via this function can get to explicit selection only if + selection is extended by one specific item ('_select_item_extend'). 
+ From that moment the selection is locked to new last explicit selected + item. + + It's required to traverse through group widgets in their UI order and + through their instances in UI order. All explicitly selected items + must not change their selection state during this function. Passed + instance id can be above or under last selected item so a start item + and end item must be found to be able know which direction is selection + happening. + """ + + # Start group name (in '_ordered_groups') + start_group = None + # End group name (in '_ordered_groups') + end_group = None + # Instance id of first selected item + start_instance_id = None + # Instance id of last selected item + end_instance_id = None + + # Get previously selected group by explicit selected groups + previous_group = None + if self._explicitly_selected_groups: + previous_group = self._explicitly_selected_groups[-1] + + # Find last explicitly selected instance id + previous_last_selected_id = None + if self._explicitly_selected_instance_ids: + previous_last_selected_id = ( + self._explicitly_selected_instance_ids[-1] + ) + + # If last instance id was not found or available then last selected + # group is also invalid. + # NOTE: This probably never happen? + if previous_last_selected_id is None: + previous_group = None + + # Check if previously selected group is available and find out if + # new instance group is above or under previous selection + # - based on these information are start/end group/instance filled + if previous_group in self._ordered_groups: + new_idx = self._ordered_groups.index(group_name) + prev_idx = self._ordered_groups.index(previous_group) + if new_idx < prev_idx: + start_group = group_name + end_group = previous_group + start_instance_id = instance_id + end_instance_id = previous_last_selected_id + else: + start_group = previous_group + end_group = group_name + start_instance_id = previous_last_selected_id + end_instance_id = instance_id + + # If start group is not set then use context item group name + if start_group is None: + start_group = CONTEXT_GROUP + + # If start instance id is not filled then use context id (similar to + # group) + if start_instance_id is None: + start_instance_id = CONTEXT_ID + + # If end group is not defined then use passed group name + # - this can be happen when previous group was not selected + # - when this happens the selection will probably happen from context + # item to item selected by user + if end_group is None: + end_group = group_name + + # If end instance is not filled then use instance selected by user + if end_instance_id is None: + end_instance_id = instance_id + + # Start and end group are the same + # - a different logic is needed in that case + same_group = start_group == end_group + + # Process known information and change selection of items + passed_start_group = False + passed_end_group = False + # Go through ordered groups (from top to bottom) and change selection + for name in self._ordered_groups: + # Prepare sorted instance widgets + if name == CONTEXT_GROUP: + sorted_widgets = [self._context_widget] + else: + if name == CONVERTOR_ITEM_GROUP: + group_widget = self._convertor_items_group + else: + group_widget = self._widgets_by_group[name] + sorted_widgets = group_widget.get_ordered_widgets() + + # Change selection based on explicit selection if start group + # was not passed yet + if not passed_start_group: + if name != start_group: + for widget in sorted_widgets: + widget.set_selected( + widget.id in self._explicitly_selected_instance_ids + ) 
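+                    # This group is above the selection range - keep only its
+                    #   explicitly selected widgets and move to the next group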
+ continue + + # Change selection based on explicit selection if end group + # already passed + if passed_end_group: + for widget in sorted_widgets: + widget.set_selected( + widget.id in self._explicitly_selected_instance_ids + ) + continue + + # Start group is already passed and end group was not yet hit + if same_group: + passed_start_group = True + passed_end_group = True + passed_start_instance = False + passed_end_instance = False + for widget in sorted_widgets: + if not passed_start_instance: + if widget.id in (start_instance_id, end_instance_id): + if widget.id != start_instance_id: + # Swap start/end instance if start instance is + # after end + # - fix 'passed_end_instance' check + start_instance_id, end_instance_id = ( + end_instance_id, start_instance_id + ) + passed_start_instance = True + + # Find out if widget should be selected + select = False + if passed_end_instance: + select = False + + elif passed_start_instance: + select = True + + # Check if instance is in explicitly selected items if + # should ont be selected + if ( + not select + and widget.id in self._explicitly_selected_instance_ids + ): + select = True + + widget.set_selected(select) + + if ( + not passed_end_instance + and widget.id == end_instance_id + ): + passed_end_instance = True + + elif name == start_group: + # First group from which selection should start + # - look for start instance first from which the selection + # should happen + passed_start_group = True + passed_start_instance = False + for widget in sorted_widgets: + if widget.id == start_instance_id: + passed_start_instance = True + + select = False + # Check if passed start instance or instance is + # in explicitly selected items to be selected + if ( + passed_start_instance + or widget.id in self._explicitly_selected_instance_ids + ): + select = True + widget.set_selected(select) + + elif name == end_group: + # Last group where selection should happen + # - look for end instance first after which the selection + # should stop + passed_end_group = True + passed_end_instance = False + for widget in sorted_widgets: + select = False + # Check if not yet passed end instance or if instance is + # in explicitly selected items to be selected + if ( + not passed_end_instance + or widget.id in self._explicitly_selected_instance_ids + ): + select = True + + widget.set_selected(select) + + if widget.id == end_instance_id: + passed_end_instance = True + + else: + # Just select everything between start and end group + for widget in sorted_widgets: + widget.set_selected(True) def get_selected_items(self): """Get selected instance ids and context.""" + + convertor_identifiers = [] instances = [] + selected_widgets = self._get_selected_widgets() + context_selected = False - selected_widget = self._get_selected_widget() - if selected_widget is self._context_widget: - context_selected = True + for widget in selected_widgets: + if widget is self._context_widget: + context_selected = True - elif selected_widget is not None: - instances.append(selected_widget.instance) + elif isinstance(widget, InstanceCardWidget): + instances.append(widget.id) - return instances, context_selected + elif isinstance(widget, ConvertorItemCardWidget): + convertor_identifiers.append(widget.identifier) + + return instances, context_selected, convertor_identifiers + + def set_selected_items( + self, instance_ids, context_selected, convertor_identifiers + ): + s_instance_ids = set(instance_ids) + s_convertor_identifiers = set(convertor_identifiers) + cur_ids, cur_context, 
cur_convertor_identifiers = ( + self.get_selected_items() + ) + if ( + set(cur_ids) == s_instance_ids + and cur_context == context_selected + and set(cur_convertor_identifiers) == s_convertor_identifiers + ): + return + + selected_groups = [] + selected_instances = [] + if context_selected: + selected_groups.append(CONTEXT_GROUP) + selected_instances.append(CONTEXT_ID) + + self._context_widget.set_selected(context_selected) + + for group_name in self._ordered_groups: + if group_name == CONTEXT_GROUP: + continue + + is_convertor_group = group_name == CONVERTOR_ITEM_GROUP + if is_convertor_group: + group_widget = self._convertor_items_group + else: + group_widget = self._widgets_by_group[group_name] + + group_selected = False + for widget in group_widget.get_ordered_widgets(): + select = False + if is_convertor_group: + is_in = widget.identifier in s_convertor_identifiers + else: + is_in = widget.id in s_instance_ids + if is_in: + selected_instances.append(widget.id) + group_selected = True + select = True + widget.set_selected(select) + + if group_selected: + selected_groups.append(group_name) + + self._explicitly_selected_groups = selected_groups + self._explicitly_selected_instance_ids = selected_instances diff --git a/openpype/tools/publisher/widgets/create_dialog.py b/openpype/tools/publisher/widgets/create_dialog.py deleted file mode 100644 index 9e357f3a56..0000000000 --- a/openpype/tools/publisher/widgets/create_dialog.py +++ /dev/null @@ -1,1193 +0,0 @@ -import sys -import re -import traceback -import copy - -import qtawesome -try: - import commonmark -except Exception: - commonmark = None -from Qt import QtWidgets, QtCore, QtGui -from openpype.lib import TaskNotSetError -from openpype.pipeline.create import ( - CreatorError, - SUBSET_NAME_ALLOWED_SYMBOLS -) -from openpype.tools.utils import ( - ErrorMessageBox, - MessageOverlayObject, - ClickableFrame, -) - -from .widgets import IconValuePixmapLabel -from .assets_widget import CreateDialogAssetsWidget -from .tasks_widget import CreateDialogTasksWidget -from .precreate_widget import PreCreateWidget -from ..constants import ( - VARIANT_TOOLTIP, - CREATOR_IDENTIFIER_ROLE, - FAMILY_ROLE -) - -SEPARATORS = ("---separator---", "---") - - -class VariantInputsWidget(QtWidgets.QWidget): - resized = QtCore.Signal() - - def resizeEvent(self, event): - super(VariantInputsWidget, self).resizeEvent(event) - self.resized.emit() - - -class CreateErrorMessageBox(ErrorMessageBox): - def __init__( - self, - creator_label, - subset_name, - asset_name, - exc_msg, - formatted_traceback, - parent - ): - self._creator_label = creator_label - self._subset_name = subset_name - self._asset_name = asset_name - self._exc_msg = exc_msg - self._formatted_traceback = formatted_traceback - super(CreateErrorMessageBox, self).__init__("Creation failed", parent) - - def _create_top_widget(self, parent_widget): - label_widget = QtWidgets.QLabel(parent_widget) - label_widget.setText( - "Failed to create" - ) - return label_widget - - def _get_report_data(self): - report_message = ( - "{creator}: Failed to create Subset: \"{subset}\"" - " in Asset: \"{asset}\"" - "\n\nError: {message}" - ).format( - creator=self._creator_label, - subset=self._subset_name, - asset=self._asset_name, - message=self._exc_msg, - ) - if self._formatted_traceback: - report_message += "\n\n{}".format(self._formatted_traceback) - return [report_message] - - def _create_content(self, content_layout): - item_name_template = ( - "Creator: {}
" - "Subset: {}
" - "Asset: {}
" - ) - exc_msg_template = "{}" - - line = self._create_line() - content_layout.addWidget(line) - - item_name_widget = QtWidgets.QLabel(self) - item_name_widget.setText( - item_name_template.format( - self._creator_label, self._subset_name, self._asset_name - ) - ) - content_layout.addWidget(item_name_widget) - - message_label_widget = QtWidgets.QLabel(self) - message_label_widget.setText( - exc_msg_template.format(self.convert_text_for_html(self._exc_msg)) - ) - content_layout.addWidget(message_label_widget) - - if self._formatted_traceback: - line_widget = self._create_line() - tb_widget = self._create_traceback_widget( - self._formatted_traceback - ) - content_layout.addWidget(line_widget) - content_layout.addWidget(tb_widget) - - -# TODO add creator identifier/label to details -class CreatorShortDescWidget(QtWidgets.QWidget): - height_changed = QtCore.Signal(int) - - def __init__(self, parent=None): - super(CreatorShortDescWidget, self).__init__(parent=parent) - - # --- Short description widget --- - icon_widget = IconValuePixmapLabel(None, self) - icon_widget.setObjectName("FamilyIconLabel") - - # --- Short description inputs --- - short_desc_input_widget = QtWidgets.QWidget(self) - - family_label = QtWidgets.QLabel(short_desc_input_widget) - family_label.setAlignment( - QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft - ) - - description_label = QtWidgets.QLabel(short_desc_input_widget) - description_label.setAlignment( - QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft - ) - - short_desc_input_layout = QtWidgets.QVBoxLayout( - short_desc_input_widget - ) - short_desc_input_layout.setSpacing(0) - short_desc_input_layout.addWidget(family_label) - short_desc_input_layout.addWidget(description_label) - # -------------------------------- - - layout = QtWidgets.QHBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(icon_widget, 0) - layout.addWidget(short_desc_input_widget, 1) - # -------------------------------- - - self._icon_widget = icon_widget - self._family_label = family_label - self._description_label = description_label - - self._last_height = None - - def _check_height_change(self): - height = self.height() - if height != self._last_height: - self._last_height = height - self.height_changed.emit(height) - - def showEvent(self, event): - super(CreatorShortDescWidget, self).showEvent(event) - self._check_height_change() - - def resizeEvent(self, event): - super(CreatorShortDescWidget, self).resizeEvent(event) - self._check_height_change() - - def set_plugin(self, plugin=None): - if not plugin: - self._icon_widget.set_icon_def(None) - self._family_label.setText("") - self._description_label.setText("") - return - - plugin_icon = plugin.get_icon() - description = plugin.get_description() or "" - - self._icon_widget.set_icon_def(plugin_icon) - self._family_label.setText("{}".format(plugin.family)) - self._family_label.setTextInteractionFlags(QtCore.Qt.NoTextInteraction) - self._description_label.setText(description) - - -class HelpButton(ClickableFrame): - resized = QtCore.Signal(int) - question_mark_icon_name = "fa.question" - help_icon_name = "fa.question-circle" - hide_icon_name = "fa.angle-left" - - def __init__(self, *args, **kwargs): - super(HelpButton, self).__init__(*args, **kwargs) - self.setObjectName("CreateDialogHelpButton") - - question_mark_label = QtWidgets.QLabel(self) - help_widget = QtWidgets.QWidget(self) - - help_question = QtWidgets.QLabel(help_widget) - help_label = QtWidgets.QLabel("Help", help_widget) - hide_icon = QtWidgets.QLabel(help_widget) - - 
help_layout = QtWidgets.QHBoxLayout(help_widget) - help_layout.setContentsMargins(0, 0, 5, 0) - help_layout.addWidget(help_question, 0) - help_layout.addWidget(help_label, 0) - help_layout.addStretch(1) - help_layout.addWidget(hide_icon, 0) - - layout = QtWidgets.QHBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.setSpacing(0) - layout.addWidget(question_mark_label, 0) - layout.addWidget(help_widget, 1) - - help_widget.setVisible(False) - - self._question_mark_label = question_mark_label - self._help_widget = help_widget - self._help_question = help_question - self._hide_icon = hide_icon - - self._expanded = None - self.set_expanded() - - def set_expanded(self, expanded=None): - if self._expanded is expanded: - if expanded is not None: - return - expanded = False - self._expanded = expanded - self._help_widget.setVisible(expanded) - self._update_content() - - def _update_content(self): - width = self.get_icon_width() - if self._expanded: - question_mark_pix = QtGui.QPixmap(width, width) - question_mark_pix.fill(QtCore.Qt.transparent) - - else: - question_mark_icon = qtawesome.icon( - self.question_mark_icon_name, color=QtCore.Qt.white - ) - question_mark_pix = question_mark_icon.pixmap(width, width) - - hide_icon = qtawesome.icon( - self.hide_icon_name, color=QtCore.Qt.white - ) - help_question_icon = qtawesome.icon( - self.help_icon_name, color=QtCore.Qt.white - ) - self._question_mark_label.setPixmap(question_mark_pix) - self._question_mark_label.setMaximumWidth(width) - self._hide_icon.setPixmap(hide_icon.pixmap(width, width)) - self._help_question.setPixmap(help_question_icon.pixmap(width, width)) - - def get_icon_width(self): - metrics = self.fontMetrics() - return metrics.height() - - def set_pos_and_size(self, pos_x, pos_y, width, height): - update_icon = self.height() != height - self.move(pos_x, pos_y) - self.resize(width, height) - - if update_icon: - self._update_content() - self.updateGeometry() - - def showEvent(self, event): - super(HelpButton, self).showEvent(event) - self.resized.emit(self.height()) - - def resizeEvent(self, event): - super(HelpButton, self).resizeEvent(event) - self.resized.emit(self.height()) - - -class CreateDialog(QtWidgets.QDialog): - default_size = (1000, 560) - - def __init__( - self, controller, asset_name=None, task_name=None, parent=None - ): - super(CreateDialog, self).__init__(parent) - - self.setWindowTitle("Create new instance") - - self.controller = controller - - if asset_name is None: - asset_name = self.dbcon.Session.get("AVALON_ASSET") - - if task_name is None: - task_name = self.dbcon.Session.get("AVALON_TASK") - - self._asset_name = asset_name - self._task_name = task_name - - self._last_pos = None - self._asset_doc = None - self._subset_names = None - self._selected_creator = None - - self._prereq_available = False - - self._message_dialog = None - - name_pattern = "^[{}]*$".format(SUBSET_NAME_ALLOWED_SYMBOLS) - self._name_pattern = name_pattern - self._compiled_name_pattern = re.compile(name_pattern) - - overlay_object = MessageOverlayObject(self) - - context_widget = QtWidgets.QWidget(self) - - assets_widget = CreateDialogAssetsWidget(controller, context_widget) - tasks_widget = CreateDialogTasksWidget(controller, context_widget) - - context_layout = QtWidgets.QVBoxLayout(context_widget) - context_layout.setContentsMargins(0, 0, 0, 0) - context_layout.setSpacing(0) - context_layout.addWidget(assets_widget, 2) - context_layout.addWidget(tasks_widget, 1) - - # --- Creators view --- - creators_header_widget = 
QtWidgets.QWidget(self) - header_label_widget = QtWidgets.QLabel( - "Choose family:", creators_header_widget - ) - creators_header_layout = QtWidgets.QHBoxLayout(creators_header_widget) - creators_header_layout.setContentsMargins(0, 0, 0, 0) - creators_header_layout.addWidget(header_label_widget, 1) - - creators_view = QtWidgets.QListView(self) - creators_model = QtGui.QStandardItemModel() - creators_view.setModel(creators_model) - - variant_widget = VariantInputsWidget(self) - - variant_input = QtWidgets.QLineEdit(variant_widget) - variant_input.setObjectName("VariantInput") - variant_input.setToolTip(VARIANT_TOOLTIP) - - variant_hints_btn = QtWidgets.QToolButton(variant_widget) - variant_hints_btn.setArrowType(QtCore.Qt.DownArrow) - variant_hints_btn.setIconSize(QtCore.QSize(12, 12)) - - variant_hints_menu = QtWidgets.QMenu(variant_widget) - variant_hints_group = QtWidgets.QActionGroup(variant_hints_menu) - - variant_layout = QtWidgets.QHBoxLayout(variant_widget) - variant_layout.setContentsMargins(0, 0, 0, 0) - variant_layout.setSpacing(0) - variant_layout.addWidget(variant_input, 1) - variant_layout.addWidget(variant_hints_btn, 0, QtCore.Qt.AlignVCenter) - - subset_name_input = QtWidgets.QLineEdit(self) - subset_name_input.setEnabled(False) - - form_layout = QtWidgets.QFormLayout() - form_layout.addRow("Variant:", variant_widget) - form_layout.addRow("Subset:", subset_name_input) - - mid_widget = QtWidgets.QWidget(self) - mid_layout = QtWidgets.QVBoxLayout(mid_widget) - mid_layout.setContentsMargins(0, 0, 0, 0) - mid_layout.addWidget(creators_header_widget, 0) - mid_layout.addWidget(creators_view, 1) - mid_layout.addLayout(form_layout, 0) - # ------------ - - # --- Creator short info and attr defs --- - creator_attrs_widget = QtWidgets.QWidget(self) - - creator_short_desc_widget = CreatorShortDescWidget( - creator_attrs_widget - ) - - attr_separator_widget = QtWidgets.QWidget(self) - attr_separator_widget.setObjectName("Separator") - attr_separator_widget.setMinimumHeight(1) - attr_separator_widget.setMaximumHeight(1) - - # Precreate attributes widget - pre_create_widget = PreCreateWidget(creator_attrs_widget) - - # Create button - create_btn_wrapper = QtWidgets.QWidget(creator_attrs_widget) - create_btn = QtWidgets.QPushButton("Create", create_btn_wrapper) - create_btn.setEnabled(False) - - create_btn_wrap_layout = QtWidgets.QHBoxLayout(create_btn_wrapper) - create_btn_wrap_layout.setContentsMargins(0, 0, 0, 0) - create_btn_wrap_layout.addStretch(1) - create_btn_wrap_layout.addWidget(create_btn, 0) - - creator_attrs_layout = QtWidgets.QVBoxLayout(creator_attrs_widget) - creator_attrs_layout.setContentsMargins(0, 0, 0, 0) - creator_attrs_layout.addWidget(creator_short_desc_widget, 0) - creator_attrs_layout.addWidget(attr_separator_widget, 0) - creator_attrs_layout.addWidget(pre_create_widget, 1) - creator_attrs_layout.addWidget(create_btn_wrapper, 0) - # ------------------------------------- - - # --- Detailed information about creator --- - # Detailed description of creator - detail_description_widget = QtWidgets.QWidget(self) - - detail_placoholder_widget = QtWidgets.QWidget( - detail_description_widget - ) - detail_placoholder_widget.setAttribute( - QtCore.Qt.WA_TranslucentBackground - ) - - detail_description_input = QtWidgets.QTextEdit( - detail_description_widget - ) - detail_description_input.setObjectName("CreatorDetailedDescription") - detail_description_input.setTextInteractionFlags( - QtCore.Qt.TextBrowserInteraction - ) - - detail_description_layout = QtWidgets.QVBoxLayout( 
- detail_description_widget - ) - detail_description_layout.setContentsMargins(0, 0, 0, 0) - detail_description_layout.setSpacing(0) - detail_description_layout.addWidget(detail_placoholder_widget, 0) - detail_description_layout.addWidget(detail_description_input, 1) - - detail_description_widget.setVisible(False) - - # ------------------------------------------- - splitter_widget = QtWidgets.QSplitter(self) - splitter_widget.addWidget(context_widget) - splitter_widget.addWidget(mid_widget) - splitter_widget.addWidget(creator_attrs_widget) - splitter_widget.addWidget(detail_description_widget) - splitter_widget.setStretchFactor(0, 1) - splitter_widget.setStretchFactor(1, 1) - splitter_widget.setStretchFactor(2, 1) - splitter_widget.setStretchFactor(3, 1) - - layout = QtWidgets.QHBoxLayout(self) - layout.addWidget(splitter_widget, 1) - - # Floating help button - # - Create this button as last to be fully visible - help_btn = HelpButton(self) - - prereq_timer = QtCore.QTimer() - prereq_timer.setInterval(50) - prereq_timer.setSingleShot(True) - - desc_width_anim_timer = QtCore.QTimer() - desc_width_anim_timer.setInterval(10) - - prereq_timer.timeout.connect(self._on_prereq_timer) - - desc_width_anim_timer.timeout.connect(self._on_desc_animation) - - help_btn.clicked.connect(self._on_help_btn) - help_btn.resized.connect(self._on_help_btn_resize) - - assets_widget.header_height_changed.connect( - self._on_asset_filter_height_change - ) - - create_btn.clicked.connect(self._on_create) - variant_widget.resized.connect(self._on_variant_widget_resize) - variant_input.returnPressed.connect(self._on_create) - variant_input.textChanged.connect(self._on_variant_change) - creators_view.selectionModel().currentChanged.connect( - self._on_creator_item_change - ) - variant_hints_btn.clicked.connect(self._on_variant_btn_click) - variant_hints_menu.triggered.connect(self._on_variant_action) - assets_widget.selection_changed.connect(self._on_asset_change) - assets_widget.current_context_required.connect( - self._on_current_session_context_request - ) - tasks_widget.task_changed.connect(self._on_task_change) - creator_short_desc_widget.height_changed.connect( - self._on_description_height_change - ) - splitter_widget.splitterMoved.connect(self._on_splitter_move) - - controller.add_plugins_refresh_callback(self._on_plugins_refresh) - - self._overlay_object = overlay_object - - self._splitter_widget = splitter_widget - - self._context_widget = context_widget - self._assets_widget = assets_widget - self._tasks_widget = tasks_widget - - self.subset_name_input = subset_name_input - - self.variant_input = variant_input - self.variant_hints_btn = variant_hints_btn - self.variant_hints_menu = variant_hints_menu - self.variant_hints_group = variant_hints_group - - self._creators_header_widget = creators_header_widget - self.creators_model = creators_model - self.creators_view = creators_view - self.create_btn = create_btn - - self._creator_short_desc_widget = creator_short_desc_widget - self._pre_create_widget = pre_create_widget - self._attr_separator_widget = attr_separator_widget - - self._detail_placoholder_widget = detail_placoholder_widget - self._detail_description_widget = detail_description_widget - self._detail_description_input = detail_description_input - self._help_btn = help_btn - - self._prereq_timer = prereq_timer - self._first_show = True - - # Description animation - self._description_size_policy = detail_description_widget.sizePolicy() - self._desc_width_anim_timer = desc_width_anim_timer - 
self._desc_widget_step = 0 - self._last_description_width = None - self._last_full_width = 0 - self._expected_description_width = 0 - self._last_desc_max_width = None - self._other_widgets_widths = [] - - def _emit_message(self, message): - self._overlay_object.add_message(message) - - def _context_change_is_enabled(self): - return self._context_widget.isEnabled() - - def _get_asset_name(self): - asset_name = None - if self._context_change_is_enabled(): - asset_name = self._assets_widget.get_selected_asset_name() - - if asset_name is None: - asset_name = self._asset_name - return asset_name - - def _get_task_name(self): - task_name = None - if self._context_change_is_enabled(): - # Don't use selection of task if asset is not set - asset_name = self._assets_widget.get_selected_asset_name() - if asset_name: - task_name = self._tasks_widget.get_selected_task_name() - - if not task_name: - task_name = self._task_name - return task_name - - @property - def dbcon(self): - return self.controller.dbcon - - def _set_context_enabled(self, enabled): - self._assets_widget.set_enabled(enabled) - self._tasks_widget.set_enabled(enabled) - self._context_widget.setEnabled(enabled) - - def refresh(self): - # Get context before refresh to keep selection of asset and - # task widgets - asset_name = self._get_asset_name() - task_name = self._get_task_name() - - self._prereq_available = False - - # Disable context widget so refresh of asset will use context asset - # name - self._set_context_enabled(False) - - self._assets_widget.refresh() - - # Refresh data before update of creators - self._refresh_asset() - # Then refresh creators which may trigger callbacks using refreshed - # data - self._refresh_creators() - - self._assets_widget.set_current_asset_name(self._asset_name) - self._assets_widget.select_asset_by_name(asset_name) - self._tasks_widget.set_asset_name(asset_name) - self._tasks_widget.select_task_name(task_name) - - self._invalidate_prereq() - - def _invalidate_prereq(self): - self._prereq_timer.start() - - def _on_asset_filter_height_change(self, height): - self._creators_header_widget.setMinimumHeight(height) - self._creators_header_widget.setMaximumHeight(height) - - def _on_prereq_timer(self): - prereq_available = True - creator_btn_tooltips = [] - if self.creators_model.rowCount() < 1: - prereq_available = False - creator_btn_tooltips.append("Creator is not selected") - - if self._asset_doc is None: - # QUESTION how to handle invalid asset? 
- prereq_available = False - creator_btn_tooltips.append("Context is not selected") - - if prereq_available != self._prereq_available: - self._prereq_available = prereq_available - - self.create_btn.setEnabled(prereq_available) - self.creators_view.setEnabled(prereq_available) - self.variant_input.setEnabled(prereq_available) - self.variant_hints_btn.setEnabled(prereq_available) - - tooltip = "" - if creator_btn_tooltips: - tooltip = "\n".join(creator_btn_tooltips) - self.create_btn.setToolTip(tooltip) - - self._on_variant_change() - - def _refresh_asset(self): - asset_name = self._get_asset_name() - - # Skip if asset did not change - if self._asset_doc and self._asset_doc["name"] == asset_name: - return - - # Make sure `_asset_doc` and `_subset_names` variables are reset - self._asset_doc = None - self._subset_names = None - if asset_name is None: - return - - asset_doc = self.dbcon.find_one({ - "type": "asset", - "name": asset_name - }) - self._asset_doc = asset_doc - - if asset_doc: - subset_docs = self.dbcon.find( - { - "type": "subset", - "parent": asset_doc["_id"] - }, - {"name": 1} - ) - self._subset_names = set(subset_docs.distinct("name")) - - if not asset_doc: - self.subset_name_input.setText("< Asset is not set >") - - def _refresh_creators(self): - # Refresh creators and add their families to list - existing_items = {} - old_creators = set() - for row in range(self.creators_model.rowCount()): - item = self.creators_model.item(row, 0) - identifier = item.data(CREATOR_IDENTIFIER_ROLE) - existing_items[identifier] = item - old_creators.add(identifier) - - # Add new families - new_creators = set() - for identifier, creator in self.controller.manual_creators.items(): - # TODO add details about creator - new_creators.add(identifier) - if identifier in existing_items: - item = existing_items[identifier] - else: - item = QtGui.QStandardItem() - item.setFlags( - QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable - ) - self.creators_model.appendRow(item) - - label = creator.label or identifier - item.setData(label, QtCore.Qt.DisplayRole) - item.setData(identifier, CREATOR_IDENTIFIER_ROLE) - item.setData(creator.family, FAMILY_ROLE) - - # Remove families that are no more available - for identifier in (old_creators - new_creators): - item = existing_items[identifier] - self.creators_model.takeRow(item.row()) - - if self.creators_model.rowCount() < 1: - return - - # Make sure there is a selection - indexes = self.creators_view.selectedIndexes() - if not indexes: - index = self.creators_model.index(0, 0) - self.creators_view.setCurrentIndex(index) - else: - index = indexes[0] - - identifier = index.data(CREATOR_IDENTIFIER_ROLE) - - self._set_creator_by_identifier(identifier) - - def _on_plugins_refresh(self): - # Trigger refresh only if is visible - if self.isVisible(): - self.refresh() - - def _on_asset_change(self): - self._refresh_asset() - - asset_name = self._assets_widget.get_selected_asset_name() - self._tasks_widget.set_asset_name(asset_name) - if self._context_change_is_enabled(): - self._invalidate_prereq() - - def _on_task_change(self): - if self._context_change_is_enabled(): - self._invalidate_prereq() - - def _on_current_session_context_request(self): - self._assets_widget.set_current_session_asset() - if self._task_name: - self._tasks_widget.select_task_name(self._task_name) - - def _on_description_height_change(self): - # Use separator's 'y' position as height - height = self._attr_separator_widget.y() - self._detail_placoholder_widget.setMinimumHeight(height) - 
self._detail_placoholder_widget.setMaximumHeight(height) - - def _on_creator_item_change(self, new_index, _old_index): - identifier = None - if new_index.isValid(): - identifier = new_index.data(CREATOR_IDENTIFIER_ROLE) - self._set_creator_by_identifier(identifier) - - def _update_help_btn(self): - short_desc_rect = self._creator_short_desc_widget.rect() - - # point = short_desc_rect.topRight() - point = short_desc_rect.center() - mapped_point = self._creator_short_desc_widget.mapTo(self, point) - # pos_y = mapped_point.y() - center_pos_y = mapped_point.y() - icon_width = self._help_btn.get_icon_width() - - _height = int(icon_width * 2.5) - height = min(_height, short_desc_rect.height()) - pos_y = center_pos_y - int(height / 2) - - pos_x = self.width() - icon_width - if self._detail_placoholder_widget.isVisible(): - pos_x -= ( - self._detail_placoholder_widget.width() - + self._splitter_widget.handle(3).width() - ) - - width = self.width() - pos_x - - self._help_btn.set_pos_and_size( - max(0, pos_x), max(0, pos_y), - width, height - ) - - def _on_help_btn_resize(self, height): - if self._creator_short_desc_widget.height() != height: - self._update_help_btn() - - def _on_splitter_move(self, *args): - self._update_help_btn() - - def _on_help_btn(self): - if self._desc_width_anim_timer.isActive(): - return - - final_size = self.size() - cur_sizes = self._splitter_widget.sizes() - - if self._desc_widget_step == 0: - now_visible = self._detail_description_widget.isVisible() - else: - now_visible = self._desc_widget_step > 0 - - sizes = [] - for idx, value in enumerate(cur_sizes): - if idx < 3: - sizes.append(value) - - self._last_full_width = final_size.width() - self._other_widgets_widths = list(sizes) - - if now_visible: - cur_desc_width = self._detail_description_widget.width() - if cur_desc_width < 1: - cur_desc_width = 2 - step_size = int(cur_desc_width / 5) - if step_size < 1: - step_size = 1 - - step_size *= -1 - expected_width = 0 - desc_width = cur_desc_width - 1 - width = final_size.width() - 1 - min_max = desc_width - self._last_description_width = cur_desc_width - - else: - self._detail_description_widget.setVisible(True) - handle = self._splitter_widget.handle(3) - desc_width = handle.sizeHint().width() - if self._last_description_width: - expected_width = self._last_description_width - else: - hint = self._detail_description_widget.sizeHint() - expected_width = hint.width() - - width = final_size.width() + desc_width - step_size = int(expected_width / 5) - if step_size < 1: - step_size = 1 - min_max = 0 - - if self._last_desc_max_width is None: - self._last_desc_max_width = ( - self._detail_description_widget.maximumWidth() - ) - self._detail_description_widget.setMinimumWidth(min_max) - self._detail_description_widget.setMaximumWidth(min_max) - self._expected_description_width = expected_width - self._desc_widget_step = step_size - - self._desc_width_anim_timer.start() - - sizes.append(desc_width) - - final_size.setWidth(width) - - self._splitter_widget.setSizes(sizes) - self.resize(final_size) - - self._help_btn.set_expanded(not now_visible) - - def _on_desc_animation(self): - current_width = self._detail_description_widget.width() - - desc_width = None - last_step = False - growing = self._desc_widget_step > 0 - - # Growing - if growing: - if current_width < self._expected_description_width: - desc_width = current_width + self._desc_widget_step - if desc_width >= self._expected_description_width: - desc_width = self._expected_description_width - last_step = True - - # 
Decreasing - elif self._desc_widget_step < 0: - if current_width > self._expected_description_width: - desc_width = current_width + self._desc_widget_step - if desc_width <= self._expected_description_width: - desc_width = self._expected_description_width - last_step = True - - if desc_width is None: - self._desc_widget_step = 0 - self._desc_width_anim_timer.stop() - return - - if last_step and not growing: - self._detail_description_widget.setVisible(False) - QtWidgets.QApplication.processEvents() - - width = self._last_full_width - handle_width = self._splitter_widget.handle(3).width() - if growing: - width += (handle_width + desc_width) - else: - width -= self._last_description_width - if last_step: - width -= handle_width - else: - width += desc_width - - if not last_step or growing: - self._detail_description_widget.setMaximumWidth(desc_width) - self._detail_description_widget.setMinimumWidth(desc_width) - - window_size = self.size() - window_size.setWidth(width) - self.resize(window_size) - if not last_step: - return - - self._desc_widget_step = 0 - self._desc_width_anim_timer.stop() - - if not growing: - return - - self._detail_description_widget.setMinimumWidth(0) - self._detail_description_widget.setMaximumWidth( - self._last_desc_max_width - ) - self._detail_description_widget.setSizePolicy( - self._description_size_policy - ) - - sizes = list(self._other_widgets_widths) - sizes.append(desc_width) - self._splitter_widget.setSizes(sizes) - - def _set_creator_detailed_text(self, creator): - if not creator: - self._detail_description_input.setPlainText("") - return - detailed_description = creator.get_detail_description() or "" - if commonmark: - html = commonmark.commonmark(detailed_description) - self._detail_description_input.setHtml(html) - else: - self._detail_description_input.setMarkdown(detailed_description) - - def _set_creator_by_identifier(self, identifier): - creator = self.controller.manual_creators.get(identifier) - self._set_creator(creator) - - def _set_creator(self, creator): - self._creator_short_desc_widget.set_plugin(creator) - self._set_creator_detailed_text(creator) - self._pre_create_widget.set_plugin(creator) - - self._selected_creator = creator - - if not creator: - self._set_context_enabled(False) - return - - if ( - creator.create_allow_context_change - != self._context_change_is_enabled() - ): - self._set_context_enabled(creator.create_allow_context_change) - self._refresh_asset() - - default_variants = creator.get_default_variants() - if not default_variants: - default_variants = ["Main"] - - default_variant = creator.get_default_variant() - if not default_variant: - default_variant = default_variants[0] - - for action in tuple(self.variant_hints_menu.actions()): - self.variant_hints_menu.removeAction(action) - action.deleteLater() - - for variant in default_variants: - if variant in SEPARATORS: - self.variant_hints_menu.addSeparator() - elif variant: - self.variant_hints_menu.addAction(variant) - - self.variant_input.setText(default_variant or "Main") - - def _on_variant_widget_resize(self): - self.variant_hints_btn.setFixedHeight(self.variant_input.height()) - - def _on_variant_btn_click(self): - pos = self.variant_hints_btn.rect().bottomLeft() - point = self.variant_hints_btn.mapToGlobal(pos) - self.variant_hints_menu.popup(point) - - def _on_variant_action(self, action): - value = action.text() - if self.variant_input.text() != value: - self.variant_input.setText(value) - - def _on_variant_change(self, variant_value=None): - if not 
self._prereq_available: - return - - # This should probably never happen? - if not self._selected_creator: - if self.subset_name_input.text(): - self.subset_name_input.setText("") - return - - if variant_value is None: - variant_value = self.variant_input.text() - - self.create_btn.setEnabled(True) - if not self._compiled_name_pattern.match(variant_value): - self.create_btn.setEnabled(False) - self._set_variant_state_property("invalid") - self.subset_name_input.setText("< Invalid variant >") - return - - project_name = self.controller.project_name - task_name = self._get_task_name() - - asset_doc = copy.deepcopy(self._asset_doc) - # Calculate subset name with Creator plugin - try: - subset_name = self._selected_creator.get_subset_name( - variant_value, task_name, asset_doc, project_name - ) - except TaskNotSetError: - self.create_btn.setEnabled(False) - self._set_variant_state_property("invalid") - self.subset_name_input.setText("< Missing task >") - return - - self.subset_name_input.setText(subset_name) - - self._validate_subset_name(subset_name, variant_value) - - def _validate_subset_name(self, subset_name, variant_value): - # Get all subsets of the current asset - if self._subset_names: - existing_subset_names = set(self._subset_names) - else: - existing_subset_names = set() - existing_subset_names_low = set( - _name.lower() - for _name in existing_subset_names - ) - - # Replace - compare_regex = re.compile(re.sub( - variant_value, "(.+)", subset_name, flags=re.IGNORECASE - )) - variant_hints = set() - if variant_value: - for _name in existing_subset_names: - _result = compare_regex.search(_name) - if _result: - variant_hints |= set(_result.groups()) - - # Remove previous hints from menu - for action in tuple(self.variant_hints_group.actions()): - self.variant_hints_group.removeAction(action) - self.variant_hints_menu.removeAction(action) - action.deleteLater() - - # Add separator if there are hints and menu already has actions - if variant_hints and self.variant_hints_menu.actions(): - self.variant_hints_menu.addSeparator() - - # Add hints to actions - for variant_hint in variant_hints: - action = self.variant_hints_menu.addAction(variant_hint) - self.variant_hints_group.addAction(action) - - # Indicate subset existence - if not variant_value: - property_value = "empty" - - elif subset_name.lower() in existing_subset_names_low: - # validate existence of subset name with lowered text - # - "renderMain" vs. 
"rendermain" mean same path item for - # windows - property_value = "exists" - else: - property_value = "new" - - self._set_variant_state_property(property_value) - - variant_is_valid = variant_value.strip() != "" - if variant_is_valid != self.create_btn.isEnabled(): - self.create_btn.setEnabled(variant_is_valid) - - def _set_variant_state_property(self, state): - current_value = self.variant_input.property("state") - if current_value != state: - self.variant_input.setProperty("state", state) - self.variant_input.style().polish(self.variant_input) - - def _on_first_show(self): - center = self.rect().center() - - width, height = self.default_size - self.resize(width, height) - part = int(width / 7) - self._splitter_widget.setSizes( - [part * 2, part * 2, width - (part * 4)] - ) - - new_pos = self.mapToGlobal(center) - new_pos.setX(new_pos.x() - int(self.width() / 2)) - new_pos.setY(new_pos.y() - int(self.height() / 2)) - self.move(new_pos) - - def moveEvent(self, event): - super(CreateDialog, self).moveEvent(event) - self._last_pos = self.pos() - - def showEvent(self, event): - super(CreateDialog, self).showEvent(event) - if self._first_show: - self._first_show = False - self._on_first_show() - - if self._last_pos is not None: - self.move(self._last_pos) - - self._update_help_btn() - - self.refresh() - - def resizeEvent(self, event): - super(CreateDialog, self).resizeEvent(event) - self._update_help_btn() - - def _on_create(self): - indexes = self.creators_view.selectedIndexes() - if not indexes or len(indexes) > 1: - return - - if not self.create_btn.isEnabled(): - return - - index = indexes[0] - creator_label = index.data(QtCore.Qt.DisplayRole) - creator_identifier = index.data(CREATOR_IDENTIFIER_ROLE) - family = index.data(FAMILY_ROLE) - subset_name = self.subset_name_input.text() - variant = self.variant_input.text() - asset_name = self._get_asset_name() - task_name = self._get_task_name() - pre_create_data = self._pre_create_widget.current_value() - # Where to define these data? - # - what data show be stored? 
- instance_data = { - "asset": asset_name, - "task": task_name, - "variant": variant, - "family": family - } - - error_msg = None - formatted_traceback = None - try: - self.controller.create( - creator_identifier, - subset_name, - instance_data, - pre_create_data - ) - - except CreatorError as exc: - error_msg = str(exc) - - # Use bare except because some hosts raise their exceptions that - # do not inherit from python's `BaseException` - except: - exc_type, exc_value, exc_traceback = sys.exc_info() - formatted_traceback = "".join(traceback.format_exception( - exc_type, exc_value, exc_traceback - )) - error_msg = str(exc_value) - - if error_msg is None: - self._set_creator(self._selected_creator) - self._emit_message("Creation finished...") - else: - box = CreateErrorMessageBox( - creator_label, - subset_name, - asset_name, - error_msg, - formatted_traceback, - parent=self - ) - box.show() - # Store dialog so is not garbage collected before is shown - self._message_dialog = box diff --git a/openpype/tools/publisher/widgets/create_widget.py b/openpype/tools/publisher/widgets/create_widget.py new file mode 100644 index 0000000000..7bdac46273 --- /dev/null +++ b/openpype/tools/publisher/widgets/create_widget.py @@ -0,0 +1,778 @@ +import re + +from Qt import QtWidgets, QtCore, QtGui + +from openpype.pipeline.create import ( + SUBSET_NAME_ALLOWED_SYMBOLS, + PRE_CREATE_THUMBNAIL_KEY, + TaskNotSetError, +) + +from .thumbnail_widget import ThumbnailWidget +from .widgets import ( + IconValuePixmapLabel, + CreateBtn, +) +from .assets_widget import CreateWidgetAssetsWidget +from .tasks_widget import CreateWidgetTasksWidget +from .precreate_widget import PreCreateWidget +from ..constants import ( + VARIANT_TOOLTIP, + CREATOR_IDENTIFIER_ROLE, + FAMILY_ROLE, + CREATOR_THUMBNAIL_ENABLED_ROLE, +) + +SEPARATORS = ("---separator---", "---") + + +class ResizeControlWidget(QtWidgets.QWidget): + resized = QtCore.Signal() + + def resizeEvent(self, event): + super(ResizeControlWidget, self).resizeEvent(event) + self.resized.emit() + + +# TODO add creator identifier/label to details +class CreatorShortDescWidget(QtWidgets.QWidget): + def __init__(self, parent=None): + super(CreatorShortDescWidget, self).__init__(parent=parent) + + # --- Short description widget --- + icon_widget = IconValuePixmapLabel(None, self) + icon_widget.setObjectName("FamilyIconLabel") + + # --- Short description inputs --- + short_desc_input_widget = QtWidgets.QWidget(self) + + family_label = QtWidgets.QLabel(short_desc_input_widget) + family_label.setAlignment( + QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft + ) + + description_label = QtWidgets.QLabel(short_desc_input_widget) + description_label.setAlignment( + QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft + ) + + short_desc_input_layout = QtWidgets.QVBoxLayout( + short_desc_input_widget + ) + short_desc_input_layout.setSpacing(0) + short_desc_input_layout.addWidget(family_label) + short_desc_input_layout.addWidget(description_label) + # -------------------------------- + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(icon_widget, 0) + layout.addWidget(short_desc_input_widget, 1) + # -------------------------------- + + self._icon_widget = icon_widget + self._family_label = family_label + self._description_label = description_label + + def set_creator_item(self, creator_item=None): + if not creator_item: + self._icon_widget.set_icon_def(None) + self._family_label.setText("") + self._description_label.setText("") + return + + plugin_icon = 
creator_item.icon + description = creator_item.description or "" + + self._icon_widget.set_icon_def(plugin_icon) + self._family_label.setText("{}".format(creator_item.family)) + self._family_label.setTextInteractionFlags(QtCore.Qt.NoTextInteraction) + self._description_label.setText(description) + + +class CreateWidget(QtWidgets.QWidget): + def __init__(self, controller, parent=None): + super(CreateWidget, self).__init__(parent) + + self.setWindowTitle("Create new instance") + + self._controller = controller + + self._asset_name = None + self._subset_names = None + self._selected_creator = None + + self._prereq_available = False + + name_pattern = "^[{}]*$".format(SUBSET_NAME_ALLOWED_SYMBOLS) + self._name_pattern = name_pattern + self._compiled_name_pattern = re.compile(name_pattern) + + main_splitter_widget = QtWidgets.QSplitter(self) + + context_widget = QtWidgets.QWidget(main_splitter_widget) + + assets_widget = CreateWidgetAssetsWidget(controller, context_widget) + tasks_widget = CreateWidgetTasksWidget(controller, context_widget) + + context_layout = QtWidgets.QVBoxLayout(context_widget) + context_layout.setContentsMargins(0, 0, 0, 0) + context_layout.setSpacing(0) + context_layout.addWidget(assets_widget, 2) + context_layout.addWidget(tasks_widget, 1) + + # --- Creators view --- + creators_widget = QtWidgets.QWidget(main_splitter_widget) + + creator_short_desc_widget = CreatorShortDescWidget(creators_widget) + + attr_separator_widget = QtWidgets.QWidget(creators_widget) + attr_separator_widget.setObjectName("Separator") + attr_separator_widget.setMinimumHeight(1) + attr_separator_widget.setMaximumHeight(1) + + creators_splitter = QtWidgets.QSplitter(creators_widget) + + creators_view_widget = QtWidgets.QWidget(creators_splitter) + + creator_view_label = QtWidgets.QLabel( + "Choose publish type", creators_view_widget + ) + + creators_view = QtWidgets.QListView(creators_view_widget) + creators_model = QtGui.QStandardItemModel() + creators_sort_model = QtCore.QSortFilterProxyModel() + creators_sort_model.setSourceModel(creators_model) + creators_view.setModel(creators_sort_model) + + creators_view_layout = QtWidgets.QVBoxLayout(creators_view_widget) + creators_view_layout.setContentsMargins(0, 0, 0, 0) + creators_view_layout.addWidget(creator_view_label, 0) + creators_view_layout.addWidget(creators_view, 1) + + # --- Creator attr defs --- + creators_attrs_widget = QtWidgets.QWidget(creators_splitter) + + # Top part - variant / subset name + thumbnail + creators_attrs_top = QtWidgets.QWidget(creators_attrs_widget) + + # Basics - variant / subset name + creator_basics_widget = ResizeControlWidget(creators_attrs_top) + + variant_subset_label = QtWidgets.QLabel( + "Create options", creator_basics_widget + ) + + variant_subset_widget = QtWidgets.QWidget(creator_basics_widget) + # Variant and subset input + variant_widget = ResizeControlWidget(variant_subset_widget) + variant_widget.setObjectName("VariantInputsWidget") + + variant_input = QtWidgets.QLineEdit(variant_widget) + variant_input.setObjectName("VariantInput") + variant_input.setToolTip(VARIANT_TOOLTIP) + + variant_hints_btn = QtWidgets.QToolButton(variant_widget) + variant_hints_btn.setArrowType(QtCore.Qt.DownArrow) + variant_hints_btn.setIconSize(QtCore.QSize(12, 12)) + + variant_hints_menu = QtWidgets.QMenu(variant_widget) + variant_hints_group = QtWidgets.QActionGroup(variant_hints_menu) + + variant_layout = QtWidgets.QHBoxLayout(variant_widget) + variant_layout.setContentsMargins(0, 0, 0, 0) + variant_layout.setSpacing(0) + 
variant_layout.addWidget(variant_input, 1) + variant_layout.addWidget(variant_hints_btn, 0, QtCore.Qt.AlignVCenter) + + subset_name_input = QtWidgets.QLineEdit(variant_subset_widget) + subset_name_input.setEnabled(False) + + variant_subset_layout = QtWidgets.QFormLayout(variant_subset_widget) + variant_subset_layout.setContentsMargins(0, 0, 0, 0) + variant_subset_layout.addRow("Variant", variant_widget) + variant_subset_layout.addRow("Subset", subset_name_input) + + creator_basics_layout = QtWidgets.QVBoxLayout(creator_basics_widget) + creator_basics_layout.setContentsMargins(0, 0, 0, 0) + creator_basics_layout.addWidget(variant_subset_label, 0) + creator_basics_layout.addWidget(variant_subset_widget, 0) + + thumbnail_widget = ThumbnailWidget(controller, creators_attrs_top) + + creators_attrs_top_layout = QtWidgets.QHBoxLayout(creators_attrs_top) + creators_attrs_top_layout.setContentsMargins(0, 0, 0, 0) + creators_attrs_top_layout.addWidget(creator_basics_widget, 1) + creators_attrs_top_layout.addWidget(thumbnail_widget, 0) + + # Precreate attributes widget + pre_create_widget = PreCreateWidget(creators_attrs_widget) + + # Create button + create_btn_wrapper = QtWidgets.QWidget(creators_attrs_widget) + create_btn = CreateBtn(create_btn_wrapper) + create_btn.setEnabled(False) + + create_btn_wrap_layout = QtWidgets.QHBoxLayout(create_btn_wrapper) + create_btn_wrap_layout.setContentsMargins(0, 0, 0, 0) + create_btn_wrap_layout.addStretch(1) + create_btn_wrap_layout.addWidget(create_btn, 0) + + creators_attrs_layout = QtWidgets.QVBoxLayout(creators_attrs_widget) + creators_attrs_layout.setContentsMargins(0, 0, 0, 0) + creators_attrs_layout.addWidget(creators_attrs_top, 0) + creators_attrs_layout.addWidget(pre_create_widget, 1) + creators_attrs_layout.addWidget(create_btn_wrapper, 0) + + creators_splitter.addWidget(creators_view_widget) + creators_splitter.addWidget(creators_attrs_widget) + creators_splitter.setStretchFactor(0, 1) + creators_splitter.setStretchFactor(1, 2) + + creators_layout = QtWidgets.QVBoxLayout(creators_widget) + creators_layout.setContentsMargins(0, 0, 0, 0) + creators_layout.addWidget(creator_short_desc_widget, 0) + creators_layout.addWidget(attr_separator_widget, 0) + creators_layout.addWidget(creators_splitter, 1) + # ------------ + + # --- Detailed information about creator --- + # Detailed description of creator + # TODO this has no way how can be showed now + + # ------------------------------------------- + main_splitter_widget.addWidget(context_widget) + main_splitter_widget.addWidget(creators_widget) + main_splitter_widget.setStretchFactor(0, 1) + main_splitter_widget.setStretchFactor(1, 3) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(main_splitter_widget, 1) + + prereq_timer = QtCore.QTimer() + prereq_timer.setInterval(50) + prereq_timer.setSingleShot(True) + + prereq_timer.timeout.connect(self._invalidate_prereq) + + create_btn.clicked.connect(self._on_create) + variant_widget.resized.connect(self._on_variant_widget_resize) + creator_basics_widget.resized.connect(self._on_creator_basics_resize) + variant_input.returnPressed.connect(self._on_create) + variant_input.textChanged.connect(self._on_variant_change) + creators_view.selectionModel().currentChanged.connect( + self._on_creator_item_change + ) + variant_hints_btn.clicked.connect(self._on_variant_btn_click) + variant_hints_menu.triggered.connect(self._on_variant_action) + assets_widget.selection_changed.connect(self._on_asset_change) + 
assets_widget.current_context_required.connect( + self._on_current_session_context_request + ) + tasks_widget.task_changed.connect(self._on_task_change) + thumbnail_widget.thumbnail_created.connect(self._on_thumbnail_create) + thumbnail_widget.thumbnail_cleared.connect(self._on_thumbnail_clear) + + controller.event_system.add_callback( + "plugins.refresh.finished", self._on_plugins_refresh + ) + + self._main_splitter_widget = main_splitter_widget + + self._creators_splitter = creators_splitter + + self._context_widget = context_widget + self._assets_widget = assets_widget + self._tasks_widget = tasks_widget + + self.subset_name_input = subset_name_input + + self.variant_input = variant_input + self.variant_hints_btn = variant_hints_btn + self.variant_hints_menu = variant_hints_menu + self.variant_hints_group = variant_hints_group + + self._creators_model = creators_model + self._creators_sort_model = creators_sort_model + self._creators_view = creators_view + self._create_btn = create_btn + + self._creator_short_desc_widget = creator_short_desc_widget + self._creator_basics_widget = creator_basics_widget + self._thumbnail_widget = thumbnail_widget + self._pre_create_widget = pre_create_widget + self._attr_separator_widget = attr_separator_widget + + self._prereq_timer = prereq_timer + self._first_show = True + self._last_thumbnail_path = None + + @property + def current_asset_name(self): + return self._controller.current_asset_name + + @property + def current_task_name(self): + return self._controller.current_task_name + + def _context_change_is_enabled(self): + return self._context_widget.isEnabled() + + def _get_asset_name(self): + asset_name = None + if self._context_change_is_enabled(): + asset_name = self._assets_widget.get_selected_asset_name() + + if asset_name is None: + asset_name = self.current_asset_name + return asset_name or None + + def _get_task_name(self): + task_name = None + if self._context_change_is_enabled(): + # Don't use selection of task if asset is not set + asset_name = self._assets_widget.get_selected_asset_name() + if asset_name: + task_name = self._tasks_widget.get_selected_task_name() + + if not task_name: + task_name = self.current_task_name + return task_name + + def _set_context_enabled(self, enabled): + self._assets_widget.set_enabled(enabled) + self._tasks_widget.set_enabled(enabled) + check_prereq = self._context_widget.isEnabled() != enabled + self._context_widget.setEnabled(enabled) + if check_prereq: + self._invalidate_prereq() + + def refresh(self): + # Get context before refresh to keep selection of asset and + # task widgets + asset_name = self._get_asset_name() + task_name = self._get_task_name() + + self._prereq_available = False + + # Disable context widget so refresh of asset will use context asset + # name + self._set_context_enabled(False) + + self._assets_widget.refresh() + + # Refresh data before update of creators + self._refresh_asset() + # Then refresh creators which may trigger callbacks using refreshed + # data + self._refresh_creators() + + self._assets_widget.update_current_asset() + self._assets_widget.select_asset_by_name(asset_name) + self._tasks_widget.set_asset_name(asset_name) + self._tasks_widget.select_task_name(task_name) + + self._invalidate_prereq_deffered() + + def _invalidate_prereq_deffered(self): + self._prereq_timer.start() + + def _invalidate_prereq(self): + prereq_available = True + creator_btn_tooltips = [] + + available_creators = self._creators_model.rowCount() > 0 + if available_creators != 
self._creators_view.isEnabled(): + self._creators_view.setEnabled(available_creators) + + if not available_creators: + prereq_available = False + creator_btn_tooltips.append("Creator is not selected") + + if self._context_change_is_enabled() and self._asset_name is None: + # QUESTION how to handle invalid asset? + prereq_available = False + creator_btn_tooltips.append("Context is not selected") + + if prereq_available != self._prereq_available: + self._prereq_available = prereq_available + + self._create_btn.setEnabled(prereq_available) + + self.variant_input.setEnabled(prereq_available) + self.variant_hints_btn.setEnabled(prereq_available) + + tooltip = "" + if creator_btn_tooltips: + tooltip = "\n".join(creator_btn_tooltips) + self._create_btn.setToolTip(tooltip) + + self._on_variant_change() + + def _refresh_asset(self): + asset_name = self._get_asset_name() + + # Skip if asset did not change + if self._asset_name and self._asset_name == asset_name: + return + + # Make sure `_asset_name` and `_subset_names` variables are reset + self._asset_name = asset_name + self._subset_names = None + if asset_name is None: + return + + subset_names = self._controller.get_existing_subset_names(asset_name) + + self._subset_names = subset_names + if subset_names is None: + self.subset_name_input.setText("< Asset is not set >") + + def _refresh_creators(self): + # Refresh creators and add their families to list + existing_items = {} + old_creators = set() + for row in range(self._creators_model.rowCount()): + item = self._creators_model.item(row, 0) + identifier = item.data(CREATOR_IDENTIFIER_ROLE) + existing_items[identifier] = item + old_creators.add(identifier) + + # Add new families + new_creators = set() + for identifier, creator_item in self._controller.creator_items.items(): + if creator_item.creator_type != "artist": + continue + + # TODO add details about creator + new_creators.add(identifier) + if identifier in existing_items: + item = existing_items[identifier] + else: + item = QtGui.QStandardItem() + item.setFlags( + QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable + ) + self._creators_model.appendRow(item) + + item.setData(creator_item.label, QtCore.Qt.DisplayRole) + item.setData(identifier, CREATOR_IDENTIFIER_ROLE) + item.setData( + creator_item.create_allow_thumbnail, + CREATOR_THUMBNAIL_ENABLED_ROLE + ) + item.setData(creator_item.family, FAMILY_ROLE) + + # Remove families that are no more available + for identifier in (old_creators - new_creators): + item = existing_items[identifier] + self._creators_model.takeRow(item.row()) + + if self._creators_model.rowCount() < 1: + return + + self._creators_sort_model.sort(0) + # Make sure there is a selection + indexes = self._creators_view.selectedIndexes() + if not indexes: + index = self._creators_sort_model.index(0, 0) + self._creators_view.setCurrentIndex(index) + else: + index = indexes[0] + + identifier = index.data(CREATOR_IDENTIFIER_ROLE) + + self._set_creator_by_identifier(identifier) + + def _on_plugins_refresh(self): + # Trigger refresh only if is visible + self.refresh() + + def _on_asset_change(self): + self._refresh_asset() + + asset_name = self._assets_widget.get_selected_asset_name() + self._tasks_widget.set_asset_name(asset_name) + if self._context_change_is_enabled(): + self._invalidate_prereq_deffered() + + def _on_task_change(self): + if self._context_change_is_enabled(): + self._invalidate_prereq_deffered() + + def _on_thumbnail_create(self, thumbnail_path): + self._last_thumbnail_path = thumbnail_path + 
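For context on `_refresh_creators` above: it keeps the creators model in sync with the controller by reusing existing rows per identifier, appending rows for new creators and taking out stale ones. A reduced sketch of that reuse/append/remove cycle with made-up data and a hypothetical identifier role:

import sys

from Qt import QtCore, QtGui, QtWidgets

# Hypothetical role for this sketch; the publisher defines its own roles
# (CREATOR_IDENTIFIER_ROLE etc.) in its constants module.
IDENTIFIER_ROLE = QtCore.Qt.UserRole + 1


def sync_model(model, labels_by_identifier):
    """Reuse, add and remove rows so the model mirrors the mapping."""

    existing_items = {}
    for row in range(model.rowCount()):
        item = model.item(row, 0)
        existing_items[item.data(IDENTIFIER_ROLE)] = item

    # Reuse items of known identifiers, create items for new ones
    for identifier, label in labels_by_identifier.items():
        item = existing_items.get(identifier)
        if item is None:
            item = QtGui.QStandardItem()
            item.setData(identifier, IDENTIFIER_ROLE)
            model.appendRow(item)
        item.setData(label, QtCore.Qt.DisplayRole)

    # Remove rows whose identifier disappeared from the source data
    for identifier, item in existing_items.items():
        if identifier not in labels_by_identifier:
            model.takeRow(item.row())


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    model = QtGui.QStandardItemModel()
    sync_model(model, {"render": "Render", "review": "Review"})
    sync_model(model, {"render": "Render (updated)"})
    for row in range(model.rowCount()):
        print(model.item(row, 0).text())
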
self._thumbnail_widget.set_current_thumbnails([thumbnail_path]) + + def _on_thumbnail_clear(self): + self._last_thumbnail_path = None + + def _on_current_session_context_request(self): + self._assets_widget.set_current_session_asset() + task_name = self.current_task_name + if task_name: + self._tasks_widget.select_task_name(task_name) + + def _on_creator_item_change(self, new_index, _old_index): + identifier = None + if new_index.isValid(): + identifier = new_index.data(CREATOR_IDENTIFIER_ROLE) + self._set_creator_by_identifier(identifier) + + def _set_creator_detailed_text(self, creator_item): + # TODO implement + description = "" + if creator_item is not None: + description = creator_item.detailed_description or description + self._controller.event_system.emit( + "show.detailed.help", + { + "message": description + }, + "create.widget" + ) + + def _set_creator_by_identifier(self, identifier): + creator_item = self._controller.creator_items.get(identifier) + self._set_creator(creator_item) + + def _set_creator(self, creator_item): + """Set current creator item. + + Args: + creator_item (CreatorItem): Item representing creator that can be + triggered by artist. + """ + + self._creator_short_desc_widget.set_creator_item(creator_item) + self._set_creator_detailed_text(creator_item) + self._pre_create_widget.set_creator_item(creator_item) + + self._selected_creator = creator_item + + if not creator_item: + self._set_context_enabled(False) + return + + if ( + creator_item.create_allow_context_change + != self._context_change_is_enabled() + ): + self._set_context_enabled(creator_item.create_allow_context_change) + self._refresh_asset() + + self._thumbnail_widget.setVisible( + creator_item.create_allow_thumbnail + ) + + default_variants = creator_item.default_variants + if not default_variants: + default_variants = ["Main"] + + default_variant = creator_item.default_variant + if not default_variant: + default_variant = default_variants[0] + + for action in tuple(self.variant_hints_menu.actions()): + self.variant_hints_menu.removeAction(action) + action.deleteLater() + + for variant in default_variants: + if variant in SEPARATORS: + self.variant_hints_menu.addSeparator() + elif variant: + self.variant_hints_menu.addAction(variant) + + variant_text = default_variant or "Main" + # Make sure subset name is updated to new plugin + if variant_text == self.variant_input.text(): + self._on_variant_change() + else: + self.variant_input.setText(variant_text) + + def _on_variant_widget_resize(self): + self.variant_hints_btn.setFixedHeight(self.variant_input.height()) + + def _on_variant_btn_click(self): + pos = self.variant_hints_btn.rect().bottomLeft() + point = self.variant_hints_btn.mapToGlobal(pos) + self.variant_hints_menu.popup(point) + + def _on_variant_action(self, action): + value = action.text() + if self.variant_input.text() != value: + self.variant_input.setText(value) + + def _on_variant_change(self, variant_value=None): + if not self._prereq_available: + return + + # This should probably never happen? 
+ if not self._selected_creator: + if self.subset_name_input.text(): + self.subset_name_input.setText("") + return + + if variant_value is None: + variant_value = self.variant_input.text() + + if not self._compiled_name_pattern.match(variant_value): + self._create_btn.setEnabled(False) + self._set_variant_state_property("invalid") + self.subset_name_input.setText("< Invalid variant >") + return + + if not self._context_change_is_enabled(): + self._create_btn.setEnabled(True) + self._set_variant_state_property("") + self.subset_name_input.setText("< Valid variant >") + return + + asset_name = self._get_asset_name() + task_name = self._get_task_name() + creator_idenfier = self._selected_creator.identifier + # Calculate subset name with Creator plugin + try: + subset_name = self._controller.get_subset_name( + creator_idenfier, variant_value, task_name, asset_name + ) + except TaskNotSetError: + self._create_btn.setEnabled(False) + self._set_variant_state_property("invalid") + self.subset_name_input.setText("< Missing task >") + return + + self.subset_name_input.setText(subset_name) + + self._create_btn.setEnabled(True) + self._validate_subset_name(subset_name, variant_value) + + def _validate_subset_name(self, subset_name, variant_value): + # Get all subsets of the current asset + if self._subset_names: + existing_subset_names = set(self._subset_names) + else: + existing_subset_names = set() + existing_subset_names_low = set( + _name.lower() + for _name in existing_subset_names + ) + + # Replace + compare_regex = re.compile(re.sub( + variant_value, "(.+)", subset_name, flags=re.IGNORECASE + )) + variant_hints = set() + if variant_value: + for _name in existing_subset_names: + _result = compare_regex.search(_name) + if _result: + variant_hints |= set(_result.groups()) + + # Remove previous hints from menu + for action in tuple(self.variant_hints_group.actions()): + self.variant_hints_group.removeAction(action) + self.variant_hints_menu.removeAction(action) + action.deleteLater() + + # Add separator if there are hints and menu already has actions + if variant_hints and self.variant_hints_menu.actions(): + self.variant_hints_menu.addSeparator() + + # Add hints to actions + for variant_hint in variant_hints: + action = self.variant_hints_menu.addAction(variant_hint) + self.variant_hints_group.addAction(action) + + # Indicate subset existence + if not variant_value: + property_value = "empty" + + elif subset_name.lower() in existing_subset_names_low: + # validate existence of subset name with lowered text + # - "renderMain" vs. 
"rendermain" mean same path item for + # windows + property_value = "exists" + else: + property_value = "new" + + self._set_variant_state_property(property_value) + + variant_is_valid = variant_value.strip() != "" + if variant_is_valid != self._create_btn.isEnabled(): + self._create_btn.setEnabled(variant_is_valid) + + def _set_variant_state_property(self, state): + current_value = self.variant_input.property("state") + if current_value != state: + self.variant_input.setProperty("state", state) + self.variant_input.style().polish(self.variant_input) + + def _on_first_show(self): + width = self.width() + part = int(width / 4) + rem_width = width - part + self._main_splitter_widget.setSizes([part, rem_width]) + rem_width = rem_width - part + self._creators_splitter.setSizes([part, rem_width]) + + def showEvent(self, event): + super(CreateWidget, self).showEvent(event) + if self._first_show: + self._first_show = False + self._on_first_show() + + def _on_creator_basics_resize(self): + self._thumbnail_widget.set_height( + self._creator_basics_widget.sizeHint().height() + ) + + def _on_create(self): + indexes = self._creators_view.selectedIndexes() + if not indexes or len(indexes) > 1: + return + + if not self._create_btn.isEnabled(): + return + + index = indexes[0] + creator_identifier = index.data(CREATOR_IDENTIFIER_ROLE) + family = index.data(FAMILY_ROLE) + variant = self.variant_input.text() + # Care about subset name only if context change is enabled + subset_name = None + asset_name = None + task_name = None + if self._context_change_is_enabled(): + subset_name = self.subset_name_input.text() + asset_name = self._get_asset_name() + task_name = self._get_task_name() + + pre_create_data = self._pre_create_widget.current_value() + if index.data(CREATOR_THUMBNAIL_ENABLED_ROLE): + pre_create_data[PRE_CREATE_THUMBNAIL_KEY] = ( + self._last_thumbnail_path + ) + + # Where to define these data? + # - what data show be stored? + instance_data = { + "asset": asset_name, + "task": task_name, + "variant": variant, + "family": family + } + + success = self._controller.create( + creator_identifier, + subset_name, + instance_data, + pre_create_data + ) + + if success: + self._set_creator(self._selected_creator) + self._controller.emit_card_message("Creation finished...") + self._last_thumbnail_path = None + self._thumbnail_widget.set_current_thumbnails() diff --git a/openpype/tools/publisher/widgets/help_widget.py b/openpype/tools/publisher/widgets/help_widget.py new file mode 100644 index 0000000000..0090111889 --- /dev/null +++ b/openpype/tools/publisher/widgets/help_widget.py @@ -0,0 +1,84 @@ +try: + import commonmark +except Exception: + commonmark = None + +from Qt import QtWidgets, QtCore + + +class HelpButton(QtWidgets.QPushButton): + """Button used to trigger help dialog.""" + + def __init__(self, parent): + super(HelpButton, self).__init__(parent) + self.setObjectName("CreateDialogHelpButton") + self.setText("?") + + +class HelpWidget(QtWidgets.QWidget): + """Widget showing help for single functionality.""" + + def __init__(self, parent): + super(HelpWidget, self).__init__(parent) + + # TODO add hints what to help with? 
+ detail_description_input = QtWidgets.QTextEdit(self) + detail_description_input.setObjectName("CreatorDetailedDescription") + detail_description_input.setTextInteractionFlags( + QtCore.Qt.TextBrowserInteraction + ) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.setSpacing(0) + main_layout.addWidget(detail_description_input, 1) + + self._detail_description_input = detail_description_input + + self.set_detailed_text() + + def set_detailed_text(self, text=None): + if not text: + text = "We didn't prepare help for this part..." + + if commonmark: + html = commonmark.commonmark(text) + self._detail_description_input.setHtml(html) + elif hasattr(self._detail_description_input, "setMarkdown"): + self._detail_description_input.setMarkdown(text) + else: + self._detail_description_input.setText(text) + + +class HelpDialog(QtWidgets.QDialog): + default_width = 530 + default_height = 340 + + def __init__(self, controller, parent): + super(HelpDialog, self).__init__(parent) + + self.setWindowTitle("Help dialog") + + help_content = HelpWidget(self) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.addWidget(help_content, 1) + + controller.event_system.add_callback( + "show.detailed.help", self._on_help_request + ) + + self._controller = controller + + self._help_content = help_content + + def _on_help_request(self, event): + message = event.get("message") + self.set_detailed_text(message) + + def set_detailed_text(self, text=None): + self._help_content.set_detailed_text(text) + + def showEvent(self, event): + super(HelpDialog, self).showEvent(event) + self.resize(self.default_width, self.default_height) diff --git a/openpype/tools/publisher/widgets/images/clear_thumbnail.png b/openpype/tools/publisher/widgets/images/clear_thumbnail.png new file mode 100644 index 0000000000..406328cb51 Binary files /dev/null and b/openpype/tools/publisher/widgets/images/clear_thumbnail.png differ diff --git a/openpype/tools/publisher/widgets/images/copy.png b/openpype/tools/publisher/widgets/images/copy.png deleted file mode 100644 index 522afcdc87..0000000000 Binary files a/openpype/tools/publisher/widgets/images/copy.png and /dev/null differ diff --git a/openpype/tools/publisher/widgets/images/create.png b/openpype/tools/publisher/widgets/images/create.png new file mode 100644 index 0000000000..d691f364dd Binary files /dev/null and b/openpype/tools/publisher/widgets/images/create.png differ diff --git a/openpype/tools/publisher/widgets/images/download_arrow.png b/openpype/tools/publisher/widgets/images/download_arrow.png deleted file mode 100644 index a35a12fb39..0000000000 Binary files a/openpype/tools/publisher/widgets/images/download_arrow.png and /dev/null differ diff --git a/openpype/tools/publisher/widgets/images/validate.png b/openpype/tools/publisher/widgets/images/validate.png index d3cfa0b75d..c8472e9d31 100644 Binary files a/openpype/tools/publisher/widgets/images/validate.png and b/openpype/tools/publisher/widgets/images/validate.png differ diff --git a/openpype/tools/publisher/widgets/images/view_report.png b/openpype/tools/publisher/widgets/images/view_report.png index 50e214c3f8..6f3efd5e19 100644 Binary files a/openpype/tools/publisher/widgets/images/view_report.png and b/openpype/tools/publisher/widgets/images/view_report.png differ diff --git a/openpype/tools/publisher/widgets/list_view_widgets.py b/openpype/tools/publisher/widgets/list_view_widgets.py index 6bddaf66c8..32d84862f0 100644 --- 
a/openpype/tools/publisher/widgets/list_view_widgets.py +++ b/openpype/tools/publisher/widgets/list_view_widgets.py @@ -28,13 +28,17 @@ from Qt import QtWidgets, QtCore, QtGui from openpype.style import get_objected_colors from openpype.widgets.nice_checkbox import NiceCheckbox +from openpype.tools.utils.lib import html_escape from .widgets import AbstractInstanceView from ..constants import ( INSTANCE_ID_ROLE, SORT_VALUE_ROLE, IS_GROUP_ROLE, CONTEXT_ID, - CONTEXT_LABEL + CONTEXT_LABEL, + GROUP_ROLE, + CONVERTER_IDENTIFIER_ROLE, + CONVERTOR_ITEM_GROUP, ) @@ -53,8 +57,7 @@ class ListItemDelegate(QtWidgets.QStyledItemDelegate): def __init__(self, parent): super(ListItemDelegate, self).__init__(parent) - colors_data = get_objected_colors() - group_color_info = colors_data["publisher"]["list-view-group"] + group_color_info = get_objected_colors("publisher", "list-view-group") self._group_colors = { key: value.get_qcolor() @@ -113,7 +116,9 @@ class InstanceListItemWidget(QtWidgets.QWidget): self.instance = instance - subset_name_label = QtWidgets.QLabel(instance["subset"], self) + instance_label = html_escape(instance.label) + + subset_name_label = QtWidgets.QLabel(instance_label, self) subset_name_label.setObjectName("ListViewSubsetName") active_checkbox = NiceCheckbox(parent=self) @@ -132,7 +137,7 @@ class InstanceListItemWidget(QtWidgets.QWidget): active_checkbox.stateChanged.connect(self._on_active_change) - self._subset_name_label = subset_name_label + self._instance_label_widget = subset_name_label self._active_checkbox = active_checkbox self._has_valid_context = None @@ -146,8 +151,8 @@ class InstanceListItemWidget(QtWidgets.QWidget): state = "" if not valid: state = "invalid" - self._subset_name_label.setProperty("state", state) - self._subset_name_label.style().polish(self._subset_name_label) + self._instance_label_widget.setProperty("state", state) + self._instance_label_widget.style().polish(self._instance_label_widget) def is_active(self): """Instance is activated.""" @@ -176,9 +181,9 @@ class InstanceListItemWidget(QtWidgets.QWidget): def update_instance_values(self): """Update instance data propagated to widgets.""" # Check subset name - subset_name = self.instance["subset"] - if subset_name != self._subset_name_label.text(): - self._subset_name_label.setText(subset_name) + label = self.instance.label + if label != self._instance_label_widget.text(): + self._instance_label_widget.setText(html_escape(label)) # Check active state self.set_active(self.instance["active"]) # Check valid states @@ -328,6 +333,9 @@ class InstanceTreeView(QtWidgets.QTreeView): """Ids of selected instances.""" instance_ids = set() for index in self.selectionModel().selectedIndexes(): + if index.data(CONVERTER_IDENTIFIER_ROLE) is not None: + continue + instance_id = index.data(INSTANCE_ID_ROLE) if instance_id is not None: instance_ids.add(instance_id) @@ -407,7 +415,7 @@ class InstanceListView(AbstractInstanceView): def __init__(self, controller, parent): super(InstanceListView, self).__init__(parent) - self.controller = controller + self._controller = controller instance_view = InstanceTreeView(self) instance_delegate = ListItemDelegate(instance_view) @@ -437,26 +445,35 @@ class InstanceListView(AbstractInstanceView): self._group_items = {} self._group_widgets = {} self._widgets_by_id = {} + # Group by instance id for handling of active state self._group_by_instance_id = {} self._context_item = None self._context_widget = None + self._convertor_group_item = None + self._convertor_group_widget = None + 
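Regarding the switch to `html_escape(instance.label)` earlier in this file: QLabel auto-detects rich text, so an unescaped label containing HTML-like tags would be rendered as markup instead of shown literally. A small sketch of the effect using the standard-library `html.escape` (the repository's `html_escape` helper is assumed to behave similarly):

import sys
from html import escape

from Qt import QtWidgets

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)

    # Made-up instance label containing HTML-like markup
    label_text = "subset <b>with markup</b>"

    # Auto-detected as rich text: the tags are interpreted, not displayed
    raw_label = QtWidgets.QLabel(label_text)
    # Escaped: the text is shown literally, tags included
    escaped_label = QtWidgets.QLabel(escape(label_text))

    window = QtWidgets.QWidget()
    layout = QtWidgets.QVBoxLayout(window)
    layout.addWidget(raw_label)
    layout.addWidget(escaped_label)
    window.show()

    sys.exit(app.exec_())
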
self._convertor_items_by_id = {} + self._instance_view = instance_view self._instance_delegate = instance_delegate self._instance_model = instance_model self._proxy_model = proxy_model def _on_expand(self, index): - group_name = index.data(SORT_VALUE_ROLE) - group_widget = self._group_widgets.get(group_name) - if group_widget: - group_widget.set_expanded(True) + self._update_widget_expand_state(index, True) def _on_collapse(self, index): - group_name = index.data(SORT_VALUE_ROLE) - group_widget = self._group_widgets.get(group_name) + self._update_widget_expand_state(index, False) + + def _update_widget_expand_state(self, index, expanded): + group_name = index.data(GROUP_ROLE) + if group_name == CONVERTOR_ITEM_GROUP: + group_widget = self._convertor_group_widget + else: + group_widget = self._group_widgets.get(group_name) + if group_widget: - group_widget.set_expanded(False) + group_widget.set_expanded(expanded) def _on_toggle_request(self, toggle): selected_instance_ids = self._instance_view.get_selected_instance_ids() @@ -515,83 +532,30 @@ class InstanceListView(AbstractInstanceView): def refresh(self): """Refresh instances in the view.""" - # Prepare instances by their groups - instances_by_group_name = collections.defaultdict(list) - group_names = set() - for instance in self.controller.instances: - group_label = instance.creator_label - group_names.add(group_label) - instances_by_group_name[group_label].append(instance) - # Sort view at the end of refresh # - is turned off until any change in view happens sort_at_the_end = False - - # Access to root item of main model - root_item = self._instance_model.invisibleRootItem() - # Create or use already existing context item # - context widget does not change so we don't have to update anything - context_item = None - if self._context_item is None: + if self._make_sure_context_item_exists(): sort_at_the_end = True - context_item = QtGui.QStandardItem() - context_item.setData(0, SORT_VALUE_ROLE) - context_item.setData(CONTEXT_ID, INSTANCE_ID_ROLE) - root_item.appendRow(context_item) + self._update_convertor_items_group() - index = self._instance_model.index( - context_item.row(), context_item.column() - ) - proxy_index = self._proxy_model.mapFromSource(index) - widget = ListContextWidget(self._instance_view) - self._instance_view.setIndexWidget(proxy_index, widget) - - self._context_widget = widget - self._context_item = context_item + # Prepare instances by their groups + instances_by_group_name = collections.defaultdict(list) + group_names = set() + for instance in self._controller.instances.values(): + group_label = instance.group_label + group_names.add(group_label) + instances_by_group_name[group_label].append(instance) # Create new groups based on prepared `instances_by_group_name` - new_group_items = [] - for group_name in group_names: - if group_name in self._group_items: - continue - - group_item = QtGui.QStandardItem() - group_item.setData(group_name, SORT_VALUE_ROLE) - group_item.setData(True, IS_GROUP_ROLE) - group_item.setFlags(QtCore.Qt.ItemIsEnabled) - self._group_items[group_name] = group_item - new_group_items.append(group_item) - - # Add new group items to root item if there are any - if new_group_items: - # Trigger sort at the end + if self._make_sure_groups_exists(group_names): sort_at_the_end = True - root_item.appendRows(new_group_items) - - # Create widget for each new group item and store it for future usage - for group_item in new_group_items: - index = self._instance_model.index( - group_item.row(), 
group_item.column() - ) - proxy_index = self._proxy_model.mapFromSource(index) - group_name = group_item.data(SORT_VALUE_ROLE) - widget = InstanceListGroupWidget(group_name, self._instance_view) - widget.expand_changed.connect(self._on_group_expand_request) - widget.toggle_requested.connect(self._on_group_toggle_request) - self._group_widgets[group_name] = widget - self._instance_view.setIndexWidget(proxy_index, widget) # Remove groups that are not available anymore - for group_name in tuple(self._group_items.keys()): - if group_name in group_names: - continue - - group_item = self._group_items.pop(group_name) - root_item.removeRow(group_item.row()) - widget = self._group_widgets.pop(group_name) - widget.deleteLater() + self._remove_groups_except(group_names) # Store which groups should be expanded at the end expand_groups = set() @@ -650,6 +614,7 @@ class InstanceListView(AbstractInstanceView): # Create new item and store it as new item = QtGui.QStandardItem() item.setData(instance["subset"], SORT_VALUE_ROLE) + item.setData(instance["subset"], GROUP_ROLE) item.setData(instance_id, INSTANCE_ID_ROLE) new_items.append(item) new_items_with_instance.append((item, instance)) @@ -715,19 +680,158 @@ class InstanceListView(AbstractInstanceView): self._instance_view.expand(proxy_index) + def _make_sure_context_item_exists(self): + if self._context_item is not None: + return False + + root_item = self._instance_model.invisibleRootItem() + context_item = QtGui.QStandardItem() + context_item.setData(0, SORT_VALUE_ROLE) + context_item.setData(CONTEXT_ID, INSTANCE_ID_ROLE) + + root_item.appendRow(context_item) + + index = self._instance_model.index( + context_item.row(), context_item.column() + ) + proxy_index = self._proxy_model.mapFromSource(index) + widget = ListContextWidget(self._instance_view) + self._instance_view.setIndexWidget(proxy_index, widget) + + self._context_widget = widget + self._context_item = context_item + return True + + def _update_convertor_items_group(self): + created_new_items = False + convertor_items_by_id = self._controller.convertor_items + group_item = self._convertor_group_item + if not convertor_items_by_id and group_item is None: + return created_new_items + + root_item = self._instance_model.invisibleRootItem() + if not convertor_items_by_id: + root_item.removeRow(group_item.row()) + self._convertor_group_widget.deleteLater() + self._convertor_group_widget = None + self._convertor_items_by_id = {} + return created_new_items + + if group_item is None: + created_new_items = True + group_item = QtGui.QStandardItem() + group_item.setData(CONVERTOR_ITEM_GROUP, GROUP_ROLE) + group_item.setData(1, SORT_VALUE_ROLE) + group_item.setData(True, IS_GROUP_ROLE) + group_item.setFlags(QtCore.Qt.ItemIsEnabled) + + root_item.appendRow(group_item) + + index = self._instance_model.index( + group_item.row(), group_item.column() + ) + proxy_index = self._proxy_model.mapFromSource(index) + widget = InstanceListGroupWidget( + CONVERTOR_ITEM_GROUP, self._instance_view + ) + widget.toggle_checkbox.setVisible(False) + widget.expand_changed.connect( + self._on_convertor_group_expand_request + ) + self._instance_view.setIndexWidget(proxy_index, widget) + + self._convertor_group_item = group_item + self._convertor_group_widget = widget + + for row in reversed(range(group_item.rowCount())): + child_item = group_item.child(row) + child_identifier = child_item.data(CONVERTER_IDENTIFIER_ROLE) + if child_identifier not in convertor_items_by_id: + self._convertor_items_by_id.pop(child_identifier, None) 
+ group_item.removeRows(row, 1) + + new_items = [] + for identifier, convertor_item in convertor_items_by_id.items(): + item = self._convertor_items_by_id.get(identifier) + if item is None: + created_new_items = True + item = QtGui.QStandardItem(convertor_item.label) + new_items.append(item) + item.setData(convertor_item.id, INSTANCE_ID_ROLE) + item.setData(convertor_item.label, SORT_VALUE_ROLE) + item.setData(CONVERTOR_ITEM_GROUP, GROUP_ROLE) + item.setData( + convertor_item.identifier, CONVERTER_IDENTIFIER_ROLE + ) + self._convertor_items_by_id[identifier] = item + + if new_items: + group_item.appendRows(new_items) + + return created_new_items + + def _make_sure_groups_exists(self, group_names): + new_group_items = [] + for group_name in group_names: + if group_name in self._group_items: + continue + + group_item = QtGui.QStandardItem() + group_item.setData(group_name, GROUP_ROLE) + group_item.setData(group_name, SORT_VALUE_ROLE) + group_item.setData(True, IS_GROUP_ROLE) + group_item.setFlags(QtCore.Qt.ItemIsEnabled) + self._group_items[group_name] = group_item + new_group_items.append(group_item) + + # Add new group items to root item if there are any + if not new_group_items: + return False + + # Access to root item of main model + root_item = self._instance_model.invisibleRootItem() + root_item.appendRows(new_group_items) + + # Create widget for each new group item and store it for future usage + for group_item in new_group_items: + index = self._instance_model.index( + group_item.row(), group_item.column() + ) + proxy_index = self._proxy_model.mapFromSource(index) + group_name = group_item.data(GROUP_ROLE) + widget = InstanceListGroupWidget(group_name, self._instance_view) + widget.expand_changed.connect(self._on_group_expand_request) + widget.toggle_requested.connect(self._on_group_toggle_request) + self._group_widgets[group_name] = widget + self._instance_view.setIndexWidget(proxy_index, widget) + + return True + + def _remove_groups_except(self, group_names): + # Remove groups that are not available anymore + root_item = self._instance_model.invisibleRootItem() + for group_name in tuple(self._group_items.keys()): + if group_name in group_names: + continue + + group_item = self._group_items.pop(group_name) + root_item.removeRow(group_item.row()) + widget = self._group_widgets.pop(group_name) + widget.deleteLater() + def refresh_instance_states(self): """Trigger update of all instances.""" for widget in self._widgets_by_id.values(): widget.update_instance_values() def _on_active_changed(self, changed_instance_id, new_value): - selected_instances, _ = self.get_selected_items() + selected_instance_ids, _, _ = self.get_selected_items() selected_ids = set() found = False - for instance in selected_instances: - selected_ids.add(instance.id) - if not found and instance.id == changed_instance_id: + for instance_id in selected_instance_ids: + selected_ids.add(instance_id) + if not found and instance_id == changed_instance_id: found = True if not found: @@ -758,32 +862,6 @@ class InstanceListView(AbstractInstanceView): if changed_ids: self.active_changed.emit() - def get_selected_items(self): - """Get selected instance ids and context selection. - - Returns: - tuple: Selected instance ids and boolean if context - is selected. 
- """ - instances = [] - context_selected = False - instances_by_id = { - instance.id: instance - for instance in self.controller.instances - } - - for index in self._instance_view.selectionModel().selectedIndexes(): - instance_id = index.data(INSTANCE_ID_ROLE) - if not context_selected and instance_id == CONTEXT_ID: - context_selected = True - - elif instance_id is not None: - instance = instances_by_id.get(instance_id) - if instance: - instances.append(instance) - - return instances, context_selected - def _on_selection_change(self, *_args): self.selection_changed.emit() @@ -798,6 +876,16 @@ class InstanceListView(AbstractInstanceView): proxy_index = self._proxy_model.mapFromSource(group_index) self._instance_view.setExpanded(proxy_index, expanded) + def _on_convertor_group_expand_request(self, _, expanded): + group_item = self._convertor_group_item + if not group_item: + return + group_index = self._instance_model.index( + group_item.row(), group_item.column() + ) + proxy_index = self._proxy_model.mapFromSource(group_index) + self._instance_view.setExpanded(proxy_index, expanded) + def _on_group_toggle_request(self, group_name, state): if state == QtCore.Qt.PartiallyChecked: return @@ -823,3 +911,130 @@ class InstanceListView(AbstractInstanceView): proxy_index = self._proxy_model.mapFromSource(group_item.index()) if not self._instance_view.isExpanded(proxy_index): self._instance_view.expand(proxy_index) + + def get_selected_items(self): + """Get selected instance ids and context selection. + + Returns: + tuple: Selected instance ids and boolean if context + is selected. + """ + + instance_ids = [] + convertor_identifiers = [] + context_selected = False + + for index in self._instance_view.selectionModel().selectedIndexes(): + convertor_identifier = index.data(CONVERTER_IDENTIFIER_ROLE) + if convertor_identifier is not None: + convertor_identifiers.append(convertor_identifier) + continue + + instance_id = index.data(INSTANCE_ID_ROLE) + if not context_selected and instance_id == CONTEXT_ID: + context_selected = True + + elif instance_id is not None: + instance_ids.append(instance_id) + + return instance_ids, context_selected, convertor_identifiers + + def set_selected_items( + self, instance_ids, context_selected, convertor_identifiers + ): + s_instance_ids = set(instance_ids) + s_convertor_identifiers = set(convertor_identifiers) + cur_ids, cur_context, cur_convertor_identifiers = ( + self.get_selected_items() + ) + if ( + set(cur_ids) == s_instance_ids + and cur_context == context_selected + and set(cur_convertor_identifiers) == s_convertor_identifiers + ): + return + + view = self._instance_view + src_model = self._instance_model + proxy_model = self._proxy_model + + select_indexes = [] + + select_queue = collections.deque() + select_queue.append( + (src_model.invisibleRootItem(), []) + ) + while select_queue: + queue_item = select_queue.popleft() + item, parent_items = queue_item + + if item.hasChildren(): + new_parent_items = list(parent_items) + new_parent_items.append(item) + for row in range(item.rowCount()): + select_queue.append( + (item.child(row), list(new_parent_items)) + ) + + convertor_identifier = item.data(CONVERTER_IDENTIFIER_ROLE) + + select = False + expand_parent = True + if convertor_identifier is not None: + if convertor_identifier in s_convertor_identifiers: + select = True + else: + instance_id = item.data(INSTANCE_ID_ROLE) + if instance_id == CONTEXT_ID: + if context_selected: + select = True + expand_parent = False + + elif instance_id in s_instance_ids: + 
select = True + + if not select: + continue + + select_indexes.append(item.index()) + if not expand_parent: + continue + + for parent_item in parent_items: + index = parent_item.index() + proxy_index = proxy_model.mapFromSource(index) + if not view.isExpanded(proxy_index): + view.expand(proxy_index) + + selection_model = view.selectionModel() + if not select_indexes: + selection_model.clear() + return + + if len(select_indexes) == 1: + proxy_index = proxy_model.mapFromSource(select_indexes[0]) + selection_model.setCurrentIndex( + proxy_index, + selection_model.ClearAndSelect | selection_model.Rows + ) + return + + first_index = proxy_model.mapFromSource(select_indexes.pop(0)) + last_index = proxy_model.mapFromSource(select_indexes.pop(-1)) + + selection_model.setCurrentIndex( + first_index, + selection_model.ClearAndSelect | selection_model.Rows + ) + + for index in select_indexes: + proxy_index = proxy_model.mapFromSource(index) + selection_model.select( + proxy_index, + selection_model.Select | selection_model.Rows + ) + + selection_model.setCurrentIndex( + last_index, + selection_model.Select | selection_model.Rows + ) diff --git a/openpype/tools/publisher/widgets/overview_widget.py b/openpype/tools/publisher/widgets/overview_widget.py new file mode 100644 index 0000000000..1c924d1631 --- /dev/null +++ b/openpype/tools/publisher/widgets/overview_widget.py @@ -0,0 +1,404 @@ +from Qt import QtWidgets, QtCore + +from .border_label_widget import BorderedLabelWidget + +from .card_view_widgets import InstanceCardView +from .list_view_widgets import InstanceListView +from .widgets import ( + SubsetAttributesWidget, + CreateInstanceBtn, + RemoveInstanceBtn, + ChangeViewBtn, +) +from .create_widget import CreateWidget + + +class OverviewWidget(QtWidgets.QFrame): + active_changed = QtCore.Signal() + instance_context_changed = QtCore.Signal() + create_requested = QtCore.Signal() + + anim_end_value = 200 + anim_duration = 200 + + def __init__(self, controller, parent): + super(OverviewWidget, self).__init__(parent) + + self._refreshing_instances = False + self._controller = controller + + create_widget = CreateWidget(controller, self) + + # --- Created Subsets/Instances --- + # Common widget for creation and overview + subset_views_widget = BorderedLabelWidget( + "Subsets to publish", self + ) + + subset_view_cards = InstanceCardView(controller, subset_views_widget) + subset_list_view = InstanceListView(controller, subset_views_widget) + + subset_views_layout = QtWidgets.QStackedLayout() + subset_views_layout.addWidget(subset_view_cards) + subset_views_layout.addWidget(subset_list_view) + subset_views_layout.setCurrentWidget(subset_view_cards) + + # Buttons at the bottom of subset view + create_btn = CreateInstanceBtn(self) + delete_btn = RemoveInstanceBtn(self) + change_view_btn = ChangeViewBtn(self) + + # --- Overview --- + # Subset details widget + subset_attributes_wrap = BorderedLabelWidget( + "Publish options", self + ) + subset_attributes_widget = SubsetAttributesWidget( + controller, subset_attributes_wrap + ) + subset_attributes_wrap.set_center_widget(subset_attributes_widget) + + # Layout of buttons at the bottom of subset view + subset_view_btns_layout = QtWidgets.QHBoxLayout() + subset_view_btns_layout.setContentsMargins(0, 5, 0, 0) + subset_view_btns_layout.addWidget(create_btn) + subset_view_btns_layout.addSpacing(5) + subset_view_btns_layout.addWidget(delete_btn) + subset_view_btns_layout.addStretch(1) + subset_view_btns_layout.addWidget(change_view_btn) + + # Layout of view and 
buttons + # - widget 'subset_view_widget' is necessary + # - only layout won't be resized automatically to minimum size hint + # on child resize request! + subset_view_widget = QtWidgets.QWidget(subset_views_widget) + subset_view_layout = QtWidgets.QVBoxLayout(subset_view_widget) + subset_view_layout.setContentsMargins(0, 0, 0, 0) + subset_view_layout.addLayout(subset_views_layout, 1) + subset_view_layout.addLayout(subset_view_btns_layout, 0) + + subset_views_widget.set_center_widget(subset_view_widget) + + # Whole subset layout with attributes and details + subset_content_widget = QtWidgets.QWidget(self) + subset_content_layout = QtWidgets.QHBoxLayout(subset_content_widget) + subset_content_layout.setContentsMargins(0, 0, 0, 0) + subset_content_layout.addWidget(create_widget, 7) + subset_content_layout.addWidget(subset_views_widget, 3) + subset_content_layout.addWidget(subset_attributes_wrap, 7) + + # Subset frame layout + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(subset_content_widget, 1) + + change_anim = QtCore.QVariantAnimation() + change_anim.setStartValue(float(0)) + change_anim.setEndValue(float(self.anim_end_value)) + change_anim.setDuration(self.anim_duration) + change_anim.setEasingCurve(QtCore.QEasingCurve.InOutQuad) + + # --- Calbacks for instances/subsets view --- + create_btn.clicked.connect(self._on_create_clicked) + delete_btn.clicked.connect(self._on_delete_clicked) + change_view_btn.clicked.connect(self._on_change_view_clicked) + + change_anim.valueChanged.connect(self._on_change_anim) + change_anim.finished.connect(self._on_change_anim_finished) + + # Selection changed + subset_list_view.selection_changed.connect( + self._on_subset_change + ) + subset_view_cards.selection_changed.connect( + self._on_subset_change + ) + # Active instances changed + subset_list_view.active_changed.connect( + self._on_active_changed + ) + subset_view_cards.active_changed.connect( + self._on_active_changed + ) + # Instance context has changed + subset_attributes_widget.instance_context_changed.connect( + self._on_instance_context_change + ) + subset_attributes_widget.convert_requested.connect( + self._on_convert_requested + ) + + # --- Controller callbacks --- + controller.event_system.add_callback( + "publish.process.started", self._on_publish_start + ) + controller.event_system.add_callback( + "publish.reset.finished", self._on_publish_reset + ) + controller.event_system.add_callback( + "instances.refresh.finished", self._on_instances_refresh + ) + + self._subset_content_widget = subset_content_widget + self._subset_content_layout = subset_content_layout + + self._subset_view_cards = subset_view_cards + self._subset_list_view = subset_list_view + self._subset_views_layout = subset_views_layout + + self._delete_btn = delete_btn + + self._subset_attributes_widget = subset_attributes_widget + self._create_widget = create_widget + self._subset_views_widget = subset_views_widget + self._subset_attributes_wrap = subset_attributes_wrap + + self._change_anim = change_anim + + # Start in create mode + self._create_widget_policy = create_widget.sizePolicy() + self._subset_views_widget_policy = subset_views_widget.sizePolicy() + self._subset_attributes_wrap_policy = ( + subset_attributes_wrap.sizePolicy() + ) + self._max_widget_width = None + self._current_state = "create" + subset_attributes_wrap.setVisible(False) + + def set_state(self, new_state, animate): + if new_state == self._current_state: + return + + self._current_state = 
new_state + + anim_is_running = ( + self._change_anim.state() == self._change_anim.Running + ) + if not animate: + self._change_visibility_for_state() + if anim_is_running: + self._change_anim.stop() + return + + if self._max_widget_width is None: + self._max_widget_width = self._subset_views_widget.maximumWidth() + + if new_state == "create": + direction = self._change_anim.Backward + else: + direction = self._change_anim.Forward + self._change_anim.setDirection(direction) + + if not anim_is_running: + view_width = self._subset_views_widget.width() + self._subset_views_widget.setMinimumWidth(view_width) + self._subset_views_widget.setMaximumWidth(view_width) + self._change_anim.start() + + def get_subset_views_geo(self): + parent = self._subset_views_widget.parent() + global_pos = parent.mapToGlobal(self._subset_views_widget.pos()) + return QtCore.QRect( + global_pos.x(), + global_pos.y(), + self._subset_views_widget.width(), + self._subset_views_widget.height() + ) + + def _on_create_clicked(self): + """Pass signal to parent widget which should care about changing state. + + We don't change anything here until the parent will care about it. + """ + + self.create_requested.emit() + + def _on_delete_clicked(self): + instance_ids, _, _ = self.get_selected_items() + + # Ask user if he really wants to remove instances + dialog = QtWidgets.QMessageBox(self) + dialog.setIcon(QtWidgets.QMessageBox.Question) + dialog.setWindowTitle("Are you sure?") + if len(instance_ids) > 1: + msg = ( + "Do you really want to remove {} instances?" + ).format(len(instance_ids)) + else: + msg = ( + "Do you really want to remove the instance?" + ) + dialog.setText(msg) + dialog.setStandardButtons( + QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel + ) + dialog.setDefaultButton(QtWidgets.QMessageBox.Ok) + dialog.setEscapeButton(QtWidgets.QMessageBox.Cancel) + dialog.exec_() + # Skip if OK was not clicked + if dialog.result() == QtWidgets.QMessageBox.Ok: + instance_ids = set(instance_ids) + self._controller.remove_instances(instance_ids) + + def _on_change_view_clicked(self): + self._change_view_type() + + def _on_subset_change(self, *_args): + # Ignore changes if in middle of refreshing + if self._refreshing_instances: + return + + instance_ids, context_selected, convertor_identifiers = ( + self.get_selected_items() + ) + + # Disable delete button if nothing is selected + self._delete_btn.setEnabled(len(instance_ids) > 0) + + instances_by_id = self._controller.instances + instances = [ + instances_by_id[instance_id] + for instance_id in instance_ids + ] + self._subset_attributes_widget.set_current_instances( + instances, context_selected, convertor_identifiers + ) + + def _on_active_changed(self): + if self._refreshing_instances: + return + self.active_changed.emit() + + def _on_change_anim(self, value): + self._create_widget.setVisible(True) + self._subset_attributes_wrap.setVisible(True) + width = ( + self._subset_content_widget.width() + - ( + self._subset_views_widget.width() + + (self._subset_content_layout.spacing() * 2) + ) + ) + subset_attrs_width = int((float(width) / self.anim_end_value) * value) + if subset_attrs_width > width: + subset_attrs_width = width + + create_width = width - subset_attrs_width + + self._create_widget.setMinimumWidth(create_width) + self._create_widget.setMaximumWidth(create_width) + self._subset_attributes_wrap.setMinimumWidth(subset_attrs_width) + self._subset_attributes_wrap.setMaximumWidth(subset_attrs_width) + + def _on_change_anim_finished(self): + 
self._change_visibility_for_state() + self._create_widget.setMinimumWidth(0) + self._create_widget.setMaximumWidth(self._max_widget_width) + self._subset_attributes_wrap.setMinimumWidth(0) + self._subset_attributes_wrap.setMaximumWidth(self._max_widget_width) + self._subset_views_widget.setMinimumWidth(0) + self._subset_views_widget.setMaximumWidth(self._max_widget_width) + self._create_widget.setSizePolicy( + self._create_widget_policy + ) + self._subset_attributes_wrap.setSizePolicy( + self._subset_attributes_wrap_policy + ) + self._subset_views_widget.setSizePolicy( + self._subset_views_widget_policy + ) + + def _change_visibility_for_state(self): + self._create_widget.setVisible( + self._current_state == "create" + ) + self._subset_attributes_wrap.setVisible( + self._current_state == "publish" + ) + + def _on_instance_context_change(self): + current_idx = self._subset_views_layout.currentIndex() + for idx in range(self._subset_views_layout.count()): + if idx == current_idx: + continue + widget = self._subset_views_layout.widget(idx) + if widget.refreshed: + widget.set_refreshed(False) + + current_widget = self._subset_views_layout.widget(current_idx) + current_widget.refresh_instance_states() + + self.instance_context_changed.emit() + + def _on_convert_requested(self): + _, _, convertor_identifiers = self.get_selected_items() + self._controller.trigger_convertor_items(convertor_identifiers) + + def get_selected_items(self): + view = self._subset_views_layout.currentWidget() + return view.get_selected_items() + + def _change_view_type(self): + idx = self._subset_views_layout.currentIndex() + new_idx = (idx + 1) % self._subset_views_layout.count() + + old_view = self._subset_views_layout.currentWidget() + new_view = self._subset_views_layout.widget(new_idx) + + if not new_view.refreshed: + new_view.refresh() + new_view.set_refreshed(True) + else: + new_view.refresh_instance_states() + + instance_ids, context_selected, convertor_identifiers = ( + old_view.get_selected_items() + ) + new_view.set_selected_items( + instance_ids, context_selected, convertor_identifiers + ) + + self._subset_views_layout.setCurrentIndex(new_idx) + + self._on_subset_change() + + def _refresh_instances(self): + if self._refreshing_instances: + return + + self._refreshing_instances = True + + for idx in range(self._subset_views_layout.count()): + widget = self._subset_views_layout.widget(idx) + widget.set_refreshed(False) + + view = self._subset_views_layout.currentWidget() + view.refresh() + view.set_refreshed(True) + + self._refreshing_instances = False + + # Force to change instance and refresh details + self._on_subset_change() + + def _on_publish_start(self): + """Publish started.""" + + self._subset_attributes_wrap.setEnabled(False) + + def _on_publish_reset(self): + """Context in controller has been refreshed.""" + + self._subset_attributes_wrap.setEnabled(True) + self._subset_content_widget.setEnabled(self._controller.host_is_valid) + + def _on_instances_refresh(self): + """Controller refreshed instances.""" + + self._refresh_instances() + + # Give a change to process Resize Request + QtWidgets.QApplication.processEvents() + # Trigger update geometry of + widget = self._subset_views_layout.currentWidget() + widget.updateGeometry() diff --git a/openpype/tools/publisher/widgets/precreate_widget.py b/openpype/tools/publisher/widgets/precreate_widget.py index eaadfe890b..b688a83053 100644 --- a/openpype/tools/publisher/widgets/precreate_widget.py +++ b/openpype/tools/publisher/widgets/precreate_widget.py @@ 
-1,6 +1,6 @@ from Qt import QtWidgets, QtCore -from openpype.widgets.attribute_defs import create_widget_for_attr_def +from openpype.tools.attribute_defs import create_widget_for_attr_def class PreCreateWidget(QtWidgets.QWidget): @@ -58,12 +58,12 @@ class PreCreateWidget(QtWidgets.QWidget): def current_value(self): return self._attributes_widget.current_value() - def set_plugin(self, creator): + def set_creator_item(self, creator_item): attr_defs = [] creator_selected = False - if creator is not None: + if creator_item is not None: creator_selected = True - attr_defs = creator.get_pre_create_attr_defs() + attr_defs = creator_item.pre_create_attributes_defs self._attributes_widget.set_attr_defs(attr_defs) diff --git a/openpype/tools/publisher/widgets/publish_frame.py b/openpype/tools/publisher/widgets/publish_frame.py new file mode 100644 index 0000000000..00597451a9 --- /dev/null +++ b/openpype/tools/publisher/widgets/publish_frame.py @@ -0,0 +1,524 @@ +import os +import json +import time + +from Qt import QtWidgets, QtCore + +from .widgets import ( + StopBtn, + ResetBtn, + ValidateBtn, + PublishBtn, + PublishReportBtn, +) + + +class PublishFrame(QtWidgets.QWidget): + """Frame showed during publishing. + + Shows all information related to publishing. Contains validation error + widget which is showed if only validation error happens during validation. + + Processing layer is default layer. Validation error layer is shown if only + validation exception is raised during publishing. Report layer is available + only when publishing process is stopped and must be manually triggered to + change into that layer. + + +------------------------------------------------------------------------+ + | < Main label > | + | < Label top > | + | (#### 10% ) | + | | + | | + +------------------------------------------------------------------------+ + """ + + details_page_requested = QtCore.Signal() + + def __init__(self, controller, borders, parent): + super(PublishFrame, self).__init__(parent) + + # Bottom part of widget where process and callback buttons are showed + # - QFrame used to be able set background using stylesheets easily + # and not override all children widgets style + content_frame = QtWidgets.QFrame(self) + content_frame.setObjectName("PublishInfoFrame") + + top_content_widget = QtWidgets.QWidget(content_frame) + + # Center widget displaying current state (without any specific info) + main_label = QtWidgets.QLabel(top_content_widget) + main_label.setObjectName("PublishInfoMainLabel") + main_label.setAlignment(QtCore.Qt.AlignCenter) + + # Supporting labels for main label + # Top label is displayed just under main label + message_label_top = QtWidgets.QLabel(top_content_widget) + message_label_top.setAlignment(QtCore.Qt.AlignCenter) + + # Label showing currently processed instance + progress_widget = QtWidgets.QWidget(top_content_widget) + instance_plugin_widget = QtWidgets.QWidget(progress_widget) + instance_label = QtWidgets.QLabel( + "", instance_plugin_widget + ) + instance_label.setAlignment( + QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter + ) + # Label showing currently processed plugin + plugin_label = QtWidgets.QLabel( + "", instance_plugin_widget + ) + plugin_label.setAlignment( + QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter + ) + instance_plugin_layout = QtWidgets.QHBoxLayout(instance_plugin_widget) + instance_plugin_layout.setContentsMargins(0, 0, 0, 0) + instance_plugin_layout.addWidget(instance_label, 1) + instance_plugin_layout.addWidget(plugin_label, 1) + + # Progress bar showing 
progress of publishing + progress_bar = QtWidgets.QProgressBar(progress_widget) + progress_bar.setObjectName("PublishProgressBar") + + progress_layout = QtWidgets.QVBoxLayout(progress_widget) + progress_layout.setSpacing(5) + progress_layout.setContentsMargins(0, 0, 0, 0) + progress_layout.addWidget(instance_plugin_widget, 0) + progress_layout.addWidget(progress_bar, 0) + + top_content_layout = QtWidgets.QVBoxLayout(top_content_widget) + top_content_layout.setContentsMargins(0, 0, 0, 0) + top_content_layout.setSpacing(5) + top_content_layout.setAlignment(QtCore.Qt.AlignCenter) + top_content_layout.addWidget(main_label) + # TODO stretches should be probably replaced by spacing... + # - stretch in floating frame doesn't make sense + top_content_layout.addWidget(message_label_top) + top_content_layout.addWidget(progress_widget) + + # Publishing buttons to stop, reset or trigger publishing + footer_widget = QtWidgets.QWidget(content_frame) + + report_btn = PublishReportBtn(footer_widget) + + shrunk_main_label = QtWidgets.QLabel(footer_widget) + shrunk_main_label.setObjectName("PublishInfoMainLabel") + shrunk_main_label.setAlignment( + QtCore.Qt.AlignVCenter | QtCore.Qt.AlignLeft + ) + + reset_btn = ResetBtn(footer_widget) + stop_btn = StopBtn(footer_widget) + validate_btn = ValidateBtn(footer_widget) + publish_btn = PublishBtn(footer_widget) + + report_btn.add_action("Go to details", "go_to_report") + report_btn.add_action("Copy report", "copy_report") + report_btn.add_action("Export report", "export_report") + + # Footer on info frame layout + footer_layout = QtWidgets.QHBoxLayout(footer_widget) + footer_layout.setContentsMargins(0, 0, 0, 0) + footer_layout.addWidget(report_btn, 0) + footer_layout.addWidget(shrunk_main_label, 1) + footer_layout.addWidget(reset_btn, 0) + footer_layout.addWidget(stop_btn, 0) + footer_layout.addWidget(validate_btn, 0) + footer_layout.addWidget(publish_btn, 0) + + # Info frame content + content_layout = QtWidgets.QVBoxLayout(content_frame) + content_layout.setSpacing(5) + + content_layout.addWidget(top_content_widget) + content_layout.addWidget(footer_widget) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(borders, 0, borders, borders) + main_layout.addWidget(content_frame) + + shrunk_anim = QtCore.QVariantAnimation() + shrunk_anim.setDuration(140) + shrunk_anim.setEasingCurve(QtCore.QEasingCurve.InOutQuad) + + # Force translucent background for widgets + for widget in ( + self, + top_content_widget, + footer_widget, + progress_widget, + instance_plugin_widget, + ): + widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + report_btn.triggered.connect(self._on_report_triggered) + reset_btn.clicked.connect(self._on_reset_clicked) + stop_btn.clicked.connect(self._on_stop_clicked) + validate_btn.clicked.connect(self._on_validate_clicked) + publish_btn.clicked.connect(self._on_publish_clicked) + + shrunk_anim.valueChanged.connect(self._on_shrunk_anim) + shrunk_anim.finished.connect(self._on_shrunk_anim_finish) + + controller.event_system.add_callback( + "publish.reset.finished", self._on_publish_reset + ) + controller.event_system.add_callback( + "publish.process.started", self._on_publish_start + ) + controller.event_system.add_callback( + "publish.has_validated.changed", self._on_publish_validated_change + ) + controller.event_system.add_callback( + "publish.process.stopped", self._on_publish_stop + ) + + controller.event_system.add_callback( + "publish.process.instance.changed", self._on_instance_change + ) + 
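Both `change_anim` in OverviewWidget and `shrunk_anim` here follow the same pattern: a QVariantAnimation interpolates a plain float and slots apply it to widget geometry on `valueChanged`/`finished`. A minimal sketch of that pattern animating a widget's width (sizes and duration are arbitrary):

import sys

from Qt import QtCore, QtWidgets


class CollapsiblePanel(QtWidgets.QFrame):
    """Frame whose width animates between a collapsed and expanded state."""

    def __init__(self, parent=None):
        super(CollapsiblePanel, self).__init__(parent)

        self.setFrameShape(QtWidgets.QFrame.StyledPanel)

        anim = QtCore.QVariantAnimation()
        anim.setStartValue(40.0)
        anim.setEndValue(240.0)
        anim.setDuration(200)
        anim.setEasingCurve(QtCore.QEasingCurve.InOutQuad)

        # The animation only produces values; slots apply them to geometry
        anim.valueChanged.connect(self._on_anim)
        anim.finished.connect(self._on_anim_finished)

        self._anim = anim

    def _on_anim(self, value):
        self.setFixedWidth(int(value))

    def _on_anim_finished(self):
        print("animation finished")

    def expand(self):
        self._anim.setDirection(self._anim.Forward)
        self._anim.start()

    def collapse(self):
        self._anim.setDirection(self._anim.Backward)
        self._anim.start()


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    panel = CollapsiblePanel()
    panel.show()
    panel.expand()
    sys.exit(app.exec_())
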
+        controller.event_system.add_callback(
+            "publish.process.plugin.changed", self._on_plugin_change
+        )
+
+        self._shrunk_anim = shrunk_anim
+
+        self._controller = controller
+
+        self._content_frame = content_frame
+        self._content_layout = content_layout
+        self._top_content_layout = top_content_layout
+        self._top_content_widget = top_content_widget
+
+        self._main_label = main_label
+        self._message_label_top = message_label_top
+
+        self._instance_label = instance_label
+        self._plugin_label = plugin_label
+
+        self._progress_bar = progress_bar
+        self._progress_widget = progress_widget
+
+        self._shrunk_main_label = shrunk_main_label
+        self._reset_btn = reset_btn
+        self._stop_btn = stop_btn
+        self._validate_btn = validate_btn
+        self._publish_btn = publish_btn
+
+        self._shrunken = False
+        self._top_widget_max_height = None
+        self._top_widget_size_policy = top_content_widget.sizePolicy()
+        self._last_instance_label = None
+        self._last_plugin_label = None
+
+    def mouseReleaseEvent(self, event):
+        super(PublishFrame, self).mouseReleaseEvent(event)
+        self._change_shrunk_state()
+
+    def _change_shrunk_state(self):
+        self.set_shrunk_state(not self._shrunken)
+
+    def set_shrunk_state(self, shrunk):
+        if shrunk is self._shrunken:
+            return
+
+        if self._top_widget_max_height is None:
+            self._top_widget_max_height = (
+                self._top_content_widget.maximumHeight()
+            )
+
+        self._shrunken = shrunk
+
+        anim_is_running = (
+            self._shrunk_anim.state() == self._shrunk_anim.Running
+        )
+        if not self.isVisible():
+            if anim_is_running:
+                self._shrunk_anim.stop()
+            self._on_shrunk_anim_finish()
+            return
+
+        start = 0
+        end = 0
+        if shrunk:
+            start = self._top_content_widget.height()
+        else:
+            if anim_is_running:
+                start = self._shrunk_anim.currentValue()
+            hint = self._top_content_widget.minimumSizeHint()
+            end = hint.height()
+
+        self._shrunk_anim.setStartValue(float(start))
+        self._shrunk_anim.setEndValue(float(end))
+        if not anim_is_running:
+            self._shrunk_anim.start()
+
+    def _on_shrunk_anim(self, value):
+        diff = self._top_content_widget.height() - int(value)
+        if not self._top_content_widget.isVisible():
+            diff -= self._content_layout.spacing()
+
+        window_pos = self.pos()
+        window_pos_y = window_pos.y() + diff
+        window_height = self.height() - diff
+
+        self._top_content_widget.setMinimumHeight(value)
+        self._top_content_widget.setMaximumHeight(value)
+        self._top_content_widget.setVisible(True)
+
+        self.resize(self.width(), window_height)
+        self.move(window_pos.x(), window_pos_y)
+
+    def _on_shrunk_anim_finish(self):
+        self._top_content_widget.setVisible(not self._shrunken)
+        self._top_content_widget.setMinimumHeight(0)
+        self._top_content_widget.setMaximumHeight(
+            self._top_widget_max_height
+        )
+        self._top_content_widget.setSizePolicy(self._top_widget_size_policy)
+
+        if self._shrunken:
+            self._shrunk_main_label.setText(self._main_label.text())
+        else:
+            self._shrunk_main_label.setText("")
+
+        if self._shrunken:
+            content_frame_hint = self._content_frame.sizeHint()
+
+            layout = self.layout()
+            margins = layout.contentsMargins()
+            window_height = (
+                content_frame_hint.height()
+                + margins.bottom()
+                + margins.top()
+            )
+            diff = self.height() - window_height
+            window_pos = self.pos()
+            window_pos_y = window_pos.y() + diff
+            self.resize(self.width(), window_height)
+            self.move(window_pos.x(), window_pos_y)
+
+    def _set_main_label(self, message):
+        self._main_label.setText(message)
+        if self._shrunken:
+            self._shrunk_main_label.setText(message)
+
+    def _on_publish_reset(self):
+        self._last_instance_label = None
+        self._last_plugin_label = None
+
+        self._set_success_property()
+        self._set_progress_visibility(True)
+
+        self._main_label.setText("Hit publish (play button)! If you want")
+        self._message_label_top.setText("")
+
+        self._reset_btn.setEnabled(True)
+        self._stop_btn.setEnabled(False)
+        self._validate_btn.setEnabled(True)
+        self._publish_btn.setEnabled(True)
+
+        self._progress_bar.setValue(self._controller.publish_progress)
+        self._progress_bar.setMaximum(self._controller.publish_max_progress)
+
+    def _on_publish_start(self):
+        if self._last_plugin_label:
+            self._plugin_label.setText(self._last_plugin_label)
+
+        if self._last_instance_label:
+            self._instance_label.setText(self._last_instance_label)
+
+        self._set_success_property(3)
+        self._set_progress_visibility(True)
+        self._set_main_label("Publishing...")
+
+        self._reset_btn.setEnabled(False)
+        self._stop_btn.setEnabled(True)
+        self._validate_btn.setEnabled(False)
+        self._publish_btn.setEnabled(False)
+
+        self.set_shrunk_state(False)
+
+    def _on_publish_validated_change(self, event):
+        if event["value"]:
+            self._validate_btn.setEnabled(False)
+
+    def _on_instance_change(self, event):
+        """Change instance label when instance is going to be processed."""
+
+        self._last_instance_label = event["instance_label"]
+        self._instance_label.setText(event["instance_label"])
+        QtWidgets.QApplication.processEvents()
+
+    def _on_plugin_change(self, event):
+        """Change plugin label when plugin is going to be processed."""
+
+        self._last_plugin_label = event["plugin_label"]
+        self._progress_bar.setValue(self._controller.publish_progress)
+        self._plugin_label.setText(event["plugin_label"])
+        QtWidgets.QApplication.processEvents()
+
+    def _on_publish_stop(self):
+        self._progress_bar.setValue(self._controller.publish_progress)
+
+        self._reset_btn.setEnabled(True)
+        self._stop_btn.setEnabled(False)
+
+        self._instance_label.setText("")
+        self._plugin_label.setText("")
+
+        validate_enabled = not self._controller.publish_has_crashed
+        publish_enabled = not self._controller.publish_has_crashed
+        if validate_enabled:
+            validate_enabled = not self._controller.publish_has_validated
+        if publish_enabled:
+            if (
+                self._controller.publish_has_validated
+                and self._controller.publish_has_validation_errors
+            ):
+                publish_enabled = False
+
+            else:
+                publish_enabled = not self._controller.publish_has_finished
+
+        self._validate_btn.setEnabled(validate_enabled)
+        self._publish_btn.setEnabled(publish_enabled)
+
+        if self._controller.publish_has_crashed:
+            self._set_error_msg()
+
+        elif self._controller.publish_has_validation_errors:
+            self._set_progress_visibility(False)
+            self._set_validation_errors()
+
+        elif self._controller.publish_has_finished:
+            self._set_finished()
+
+        else:
+            self._set_stopped()
+
+    def _set_stopped(self):
+        main_label = "Publish paused"
+        if self._controller.publish_has_validated:
+            main_label += " - Validation passed"
+
+        self._set_main_label(main_label)
+        self._message_label_top.setText(
+            "Hit publish (play button) to continue."
+        )
+
+        self._set_success_property(4)
+
+    def _set_error_msg(self):
+        """Show error message to artist on publish crash."""
+
+        self._set_main_label("Error happened")
+
+        self._message_label_top.setText(self._controller.publish_error_msg)
+
+        self._set_success_property(1)
+
+    def _set_validation_errors(self):
+        self._set_main_label("Your publish didn't pass studio validations")
+        self._message_label_top.setText("Check results above please")
+        self._set_success_property(2)
+
+    def _set_finished(self):
+        self._set_main_label("Finished")
+        self._message_label_top.setText("")
+        self._set_success_property(0)
+
+    def _set_progress_visibility(self, visible):
+        window_height = self.height()
+        self._progress_widget.setVisible(visible)
+        # Ignore rescaling and moving of the widget if it is shrunken or the
+        # progress bar should be visible
+        if self._shrunken or visible:
+            return
+
+        height = self._progress_widget.height()
+        diff = height + self._top_content_layout.spacing()
+
+        window_pos = self.pos()
+        window_pos_y = self.pos().y() + diff
+        window_height -= diff
+
+        self.resize(self.width(), window_height)
+        self.move(window_pos.x(), window_pos_y)
+
+    def _set_success_property(self, state=None):
+        """Apply styles by state.
+
+        State enum:
+        - None - Default state after restart
+        - 0 - Success finish
+        - 1 - Error happened
+        - 2 - Validation error
+        - 3 - In progress
+        - 4 - Stopped/Paused
+        """
+
+        if state is None:
+            state = ""
+        else:
+            state = str(state)
+
+        for widget in (self._progress_bar, self._content_frame):
+            if widget.property("state") != state:
+                widget.setProperty("state", state)
+                widget.style().polish(widget)
+
+    def _copy_report(self):
+        logs = self._controller.get_publish_report()
+        logs_string = json.dumps(logs, indent=4)
+
+        mime_data = QtCore.QMimeData()
+        mime_data.setText(logs_string)
+        QtWidgets.QApplication.instance().clipboard().setMimeData(
+            mime_data
+        )
+
+    def _export_report(self):
+        default_filename = "publish-report-{}".format(
+            time.strftime("%y%m%d-%H-%M")
+        )
+        default_filepath = os.path.join(
+            os.path.expanduser("~"),
+            default_filename
+        )
+        new_filepath, ext = QtWidgets.QFileDialog.getSaveFileName(
+            self, "Save report", default_filepath, ".json"
+        )
+        if not ext or not new_filepath:
+            return
+
+        logs = self._controller.get_publish_report()
+        full_path = new_filepath + ext
+        dir_path = os.path.dirname(full_path)
+        if not os.path.exists(dir_path):
+            os.makedirs(dir_path)
+
+        with open(full_path, "w") as file_stream:
+            json.dump(logs, file_stream)
+
+    def _on_report_triggered(self, identifier):
+        if identifier == "export_report":
+            self._export_report()
+
+        elif identifier == "copy_report":
+            self._copy_report()
+
+        elif identifier == "go_to_report":
+            self.details_page_requested.emit()
+
+    def _on_reset_clicked(self):
+        self._controller.reset()
+
+    def _on_stop_clicked(self):
+        self._controller.stop_publish()
+
+    def _on_validate_clicked(self):
+        self._controller.validate()
+
+    def _on_publish_clicked(self):
+        self._controller.publish()
diff --git a/openpype/tools/publisher/widgets/publish_widget.py b/openpype/tools/publisher/widgets/publish_widget.py
deleted file mode 100644
index 80d0265dd3..0000000000
--- a/openpype/tools/publisher/widgets/publish_widget.py
+++ /dev/null
@@ -1,521 +0,0 @@
-import os
-import json
-import time
-
-from Qt import QtWidgets, QtCore, QtGui
-
-from openpype.pipeline import KnownPublishError
-
-from .validations_widget import ValidationsWidget
-from ..publish_report_viewer import PublishReportViewerWidget
-from .widgets import (
-    StopBtn,
-    ResetBtn,
-    ValidateBtn,
-    PublishBtn,
-    CopyPublishReportBtn,
-    SavePublishReportBtn,
-    ShowPublishReportBtn
-)
-
-
-class ActionsButton(QtWidgets.QToolButton):
-    def __init__(self, parent=None):
-        super(ActionsButton, self).__init__(parent)
-
-        self.setText("< No action >")
-        self.setPopupMode(self.MenuButtonPopup)
-        menu = QtWidgets.QMenu(self)
-
-        self.setMenu(menu)
-
-        self._menu = menu
-        self._actions = []
-        self._current_action = None
-
-        self.clicked.connect(self._on_click)
-
-    def current_action(self):
-        return self._current_action
-
-    def add_action(self, action):
-        self._actions.append(action)
-        action.triggered.connect(self._on_action_trigger)
-        self._menu.addAction(action)
-        if self._current_action is None:
-            self._set_action(action)
-
-    def set_action(self, action):
-        if action not in self._actions:
-            self.add_action(action)
-        self._set_action(action)
-
-    def _set_action(self, action):
-        if action is self._current_action:
-            return
-        self._current_action = action
-        self.setText(action.text())
-        self.setIcon(action.icon())
-
-    def _on_click(self):
-        self._current_action.trigger()
-
-    def _on_action_trigger(self):
-        action = self.sender()
-        if action not in self._actions:
-            return
-
-        self._set_action(action)
-
-
-class PublishFrame(QtWidgets.QFrame):
-    """Frame showed during publishing.
-
-    Shows all information related to publishing. Contains validation error
-    widget which is showed if only validation error happens during validation.
-
-    Processing layer is default layer. Validation error layer is shown if only
-    validation exception is raised during publishing. Report layer is available
-    only when publishing process is stopped and must be manually triggered to
-    change into that layer.
-
-    +------------------------------------------------------------------------+
-    | |
-    | |
-    | |
-    | < Validation error widget > |
-    | |
-    | |
-    | |
-    | |
-    +------------------------------------------------------------------------+
-    | < Main label > |
-    | < Label top > |
-    | (#### 10% ) |
-    | |
-    | Report: