diff --git a/.all-contributorsrc b/.all-contributorsrc new file mode 100644 index 0000000000..a3b85cae68 --- /dev/null +++ b/.all-contributorsrc @@ -0,0 +1,315 @@ +{ + "projectName": "OpenPype", + "projectOwner": "pypeclub", + "repoType": "github", + "repoHost": "https://github.com", + "files": [ + "README.md" + ], + "imageSize": 100, + "commit": true, + "commitConvention": "none", + "contributors": [ + { + "login": "mkolar", + "name": "Milan Kolar", + "avatar_url": "https://avatars.githubusercontent.com/u/3333008?v=4", + "profile": "http://pype.club/", + "contributions": [ + "code", + "doc", + "infra", + "business", + "content", + "fundingFinding", + "maintenance", + "projectManagement", + "review", + "mentoring", + "question" + ] + }, + { + "login": "jakubjezek001", + "name": "Jakub Ježek", + "avatar_url": "https://avatars.githubusercontent.com/u/40640033?v=4", + "profile": "https://www.linkedin.com/in/jakubjezek79", + "contributions": [ + "code", + "doc", + "infra", + "content", + "review", + "maintenance", + "mentoring", + "projectManagement", + "question" + ] + }, + { + "login": "antirotor", + "name": "Ondřej Samohel", + "avatar_url": "https://avatars.githubusercontent.com/u/33513211?v=4", + "profile": "https://github.com/antirotor", + "contributions": [ + "code", + "doc", + "infra", + "content", + "review", + "maintenance", + "mentoring", + "projectManagement", + "question" + ] + }, + { + "login": "iLLiCiTiT", + "name": "Jakub Trllo", + "avatar_url": "https://avatars.githubusercontent.com/u/43494761?v=4", + "profile": "https://github.com/iLLiCiTiT", + "contributions": [ + "code", + "doc", + "infra", + "review", + "maintenance", + "question" + ] + }, + { + "login": "kalisp", + "name": "Petr Kalis", + "avatar_url": "https://avatars.githubusercontent.com/u/4457962?v=4", + "profile": "https://github.com/kalisp", + "contributions": [ + "code", + "doc", + "infra", + "review", + "maintenance", + "question" + ] + }, + { + "login": "64qam", + "name": "64qam", + 
"avatar_url": "https://avatars.githubusercontent.com/u/26925793?v=4", + "profile": "https://github.com/64qam", + "contributions": [ + "code", + "review", + "doc", + "infra", + "projectManagement", + "maintenance", + "content", + "userTesting" + ] + }, + { + "login": "BigRoy", + "name": "Roy Nieterau", + "avatar_url": "https://avatars.githubusercontent.com/u/2439881?v=4", + "profile": "http://www.colorbleed.nl/", + "contributions": [ + "code", + "doc", + "review", + "mentoring", + "question" + ] + }, + { + "login": "tokejepsen", + "name": "Toke Jepsen", + "avatar_url": "https://avatars.githubusercontent.com/u/1860085?v=4", + "profile": "https://github.com/tokejepsen", + "contributions": [ + "code", + "doc", + "review", + "mentoring", + "question" + ] + }, + { + "login": "jrsndl", + "name": "Jiri Sindelar", + "avatar_url": "https://avatars.githubusercontent.com/u/45896205?v=4", + "profile": "https://github.com/jrsndl", + "contributions": [ + "code", + "review", + "doc", + "content", + "tutorial", + "userTesting" + ] + }, + { + "login": "simonebarbieri", + "name": "Simone Barbieri", + "avatar_url": "https://avatars.githubusercontent.com/u/1087869?v=4", + "profile": "https://barbierisimone.com/", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "karimmozilla", + "name": "karimmozilla", + "avatar_url": "https://avatars.githubusercontent.com/u/82811760?v=4", + "profile": "http://karimmozilla.xyz/", + "contributions": [ + "code" + ] + }, + { + "login": "Allan-I", + "name": "Allan I. 
A.", + "avatar_url": "https://avatars.githubusercontent.com/u/76656700?v=4", + "profile": "https://github.com/Allan-I", + "contributions": [ + "code" + ] + }, + { + "login": "m-u-r-p-h-y", + "name": "murphy", + "avatar_url": "https://avatars.githubusercontent.com/u/352795?v=4", + "profile": "https://www.linkedin.com/in/mmuurrpphhyy/", + "contributions": [ + "code", + "review", + "userTesting", + "doc", + "projectManagement" + ] + }, + { + "login": "aardschok", + "name": "Wijnand Koreman", + "avatar_url": "https://avatars.githubusercontent.com/u/26920875?v=4", + "profile": "https://github.com/aardschok", + "contributions": [ + "code" + ] + }, + { + "login": "zhoub", + "name": "Bo Zhou", + "avatar_url": "https://avatars.githubusercontent.com/u/1798206?v=4", + "profile": "http://jedimaster.cnblogs.com/", + "contributions": [ + "code" + ] + }, + { + "login": "ClementHector", + "name": "Clément Hector", + "avatar_url": "https://avatars.githubusercontent.com/u/7068597?v=4", + "profile": "https://www.linkedin.com/in/clementhector/", + "contributions": [ + "code", + "review" + ] + }, + { + "login": "davidlatwe", + "name": "David Lai", + "avatar_url": "https://avatars.githubusercontent.com/u/3357009?v=4", + "profile": "https://twitter.com/davidlatwe", + "contributions": [ + "code", + "review" + ] + }, + { + "login": "2-REC", + "name": "Derek ", + "avatar_url": "https://avatars.githubusercontent.com/u/42170307?v=4", + "profile": "https://github.com/2-REC", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "gabormarinov", + "name": "Gábor Marinov", + "avatar_url": "https://avatars.githubusercontent.com/u/8620515?v=4", + "profile": "https://github.com/gabormarinov", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "icyvapor", + "name": "icyvapor", + "avatar_url": "https://avatars.githubusercontent.com/u/1195278?v=4", + "profile": "https://github.com/icyvapor", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "jlorrain", + "name": 
"Jérôme LORRAIN", + "avatar_url": "https://avatars.githubusercontent.com/u/7955673?v=4", + "profile": "https://github.com/jlorrain", + "contributions": [ + "code" + ] + }, + { + "login": "dmo-j-cube", + "name": "David Morris-Oliveros", + "avatar_url": "https://avatars.githubusercontent.com/u/89823400?v=4", + "profile": "https://github.com/dmo-j-cube", + "contributions": [ + "code" + ] + }, + { + "login": "BenoitConnan", + "name": "BenoitConnan", + "avatar_url": "https://avatars.githubusercontent.com/u/82808268?v=4", + "profile": "https://github.com/BenoitConnan", + "contributions": [ + "code" + ] + }, + { + "login": "Malthaldar", + "name": "Malthaldar", + "avatar_url": "https://avatars.githubusercontent.com/u/33671694?v=4", + "profile": "https://github.com/Malthaldar", + "contributions": [ + "code" + ] + }, + { + "login": "svenneve", + "name": "Sven Neve", + "avatar_url": "https://avatars.githubusercontent.com/u/2472863?v=4", + "profile": "http://www.svenneve.com/", + "contributions": [ + "code" + ] + }, + { + "login": "zafrs", + "name": "zafrs", + "avatar_url": "https://avatars.githubusercontent.com/u/26890002?v=4", + "profile": "https://github.com/zafrs", + "contributions": [ + "code" + ] + } + ], + "contributorsPerLine": 7 +} \ No newline at end of file diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index d9b4d8089c..bf39f8f956 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -69,18 +69,16 @@ jobs: run: | git config user.email ${{ secrets.CI_EMAIL }} git config user.name ${{ secrets.CI_USER }} - cd repos/avalon-core git checkout main git pull - cd ../.. git add . 
git commit -m "[Automated] Bump version" tag_name="CI/${{ steps.version.outputs.next_tag }}" echo $tag_name git tag -a $tag_name -m "nightly build" - + - name: Push to protected main branch - uses: CasperWA/push-protected@v2 + uses: CasperWA/push-protected@v2.10.0 with: token: ${{ secrets.ADMIN_TOKEN }} branch: main diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 917e6c884c..85864b4442 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -68,7 +68,7 @@ jobs: - name: 🔏 Push to protected main branch if: steps.version.outputs.release_tag != 'skip' - uses: CasperWA/push-protected@v2 + uses: CasperWA/push-protected@v2.10.0 with: token: ${{ secrets.ADMIN_TOKEN }} branch: main diff --git a/.gitignore b/.gitignore index f90549d0c0..e18c94a1f4 100644 --- a/.gitignore +++ b/.gitignore @@ -70,6 +70,8 @@ coverage.xml ################## node_modules package-lock.json +package.json +yarn.lock openpype/premiere/ppro/js/debug.log diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 9920ceaad6..0000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "repos/avalon-core"] - path = repos/avalon-core - url = https://github.com/pypeclub/avalon-core.git \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index f1e7d5d9e0..b8cec29df7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,141 +1,163 @@ # Changelog -## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07) +## [3.10.0-nightly.4](https://github.com/pypeclub/OpenPype/tree/HEAD) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.2...3.9.3) - -### 📖 Documentation - -- Website Docs: Manager Ftrack fix broken links [\#2979](https://github.com/pypeclub/OpenPype/pull/2979) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.8...HEAD) **🆕 New features** -- Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027) -- Publishing textures for Unreal 
[\#2988](https://github.com/pypeclub/OpenPype/pull/2988) -- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978) +- General: OpenPype modules publish plugins are registered in host [\#3180](https://github.com/pypeclub/OpenPype/pull/3180) +- General: Creator plugins from addons can be registered [\#3179](https://github.com/pypeclub/OpenPype/pull/3179) +- Ftrack: Single image reviewable [\#3157](https://github.com/pypeclub/OpenPype/pull/3157) +- Nuke: Expose write attributes to settings [\#3123](https://github.com/pypeclub/OpenPype/pull/3123) +- Hiero: Initial frame publish support [\#3106](https://github.com/pypeclub/OpenPype/pull/3106) **🚀 Enhancements** -- Ftrack: Add more options for note text of integrate ftrack note [\#3025](https://github.com/pypeclub/OpenPype/pull/3025) -- Console Interpreter: Changed how console splitter size are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016) -- Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015) -- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005) -- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001) -- Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995) -- Workfiles tool: Save as published workfiles [\#2937](https://github.com/pypeclub/OpenPype/pull/2937) +- Project manager: Sped up project load [\#3216](https://github.com/pypeclub/OpenPype/pull/3216) +- Maya: added clean\_import option to Import loader [\#3181](https://github.com/pypeclub/OpenPype/pull/3181) +- Maya: add maya 2023 to default applications [\#3167](https://github.com/pypeclub/OpenPype/pull/3167) +- General: Add 'dataclasses' to required python modules [\#3149](https://github.com/pypeclub/OpenPype/pull/3149) +- Hooks: Tweak logging grammar 
[\#3147](https://github.com/pypeclub/OpenPype/pull/3147) +- Nuke: settings for reformat node in CreateWriteRender node [\#3143](https://github.com/pypeclub/OpenPype/pull/3143) +- Publisher: UI Modifications and fixes [\#3139](https://github.com/pypeclub/OpenPype/pull/3139) +- General: Simplified OP modules/addons import [\#3137](https://github.com/pypeclub/OpenPype/pull/3137) +- Terminal: Tweak coloring of TrayModuleManager logging enabled states [\#3133](https://github.com/pypeclub/OpenPype/pull/3133) +- General: Cleanup some Loader docstrings [\#3131](https://github.com/pypeclub/OpenPype/pull/3131) +- Nuke: render instance with subset name filtered overrides [\#3117](https://github.com/pypeclub/OpenPype/pull/3117) +- Unreal: Layout and Camera update and remove functions reimplemented and improvements [\#3116](https://github.com/pypeclub/OpenPype/pull/3116) +- Settings: Remove environment groups from settings [\#3115](https://github.com/pypeclub/OpenPype/pull/3115) +- TVPaint: Match renderlayer key with other hosts [\#3110](https://github.com/pypeclub/OpenPype/pull/3110) +- Ftrack: AssetVersion status on publish [\#3108](https://github.com/pypeclub/OpenPype/pull/3108) +- Tray publisher: Simple families from settings [\#3105](https://github.com/pypeclub/OpenPype/pull/3105) **🐛 Bug fixes** -- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033) -- Settings UI: Version column can be extended so version are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032) -- General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028) -- Harmony: Added creating subset name for workfile from template [\#3024](https://github.com/pypeclub/OpenPype/pull/3024) -- AfterEffects: Added creating subset name for workfile from template [\#3023](https://github.com/pypeclub/OpenPype/pull/3023) -- General: Add example addons to ignored [\#3022](https://github.com/pypeclub/OpenPype/pull/3022) 
-- Maya: Remove missing import [\#3017](https://github.com/pypeclub/OpenPype/pull/3017) -- Ftrack: multiple reviewable componets [\#3012](https://github.com/pypeclub/OpenPype/pull/3012) -- Tray publisher: Fixes after code movement [\#3010](https://github.com/pypeclub/OpenPype/pull/3010) -- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002) -- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996) +- Deadline: instance data overwrite fix [\#3214](https://github.com/pypeclub/OpenPype/pull/3214) +- Ftrack: Push hierarchical attributes action works [\#3210](https://github.com/pypeclub/OpenPype/pull/3210) +- Standalone Publisher: Always create new representation for thumbnail [\#3203](https://github.com/pypeclub/OpenPype/pull/3203) +- Photoshop: skip collector when automatic testing [\#3202](https://github.com/pypeclub/OpenPype/pull/3202) +- Nuke: render/workfile version sync doesn't work on farm [\#3185](https://github.com/pypeclub/OpenPype/pull/3185) +- Ftrack: Review image only if there are no mp4 reviews [\#3183](https://github.com/pypeclub/OpenPype/pull/3183) +- Ftrack: Locations deepcopy issue [\#3177](https://github.com/pypeclub/OpenPype/pull/3177) +- General: Avoid creating multiple thumbnails [\#3176](https://github.com/pypeclub/OpenPype/pull/3176) +- General/Hiero: better clip duration calculation [\#3169](https://github.com/pypeclub/OpenPype/pull/3169) +- General: Oiio conversion for ffmpeg checks for invalid characters [\#3166](https://github.com/pypeclub/OpenPype/pull/3166) +- Fix for attaching render to subset [\#3164](https://github.com/pypeclub/OpenPype/pull/3164) +- Harmony: fixed missing task name in render instance [\#3163](https://github.com/pypeclub/OpenPype/pull/3163) +- Ftrack: Action delete old versions formatting works [\#3152](https://github.com/pypeclub/OpenPype/pull/3152) +- Deadline: fix the output directory 
[\#3144](https://github.com/pypeclub/OpenPype/pull/3144) +- General: New Session schema [\#3141](https://github.com/pypeclub/OpenPype/pull/3141) +- General: Missing version on headless mode crash properly [\#3136](https://github.com/pypeclub/OpenPype/pull/3136) +- TVPaint: Composite layers in reversed order [\#3135](https://github.com/pypeclub/OpenPype/pull/3135) +- Nuke: fixing default settings for workfile builder loaders [\#3120](https://github.com/pypeclub/OpenPype/pull/3120) +- Nuke: fix anatomy imageio regex default [\#3119](https://github.com/pypeclub/OpenPype/pull/3119) +- General: Python 3 compatibility in queries [\#3112](https://github.com/pypeclub/OpenPype/pull/3112) +- General: Collect loaded versions skips not existing representations [\#3095](https://github.com/pypeclub/OpenPype/pull/3095) + +**🔀 Refactored code** + +- General: Remove remaining imports from avalon [\#3130](https://github.com/pypeclub/OpenPype/pull/3130) **Merged pull requests:** -- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030) -- General: adding limitations for pyright [\#2994](https://github.com/pypeclub/OpenPype/pull/2994) +- Maya: added jpg to filter for Image Plane Loader [\#3223](https://github.com/pypeclub/OpenPype/pull/3223) +- Webpublisher: replace space by underscore in subset names [\#3160](https://github.com/pypeclub/OpenPype/pull/3160) +- StandalonePublisher: removed Extract Background plugins [\#3093](https://github.com/pypeclub/OpenPype/pull/3093) + +## [3.9.8](https://github.com/pypeclub/OpenPype/tree/3.9.8) (2022-05-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.7...3.9.8) + +**🚀 Enhancements** + +- nuke: generate publishing nodes inside render group node [\#3206](https://github.com/pypeclub/OpenPype/pull/3206) +- Backport of fix for attaching renders to subsets [\#3195](https://github.com/pypeclub/OpenPype/pull/3195) + +**🐛 Bug fixes** + +- Standalone Publisher: 
Always create new representation for thumbnail [\#3204](https://github.com/pypeclub/OpenPype/pull/3204) +- Nuke: render/workfile version sync doesn't work on farm [\#3184](https://github.com/pypeclub/OpenPype/pull/3184) +- Ftrack: Review image only if there are no mp4 reviews [\#3182](https://github.com/pypeclub/OpenPype/pull/3182) +- Ftrack: Locations deepcopy issue [\#3175](https://github.com/pypeclub/OpenPype/pull/3175) +- General: Avoid creating multiple thumbnails [\#3174](https://github.com/pypeclub/OpenPype/pull/3174) +- General: TemplateResult can be copied [\#3170](https://github.com/pypeclub/OpenPype/pull/3170) + +**Merged pull requests:** + +- hiero: otio p3 compatibility issue - metadata on effect use update [\#3194](https://github.com/pypeclub/OpenPype/pull/3194) + +## [3.9.7](https://github.com/pypeclub/OpenPype/tree/3.9.7) (2022-05-11) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.6...3.9.7) + +**🆕 New features** + +- Ftrack: Single image reviewable [\#3158](https://github.com/pypeclub/OpenPype/pull/3158) + +**🚀 Enhancements** + +- Deadline output dir issue to 3.9x [\#3155](https://github.com/pypeclub/OpenPype/pull/3155) +- Compressed bgeo publishing in SAP and Houdini loader [\#3153](https://github.com/pypeclub/OpenPype/pull/3153) +- nuke: removing redundant code from startup [\#3142](https://github.com/pypeclub/OpenPype/pull/3142) +- Houdini: Add loader for alembic through Alembic Archive node [\#3140](https://github.com/pypeclub/OpenPype/pull/3140) + +**🐛 Bug fixes** + +- Ftrack: Action delete old versions formatting works [\#3154](https://github.com/pypeclub/OpenPype/pull/3154) +- nuke: adding extract thumbnail settings [\#3148](https://github.com/pypeclub/OpenPype/pull/3148) + +**Merged pull requests:** + +- Webpublisher: replace space by underscore in subset names [\#3159](https://github.com/pypeclub/OpenPype/pull/3159) + +## [3.9.6](https://github.com/pypeclub/OpenPype/tree/3.9.6) (2022-05-03) + +[Full 
Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.5...3.9.6) + +**🆕 New features** + +- Nuke: render instance with subset name filtered overrides \(3.9.x\) [\#3125](https://github.com/pypeclub/OpenPype/pull/3125) + +**🚀 Enhancements** + +- TVPaint: Match renderlayer key with other hosts [\#3109](https://github.com/pypeclub/OpenPype/pull/3109) + +**🐛 Bug fixes** + +- TVPaint: Composite layers in reversed order [\#3134](https://github.com/pypeclub/OpenPype/pull/3134) +- General: Python 3 compatibility in queries [\#3111](https://github.com/pypeclub/OpenPype/pull/3111) + +**Merged pull requests:** + +- Ftrack: AssetVersion status on publish [\#3114](https://github.com/pypeclub/OpenPype/pull/3114) +- renderman support for 3.9.x [\#3107](https://github.com/pypeclub/OpenPype/pull/3107) + +## [3.9.5](https://github.com/pypeclub/OpenPype/tree/3.9.5) (2022-04-25) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.10.0-nightly.2...3.9.5) + +**🐛 Bug fixes** + +- Ftrack: Update Create Folders action [\#3092](https://github.com/pypeclub/OpenPype/pull/3092) +- General: Extract review sequence is not converted with same names [\#3075](https://github.com/pypeclub/OpenPype/pull/3075) + +## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.4-nightly.2...3.9.4) + +## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.3-nightly.2...3.9.3) ## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.2-nightly.4...3.9.2) -### 📖 Documentation - -- Documentation: Added mention of adding My Drive as a root [\#2999](https://github.com/pypeclub/OpenPype/pull/2999) -- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951) -- Documentation: New publisher develop docs 
[\#2896](https://github.com/pypeclub/OpenPype/pull/2896) - -**🆕 New features** - -- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992) -- Multiverse: Initial Support [\#2908](https://github.com/pypeclub/OpenPype/pull/2908) - -**🚀 Enhancements** - -- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000) -- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985) -- General: `METADATA\_KEYS` constant as `frozenset` for optimal immutable lookup [\#2980](https://github.com/pypeclub/OpenPype/pull/2980) -- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975) -- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967) -- Slack: Added configurable maximum file size of review upload to Slack [\#2945](https://github.com/pypeclub/OpenPype/pull/2945) -- NewPublisher: Prepared implementation of optional pyblish plugin [\#2943](https://github.com/pypeclub/OpenPype/pull/2943) -- TVPaint: Extractor to convert PNG into EXR [\#2942](https://github.com/pypeclub/OpenPype/pull/2942) -- Workfiles: Open published workfiles [\#2925](https://github.com/pypeclub/OpenPype/pull/2925) -- General: Default modules loaded dynamically [\#2923](https://github.com/pypeclub/OpenPype/pull/2923) -- Nuke: Add no-audio Tag [\#2911](https://github.com/pypeclub/OpenPype/pull/2911) -- Nuke: improving readability [\#2903](https://github.com/pypeclub/OpenPype/pull/2903) - -**🐛 Bug fixes** - -- Hosts: Remove path existence checks in 'add\_implementation\_envs' [\#3004](https://github.com/pypeclub/OpenPype/pull/3004) -- Fix - remove doubled dot in workfile created from template [\#2998](https://github.com/pypeclub/OpenPype/pull/2998) -- PS: fix renaming subset incorrectly in PS [\#2991](https://github.com/pypeclub/OpenPype/pull/2991) -- Fix: Disable setuptools auto discovery [\#2990](https://github.com/pypeclub/OpenPype/pull/2990) -- AEL: fix 
opening existing workfile if no scene opened [\#2989](https://github.com/pypeclub/OpenPype/pull/2989) -- Maya: Don't do hardlinks on windows for look publishing [\#2986](https://github.com/pypeclub/OpenPype/pull/2986) -- Settings UI: Fix version completer on linux [\#2981](https://github.com/pypeclub/OpenPype/pull/2981) -- Photoshop: Fix creation of subset names in PS review and workfile [\#2969](https://github.com/pypeclub/OpenPype/pull/2969) -- Slack: Added default for review\_upload\_limit for Slack [\#2965](https://github.com/pypeclub/OpenPype/pull/2965) -- General: OIIO conversion for ffmeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958) -- Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956) -- General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950) -- LogViewer: Don't refresh on initialization [\#2949](https://github.com/pypeclub/OpenPype/pull/2949) -- nuke: python3 compatibility issue with `iteritems` [\#2948](https://github.com/pypeclub/OpenPype/pull/2948) -- General: anatomy data with correct task short key [\#2947](https://github.com/pypeclub/OpenPype/pull/2947) -- SceneInventory: Fix imports in UI [\#2944](https://github.com/pypeclub/OpenPype/pull/2944) -- Slack: add generic exception [\#2941](https://github.com/pypeclub/OpenPype/pull/2941) -- General: Python specific vendor paths on env injection [\#2939](https://github.com/pypeclub/OpenPype/pull/2939) -- General: More fail safe delete old versions [\#2936](https://github.com/pypeclub/OpenPype/pull/2936) -- Settings UI: Collapsed of collapsible wrapper works as expected [\#2934](https://github.com/pypeclub/OpenPype/pull/2934) -- Maya: Do not pass `set` to maya commands \(fixes support for older maya versions\) [\#2932](https://github.com/pypeclub/OpenPype/pull/2932) -- General: Don't print log record on OSError [\#2926](https://github.com/pypeclub/OpenPype/pull/2926) -- Flame: centos 
related debugging [\#2922](https://github.com/pypeclub/OpenPype/pull/2922) - -**🔀 Refactored code** - -- General: Move plugins register and discover [\#2935](https://github.com/pypeclub/OpenPype/pull/2935) -- General: Move Attribute Definitions from pipeline [\#2931](https://github.com/pypeclub/OpenPype/pull/2931) -- General: Removed silo references and terminal splash [\#2927](https://github.com/pypeclub/OpenPype/pull/2927) -- General: Move pipeline constants to OpenPype [\#2918](https://github.com/pypeclub/OpenPype/pull/2918) -- General: Move remaining plugins from avalon [\#2912](https://github.com/pypeclub/OpenPype/pull/2912) - -**Merged pull requests:** - -- Bump paramiko from 2.9.2 to 2.10.1 [\#2973](https://github.com/pypeclub/OpenPype/pull/2973) -- Bump minimist from 1.2.5 to 1.2.6 in /website [\#2954](https://github.com/pypeclub/OpenPype/pull/2954) -- Bump node-forge from 1.2.1 to 1.3.0 in /website [\#2953](https://github.com/pypeclub/OpenPype/pull/2953) -- Maya - added transparency into review creator [\#2952](https://github.com/pypeclub/OpenPype/pull/2952) - ## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.1-nightly.3...3.9.1) -**🚀 Enhancements** - -- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907) -- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897) -- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892) - -**🐛 Bug fixes** - -- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904) -- Fixing gap detection in extract review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902) -- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899) -- SceneInventory: Fix import of load 
function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894) -- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891) - -**🔀 Refactored code** - -- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889) - ## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.0-nightly.9...3.9.0) diff --git a/README.md b/README.md index 0e450fc48d..b6966adbc4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,7 @@ + +[![All Contributors](https://img.shields.io/badge/all_contributors-26-orange.svg?style=flat-square)](#contributors-) + OpenPype ==== @@ -283,3 +286,54 @@ Running tests To run tests, execute `.\tools\run_tests(.ps1|.sh)`. **Note that it needs existing virtual environment.** + +## Contributors ✨ + +Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Milan Kolar

💻 📖 🚇 💼 🖋 🔍 🚧 📆 👀 🧑‍🏫 💬

Jakub Ježek

💻 📖 🚇 🖋 👀 🚧 🧑‍🏫 📆 💬

Ondřej Samohel

💻 📖 🚇 🖋 👀 🚧 🧑‍🏫 📆 💬

Jakub Trllo

💻 📖 🚇 👀 🚧 💬

Petr Kalis

💻 📖 🚇 👀 🚧 💬

64qam

💻 👀 📖 🚇 📆 🚧 🖋 📓

Roy Nieterau

💻 📖 👀 🧑‍🏫 💬

Toke Jepsen

💻 📖 👀 🧑‍🏫 💬

Jiri Sindelar

💻 👀 📖 🖋 📓

Simone Barbieri

💻 📖

karimmozilla

💻

Allan I. A.

💻

murphy

💻 👀 📓 📖 📆

Wijnand Koreman

💻

Bo Zhou

💻

Clément Hector

💻 👀

David Lai

💻 👀

Derek

💻 📖

Gábor Marinov

💻 📖

icyvapor

💻 📖

Jérôme LORRAIN

💻

David Morris-Oliveros

💻

BenoitConnan

💻

Malthaldar

💻

Sven Neve

💻

zafrs

💻
+ + + + + + +This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! \ No newline at end of file diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index ad49f868d5..08333885c0 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -627,8 +627,6 @@ class BootstrapRepos: Attributes: data_dir (Path): local OpenPype installation directory. - live_repo_dir (Path): path to repos directory if running live, - otherwise `None`. registry (OpenPypeSettingsRegistry): OpenPype registry object. zip_filter (list): List of files to exclude from zip openpype_filter (list): list of top level directories to @@ -654,7 +652,7 @@ class BootstrapRepos: self.registry = OpenPypeSettingsRegistry() self.zip_filter = [".pyc", "__pycache__"] self.openpype_filter = [ - "openpype", "repos", "schema", "LICENSE" + "openpype", "schema", "LICENSE" ] self._message = message @@ -667,11 +665,6 @@ class BootstrapRepos: progress_callback = empty_progress self._progress_callback = progress_callback - if getattr(sys, "frozen", False): - self.live_repo_dir = Path(sys.executable).parent / "repos" - else: - self.live_repo_dir = Path(Path(__file__).parent / ".." / "repos") - @staticmethod def get_version_path_from_list( version: str, version_list: list) -> Union[Path, None]: @@ -736,11 +729,12 @@ class BootstrapRepos: # if repo dir is not set, we detect local "live" OpenPype repository # version and use it as a source. Otherwise repo_dir is user # entered location. 
- if not repo_dir: - version = OpenPypeVersion.get_installed_version_str() - repo_dir = self.live_repo_dir - else: + if repo_dir: version = self.get_version(repo_dir) + else: + installed_version = OpenPypeVersion.get_installed_version() + version = str(installed_version) + repo_dir = installed_version.path if not version: self._print("OpenPype not found.", LOG_ERROR) @@ -756,7 +750,7 @@ class BootstrapRepos: Path(temp_dir) / f"openpype-v{version}.zip" self._print(f"creating zip: {temp_zip}") - self._create_openpype_zip(temp_zip, repo_dir.parent) + self._create_openpype_zip(temp_zip, repo_dir) if not os.path.exists(temp_zip): self._print("make archive failed.", LOG_ERROR) return None @@ -1057,27 +1051,11 @@ class BootstrapRepos: if not archive.is_file() and not archive.exists(): raise ValueError("Archive is not file.") - with ZipFile(archive, "r") as zip_file: - name_list = zip_file.namelist() - - roots = [] - paths = [] - for item in name_list: - if not item.startswith("repos/"): - continue - - root = item.split("/")[1] - - if root not in roots: - roots.append(root) - paths.append( - f"{archive}{os.path.sep}repos{os.path.sep}{root}") - sys.path.insert(0, paths[-1]) - - sys.path.insert(0, f"{archive}") + archive_path = str(archive) + sys.path.insert(0, archive_path) pythonpath = os.getenv("PYTHONPATH", "") python_paths = pythonpath.split(os.pathsep) - python_paths += paths + python_paths.insert(0, archive_path) os.environ["PYTHONPATH"] = os.pathsep.join(python_paths) @@ -1094,24 +1072,8 @@ class BootstrapRepos: directory (Path): path to directory. 
""" + sys.path.insert(0, directory.as_posix()) - directory /= "repos" - if not directory.exists() and not directory.is_dir(): - raise ValueError("directory is invalid") - - roots = [] - for item in directory.iterdir(): - if item.is_dir(): - root = item.as_posix() - if root not in roots: - roots.append(root) - sys.path.insert(0, root) - - pythonpath = os.getenv("PYTHONPATH", "") - paths = pythonpath.split(os.pathsep) - paths += roots - - os.environ["PYTHONPATH"] = os.pathsep.join(paths) @staticmethod def find_openpype_version(version, staging): @@ -1437,6 +1399,7 @@ class BootstrapRepos: # create destination parent directories even if they don't exist. destination.mkdir(parents=True) + remove_source_file = False # version is directory if openpype_version.path.is_dir(): # create zip inside temporary directory. @@ -1470,6 +1433,8 @@ class BootstrapRepos: self._progress_callback(35) openpype_version.path = self._copy_zip( openpype_version.path, destination) + # Mark zip to be deleted when done + remove_source_file = True # extract zip there self._print("extracting zip to destination ...") @@ -1478,6 +1443,10 @@ class BootstrapRepos: zip_ref.extractall(destination) self._progress_callback(100) + # Remove zip file copied to local app data + if remove_source_file: + os.remove(openpype_version.path) + return destination def _copy_zip(self, source: Path, destination: Path) -> Path: diff --git a/openpype/__init__.py b/openpype/__init__.py index 7fc7e63e61..810664707a 100644 --- a/openpype/__init__.py +++ b/openpype/__init__.py @@ -1,102 +1,5 @@ -# -*- coding: utf-8 -*- -"""Pype module.""" import os -import platform -import logging - -from .settings import get_project_settings -from .lib import ( - Anatomy, - filter_pyblish_plugins, - change_timer_to_current_context, - register_event_callback, -) - -log = logging.getLogger(__name__) PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__)) PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") - -# Global plugin paths 
-PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") - - -def install(): - """Install OpenPype to Avalon.""" - import avalon.api - import pyblish.api - from pyblish.lib import MessageHandler - from openpype.modules import load_modules - from openpype.pipeline import ( - register_loader_plugin_path, - register_inventory_action, - register_creator_plugin_path, - ) - - # Make sure modules are loaded - load_modules() - - def modified_emit(obj, record): - """Method replacing `emit` in Pyblish's MessageHandler.""" - record.msg = record.getMessage() - obj.records.append(record) - - MessageHandler.emit = modified_emit - - log.info("Registering global plug-ins..") - pyblish.api.register_plugin_path(PUBLISH_PATH) - pyblish.api.register_discovery_filter(filter_pyblish_plugins) - register_loader_plugin_path(LOAD_PATH) - - project_name = os.environ.get("AVALON_PROJECT") - - # Register studio specific plugins - if project_name: - anatomy = Anatomy(project_name) - anatomy.set_root_environments() - avalon.api.register_root(anatomy.roots) - - project_settings = get_project_settings(project_name) - platform_name = platform.system().lower() - project_plugins = ( - project_settings - .get("global", {}) - .get("project_plugins", {}) - .get(platform_name) - ) or [] - for path in project_plugins: - try: - path = str(path.format(**os.environ)) - except KeyError: - pass - - if not path or not os.path.exists(path): - continue - - pyblish.api.register_plugin_path(path) - register_loader_plugin_path(path) - register_creator_plugin_path(path) - register_inventory_action(path) - - # apply monkey patched discover to original one - log.info("Patching discovery") - - register_event_callback("taskChanged", _on_task_change) - - -def _on_task_change(): - change_timer_to_current_context() - - -def uninstall(): - """Uninstall Pype from Avalon.""" - import pyblish.api - from openpype.pipeline import deregister_loader_plugin_path - - log.info("Deregistering 
global plug-ins..") - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - pyblish.api.deregister_discovery_filter(filter_pyblish_plugins) - deregister_loader_plugin_path(LOAD_PATH) - log.info("Global plug-ins unregistred") diff --git a/openpype/api.py b/openpype/api.py index b692b36065..9ce745b653 100644 --- a/openpype/api.py +++ b/openpype/api.py @@ -3,7 +3,6 @@ from .settings import ( get_project_settings, get_current_project_settings, get_anatomy_settings, - get_environments, SystemSettings, ProjectSettings @@ -23,7 +22,6 @@ from .lib import ( get_app_environments_for_context, source_hash, get_latest_version, - get_global_environments, get_local_site_id, change_openpype_mongo_url, create_project_folders, @@ -69,10 +67,10 @@ __all__ = [ "get_project_settings", "get_current_project_settings", "get_anatomy_settings", - "get_environments", "get_project_basic_paths", "SystemSettings", + "ProjectSettings", "PypeLogger", "Logger", @@ -102,8 +100,9 @@ __all__ = [ # get contextual data "version_up", - "get_hierarchy", "get_asset", + "get_hierarchy", + "get_workdir_data", "get_version_from_path", "get_last_version_from_path", "get_app_environments_for_context", @@ -111,7 +110,6 @@ __all__ = [ "run_subprocess", "get_latest_version", - "get_global_environments", "get_local_site_id", "change_openpype_mongo_url", diff --git a/openpype/cli.py b/openpype/cli.py index cbeb7fef9b..2aa4a46929 100644 --- a/openpype/cli.py +++ b/openpype/cli.py @@ -20,6 +20,10 @@ from .pype_commands import PypeCommands "to list staging versions.")) @click.option("--validate-version", expose_value=False, help="validate given version integrity") +@click.option("--debug", is_flag=True, expose_value=False, + help=("Enable debug")) +@click.option("--verbose", expose_value=False, + help=("Change OpenPype log level (debug - critical or 0-50)")) def main(ctx): """Pype is main command serving as entry point to pipeline system. 
@@ -49,18 +53,13 @@ def traypublisher(): @main.command() -@click.option("-d", "--debug", - is_flag=True, help=("Run pype tray in debug mode")) -def tray(debug=False): +def tray(): """Launch pype tray. Default action of pype command is to launch tray widget to control basic aspects of pype. See documentation for more information. - - Running pype with `--debug` will result in lot of information useful for - debugging to be shown in console. """ - PypeCommands().launch_tray(debug) + PypeCommands().launch_tray() @PypeCommands.add_modules @@ -75,7 +74,6 @@ def module(ctx): @main.command() -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("--ftrack-url", envvar="FTRACK_SERVER", help="Ftrack server url") @click.option("--ftrack-user", envvar="FTRACK_API_USER", @@ -88,8 +86,7 @@ def module(ctx): help="Clockify API key.") @click.option("--clockify-workspace", envvar="CLOCKIFY_WORKSPACE", help="Clockify workspace") -def eventserver(debug, - ftrack_url, +def eventserver(ftrack_url, ftrack_user, ftrack_api_key, legacy, @@ -100,8 +97,6 @@ def eventserver(debug, This should be ideally used by system service (such us systemd or upstart on linux and window service). """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().launch_eventservercli( ftrack_url, @@ -114,12 +109,11 @@ def eventserver(debug, @main.command() -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-h", "--host", help="Host", default=None) @click.option("-p", "--port", help="Port", default=None) @click.option("-e", "--executable", help="Executable") @click.option("-u", "--upload_dir", help="Upload dir") -def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None): +def webpublisherwebserver(executable, upload_dir, host=None, port=None): """Starts webserver for communication with Webpublish FR via command line OP must be congigured on a machine, eg. 
OPENPYPE_MONGO filled AND @@ -127,8 +121,6 @@ def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None): Expect "pype.club" user created on Ftrack. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().launch_webpublisher_webservercli( upload_dir=upload_dir, @@ -164,38 +156,34 @@ def extractenvironments(output_json_path, project, asset, task, app, envgroup): @main.command() @click.argument("paths", nargs=-1) -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-t", "--targets", help="Targets module", default=None, multiple=True) @click.option("-g", "--gui", is_flag=True, help="Show Publish UI", default=False) -def publish(debug, paths, targets, gui): +def publish(paths, targets, gui): """Start CLI publishing. Publish collects json from paths provided as an argument. More than one path is allowed. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands.publish(list(paths), targets, gui) @main.command() @click.argument("path") -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-h", "--host", help="Host") @click.option("-u", "--user", help="User email address") @click.option("-p", "--project", help="Project") @click.option("-t", "--targets", help="Targets", default=None, multiple=True) -def remotepublishfromapp(debug, project, path, host, user=None, targets=None): +def remotepublishfromapp(project, path, host, user=None, targets=None): """Start CLI publishing. Publish collects json from paths provided as an argument. More than one path is allowed. 
""" - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands.remotepublishfromapp( project, path, host, user, targets=targets ) @@ -203,24 +191,21 @@ def remotepublishfromapp(debug, project, path, host, user=None, targets=None): @main.command() @click.argument("path") -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-u", "--user", help="User email address") @click.option("-p", "--project", help="Project") @click.option("-t", "--targets", help="Targets", default=None, multiple=True) -def remotepublish(debug, project, path, user=None, targets=None): +def remotepublish(project, path, user=None, targets=None): """Start CLI publishing. Publish collects json from paths provided as an argument. More than one path is allowed. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands.remotepublish(project, path, user, targets=targets) @main.command() -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-p", "--project", required=True, help="name of project asset is under") @click.option("-a", "--asset", required=True, @@ -228,7 +213,7 @@ def remotepublish(debug, project, path, user=None, targets=None): @click.option("--path", required=True, help="path where textures are found", type=click.Path(exists=True)) -def texturecopy(debug, project, asset, path): +def texturecopy(project, asset, path): """Copy specified textures to provided asset path. It validates if project and asset exists. Then it will use speedcopy to @@ -239,8 +224,7 @@ def texturecopy(debug, project, asset, path): Result will be copied without directory structure so it will be flat then. Nothing is written to database. 
""" - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands().texture_copy(project, asset, path) @@ -389,11 +373,9 @@ def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant, @main.command() -@click.option("-d", "--debug", - is_flag=True, help=("Run process in debug mode")) @click.option("-a", "--active_site", required=True, help="Name of active stie") -def syncserver(debug, active_site): +def syncserver(active_site): """Run sync site server in background. Some Site Sync use cases need to expose site to another one. @@ -408,8 +390,7 @@ def syncserver(debug, active_site): Settings (configured by starting OP Tray with env var OPENPYPE_LOCAL_ID set to 'active_site'. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands().syncserver(active_site) diff --git a/openpype/hooks/pre_global_host_data.py b/openpype/hooks/pre_global_host_data.py index 4c85a511ed..ea5e290d6f 100644 --- a/openpype/hooks/pre_global_host_data.py +++ b/openpype/hooks/pre_global_host_data.py @@ -5,8 +5,7 @@ from openpype.lib import ( prepare_app_environments, prepare_context_environments ) - -import avalon.api +from openpype.pipeline import AvalonMongoDB class GlobalHostDataHook(PreLaunchHook): @@ -64,7 +63,7 @@ class GlobalHostDataHook(PreLaunchHook): self.data["anatomy"] = Anatomy(project_name) # Mongo connection - dbcon = avalon.api.AvalonMongoDB() + dbcon = AvalonMongoDB() dbcon.Session["AVALON_PROJECT"] = project_name dbcon.install() diff --git a/openpype/hosts/aftereffects/api/__init__.py b/openpype/hosts/aftereffects/api/__init__.py index cea1bdc023..2ad1255d27 100644 --- a/openpype/hosts/aftereffects/api/__init__.py +++ b/openpype/hosts/aftereffects/api/__init__.py @@ -16,7 +16,10 @@ from .pipeline import ( uninstall, list_instances, remove_instance, - containerise + containerise, + get_context_data, + update_context_data, + get_context_title ) from .workio import ( @@ -51,6 +54,9 @@ __all__ = [ "list_instances", "remove_instance", "containerise", + 
"get_context_data", + "update_context_data", + "get_context_title", "file_extensions", "has_unsaved_changes", diff --git a/openpype/hosts/aftereffects/api/extension.zxp b/openpype/hosts/aftereffects/api/extension.zxp index 389d74505d..0ed799991e 100644 Binary files a/openpype/hosts/aftereffects/api/extension.zxp and b/openpype/hosts/aftereffects/api/extension.zxp differ diff --git a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml index 668cb3fc24..a39f5781bb 100644 --- a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml +++ b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml @@ -1,5 +1,5 @@ - diff --git a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx index 8f82c9709d..91df433908 100644 --- a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx +++ b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx @@ -417,7 +417,9 @@ function getRenderInfo(){ var file_url = item.file.toString(); return JSON.stringify({ - "file_name": file_url + "file_name": file_url, + "width": render_item.comp.width, + "height": render_item.comp.height }) } diff --git a/openpype/hosts/aftereffects/api/launch_logic.py b/openpype/hosts/aftereffects/api/launch_logic.py index c549268978..30a3e1f1c3 100644 --- a/openpype/hosts/aftereffects/api/launch_logic.py +++ b/openpype/hosts/aftereffects/api/launch_logic.py @@ -12,9 +12,8 @@ from wsrpc_aiohttp import ( from Qt import QtCore +from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools - -from avalon import api from openpype.tools.adobe_webserver.app import WebServerTool from .ws_stub import AfterEffectsServerStub @@ -271,13 +270,13 @@ class AfterEffectsRoute(WebSocketRoute): log.info("Setting context change") log.info("project {} asset {} ".format(project, asset)) if project: - api.Session["AVALON_PROJECT"] = project + 
legacy_io.Session["AVALON_PROJECT"] = project os.environ["AVALON_PROJECT"] = project if asset: - api.Session["AVALON_ASSET"] = asset + legacy_io.Session["AVALON_ASSET"] = asset os.environ["AVALON_ASSET"] = asset if task: - api.Session["AVALON_TASK"] = task + legacy_io.Session["AVALON_TASK"] = task os.environ["AVALON_TASK"] = task async def read(self): diff --git a/openpype/hosts/aftereffects/api/lib.py b/openpype/hosts/aftereffects/api/lib.py index dac6b5d28f..ce4cbf09af 100644 --- a/openpype/hosts/aftereffects/api/lib.py +++ b/openpype/hosts/aftereffects/api/lib.py @@ -6,6 +6,7 @@ import logging from Qt import QtWidgets +from openpype.pipeline import install_host from openpype.lib.remote_publish import headless_publish from openpype.tools.utils import host_tools @@ -22,10 +23,9 @@ def safe_excepthook(*args): def main(*subprocess_args): sys.excepthook = safe_excepthook - import avalon.api from openpype.hosts.aftereffects import api - avalon.api.install(api) + install_host(api) os.environ["OPENPYPE_LOG_NO_COLORS"] = "False" app = QtWidgets.QApplication([]) diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py index 94bc369856..a428a1470d 100644 --- a/openpype/hosts/aftereffects/api/pipeline.py +++ b/openpype/hosts/aftereffects/api/pipeline.py @@ -2,10 +2,8 @@ import os import sys from Qt import QtWidgets -from bson.objectid import ObjectId import pyblish.api -from avalon import io from openpype import lib from openpype.api import Logger @@ -15,6 +13,7 @@ from openpype.pipeline import ( deregister_loader_plugin_path, deregister_creator_plugin_path, AVALON_CONTAINER_ID, + legacy_io, ) import openpype.hosts.aftereffects from openpype.lib import register_event_callback @@ -33,39 +32,6 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -def check_inventory(): - if not lib.any_outdated(): - return - - host = pyblish.api.registered_host() - outdated_containers = [] - for 
container in host.ls(): - representation = container['representation'] - representation_doc = io.find_one( - { - "_id": ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} - ) - if representation_doc and not lib.is_latest(representation_doc): - outdated_containers.append(container) - - # Warn about outdated containers. - print("Starting new QApplication..") - app = QtWidgets.QApplication(sys.argv) - - message_box = QtWidgets.QMessageBox() - message_box.setIcon(QtWidgets.QMessageBox.Warning) - msg = "There are outdated containers in the scene." - message_box.setText(msg) - message_box.exec_() - - -def application_launch(): - check_inventory() - - def install(): print("Installing Pype config...") @@ -89,6 +55,11 @@ def uninstall(): deregister_creator_plugin_path(CREATE_PATH) +def application_launch(): + """Triggered after start of app""" + check_inventory() + + def on_pyblish_instance_toggled(instance, old_value, new_value): """Toggle layer visibility on instance toggles.""" instance[0].Visible = new_value @@ -123,65 +94,6 @@ def get_asset_settings(): } -def containerise(name, - namespace, - comp, - context, - loader=None, - suffix="_CON"): - """ - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Creates dictionary payloads that gets saved into file metadata. Each - container contains of who loaded (loader) and members (single or multiple - in case of background). - - Arguments: - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - comp (Comp): Composition to containerise - context (dict): Asset information - loader (str, optional): Name of loader used to produce this container. - suffix (str, optional): Suffix of container, defaults to `_CON`. 
- - Returns: - container (str): Name of container assembly - """ - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace, - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - "members": comp.members or [comp.id] - } - - stub = get_stub() - stub.imprint(comp, data) - - return comp - - -def _get_stub(): - """ - Handle pulling stub from PS to run operations on host - Returns: - (AEServerStub) or None - """ - try: - stub = get_stub() # only after Photoshop is up - except lib.ConnectionNotEstablishedYet: - print("Not connected yet, ignoring") - return - - if not stub.get_active_document_name(): - return - - return stub - - def ls(): """Yields containers from active AfterEffects document. @@ -222,6 +134,66 @@ def ls(): yield data +def check_inventory(): + """Checks loaded containers if they are of highest version""" + if not lib.any_outdated(): + return + + # Warn about outdated containers. + _app = QtWidgets.QApplication.instance() + if not _app: + print("Starting new QApplication..") + _app = QtWidgets.QApplication([]) + + message_box = QtWidgets.QMessageBox() + message_box.setIcon(QtWidgets.QMessageBox.Warning) + msg = "There are outdated containers in the scene." + message_box.setText(msg) + message_box.exec_() + + +def containerise(name, + namespace, + comp, + context, + loader=None, + suffix="_CON"): + """ + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Creates dictionary payloads that gets saved into file metadata. Each + container contains of who loaded (loader) and members (single or multiple + in case of background). + + Arguments: + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + comp (AEItem): Composition to containerise + context (dict): Asset information + loader (str, optional): Name of loader used to produce this container. 
+ suffix (str, optional): Suffix of container, defaults to `_CON`. + + Returns: + container (str): Name of container assembly + """ + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace, + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + "members": comp.members or [comp.id] + } + + stub = get_stub() + stub.imprint(comp.id, data) + + return comp + + +# created instances section def list_instances(): """ List all created instances from current workfile which @@ -242,16 +214,8 @@ def list_instances(): layers_meta = stub.get_metadata() for instance in layers_meta: - if instance.get("schema") and \ - "container" in instance.get("schema"): - continue - - uuid_val = instance.get("uuid") - if uuid_val: - instance['uuid'] = uuid_val - else: - instance['uuid'] = instance.get("members")[0] # legacy - instances.append(instance) + if instance.get("id") == "pyblish.avalon.instance": + instances.append(instance) return instances @@ -272,8 +236,59 @@ def remove_instance(instance): if not stub: return - stub.remove_instance(instance.get("uuid")) - item = stub.get_item(instance.get("uuid")) - if item: - stub.rename_item(item.id, - item.name.replace(stub.PUBLISH_ICON, '')) + inst_id = instance.get("instance_id") or instance.get("uuid") # legacy + if not inst_id: + log.warning("No instance identifier for {}".format(instance)) + return + + stub.remove_instance(inst_id) + + if instance.get("members"): + item = stub.get_item(instance["members"][0]) + if item: + stub.rename_item(item.id, + item.name.replace(stub.PUBLISH_ICON, '')) + + +# new publisher section +def get_context_data(): + meta = _get_stub().get_metadata() + for item in meta: + if item.get("id") == "publish_context": + item.pop("id") + return item + + return {} + + +def update_context_data(data, changes): + item = data + item["id"] = "publish_context" + _get_stub().imprint(item["id"], item) + + +def get_context_title(): + 
"""Returns title for Creator window""" + + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + return "{}/{}/{}".format(project_name, asset_name, task_name) + + +def _get_stub(): + """ + Handle pulling stub from PS to run operations on host + Returns: + (AEServerStub) or None + """ + try: + stub = get_stub() # only after Photoshop is up + except lib.ConnectionNotEstablishedYet: + print("Not connected yet, ignoring") + return + + if not stub.get_active_document_name(): + return + + return stub diff --git a/openpype/hosts/aftereffects/api/workio.py b/openpype/hosts/aftereffects/api/workio.py index 70815bda6b..d6c732285a 100644 --- a/openpype/hosts/aftereffects/api/workio.py +++ b/openpype/hosts/aftereffects/api/workio.py @@ -51,4 +51,4 @@ def _active_document(): print("Nothing opened") pass - return document_name \ No newline at end of file + return document_name diff --git a/openpype/hosts/aftereffects/api/ws_stub.py b/openpype/hosts/aftereffects/api/ws_stub.py index b0893310c1..8719a8f46e 100644 --- a/openpype/hosts/aftereffects/api/ws_stub.py +++ b/openpype/hosts/aftereffects/api/ws_stub.py @@ -28,6 +28,9 @@ class AEItem(object): workAreaDuration = attr.ib(default=None) frameRate = attr.ib(default=None) file_name = attr.ib(default=None) + instance_id = attr.ib(default=None) # New Publisher + width = attr.ib(default=None) + height = attr.ib(default=None) class AfterEffectsServerStub(): @@ -110,11 +113,11 @@ class AfterEffectsServerStub(): self.log.debug("Couldn't find layer metadata") - def imprint(self, item, data, all_items=None, items_meta=None): + def imprint(self, item_id, data, all_items=None, items_meta=None): """ Save item metadata to Label field of metadata of active document Args: - item (AEItem): + item_id (int|str): id of FootageItem or instance_id for workfiles data(string): json representation for single layer all_items (list of item): for performance, 
could be injected for usage in loop, if not, single call will be @@ -132,8 +135,9 @@ class AfterEffectsServerStub(): is_new = True for item_meta in items_meta: - if item_meta.get('members') \ - and str(item.id) == str(item_meta.get('members')[0]): + if ((item_meta.get('members') and + str(item_id) == str(item_meta.get('members')[0])) or + item_meta.get("instance_id") == item_id): is_new = False if data: item_meta.update(data) @@ -153,10 +157,12 @@ class AfterEffectsServerStub(): item_ids = [int(item.id) for item in all_items] cleaned_data = [] for meta in result_meta: - # for creation of instance OR loaded container - if 'instance' in meta.get('id') or \ - int(meta.get('members')[0]) in item_ids: - cleaned_data.append(meta) + # do not added instance with nonexistend item id + if meta.get("members"): + if int(meta["members"][0]) not in item_ids: + continue + + cleaned_data.append(meta) payload = json.dumps(cleaned_data, indent=4) @@ -167,7 +173,7 @@ class AfterEffectsServerStub(): def get_active_document_full_name(self): """ - Returns just a name of active document via ws call + Returns absolute path of active document via ws call Returns(string): file name """ res = self.websocketserver.call(self.client.call( @@ -314,15 +320,13 @@ class AfterEffectsServerStub(): Keep matching item in file though. 
Args: - instance_id(string): instance uuid + instance_id(string): instance id """ cleaned_data = [] for instance in self.get_metadata(): - uuid_val = instance.get("uuid") - if not uuid_val: - uuid_val = instance.get("members")[0] # legacy - if uuid_val != instance_id: + inst_id = instance.get("instance_id") or instance.get("uuid") + if inst_id != instance_id: cleaned_data.append(instance) payload = json.dumps(cleaned_data, indent=4) @@ -357,7 +361,7 @@ class AfterEffectsServerStub(): item_id (int): Returns: - (namedtuple) + (AEItem) """ res = self.websocketserver.call(self.client.call @@ -418,7 +422,7 @@ class AfterEffectsServerStub(): """ Get render queue info for render purposes Returns: - (namedtuple): with 'file_name' field + (AEItem): with 'file_name' field """ res = self.websocketserver.call(self.client.call ('AfterEffects.get_render_info')) @@ -606,7 +610,10 @@ class AfterEffectsServerStub(): d.get('workAreaStart'), d.get('workAreaDuration'), d.get('frameRate'), - d.get('file_name')) + d.get('file_name'), + d.get("instance_id"), + d.get("width"), + d.get("height")) ret.append(item) return ret diff --git a/openpype/hosts/aftereffects/plugins/create/create_local_render.py b/openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py similarity index 64% rename from openpype/hosts/aftereffects/plugins/create/create_local_render.py rename to openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py index 9d2cdcd7be..04413acbcf 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_local_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py @@ -1,7 +1,7 @@ -from openpype.hosts.aftereffects.plugins.create import create_render +from openpype.hosts.aftereffects.plugins.create import create_legacy_render -class CreateLocalRender(create_render.CreateRender): +class CreateLocalRender(create_legacy_render.CreateRender): """ Creator to render locally. Created only after default render on farm. 
So family 'render.local' is diff --git a/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py b/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py new file mode 100644 index 0000000000..e4fbb47a33 --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py @@ -0,0 +1,62 @@ +from openpype.pipeline import create +from openpype.pipeline import CreatorError +from openpype.hosts.aftereffects.api import ( + get_stub, + list_instances +) + + +class CreateRender(create.LegacyCreator): + """Render folder for publish. + + Creates subsets in format 'familyTaskSubsetname', + eg 'renderCompositingMain'. + + Create only single instance from composition at a time. + """ + + name = "renderDefault" + label = "Render on Farm" + family = "render" + defaults = ["Main"] + + def process(self): + stub = get_stub() # only after After Effects is up + items = [] + if (self.options or {}).get("useSelection"): + items = stub.get_selected_items( + comps=True, folders=False, footages=False + ) + if len(items) > 1: + raise CreatorError( + "Please select only single composition at time." + ) + + if not items: + raise CreatorError(( + "Nothing to create. Select composition " + "if 'useSelection' or create at least " + "one composition." 
+ )) + + existing_subsets = [ + instance['subset'].lower() + for instance in list_instances() + ] + + item = items.pop() + if self.name.lower() in existing_subsets: + txt = "Instance with name \"{}\" already exists.".format(self.name) + raise CreatorError(txt) + + self.data["members"] = [item.id] + self.data["uuid"] = item.id # for SubsetManager + self.data["subset"] = ( + self.data["subset"] + .replace(stub.PUBLISH_ICON, '') + .replace(stub.LOADED_ICON, '') + ) + + stub.imprint(item, self.data) + stub.set_label_color(item.id, 14) # Cyan options 0 - 16 + stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"]) diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index 831085a5f1..215c148f37 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -1,38 +1,70 @@ +from openpype import resources +from openpype.lib import BoolDef, UISeparatorDef +from openpype.hosts.aftereffects import api from openpype.pipeline import ( + Creator, + CreatedInstance, CreatorError, - LegacyCreator -) -from openpype.hosts.aftereffects.api import ( - get_stub, - list_instances + legacy_io, ) -class CreateRender(LegacyCreator): - """Render folder for publish. - - Creates subsets in format 'familyTaskSubsetname', - eg 'renderCompositingMain'. - - Create only single instance from composition at a time. 
- """ - - name = "renderDefault" - label = "Render on Farm" +class RenderCreator(Creator): + identifier = "render" + label = "Render" family = "render" - defaults = ["Main"] + description = "Render creator" - def process(self): - stub = get_stub() # only after After Effects is up - if (self.options or {}).get("useSelection"): + create_allow_context_change = True + + def __init__( + self, create_context, system_settings, project_settings, headless=False + ): + super(RenderCreator, self).__init__(create_context, system_settings, + project_settings, headless) + self._default_variants = (project_settings["aftereffects"] + ["create"] + ["RenderCreator"] + ["defaults"]) + + def get_icon(self): + return resources.get_openpype_splash_filepath() + + def collect_instances(self): + for instance_data in api.list_instances(): + # legacy instances have family=='render' or 'renderLocal', use them + creator_id = (instance_data.get("creator_identifier") or + instance_data.get("family", '').replace("Local", '')) + if creator_id == self.identifier: + instance_data = self._handle_legacy(instance_data) + instance = CreatedInstance.from_existing( + instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + api.get_stub().imprint(created_inst.get("instance_id"), + created_inst.data_to_store()) + + def remove_instances(self, instances): + for instance in instances: + api.remove_instance(instance) + self._remove_instance_from_context(instance) + + def create(self, subset_name, data, pre_create_data): + stub = api.get_stub() # only after After Effects is up + if pre_create_data.get("use_selection"): items = stub.get_selected_items( comps=True, folders=False, footages=False ) + else: + items = stub.get_items(comps=True, folders=False, footages=False) + if len(items) > 1: raise CreatorError( "Please select only single composition at time." 
) - if not items: raise CreatorError(( "Nothing to create. Select composition " @@ -40,24 +72,54 @@ class CreateRender(LegacyCreator): "one composition." )) - existing_subsets = [ - instance['subset'].lower() - for instance in list_instances() + for inst in self.create_context.instances: + if subset_name == inst.subset_name: + raise CreatorError("{} already exists".format( + inst.subset_name)) + + data["members"] = [items[0].id] + new_instance = CreatedInstance(self.family, subset_name, data, self) + if "farm" in pre_create_data: + use_farm = pre_create_data["farm"] + new_instance.creator_attributes["farm"] = use_farm + + api.get_stub().imprint(new_instance.id, + new_instance.data_to_store()) + self._add_instance_to_context(new_instance) + + def get_default_variants(self): + return self._default_variants + + def get_instance_attr_defs(self): + return [BoolDef("farm", label="Render on farm")] + + def get_pre_create_attr_defs(self): + output = [ + BoolDef("use_selection", default=True, label="Use selection"), + UISeparatorDef(), + BoolDef("farm", label="Render on farm") ] + return output - item = items.pop() - if self.name.lower() in existing_subsets: - txt = "Instance with name \"{}\" already exists.".format(self.name) - raise CreatorError(txt) + def get_detail_description(self): + return """Creator for Render instances""" - self.data["members"] = [item.id] - self.data["uuid"] = item.id # for SubsetManager - self.data["subset"] = ( - self.data["subset"] - .replace(stub.PUBLISH_ICON, '') - .replace(stub.LOADED_ICON, '') - ) + def _handle_legacy(self, instance_data): + """Converts old instances to new format.""" + if not instance_data.get("members"): + instance_data["members"] = [instance_data.get("uuid")] - stub.imprint(item, self.data) - stub.set_label_color(item.id, 14) # Cyan options 0 - 16 - stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"]) + if instance_data.get("uuid"): + # uuid not needed, replaced with unique instance_id + 
api.get_stub().remove_instance(instance_data.get("uuid")) + instance_data.pop("uuid") + + if not instance_data.get("task"): + instance_data["task"] = legacy_io.Session.get("AVALON_TASK") + + if not instance_data.get("creator_attributes"): + is_old_farm = instance_data["family"] != "renderLocal" + instance_data["creator_attributes"] = {"farm": is_old_farm} + instance_data["family"] = self.family + + return instance_data diff --git a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py new file mode 100644 index 0000000000..88e55e21b5 --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py @@ -0,0 +1,80 @@ +import openpype.hosts.aftereffects.api as api +from openpype.pipeline import ( + AutoCreator, + CreatedInstance, + legacy_io, +) + + +class AEWorkfileCreator(AutoCreator): + identifier = "workfile" + family = "workfile" + + def get_instance_attr_defs(self): + return [] + + def collect_instances(self): + for instance_data in api.list_instances(): + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + subset_name = instance_data["subset"] + instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + # nothing to change on workfiles + pass + + def create(self, options=None): + existing_instance = None + for instance in self.create_context.instances: + if instance.family == self.family: + existing_instance = instance + break + + variant = '' + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] + + if existing_instance is None: + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, 
project_name, host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": variant + } + data.update(self.get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name + )) + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(new_instance) + + api.get_stub().imprint(new_instance.get("instance_id"), + new_instance.data_to_store()) + + elif ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name diff --git a/openpype/hosts/aftereffects/plugins/load/load_background.py b/openpype/hosts/aftereffects/plugins/load/load_background.py index be43cae44e..d346df504a 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_background.py +++ b/openpype/hosts/aftereffects/plugins/load/load_background.py @@ -90,7 +90,7 @@ class BackgroundLoader(AfterEffectsLoader): container["namespace"] = comp_name container["members"] = comp.members - stub.imprint(comp, container) + stub.imprint(comp.id, container) def remove(self, container): """ @@ -99,10 +99,9 @@ class BackgroundLoader(AfterEffectsLoader): Args: container (dict): container to be removed - used to get layer_id """ - print("!!!! 
container:: {}".format(container)) stub = self.get_stub() layer = container.pop("layer") - stub.imprint(layer, {}) + stub.imprint(layer.id, {}) stub.delete_item(layer.id) def switch(self, container, representation): diff --git a/openpype/hosts/aftereffects/plugins/load/load_file.py b/openpype/hosts/aftereffects/plugins/load/load_file.py index 9eb9e80a2c..6ab69c6bfa 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_file.py +++ b/openpype/hosts/aftereffects/plugins/load/load_file.py @@ -96,9 +96,9 @@ class FileLoader(AfterEffectsLoader): # with aftereffects.maintained_selection(): # TODO stub.replace_item(layer.id, path, stub.LOADED_ICON + layer_name) stub.imprint( - layer, {"representation": str(representation["_id"]), - "name": context["subset"], - "namespace": layer_name} + layer.id, {"representation": str(representation["_id"]), + "name": context["subset"], + "namespace": layer_name} ) def remove(self, container): @@ -109,7 +109,7 @@ class FileLoader(AfterEffectsLoader): """ stub = self.get_stub() layer = container.pop("layer") - stub.imprint(layer, {}) + stub.imprint(layer.id, {}) stub.delete_item(layer.id) def switch(self, container, representation): diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_audio.py b/openpype/hosts/aftereffects/plugins/publish/collect_audio.py index 80679725e6..8647ba498b 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_audio.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_audio.py @@ -17,12 +17,11 @@ class CollectAudio(pyblish.api.ContextPlugin): def process(self, context): for instance in context: - if instance.data["family"] == 'render.farm': + if 'render.farm' in instance.data.get("families", []): comp_id = instance.data["comp_id"] if not comp_id: self.log.debug("No comp_id filled in instance") - # @iLLiCiTiT QUESTION Should return or continue? 
- return + continue context.data["audioFile"] = os.path.normpath( get_stub().get_audio_url(comp_id) ).replace("\\", "/") diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index 2a4b773681..fa23bf92b0 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -21,135 +21,129 @@ class AERenderInstance(RenderInstance): projectEntity = attr.ib(default=None) stagingDir = attr.ib(default=None) app_version = attr.ib(default=None) + publish_attributes = attr.ib(default=None) + file_name = attr.ib(default=None) class CollectAERender(abstract_collect_render.AbstractCollectRender): - order = pyblish.api.CollectorOrder + 0.498 + order = pyblish.api.CollectorOrder + 0.405 label = "Collect After Effects Render Layers" hosts = ["aftereffects"] - # internal - family_remapping = { - "render": ("render.farm", "farm"), # (family, label) - "renderLocal": ("render", "local") - } padding_width = 6 rendered_extension = 'png' - stub = get_stub() + _stub = None + + @classmethod + def get_stub(cls): + if not cls._stub: + cls._stub = get_stub() + return cls._stub def get_instances(self, context): instances = [] + instances_to_remove = [] - app_version = self.stub.get_app_version() + app_version = CollectAERender.get_stub().get_app_version() app_version = app_version[0:4] current_file = context.data["currentFile"] version = context.data["version"] - asset_entity = context.data["assetEntity"] + project_entity = context.data["projectEntity"] - compositions = self.stub.get_items(True) + compositions = CollectAERender.get_stub().get_items(True) compositions_by_id = {item.id: item for item in compositions} - for inst in self.stub.get_metadata(): - schema = inst.get('schema') - # loaded asset container skip it - if schema and 'container' in schema: + for inst in context: + if not inst.data.get("active", True): continue - if 
not inst["members"]: - raise ValueError("Couldn't find id, unable to publish. " + - "Please recreate instance.") - item_id = inst["members"][0] + family = inst.data["family"] + if family not in ["render", "renderLocal"]: # legacy + continue - work_area_info = self.stub.get_work_area(int(item_id)) + item_id = inst.data["members"][0] + + work_area_info = CollectAERender.get_stub().get_work_area( + int(item_id)) if not work_area_info: self.log.warning("Orphaned instance, deleting metadata") - self.stub.remove_instance(int(item_id)) + inst_id = inst.get("instance_id") or item_id + CollectAERender.get_stub().remove_instance(inst_id) continue - frameStart = work_area_info.workAreaStart - - frameEnd = round(work_area_info.workAreaStart + - float(work_area_info.workAreaDuration) * - float(work_area_info.frameRate)) - 1 + frame_start = work_area_info.workAreaStart + frame_end = round(work_area_info.workAreaStart + + float(work_area_info.workAreaDuration) * + float(work_area_info.frameRate)) - 1 fps = work_area_info.frameRate # TODO add resolution when supported by extension - if inst["family"] in self.family_remapping.keys() \ - and inst["active"]: - remapped_family = self.family_remapping[inst["family"]] - instance = AERenderInstance( - family=remapped_family[0], - families=[remapped_family[0]], - version=version, - time="", - source=current_file, - label="{} - {}".format(inst["subset"], remapped_family[1]), - subset=inst["subset"], - asset=context.data["assetEntity"]["name"], - attachTo=False, - setMembers='', - publish=True, - renderer='aerender', - name=inst["subset"], - resolutionWidth=asset_entity["data"].get( - "resolutionWidth", - project_entity["data"]["resolutionWidth"]), - resolutionHeight=asset_entity["data"].get( - "resolutionHeight", - project_entity["data"]["resolutionHeight"]), - pixelAspect=1, - tileRendering=False, - tilesX=0, - tilesY=0, - frameStart=frameStart, - frameEnd=frameEnd, - frameStep=1, - toBeRenderedOn='deadline', - fps=fps, - 
app_version=app_version - ) + task_name = inst.data.get("task") # legacy - comp = compositions_by_id.get(int(item_id)) - if not comp: - raise ValueError("There is no composition for item {}". - format(item_id)) - instance.comp_name = comp.name - instance.comp_id = item_id - instance._anatomy = context.data["anatomy"] - instance.anatomyData = context.data["anatomyData"] + render_q = CollectAERender.get_stub().get_render_info() + if not render_q: + raise ValueError("No file extension set in Render Queue") - instance.outputDir = self._get_output_dir(instance) - instance.context = context + subset_name = inst.data["subset"] + instance = AERenderInstance( + family=family, + families=inst.data.get("families", []), + version=version, + time="", + source=current_file, + label="{} - {}".format(subset_name, family), + subset=subset_name, + asset=inst.data["asset"], + task=task_name, + attachTo=False, + setMembers='', + publish=True, + renderer='aerender', + name=subset_name, + resolutionWidth=render_q.width, + resolutionHeight=render_q.height, + pixelAspect=1, + tileRendering=False, + tilesX=0, + tilesY=0, + frameStart=frame_start, + frameEnd=frame_end, + frameStep=1, + toBeRenderedOn='deadline', + fps=fps, + app_version=app_version, + publish_attributes=inst.data.get("publish_attributes"), + file_name=render_q.file_name + ) - settings = get_project_settings(os.getenv("AVALON_PROJECT")) - reviewable_subset_filter = \ - (settings["deadline"] - ["publish"] - ["ProcessSubmittedJobOnFarm"] - ["aov_filter"]) + comp = compositions_by_id.get(int(item_id)) + if not comp: + raise ValueError("There is no composition for item {}". 
+ format(item_id)) + instance.outputDir = self._get_output_dir(instance) + instance.comp_name = comp.name + instance.comp_id = item_id - if inst["family"] == "renderLocal": - # for local renders - instance.anatomyData["version"] = instance.version - instance.anatomyData["subset"] = instance.subset - instance.stagingDir = tempfile.mkdtemp() - instance.projectEntity = project_entity + is_local = "renderLocal" in inst.data["family"] # legacy + if inst.data.get("creator_attributes"): + is_local = not inst.data["creator_attributes"].get("farm") + if is_local: + # for local renders + instance = self._update_for_local(instance, project_entity) + else: + fam = "render.farm" + if fam not in instance.families: + instance.families.append(fam) - if self.hosts[0] in reviewable_subset_filter.keys(): - for aov_pattern in \ - reviewable_subset_filter[self.hosts[0]]: - if re.match(aov_pattern, instance.subset): - instance.families.append("review") - instance.review = True - break - - self.log.info("New instance:: {}".format(instance)) - instances.append(instance) + instances.append(instance) + instances_to_remove.append(inst) + for instance in instances_to_remove: + context.remove(instance) return instances def get_expected_files(self, render_instance): @@ -168,15 +162,11 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): start = render_instance.frameStart end = render_instance.frameEnd - # pull file name from Render Queue Output module - render_q = self.stub.get_render_info() - if not render_q: - raise ValueError("No file extension set in Render Queue") - _, ext = os.path.splitext(os.path.basename(render_q.file_name)) + _, ext = os.path.splitext(os.path.basename(render_instance.file_name)) base_dir = self._get_output_dir(render_instance) expected_files = [] - if "#" not in render_q.file_name: # single frame (mov)W + if "#" not in render_instance.file_name: # single frame (mov)W path = os.path.join(base_dir, "{}_{}_{}.{}".format( render_instance.asset, 
render_instance.subset, @@ -216,3 +206,24 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): # for submit_publish_job return base_dir + + def _update_for_local(self, instance, project_entity): + """Update old saved instances to current publishing format""" + instance.stagingDir = tempfile.mkdtemp() + instance.projectEntity = project_entity + fam = "render.local" + if fam not in instance.families: + instance.families.append(fam) + + settings = get_project_settings(os.getenv("AVALON_PROJECT")) + reviewable_subset_filter = (settings["deadline"] + ["publish"] + ["ProcessSubmittedJobOnFarm"] + ["aov_filter"].get(self.hosts[0])) + for aov_pattern in reviewable_subset_filter: + if re.match(aov_pattern, instance.subset): + instance.families.append("review") + instance.review = True + break + + return instance diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py index cb5a2bad4f..9cb6900b0a 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py @@ -1,7 +1,8 @@ import os -from avalon import api + import pyblish.api from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline import legacy_io class CollectWorkfile(pyblish.api.ContextPlugin): @@ -11,16 +12,45 @@ class CollectWorkfile(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder + 0.1 def process(self, context): - task = api.Session["AVALON_TASK"] + existing_instance = None + for instance in context: + if instance.data["family"] == "workfile": + self.log.debug("Workfile instance found, won't create new") + existing_instance = instance + break + current_file = context.data["currentFile"] staging_dir = os.path.dirname(current_file) scene_file = os.path.basename(current_file) + if existing_instance is None: # old publish + instance = self._get_new_instance(context, scene_file) + else: + 
instance = existing_instance + + # creating representation + representation = { + 'name': 'aep', + 'ext': 'aep', + 'files': scene_file, + "stagingDir": staging_dir, + } + + if not instance.data.get("representations"): + instance.data["representations"] = [] + instance.data["representations"].append(representation) + + instance.data["publish"] = instance.data["active"] # for DL + + def _get_new_instance(self, context, scene_file): + task = legacy_io.Session["AVALON_TASK"] version = context.data["version"] asset_entity = context.data["assetEntity"] project_entity = context.data["projectEntity"] - shared_instance_data = { + instance_data = { + "active": True, "asset": asset_entity["name"], + "task": task, "frameStart": asset_entity["data"]["frameStart"], "frameEnd": asset_entity["data"]["frameEnd"], "handleStart": asset_entity["data"]["handleStart"], @@ -59,20 +89,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin): "representations": list() }) - # adding basic script data - instance.data.update(shared_instance_data) + instance.data.update(instance_data) - # creating representation - representation = { - 'name': 'aep', - 'ext': 'aep', - 'files': scene_file, - "stagingDir": staging_dir, - } - - instance.data["representations"].append(representation) - - self.log.info('Publishing After Effects workfile') - - for i in context: - self.log.debug(f"{i.data['families']}") + return instance diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py index b738068a7b..7323a0b125 100644 --- a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py @@ -12,7 +12,7 @@ class ExtractLocalRender(openpype.api.Extractor): order = openpype.api.Extractor.order - 0.47 label = "Extract Local Render" hosts = ["aftereffects"] - families = ["render"] + families = ["renderLocal", "render.local"] def process(self, 
instance): stub = get_stub() diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py b/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py index e20598b311..eb2977309f 100644 --- a/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py +++ b/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py @@ -1,15 +1,16 @@ +import pyblish.api + import openpype.api from openpype.hosts.aftereffects.api import get_stub -class ExtractSaveScene(openpype.api.Extractor): +class ExtractSaveScene(pyblish.api.ContextPlugin): """Save scene before extraction.""" order = openpype.api.Extractor.order - 0.48 label = "Extract Save Scene" hosts = ["aftereffects"] - families = ["workfile"] - def process(self, instance): + def process(self, context): stub = get_stub() stub.save() diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml index 36fa90456e..0591020ed3 100644 --- a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml +++ b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml @@ -12,6 +12,8 @@ One of the settings in a scene doesn't match to asset settings in database. ### How to repair? Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there. + + In the scene it is right mouse click on published composition > `Composition Settings`. 
### __Detailed Info__ (optional) diff --git a/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py b/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py new file mode 100644 index 0000000000..03ec184524 --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py @@ -0,0 +1,54 @@ +import json +import pyblish.api +from openpype.hosts.aftereffects.api import list_instances + + +class PreCollectRender(pyblish.api.ContextPlugin): + """ + Checks if render instance is of old type, adds to families to both + existing collectors work same way. + + Could be removed in the future when no one uses old publish. + """ + + label = "PreCollect Render" + order = pyblish.api.CollectorOrder + 0.400 + hosts = ["aftereffects"] + + family_remapping = { + "render": ("render.farm", "farm"), # (family, label) + "renderLocal": ("render.local", "local") + } + + def process(self, context): + if context.data.get("newPublishing"): + self.log.debug("Not applicable for New Publisher, skip") + return + + for inst in list_instances(): + if inst.get("creator_attributes"): + raise ValueError("Instance created in New publisher, " + "cannot be published in Pyblish.\n" + "Please publish in New Publisher " + "or recreate instances with legacy Creators") + + if inst["family"] not in self.family_remapping.keys(): + continue + + if not inst["members"]: + raise ValueError("Couldn't find id, unable to publish. 
" + + "Please recreate instance.") + + instance = context.create_instance(inst["subset"]) + inst["families"] = [self.family_remapping[inst["family"]][0]] + instance.data.update(inst) + + self._debug_log(instance) + + def _debug_log(self, instance): + def _default_json(value): + return str(value) + + self.log.info( + json.dumps(instance.data, indent=4, default=_default_json) + ) diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py index 37cecfbcc4..7a9356f020 100644 --- a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py +++ b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py @@ -1,7 +1,10 @@ -from avalon import api import pyblish.api + import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + legacy_io, +) from openpype.hosts.aftereffects.api import get_stub @@ -27,8 +30,8 @@ class ValidateInstanceAssetRepair(pyblish.api.Action): for instance in instances: data = stub.read(instance[0]) - data["asset"] = api.Session["AVALON_ASSET"] - stub.imprint(instance[0], data) + data["asset"] = legacy_io.Session["AVALON_ASSET"] + stub.imprint(instance[0].instance_id, data) class ValidateInstanceAsset(pyblish.api.InstancePlugin): @@ -51,7 +54,7 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin): def process(self, instance): instance_asset = instance.data["asset"] - current_asset = api.Session["AVALON_ASSET"] + current_asset = legacy_io.Session["AVALON_ASSET"] msg = ( f"Instance asset {instance_asset} is not the same " f"as current context {current_asset}." 
diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py index 273ccd295e..14e224fdc2 100644 --- a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py @@ -5,11 +5,15 @@ import re import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) from openpype.hosts.aftereffects.api import get_asset_settings -class ValidateSceneSettings(pyblish.api.InstancePlugin): +class ValidateSceneSettings(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): """ Ensures that Composition Settings (right mouse on comp) are same as in FTrack on task. @@ -59,15 +63,20 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): def process(self, instance): """Plugin entry point.""" + # Skip the instance if is not active by data on the instance + if not self.is_active(instance.data): + return + expected_settings = get_asset_settings() self.log.info("config from DB::{}".format(expected_settings)) - if any(re.search(pattern, os.getenv('AVALON_TASK')) + task_name = instance.data["anatomyData"]["task"]["name"] + if any(re.search(pattern, task_name) for pattern in self.skip_resolution_check): expected_settings.pop("resolutionWidth") expected_settings.pop("resolutionHeight") - if any(re.search(pattern, os.getenv('AVALON_TASK')) + if any(re.search(pattern, task_name) for pattern in self.skip_timelines_check): expected_settings.pop('fps', None) expected_settings.pop('frameStart', None) @@ -87,10 +96,14 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): duration = instance.data.get("frameEndHandle") - \ instance.data.get("frameStartHandle") + 1 - self.log.debug("filtered config::{}".format(expected_settings)) + self.log.debug("validated items::{}".format(expected_settings)) 
current_settings = { "fps": fps, + "frameStart": instance.data.get("frameStart"), + "frameEnd": instance.data.get("frameEnd"), + "handleStart": instance.data.get("handleStart"), + "handleEnd": instance.data.get("handleEnd"), "frameStartHandle": instance.data.get("frameStartHandle"), "frameEndHandle": instance.data.get("frameEndHandle"), "resolutionWidth": instance.data.get("resolutionWidth"), @@ -103,24 +116,22 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): invalid_keys = set() for key, value in expected_settings.items(): if value != current_settings[key]: - invalid_settings.append( - "{} expected: {} found: {}".format(key, value, - current_settings[key]) - ) + msg = "'{}' expected: '{}' found: '{}'".format( + key, value, current_settings[key]) + + if key == "duration" and expected_settings.get("handleStart"): + msg += "Handles included in calculation. Remove " \ + "handles in DB or extend frame range in " \ + "Composition Setting." + + invalid_settings.append(msg) invalid_keys.add(key) - if ((expected_settings.get("handleStart") - or expected_settings.get("handleEnd")) - and invalid_settings): - msg = "Handles included in calculation. Remove handles in DB " +\ - "or extend frame range in Composition Setting." - invalid_settings[-1]["reason"] = msg - - msg = "Found invalid settings:\n{}".format( - "\n".join(invalid_settings) - ) - if invalid_settings: + msg = "Found invalid settings:\n{}".format( + "\n".join(invalid_settings) + ) + invalid_keys_str = ",".join(invalid_keys) break_str = "
" invalid_setting_str = "Found invalid settings:
{}".\ diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py index 29d6d356c8..c1b5add518 100644 --- a/openpype/hosts/blender/api/ops.py +++ b/openpype/hosts/blender/api/ops.py @@ -15,9 +15,9 @@ from Qt import QtWidgets, QtCore import bpy import bpy.utils.previews -import avalon.api -from openpype.tools.utils import host_tools from openpype import style +from openpype.pipeline import legacy_io +from openpype.tools.utils import host_tools from .workio import OpenFileCacher @@ -279,7 +279,7 @@ class LaunchLoader(LaunchQtApp): def before_window_show(self): self._window.set_context( - {"asset": avalon.api.Session["AVALON_ASSET"]}, + {"asset": legacy_io.Session["AVALON_ASSET"]}, refresh=True ) @@ -327,8 +327,8 @@ class LaunchWorkFiles(LaunchQtApp): def execute(self, context): result = super().execute(context) self._window.set_context({ - "asset": avalon.api.Session["AVALON_ASSET"], - "task": avalon.api.Session["AVALON_TASK"] + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] }) return result @@ -358,8 +358,8 @@ class TOPBAR_MT_avalon(bpy.types.Menu): else: pyblish_menu_icon_id = 0 - asset = avalon.api.Session['AVALON_ASSET'] - task = avalon.api.Session['AVALON_TASK'] + asset = legacy_io.Session['AVALON_ASSET'] + task = legacy_io.Session['AVALON_TASK'] context_label = f"{asset}, {task}" context_label_item = layout.row() context_label_item.operator( diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py index b9ec2cfea4..5b81764644 100644 --- a/openpype/hosts/blender/api/pipeline.py +++ b/openpype/hosts/blender/api/pipeline.py @@ -1,6 +1,5 @@ import os import sys -import importlib import traceback from typing import Callable, Dict, Iterator, List, Optional @@ -10,10 +9,10 @@ from . import lib from . 
import ops import pyblish.api -import avalon.api -from avalon import io, schema from openpype.pipeline import ( + schema, + legacy_io, register_loader_plugin_path, register_creator_plugin_path, deregister_loader_plugin_path, @@ -84,8 +83,8 @@ def uninstall(): def set_start_end_frames(): - asset_name = io.Session["AVALON_ASSET"] - asset_doc = io.find_one({ + asset_name = legacy_io.Session["AVALON_ASSET"] + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -189,7 +188,7 @@ def _on_task_changed(): # `directory` attribute, so it opens in that directory (does it?). # https://docs.blender.org/api/blender2.8/bpy.types.Operator.html#calling-a-file-selector # https://docs.blender.org/api/blender2.8/bpy.types.WindowManager.html#bpy.types.WindowManager.fileselect_add - workdir = avalon.api.Session["AVALON_WORKDIR"] + workdir = legacy_io.Session["AVALON_WORKDIR"] log.debug("New working directory: %s", workdir) @@ -200,27 +199,6 @@ def _register_events(): log.info("Installed event callback for 'taskChanged'...") -def reload_pipeline(*args): - """Attempt to reload pipeline at run-time. - - Warning: - This is primarily for development and debugging purposes and not well - tested. - - """ - - avalon.api.uninstall() - - for module in ( - "avalon.io", - "avalon.lib", - "avalon.pipeline", - "avalon.api", - ): - module = importlib.import_module(module) - importlib.reload(module) - - def _discover_gui() -> Optional[Callable]: """Return the most desirable of the currently registered GUIs""" diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py index 3207f543b7..c59be8d7ff 100644 --- a/openpype/hosts/blender/api/plugin.py +++ b/openpype/hosts/blender/api/plugin.py @@ -266,7 +266,7 @@ class AssetLoader(LoaderPlugin): # Only containerise if it's not already a collection from a .blend file. 
# representation = context["representation"]["name"] # if representation != "blend": - # from avalon.blender.pipeline import containerise + # from openpype.hosts.blender.api.pipeline import containerise # return containerise( # name=name, # namespace=namespace, diff --git a/openpype/hosts/blender/blender_addon/startup/init.py b/openpype/hosts/blender/blender_addon/startup/init.py index e43373bc6c..13a4b8a7a1 100644 --- a/openpype/hosts/blender/blender_addon/startup/init.py +++ b/openpype/hosts/blender/blender_addon/startup/init.py @@ -1,4 +1,4 @@ -from avalon import pipeline +from openpype.pipeline import install_host from openpype.hosts.blender import api -pipeline.install(api) +install_host(api) diff --git a/openpype/hosts/blender/plugins/create/create_action.py b/openpype/hosts/blender/plugins/create/create_action.py index 5f66f5da6e..54b3a501a7 100644 --- a/openpype/hosts/blender/plugins/create/create_action.py +++ b/openpype/hosts/blender/plugins/create/create_action.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io import openpype.hosts.blender.api.plugin from openpype.hosts.blender.api import lib @@ -22,7 +22,7 @@ class CreateAction(openpype.hosts.blender.api.plugin.Creator): name = openpype.hosts.blender.api.plugin.asset_name(asset, subset) collection = bpy.data.collections.new(name=name) bpy.context.scene.collection.children.link(collection) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(collection, self.data) if (self.options or {}).get("useSelection"): diff --git a/openpype/hosts/blender/plugins/create/create_animation.py b/openpype/hosts/blender/plugins/create/create_animation.py index b88010ae90..a0e9e5e399 100644 --- a/openpype/hosts/blender/plugins/create/create_animation.py +++ b/openpype/hosts/blender/plugins/create/create_animation.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io 
from openpype.hosts.blender.api import plugin, lib, ops from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES @@ -37,7 +37,7 @@ class CreateAnimation(plugin.Creator): # asset_group.empty_display_type = 'SINGLE_ARROW' asset_group = bpy.data.collections.new(name=name) instances.children.link(asset_group) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(asset_group, self.data) if (self.options or {}).get("useSelection"): diff --git a/openpype/hosts/blender/plugins/create/create_camera.py b/openpype/hosts/blender/plugins/create/create_camera.py index cc796d464d..1a3c008069 100644 --- a/openpype/hosts/blender/plugins/create/create_camera.py +++ b/openpype/hosts/blender/plugins/create/create_camera.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin, lib, ops from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES @@ -40,7 +40,7 @@ class CreateCamera(plugin.Creator): asset_group = bpy.data.objects.new(name=name, object_data=None) asset_group.empty_display_type = 'SINGLE_ARROW' instances.objects.link(asset_group) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') print(f"self.data: {self.data}") lib.imprint(asset_group, self.data) diff --git a/openpype/hosts/blender/plugins/create/create_layout.py b/openpype/hosts/blender/plugins/create/create_layout.py index f62cbc52ba..5949a4b86e 100644 --- a/openpype/hosts/blender/plugins/create/create_layout.py +++ b/openpype/hosts/blender/plugins/create/create_layout.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin, lib, ops from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES @@ -34,7 +34,7 @@ class CreateLayout(plugin.Creator): asset_group = bpy.data.objects.new(name=name, 
object_data=None) asset_group.empty_display_type = 'SINGLE_ARROW' instances.objects.link(asset_group) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(asset_group, self.data) # Add selected objects to instance diff --git a/openpype/hosts/blender/plugins/create/create_model.py b/openpype/hosts/blender/plugins/create/create_model.py index 75c90f9bb1..fedc708943 100644 --- a/openpype/hosts/blender/plugins/create/create_model.py +++ b/openpype/hosts/blender/plugins/create/create_model.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin, lib, ops from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES @@ -34,7 +34,7 @@ class CreateModel(plugin.Creator): asset_group = bpy.data.objects.new(name=name, object_data=None) asset_group.empty_display_type = 'SINGLE_ARROW' instances.objects.link(asset_group) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(asset_group, self.data) # Add selected objects to instance diff --git a/openpype/hosts/blender/plugins/create/create_pointcache.py b/openpype/hosts/blender/plugins/create/create_pointcache.py index bf5a84048f..38707fd3b1 100644 --- a/openpype/hosts/blender/plugins/create/create_pointcache.py +++ b/openpype/hosts/blender/plugins/create/create_pointcache.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io import openpype.hosts.blender.api.plugin from openpype.hosts.blender.api import lib @@ -22,7 +22,7 @@ class CreatePointcache(openpype.hosts.blender.api.plugin.Creator): name = openpype.hosts.blender.api.plugin.asset_name(asset, subset) collection = bpy.data.collections.new(name=name) bpy.context.scene.collection.children.link(collection) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = 
legacy_io.Session.get('AVALON_TASK') lib.imprint(collection, self.data) if (self.options or {}).get("useSelection"): diff --git a/openpype/hosts/blender/plugins/create/create_rig.py b/openpype/hosts/blender/plugins/create/create_rig.py index 65f5061924..0abd306c6b 100644 --- a/openpype/hosts/blender/plugins/create/create_rig.py +++ b/openpype/hosts/blender/plugins/create/create_rig.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin, lib, ops from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES @@ -34,7 +34,7 @@ class CreateRig(plugin.Creator): asset_group = bpy.data.objects.new(name=name, object_data=None) asset_group.empty_display_type = 'SINGLE_ARROW' instances.objects.link(asset_group) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(asset_group, self.data) # Add selected objects to instance diff --git a/openpype/hosts/blender/plugins/publish/extract_layout.py b/openpype/hosts/blender/plugins/publish/extract_layout.py index b78a193d81..8ecc78a2c6 100644 --- a/openpype/hosts/blender/plugins/publish/extract_layout.py +++ b/openpype/hosts/blender/plugins/publish/extract_layout.py @@ -7,7 +7,7 @@ import bpy import bpy_extras import bpy_extras.anim_utils -from avalon import io +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY import openpype.api @@ -139,7 +139,7 @@ class ExtractLayout(openpype.api.Extractor): self.log.debug("Parent: {}".format(parent)) # Get blend reference - blend = io.find_one( + blend = legacy_io.find_one( { "type": "representation", "parent": ObjectId(parent), @@ -150,7 +150,7 @@ class ExtractLayout(openpype.api.Extractor): if blend: blend_id = blend["_id"] # Get fbx reference - fbx = io.find_one( + fbx = legacy_io.find_one( { "type": "representation", "parent": 
ObjectId(parent), @@ -161,7 +161,7 @@ class ExtractLayout(openpype.api.Extractor): if fbx: fbx_id = fbx["_id"] # Get abc reference - abc = io.find_one( + abc = legacy_io.find_one( { "type": "representation", "parent": ObjectId(parent), diff --git a/openpype/hosts/blender/plugins/publish/integrate_animation.py b/openpype/hosts/blender/plugins/publish/integrate_animation.py index 90e94a4aac..d9a85bc79b 100644 --- a/openpype/hosts/blender/plugins/publish/integrate_animation.py +++ b/openpype/hosts/blender/plugins/publish/integrate_animation.py @@ -1,6 +1,5 @@ import json -from avalon import io import pyblish.api diff --git a/openpype/hosts/celaction/api/cli.py b/openpype/hosts/celaction/api/cli.py index bc1e3eaf89..8c7b3a2e74 100644 --- a/openpype/hosts/celaction/api/cli.py +++ b/openpype/hosts/celaction/api/cli.py @@ -3,8 +3,6 @@ import sys import copy import argparse -from avalon import io - import pyblish.api import pyblish.util @@ -13,6 +11,8 @@ import openpype import openpype.hosts.celaction from openpype.hosts.celaction import api as celaction from openpype.tools.utils import host_tools +from openpype.pipeline import install_openpype_plugins + log = Logger().get_logger("Celaction_cli_publisher") @@ -21,9 +21,6 @@ publish_host = "celaction" HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.celaction.__file__)) PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") def cli(): @@ -74,7 +71,7 @@ def main(): _prepare_publish_environments() # Registers pype's Global pyblish plugins - openpype.install() + install_openpype_plugins() if os.path.exists(PUBLISH_PATH): log.info(f"Registering path: {PUBLISH_PATH}") diff --git a/openpype/hosts/celaction/plugins/publish/collect_audio.py b/openpype/hosts/celaction/plugins/publish/collect_audio.py index 
80c1c37d7e..8acda5fc7c 100644 --- a/openpype/hosts/celaction/plugins/publish/collect_audio.py +++ b/openpype/hosts/celaction/plugins/publish/collect_audio.py @@ -1,10 +1,10 @@ import os import collections +from pprint import pformat import pyblish.api -from avalon import io -from pprint import pformat +from openpype.pipeline import legacy_io class AppendCelactionAudio(pyblish.api.ContextPlugin): @@ -60,7 +60,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): """ # Query all subsets for asset - subset_docs = io.find({ + subset_docs = legacy_io.find({ "type": "subset", "parent": asset_doc["_id"] }) @@ -93,7 +93,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): }} ] last_versions_by_subset_id = dict() - for doc in io.aggregate(pipeline): + for doc in legacy_io.aggregate(pipeline): doc["parent"] = doc["_id"] doc["_id"] = doc.pop("_version_id") last_versions_by_subset_id[doc["parent"]] = doc @@ -102,7 +102,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): for version_doc in last_versions_by_subset_id.values(): version_docs_by_id[version_doc["_id"]] = version_doc - repre_docs = io.find({ + repre_docs = legacy_io.find({ "type": "representation", "parent": {"$in": list(version_docs_by_id.keys())}, "name": {"$in": representations} diff --git a/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py b/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py index f393e471c4..1d2d9da1af 100644 --- a/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py +++ b/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py @@ -1,6 +1,6 @@ import os -from avalon import api import pyblish.api +from openpype.pipeline import legacy_io class CollectCelactionInstances(pyblish.api.ContextPlugin): @@ -10,7 +10,7 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder + 0.1 def process(self, context): - task = api.Session["AVALON_TASK"] + task = 
legacy_io.Session["AVALON_TASK"] current_file = context.data["currentFile"] staging_dir = os.path.dirname(current_file) scene_file = os.path.basename(current_file) diff --git a/openpype/hosts/flame/api/__init__.py b/openpype/hosts/flame/api/__init__.py index f210c27f87..2c461e5f16 100644 --- a/openpype/hosts/flame/api/__init__.py +++ b/openpype/hosts/flame/api/__init__.py @@ -11,10 +11,8 @@ from .constants import ( from .lib import ( CTX, FlameAppFramework, - get_project_manager, get_current_project, get_current_sequence, - create_bin, create_segment_data_marker, get_segment_data_marker, set_segment_data_marker, @@ -29,7 +27,10 @@ from .lib import ( get_frame_from_filename, get_padding_from_filename, maintained_object_duplication, - get_clip_segment + maintained_temp_file_path, + get_clip_segment, + get_batch_group_from_desktop, + MediaInfoFile ) from .utils import ( setup, @@ -56,7 +57,6 @@ from .plugin import ( PublishableClip, ClipLoader, OpenClipSolver - ) from .workio import ( open_file, @@ -71,6 +71,10 @@ from .render_utils import ( get_preset_path_by_xml_name, modify_preset_file ) +from .batch_utils import ( + create_batch_group, + create_batch_group_conent +) __all__ = [ # constants @@ -83,10 +87,8 @@ __all__ = [ # lib "CTX", "FlameAppFramework", - "get_project_manager", "get_current_project", "get_current_sequence", - "create_bin", "create_segment_data_marker", "get_segment_data_marker", "set_segment_data_marker", @@ -101,7 +103,10 @@ __all__ = [ "get_frame_from_filename", "get_padding_from_filename", "maintained_object_duplication", + "maintained_temp_file_path", "get_clip_segment", + "get_batch_group_from_desktop", + "MediaInfoFile", # pipeline "install", @@ -142,5 +147,9 @@ __all__ = [ # render utils "export_clip", "get_preset_path_by_xml_name", - "modify_preset_file" + "modify_preset_file", + + # batch utils + "create_batch_group", + "create_batch_group_conent" ] diff --git a/openpype/hosts/flame/api/batch_utils.py 
b/openpype/hosts/flame/api/batch_utils.py new file mode 100644 index 0000000000..9d419a4a90 --- /dev/null +++ b/openpype/hosts/flame/api/batch_utils.py @@ -0,0 +1,151 @@ +import flame + + +def create_batch_group( + name, + frame_start, + frame_duration, + update_batch_group=None, + **kwargs +): + """Create Batch Group in active project's Desktop + + Args: + name (str): name of batch group to be created + frame_start (int): start frame of batch + frame_end (int): end frame of batch + update_batch_group (PyBatch)[optional]: batch group to update + + Return: + PyBatch: active flame batch group + """ + # make sure some batch obj is present + batch_group = update_batch_group or flame.batch + + schematic_reels = kwargs.get("shematic_reels") or ['LoadedReel1'] + shelf_reels = kwargs.get("shelf_reels") or ['ShelfReel1'] + + handle_start = kwargs.get("handleStart") or 0 + handle_end = kwargs.get("handleEnd") or 0 + + frame_start -= handle_start + frame_duration += handle_start + handle_end + + if not update_batch_group: + # Create batch group with name, start_frame value, duration value, + # set of schematic reel names, set of shelf reel names + batch_group = batch_group.create_batch_group( + name, + start_frame=frame_start, + duration=frame_duration, + reels=schematic_reels, + shelf_reels=shelf_reels + ) + else: + batch_group.name = name + batch_group.start_frame = frame_start + batch_group.duration = frame_duration + + # add reels to batch group + _add_reels_to_batch_group( + batch_group, schematic_reels, shelf_reels) + + # TODO: also update write node if there is any + # TODO: also update loaders to start from correct frameStart + + if kwargs.get("switch_batch_tab"): + # use this command to switch to the batch tab + batch_group.go_to() + + return batch_group + + +def _add_reels_to_batch_group(batch_group, reels, shelf_reels): + # update or create defined reels + # helper variables + reel_names = [ + r.name.get_value() + for r in batch_group.reels + ] + shelf_reel_names = 
[ + r.name.get_value() + for r in batch_group.shelf_reels + ] + # add schematic reels + for _r in reels: + if _r in reel_names: + continue + batch_group.create_reel(_r) + + # add shelf reels + for _sr in shelf_reels: + if _sr in shelf_reel_names: + continue + batch_group.create_shelf_reel(_sr) + + +def create_batch_group_conent(batch_nodes, batch_links, batch_group=None): + """Creating batch group with links + + Args: + batch_nodes (list of dict): each dict is node definition + batch_links (list of dict): each dict is link definition + batch_group (PyBatch, optional): batch group. Defaults to None. + + Return: + dict: all batch nodes {name or id: PyNode} + """ + # make sure some batch obj is present + batch_group = batch_group or flame.batch + all_batch_nodes = { + b.name.get_value(): b + for b in batch_group.nodes + } + for node in batch_nodes: + # NOTE: node_props needs to be ideally OrederDict type + node_id, node_type, node_props = ( + node["id"], node["type"], node["properties"]) + + # get node name for checking if exists + node_name = node_props.pop("name", None) or node_id + + if all_batch_nodes.get(node_name): + # update existing batch node + batch_node = all_batch_nodes[node_name] + else: + # create new batch node + batch_node = batch_group.create_node(node_type) + + # set name + batch_node.name.set_value(node_name) + + # set attributes found in node props + for key, value in node_props.items(): + if not hasattr(batch_node, key): + continue + setattr(batch_node, key, value) + + # add created node for possible linking + all_batch_nodes[node_id] = batch_node + + # link nodes to each other + for link in batch_links: + _from_n, _to_n = link["from_node"], link["to_node"] + + # check if all linking nodes are available + if not all([ + all_batch_nodes.get(_from_n["id"]), + all_batch_nodes.get(_to_n["id"]) + ]): + continue + + # link nodes in defined link + batch_group.connect_nodes( + all_batch_nodes[_from_n["id"]], _from_n["connector"], + 
all_batch_nodes[_to_n["id"]], _to_n["connector"] + ) + + # sort batch nodes + batch_group.organize() + + return all_batch_nodes diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py index aa2cfcb96d..c7c444c1fb 100644 --- a/openpype/hosts/flame/api/lib.py +++ b/openpype/hosts/flame/api/lib.py @@ -3,7 +3,12 @@ import os import re import json import pickle +import tempfile +import itertools import contextlib +import xml.etree.cElementTree as cET +from copy import deepcopy +from xml.etree import ElementTree as ET from pprint import pformat from .constants import ( MARKER_COLOR, @@ -12,9 +17,10 @@ from .constants import ( COLOR_MAP, MARKER_PUBLISH_DEFAULT ) -from openpype.api import Logger -log = Logger.get_logger(__name__) +import openpype.api as openpype + +log = openpype.Logger.get_logger(__name__) FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]") @@ -227,16 +233,6 @@ class FlameAppFramework(object): return True -def get_project_manager(): - # TODO: get_project_manager - return - - -def get_media_storage(): - # TODO: get_media_storage - return - - def get_current_project(): import flame return flame.project.current_project @@ -266,11 +262,6 @@ def get_current_sequence(selection): return process_timeline -def create_bin(name, root=None): - # TODO: create_bin - return - - def rescan_hooks(): import flame try: @@ -280,6 +271,7 @@ def rescan_hooks(): def get_metadata(project_name, _log=None): + # TODO: can be replaced by MediaInfoFile class method from adsk.libwiretapPythonClientAPI import ( WireTapClient, WireTapServerHandle, @@ -704,6 +696,25 @@ def maintained_object_duplication(item): flame.delete(duplicate) +@contextlib.contextmanager +def maintained_temp_file_path(suffix=None): + _suffix = suffix or "" + + try: + # Store dumped json to temporary file + temporary_file = tempfile.mktemp( + suffix=_suffix, prefix="flame_maintained_") + yield temporary_file.replace("\\", "/") + + except IOError as _error: + raise IOError( + "Not able to create temp 
json file: {}".format(_error)) + + finally: + # Remove the temporary json + os.remove(temporary_file) + + def get_clip_segment(flame_clip): name = flame_clip.name.get_value() version = flame_clip.versions[0] @@ -717,3 +728,213 @@ def get_clip_segment(flame_clip): raise ValueError("Clip `{}` has too many segments!".format(name)) return segments[0] + + +def get_batch_group_from_desktop(name): + project = get_current_project() + project_desktop = project.current_workspace.desktop + + for bgroup in project_desktop.batch_groups: + if bgroup.name.get_value() in name: + return bgroup + + +class MediaInfoFile(object): + """Class to get media info file clip data + + Raises: + IOError: MEDIA_SCRIPT_PATH path doesn't exists + TypeError: Not able to generate clip xml data file + ET.ParseError: Missing clip in xml clip data + IOError: Not able to save xml clip data to file + + Attributes: + str: `MEDIA_SCRIPT_PATH` path to flame binary + logging.Logger: `log` logger + + TODO: add method for getting metadata to dict + """ + MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info" + + log = log + + _clip_data = None + _start_frame = None + _fps = None + _drop_mode = None + + def __init__(self, path, **kwargs): + + # replace log if any + if kwargs.get("logger"): + self.log = kwargs["logger"] + + # test if `dl_get_media_info` paht exists + self._validate_media_script_path() + + # derivate other feed variables + self.feed_basename = os.path.basename(path) + self.feed_dir = os.path.dirname(path) + self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower() + + with maintained_temp_file_path(".clip") as tmp_path: + self.log.info("Temp File: {}".format(tmp_path)) + self._generate_media_info_file(tmp_path) + + # get clip data and make them single if there is multiple + # clips data + xml_data = self._make_single_clip_media_info(tmp_path) + self.log.debug("xml_data: {}".format(xml_data)) + self.log.debug("type: {}".format(type(xml_data))) + + # get all time related data 
and assign them + self._get_time_info_from_origin(xml_data) + self.log.debug("start_frame: {}".format(self.start_frame)) + self.log.debug("fps: {}".format(self.fps)) + self.log.debug("drop frame: {}".format(self.drop_mode)) + self.clip_data = xml_data + + @property + def clip_data(self): + """Clip's xml clip data + + Returns: + xml.etree.ElementTree: xml data + """ + return self._clip_data + + @clip_data.setter + def clip_data(self, data): + self._clip_data = data + + @property + def start_frame(self): + """ Clip's starting frame found in timecode + + Returns: + int: number of frames + """ + return self._start_frame + + @start_frame.setter + def start_frame(self, number): + self._start_frame = int(number) + + @property + def fps(self): + """ Clip's frame rate + + Returns: + float: frame rate + """ + return self._fps + + @fps.setter + def fps(self, fl_number): + self._fps = float(fl_number) + + @property + def drop_mode(self): + """ Clip's drop frame mode + + Returns: + str: drop frame flag + """ + return self._drop_mode + + @drop_mode.setter + def drop_mode(self, text): + self._drop_mode = str(text) + + def _validate_media_script_path(self): + if not os.path.isfile(self.MEDIA_SCRIPT_PATH): + raise IOError("Media Scirpt does not exist: `{}`".format( + self.MEDIA_SCRIPT_PATH)) + + def _generate_media_info_file(self, fpath): + # Create cmd arguments for gettig xml file info file + cmd_args = [ + self.MEDIA_SCRIPT_PATH, + "-e", self.feed_ext, + "-o", fpath, + self.feed_dir + ] + + try: + # execute creation of clip xml template data + openpype.run_subprocess(cmd_args) + except TypeError as error: + raise TypeError( + "Error creating `{}` due: {}".format(fpath, error)) + + def _make_single_clip_media_info(self, fpath): + with open(fpath) as f: + lines = f.readlines() + _added_root = itertools.chain( + "", deepcopy(lines)[1:], "") + new_root = ET.fromstringlist(_added_root) + + # find the clip which is matching to my input name + xml_clips = new_root.findall("clip") + 
matching_clip = None + for xml_clip in xml_clips: + if xml_clip.find("name").text in self.feed_basename: + matching_clip = xml_clip + + if matching_clip is None: + # return warning there is missing clip + raise ET.ParseError( + "Missing clip in `{}`. Available clips {}".format( + self.feed_basename, [ + xml_clip.find("name").text + for xml_clip in xml_clips + ] + )) + + return matching_clip + + def _get_time_info_from_origin(self, xml_data): + try: + for out_track in xml_data.iter('track'): + for out_feed in out_track.iter('feed'): + # start frame + out_feed_nb_ticks_obj = out_feed.find( + 'startTimecode/nbTicks') + self.start_frame = out_feed_nb_ticks_obj.text + + # fps + out_feed_fps_obj = out_feed.find( + 'startTimecode/rate') + self.fps = out_feed_fps_obj.text + + # drop frame mode + out_feed_drop_mode_obj = out_feed.find( + 'startTimecode/dropMode') + self.drop_mode = out_feed_drop_mode_obj.text + break + else: + continue + except Exception as msg: + self.log.warning(msg) + + @staticmethod + def write_clip_data_to_file(fpath, xml_element_data): + """ Write xml element of clip data to file + + Args: + fpath (string): file path + xml_element_data (xml.etree.ElementTree.Element): xml data + + Raises: + IOError: If data could not be written to file + """ + try: + # save it as new file + tree = cET.ElementTree(xml_element_data) + tree.write( + fpath, xml_declaration=True, + method='xml', encoding='UTF-8' + ) + except IOError as error: + raise IOError( + "Not able to write data to file: {}".format(error)) diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py index 4c9d3c5383..11108ba49f 100644 --- a/openpype/hosts/flame/api/plugin.py +++ b/openpype/hosts/flame/api/plugin.py @@ -1,24 +1,19 @@ import os import re import shutil -import sys -from xml.etree import ElementTree as ET -import six -import qargparse -from Qt import QtWidgets, QtCore -import openpype.api as openpype -from openpype.pipeline import ( - LegacyCreator, - 
LoaderPlugin, -) -from openpype import style -from . import ( - lib as flib, - pipeline as fpipeline, - constants -) - from copy import deepcopy +from xml.etree import ElementTree as ET + +from Qt import QtCore, QtWidgets + +import openpype.api as openpype +import qargparse +from openpype import style +from openpype.pipeline import LegacyCreator, LoaderPlugin + +from . import constants +from . import lib as flib +from . import pipeline as fpipeline log = openpype.Logger.get_logger(__name__) @@ -660,8 +655,8 @@ class PublishableClip: # Publishing plugin functions -# Loader plugin functions +# Loader plugin functions class ClipLoader(LoaderPlugin): """A basic clip loader for Flame @@ -681,50 +676,52 @@ class ClipLoader(LoaderPlugin): ] -class OpenClipSolver: - media_script_path = "/opt/Autodesk/mio/current/dl_get_media_info" - tmp_name = "_tmp.clip" - tmp_file = None +class OpenClipSolver(flib.MediaInfoFile): create_new_clip = False - out_feed_nb_ticks = None - out_feed_fps = None - out_feed_drop_mode = None - log = log def __init__(self, openclip_file_path, feed_data): - # test if media script paht exists - self._validate_media_script_path() + self.out_file = openclip_file_path # new feed variables: - feed_path = feed_data["path"] + feed_path = feed_data.pop("path") + + # initialize parent class + super(OpenClipSolver, self).__init__( + feed_path, + **feed_data + ) + + # get other metadata self.feed_version_name = feed_data["version"] self.feed_colorspace = feed_data.get("colorspace") - - if feed_data.get("logger"): - self.log = feed_data["logger"] + self.log.debug("feed_version_name: {}".format(self.feed_version_name)) # derivate other feed variables self.feed_basename = os.path.basename(feed_path) self.feed_dir = os.path.dirname(feed_path) self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower() - - if not os.path.isfile(openclip_file_path): - # openclip does not exist yet and will be created - self.tmp_file = self.out_file = openclip_file_path + 
self.log.debug("feed_ext: {}".format(self.feed_ext)) + self.log.debug("out_file: {}".format(self.out_file)) + if not self._is_valid_tmp_file(self.out_file): self.create_new_clip = True - else: - # output a temp file - self.out_file = openclip_file_path - self.tmp_file = os.path.join(self.feed_dir, self.tmp_name) - self._clear_tmp_file() + def _is_valid_tmp_file(self, file): + # check if file exists + if os.path.isfile(file): + # test also if file is not empty + with open(file) as f: + lines = f.readlines() - self.log.info("Temp File: {}".format(self.tmp_file)) + if len(lines) > 2: + return True + + # file is probably corrupted + os.remove(file) + return False def make(self): - self._generate_media_info_file() if self.create_new_clip: # New openClip @@ -732,42 +729,17 @@ class OpenClipSolver: else: self._update_open_clip() - def _validate_media_script_path(self): - if not os.path.isfile(self.media_script_path): - raise IOError("Media Scirpt does not exist: `{}`".format( - self.media_script_path)) - - def _generate_media_info_file(self): - # Create cmd arguments for gettig xml file info file - cmd_args = [ - self.media_script_path, - "-e", self.feed_ext, - "-o", self.tmp_file, - self.feed_dir - ] - - # execute creation of clip xml template data - try: - openpype.run_subprocess(cmd_args) - except TypeError: - self.log.error("Error creating self.tmp_file") - six.reraise(*sys.exc_info()) - - def _clear_tmp_file(self): - if os.path.isfile(self.tmp_file): - os.remove(self.tmp_file) - def _clear_handler(self, xml_object): for handler in xml_object.findall("./handler"): - self.log.debug("Handler found") + self.log.info("Handler found") xml_object.remove(handler) def _create_new_open_clip(self): self.log.info("Building new openClip") + self.log.debug(">> self.clip_data: {}".format(self.clip_data)) - tmp_xml = ET.parse(self.tmp_file) - - tmp_xml_feeds = tmp_xml.find('tracks/track/feeds') + # clip data comming from MediaInfoFile + tmp_xml_feeds = 
self.clip_data.find('tracks/track/feeds') tmp_xml_feeds.set('currentVersion', self.feed_version_name) for tmp_feed in tmp_xml_feeds: tmp_feed.set('vuid', self.feed_version_name) @@ -778,46 +750,48 @@ class OpenClipSolver: self._clear_handler(tmp_feed) - tmp_xml_versions_obj = tmp_xml.find('versions') + tmp_xml_versions_obj = self.clip_data.find('versions') tmp_xml_versions_obj.set('currentVersion', self.feed_version_name) for xml_new_version in tmp_xml_versions_obj: xml_new_version.set('uid', self.feed_version_name) xml_new_version.set('type', 'version') - xml_data = self._fix_xml_data(tmp_xml) + self._clear_handler(self.clip_data) self.log.info("Adding feed version: {}".format(self.feed_basename)) - self._write_result_xml_to_file(xml_data) - - self.log.info("openClip Updated: {}".format(self.tmp_file)) + self.write_clip_data_to_file(self.out_file, self.clip_data) def _update_open_clip(self): self.log.info("Updating openClip ..") out_xml = ET.parse(self.out_file) - tmp_xml = ET.parse(self.tmp_file) + out_xml = out_xml.getroot() self.log.debug(">> out_xml: {}".format(out_xml)) - self.log.debug(">> tmp_xml: {}".format(tmp_xml)) + self.log.debug(">> self.clip_data: {}".format(self.clip_data)) # Get new feed from tmp file - tmp_xml_feed = tmp_xml.find('tracks/track/feeds/feed') + tmp_xml_feed = self.clip_data.find('tracks/track/feeds/feed') self._clear_handler(tmp_xml_feed) - self._get_time_info_from_origin(out_xml) - if self.out_feed_fps: + # update fps from MediaInfoFile class + if self.fps: tmp_feed_fps_obj = tmp_xml_feed.find( "startTimecode/rate") - tmp_feed_fps_obj.text = self.out_feed_fps - if self.out_feed_nb_ticks: + tmp_feed_fps_obj.text = str(self.fps) + + # update start_frame from MediaInfoFile class + if self.start_frame: tmp_feed_nb_ticks_obj = tmp_xml_feed.find( "startTimecode/nbTicks") - tmp_feed_nb_ticks_obj.text = self.out_feed_nb_ticks - if self.out_feed_drop_mode: + tmp_feed_nb_ticks_obj.text = str(self.start_frame) + + # update drop_mode from 
MediaInfoFile class + if self.drop_mode: tmp_feed_drop_mode_obj = tmp_xml_feed.find( "startTimecode/dropMode") - tmp_feed_drop_mode_obj.text = self.out_feed_drop_mode + tmp_feed_drop_mode_obj.text = str(self.drop_mode) new_path_obj = tmp_xml_feed.find( "spans/span/path") @@ -850,7 +824,7 @@ class OpenClipSolver: "version", {"type": "version", "uid": self.feed_version_name}) out_xml_versions_obj.insert(0, new_version_obj) - xml_data = self._fix_xml_data(out_xml) + self._clear_handler(out_xml) # fist create backup self._create_openclip_backup_file(self.out_file) @@ -858,30 +832,9 @@ class OpenClipSolver: self.log.info("Adding feed version: {}".format( self.feed_version_name)) - self._write_result_xml_to_file(xml_data) + self.write_clip_data_to_file(self.out_file, out_xml) - self.log.info("openClip Updated: {}".format(self.out_file)) - - self._clear_tmp_file() - - def _get_time_info_from_origin(self, xml_data): - try: - for out_track in xml_data.iter('track'): - for out_feed in out_track.iter('feed'): - out_feed_nb_ticks_obj = out_feed.find( - 'startTimecode/nbTicks') - self.out_feed_nb_ticks = out_feed_nb_ticks_obj.text - out_feed_fps_obj = out_feed.find( - 'startTimecode/rate') - self.out_feed_fps = out_feed_fps_obj.text - out_feed_drop_mode_obj = out_feed.find( - 'startTimecode/dropMode') - self.out_feed_drop_mode = out_feed_drop_mode_obj.text - break - else: - continue - except Exception as msg: - self.log.warning(msg) + self.log.debug("OpenClip Updated: {}".format(self.out_file)) def _feed_exists(self, xml_data, path): # loop all available feed paths and check if @@ -892,15 +845,6 @@ class OpenClipSolver: "Not appending file as it already is in .clip file") return True - def _fix_xml_data(self, xml_data): - xml_root = xml_data.getroot() - self._clear_handler(xml_root) - return ET.tostring(xml_root).decode('utf-8') - - def _write_result_xml_to_file(self, xml_data): - with open(self.out_file, "w") as f: - f.write(xml_data) - def _create_openclip_backup_file(self, 
file): bck_file = "{}.bak".format(file) # if backup does not exist @@ -929,6 +873,5 @@ class OpenClipSolver: if feed_clr_obj is not None: feed_clr_obj = ET.Element( "colourSpace", {"type": "string"}) + feed_clr_obj.text = profile_name feed_storage_obj.append(feed_clr_obj) - - feed_clr_obj.text = profile_name diff --git a/openpype/hosts/flame/api/scripts/wiretap_com.py b/openpype/hosts/flame/api/scripts/wiretap_com.py index 54993d34eb..4825ff4386 100644 --- a/openpype/hosts/flame/api/scripts/wiretap_com.py +++ b/openpype/hosts/flame/api/scripts/wiretap_com.py @@ -185,7 +185,9 @@ class WireTapCom(object): exit_code = subprocess.call( project_create_cmd, - cwd=os.path.expanduser('~')) + cwd=os.path.expanduser('~'), + preexec_fn=_subprocess_preexec_fn + ) if exit_code != 0: RuntimeError("Cannot create project in flame db") @@ -254,7 +256,7 @@ class WireTapCom(object): filtered_users = [user for user in used_names if user_name in user] if filtered_users: - # todo: need to find lastly created following regex pattern for + # TODO: need to find lastly created following regex pattern for # date used in name return filtered_users.pop() @@ -448,7 +450,9 @@ class WireTapCom(object): exit_code = subprocess.call( project_colorspace_cmd, - cwd=os.path.expanduser('~')) + cwd=os.path.expanduser('~'), + preexec_fn=_subprocess_preexec_fn + ) if exit_code != 0: RuntimeError("Cannot set colorspace {} on project {}".format( @@ -456,6 +460,15 @@ class WireTapCom(object): )) +def _subprocess_preexec_fn(): + """ Helper function + + Setting permission mask to 0777 + """ + os.setpgrp() + os.umask(0o000) + + if __name__ == "__main__": # get json exchange data json_path = sys.argv[-1] diff --git a/openpype/hosts/flame/otio/flame_export.py b/openpype/hosts/flame/otio/flame_export.py index 8c240fc9d5..4fe05ec1d8 100644 --- a/openpype/hosts/flame/otio/flame_export.py +++ b/openpype/hosts/flame/otio/flame_export.py @@ -11,8 +11,6 @@ from . 
import utils import flame from pprint import pformat -reload(utils) # noqa - log = logging.getLogger(__name__) @@ -260,24 +258,15 @@ def create_otio_markers(otio_item, item): otio_item.markers.append(otio_marker) -def create_otio_reference(clip_data): +def create_otio_reference(clip_data, fps=None): metadata = _get_metadata(clip_data) # get file info for path and start frame frame_start = 0 - fps = CTX.get_fps() + fps = fps or CTX.get_fps() path = clip_data["fpath"] - reel_clip = None - match_reel_clip = [ - clip for clip in CTX.clips - if clip["fpath"] == path - ] - if match_reel_clip: - reel_clip = match_reel_clip.pop() - fps = reel_clip["fps"] - file_name = os.path.basename(path) file_head, extension = os.path.splitext(file_name) @@ -339,13 +328,22 @@ def create_otio_reference(clip_data): def create_otio_clip(clip_data): + from openpype.hosts.flame.api import MediaInfoFile + segment = clip_data["PySegment"] - # create media reference - media_reference = create_otio_reference(clip_data) - # calculate source in - first_frame = utils.get_frame_from_filename(clip_data["fpath"]) or 0 + media_info = MediaInfoFile(clip_data["fpath"]) + media_timecode_start = media_info.start_frame + media_fps = media_info.fps + + # create media reference + media_reference = create_otio_reference(clip_data, media_fps) + + # define first frame + first_frame = media_timecode_start or utils.get_frame_from_filename( + clip_data["fpath"]) or 0 + source_in = int(clip_data["source_in"]) - int(first_frame) # creatae source range @@ -378,38 +376,6 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps): ) -def get_clips_in_reels(project): - output_clips = [] - project_desktop = project.current_workspace.desktop - - for reel_group in project_desktop.reel_groups: - for reel in reel_group.reels: - for clip in reel.clips: - clip_data = { - "PyClip": clip, - "fps": float(str(clip.frame_rate)[:-4]) - } - - attrs = [ - "name", "width", "height", - "ratio", "sample_rate", "bit_depth" - ] - - 
for attr in attrs: - val = getattr(clip, attr) - clip_data[attr] = val - - version = clip.versions[-1] - track = version.tracks[-1] - for segment in track.segments: - segment_data = _get_segment_attributes(segment) - clip_data.update(segment_data) - - output_clips.append(clip_data) - - return output_clips - - def _get_colourspace_policy(): output = {} @@ -493,9 +459,6 @@ def _get_shot_tokens_values(clip, tokens): old_value = None output = {} - if not clip.shot_name: - return output - old_value = clip.shot_name.get_value() for token in tokens: @@ -513,15 +476,21 @@ def _get_shot_tokens_values(clip, tokens): def _get_segment_attributes(segment): - # log.debug(dir(segment)) - if str(segment.name)[1:-1] == "": + log.debug("Segment name|hidden: {}|{}".format( + segment.name.get_value(), segment.hidden + )) + if ( + segment.name.get_value() == "" + or segment.hidden.get_value() + ): return None # Add timeline segment to tree clip_data = { "segment_name": segment.name.get_value(), "segment_comment": segment.comment.get_value(), + "shot_name": segment.shot_name.get_value(), "tape_name": segment.tape_name, "source_name": segment.source_name, "fpath": segment.file_path, @@ -529,9 +498,10 @@ def _get_segment_attributes(segment): } # add all available shot tokens - shot_tokens = _get_shot_tokens_values(segment, [ - "", "", "", "", - ]) + shot_tokens = _get_shot_tokens_values( + segment, + ["", "", "", ""] + ) clip_data.update(shot_tokens) # populate shot source metadata @@ -561,11 +531,6 @@ def create_otio_timeline(sequence): log.info(sequence.attributes) CTX.project = get_current_flame_project() - CTX.clips = get_clips_in_reels(CTX.project) - - log.debug(pformat( - CTX.clips - )) # get current timeline CTX.set_fps( @@ -583,8 +548,13 @@ def create_otio_timeline(sequence): # create otio tracks and clips for ver in sequence.versions: for track in ver.tracks: - if len(track.segments) == 0 and track.hidden: - return None + # avoid all empty tracks + # or hidden tracks + if ( + 
len(track.segments) == 0 + or track.hidden.get_value() + ): + continue # convert track to otio otio_track = create_otio_track( @@ -597,11 +567,7 @@ def create_otio_timeline(sequence): continue all_segments.append(clip_data) - segments_ordered = { - itemindex: clip_data - for itemindex, clip_data in enumerate( - all_segments) - } + segments_ordered = dict(enumerate(all_segments)) log.debug("_ segments_ordered: {}".format( pformat(segments_ordered) )) @@ -612,15 +578,11 @@ def create_otio_timeline(sequence): log.debug("_ itemindex: {}".format(itemindex)) # Add Gap if needed - if itemindex == 0: - # if it is first track item at track then add - # it to previous item - prev_item = segment_data - - else: - # get previous item - prev_item = segments_ordered[itemindex - 1] - + prev_item = ( + segment_data + if itemindex == 0 + else segments_ordered[itemindex - 1] + ) log.debug("_ segment_data: {}".format(segment_data)) # calculate clip frame range difference from each other diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py index 8980f72cb8..e0a7297381 100644 --- a/openpype/hosts/flame/plugins/load/load_clip.py +++ b/openpype/hosts/flame/plugins/load/load_clip.py @@ -22,7 +22,7 @@ class LoadClip(opfapi.ClipLoader): # settings reel_group_name = "OpenPype_Reels" reel_name = "Loaded" - clip_name_template = "{asset}_{subset}_{representation}" + clip_name_template = "{asset}_{subset}_{output}" def load(self, context, name, namespace, options): @@ -39,7 +39,7 @@ class LoadClip(opfapi.ClipLoader): clip_name = self.clip_name_template.format( **context["representation"]["context"]) - # todo: settings in imageio + # TODO: settings in imageio # convert colorspace with ocio to flame mapping # in imageio flame section colorspace = colorspace diff --git a/openpype/hosts/flame/plugins/load/load_clip_batch.py b/openpype/hosts/flame/plugins/load/load_clip_batch.py new file mode 100644 index 0000000000..5de3226035 --- /dev/null +++ 
b/openpype/hosts/flame/plugins/load/load_clip_batch.py @@ -0,0 +1,139 @@ +import os +import flame +from pprint import pformat +import openpype.hosts.flame.api as opfapi + + +class LoadClipBatch(opfapi.ClipLoader): + """Load a subset to timeline as clip + + Place clip to timeline on its asset origin timings collected + during conforming to project + """ + + families = ["render2d", "source", "plate", "render", "review"] + representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"] + + label = "Load as clip to current batch" + order = -10 + icon = "code-fork" + color = "orange" + + # settings + reel_name = "OP_LoadedReel" + clip_name_template = "{asset}_{subset}_{output}" + + def load(self, context, name, namespace, options): + + # get flame objects + self.batch = options.get("batch") or flame.batch + + # load clip to timeline and get main variables + namespace = namespace + version = context['version'] + version_data = version.get("data", {}) + version_name = version.get("name", None) + colorspace = version_data.get("colorspace", None) + + # in case output is not in context replace key to representation + if not context["representation"]["context"].get("output"): + self.clip_name_template.replace("output", "representation") + + clip_name = self.clip_name_template.format( + **context["representation"]["context"]) + + # TODO: settings in imageio + # convert colorspace with ocio to flame mapping + # in imageio flame section + colorspace = colorspace + + # create workfile path + workfile_dir = options.get("workdir") or os.environ["AVALON_WORKDIR"] + openclip_dir = os.path.join( + workfile_dir, clip_name + ) + openclip_path = os.path.join( + openclip_dir, clip_name + ".clip" + ) + if not os.path.exists(openclip_dir): + os.makedirs(openclip_dir) + + # prepare clip data from context ad send it to openClipLoader + loading_context = { + "path": self.fname.replace("\\", "/"), + "colorspace": colorspace, + "version": "v{:0>3}".format(version_name), + "logger": self.log + + } 
+ self.log.debug(pformat( + loading_context + )) + self.log.debug(openclip_path) + + # make openpype clip file + opfapi.OpenClipSolver(openclip_path, loading_context).make() + + # prepare Reel group in actual desktop + opc = self._get_clip( + clip_name, + openclip_path + ) + + # add additional metadata from the version to imprint Avalon knob + add_keys = [ + "frameStart", "frameEnd", "source", "author", + "fps", "handleStart", "handleEnd" + ] + + # move all version data keys to tag data + data_imprint = { + key: version_data.get(key, str(None)) + for key in add_keys + } + # add variables related to version context + data_imprint.update({ + "version": version_name, + "colorspace": colorspace, + "objectName": clip_name + }) + + # TODO: finish the containerisation + # opc_segment = opfapi.get_clip_segment(opc) + + # return opfapi.containerise( + # opc_segment, + # name, namespace, context, + # self.__class__.__name__, + # data_imprint) + + return opc + + def _get_clip(self, name, clip_path): + reel = self._get_reel() + + # with maintained openclip as opc + matching_clip = None + for cl in reel.clips: + if cl.name.get_value() != name: + continue + matching_clip = cl + + if not matching_clip: + created_clips = flame.import_clips(str(clip_path), reel) + return created_clips.pop() + + return matching_clip + + def _get_reel(self): + + matching_reel = [ + rg for rg in self.batch.reels + if rg.name.get_value() == self.reel_name + ] + + return ( + matching_reel.pop() + if matching_reel + else self.batch.create_reel(str(self.reel_name)) + ) diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py index 2482abd9c7..5174f9db48 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -21,24 +21,15 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): audio_track_items = [] - # TODO: 
add to settings # settings - xml_preset_attrs_from_comments = { - "width": "number", - "height": "number", - "pixelRatio": "float", - "resizeType": "string", - "resizeFilter": "string" - } + xml_preset_attrs_from_comments = [] + add_tasks = [] def process(self, context): - project = context.data["flameProject"] - sequence = context.data["flameSequence"] selected_segments = context.data["flameSelectedSegments"] self.log.debug("__ selected_segments: {}".format(selected_segments)) self.otio_timeline = context.data["otioTimeline"] - self.clips_in_reels = opfapi.get_clips_in_reels(project) self.fps = context.data["fps"] # process all sellected @@ -70,18 +61,15 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): # get file path file_path = clip_data["fpath"] - # get source clip - source_clip = self._get_reel_clip(file_path) - first_frame = opfapi.get_frame_from_filename(file_path) or 0 head, tail = self._get_head_tail(clip_data, first_frame) # solve handles length marker_data["handleStart"] = min( - marker_data["handleStart"], head) + marker_data["handleStart"], abs(head)) marker_data["handleEnd"] = min( - marker_data["handleEnd"], tail) + marker_data["handleEnd"], abs(tail)) with_audio = bool(marker_data.pop("audio")) @@ -110,9 +98,12 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): "families": families, "publish": marker_data["publish"], "fps": self.fps, - "flameSourceClip": source_clip, "sourceFirstFrame": int(first_frame), - "path": file_path + "path": file_path, + "flameAddTasks": self.add_tasks, + "tasks": { + task["name"]: {"type": task["type"]} + for task in self.add_tasks} }) # get otio clip data @@ -187,7 +178,10 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): # split to key and value key, value = split.split(":") - for a_name, a_type in self.xml_preset_attrs_from_comments.items(): + for attr_data in self.xml_preset_attrs_from_comments: + a_name = attr_data["name"] + a_type = attr_data["type"] + # exclude all not related 
attributes if a_name.lower() not in key.lower(): continue @@ -247,6 +241,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): head = clip_data.get("segment_head") tail = clip_data.get("segment_tail") + # HACK: it is here to serve for versions bellow 2021.1 if not head: head = int(clip_data["source_in"]) - int(first_frame) if not tail: @@ -257,14 +252,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): ) return head, tail - def _get_reel_clip(self, path): - match_reel_clip = [ - clip for clip in self.clips_in_reels - if clip["fpath"] == path - ] - if match_reel_clip: - return match_reel_clip.pop() - def _get_resolution_to_data(self, data, context): assert data.get("otioClip"), "Missing `otioClip` data" diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py index c6aeae7730..f2ae1f62a9 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py @@ -1,6 +1,7 @@ import pyblish.api -import avalon.api as avalon + import openpype.lib as oplib +from openpype.pipeline import legacy_io import openpype.hosts.flame.api as opfapi from openpype.hosts.flame.otio import flame_export @@ -18,7 +19,7 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin): # main asset_doc = context.data["assetEntity"] - task_name = avalon.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] project = opfapi.get_current_project() sequence = opfapi.get_current_sequence(opfapi.CTX.selection) diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py index 32f6b9508f..fd0ece2590 100644 --- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py +++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py @@ -1,4 +1,5 @@ import os +import re from pprint import pformat from copy import deepcopy @@ 
-6,6 +7,8 @@ import pyblish.api import openpype.api from openpype.hosts.flame import api as opfapi +import flame + class ExtractSubsetResources(openpype.api.Extractor): """ @@ -20,27 +23,31 @@ class ExtractSubsetResources(openpype.api.Extractor): # plugin defaults default_presets = { "thumbnail": { + "active": True, "ext": "jpg", "xml_preset_file": "Jpeg (8-bit).xml", "xml_preset_dir": "", "export_type": "File Sequence", - "ignore_comment_attrs": True, + "parsed_comment_attrs": False, "colorspace_out": "Output - sRGB", "representation_add_range": False, - "representation_tags": ["thumbnail"] + "representation_tags": ["thumbnail"], + "path_regex": ".*" }, "ftrackpreview": { + "active": True, "ext": "mov", "xml_preset_file": "Apple iPad (1920x1080).xml", "xml_preset_dir": "", "export_type": "Movie", - "ignore_comment_attrs": True, + "parsed_comment_attrs": False, "colorspace_out": "Output - Rec.709", "representation_add_range": True, "representation_tags": [ "review", "delete" - ] + ], + "path_regex": ".*" } } keep_original_representation = False @@ -61,9 +68,10 @@ class ExtractSubsetResources(openpype.api.Extractor): # flame objects segment = instance.data["item"] + asset_name = instance.data["asset"] + segment_name = segment.name.get_value() + clip_path = instance.data["path"] sequence_clip = instance.context.data["flameSequence"] - clip_data = instance.data["flameSourceClip"] - clip = clip_data["PyClip"] # segment's parent track name s_track_name = segment.parent.name.get_value() @@ -100,14 +108,54 @@ class ExtractSubsetResources(openpype.api.Extractor): for unique_name, preset_config in export_presets.items(): modify_xml_data = {} + # get activating attributes + activated_preset = preset_config["active"] + filter_path_regex = preset_config.get("filter_path_regex") + + self.log.info( + "Preset `{}` is active `{}` with filter `{}`".format( + unique_name, activated_preset, filter_path_regex + ) + ) + self.log.debug( + "__ clip_path: `{}`".format(clip_path)) + + # 
skip if not activated presete + if not activated_preset: + continue + + # exclude by regex filter if any + if ( + filter_path_regex + and not re.search(filter_path_regex, clip_path) + ): + continue + # get all presets attributes + extension = preset_config["ext"] preset_file = preset_config["xml_preset_file"] preset_dir = preset_config["xml_preset_dir"] export_type = preset_config["export_type"] repre_tags = preset_config["representation_tags"] - ignore_comment_attrs = preset_config["ignore_comment_attrs"] + parsed_comment_attrs = preset_config["parsed_comment_attrs"] color_out = preset_config["colorspace_out"] + self.log.info( + "Processing `{}` as `{}` to `{}` type...".format( + preset_file, export_type, extension + ) + ) + + # get attribures related loading in integrate_batch_group + load_to_batch_group = preset_config.get( + "load_to_batch_group") + batch_group_loader_name = preset_config.get( + "batch_group_loader_name") + + # convert to None if empty string + if batch_group_loader_name == "": + batch_group_loader_name = None + # get frame range with handles for representation range frame_start_handle = frame_start - handle_start source_duration_handles = ( @@ -117,147 +165,157 @@ class ExtractSubsetResources(openpype.api.Extractor): in_mark = (source_start_handles - source_first_frame) + 1 out_mark = in_mark + source_duration_handles - # by default export source clips - exporting_clip = clip - + exporting_clip = None + name_patern_xml = "_{}.".format( + unique_name) if export_type == "Sequence Publish": # change export clip to sequence - exporting_clip = sequence_clip + exporting_clip = flame.duplicate(sequence_clip) - # change in/out marks to timeline in/out - in_mark = clip_in - out_mark = clip_out + # only keep visible layer where instance segment is child + self.hide_others( + exporting_clip, segment_name, s_track_name) - # add xml tags modifications - modify_xml_data.update({ - "exportHandles": True, - "nbHandles": handles, - "startFrame": frame_start - 
}) + # change name patern + name_patern_xml = ( + "__{}.").format( + unique_name) + else: + exporting_clip = self.import_clip(clip_path) + exporting_clip.name.set_value("{}_{}".format( + asset_name, segment_name)) - if not ignore_comment_attrs: - # add any xml overrides collected form segment.comment - modify_xml_data.update(instance.data["xml_overrides"]) + # change in/out marks to timeline in/out + in_mark = clip_in + out_mark = clip_out + + # add xml tags modifications + modify_xml_data.update({ + "exportHandles": True, + "nbHandles": handles, + "startFrame": frame_start, + "namePattern": name_patern_xml + }) + + if parsed_comment_attrs: + # add any xml overrides collected form segment.comment + modify_xml_data.update(instance.data["xml_overrides"]) self.log.debug("__ modify_xml_data: {}".format(pformat( modify_xml_data ))) - # with maintained duplication loop all presets - with opfapi.maintained_object_duplication( - exporting_clip) as duplclip: - kwargs = {} + export_kwargs = {} + # validate xml preset file is filled + if preset_file == "": + raise ValueError( + ("Check Settings for {} preset: " + "`XML preset file` is not filled").format( + unique_name) + ) - if export_type == "Sequence Publish": - # only keep visible layer where instance segment is child - self.hide_other_tracks(duplclip, s_track_name) + # resolve xml preset dir if not filled + if preset_dir == "": + preset_dir = opfapi.get_preset_path_by_xml_name( + preset_file) - # validate xml preset file is filled - if preset_file == "": + if not preset_dir: raise ValueError( ("Check Settings for {} preset: " - "`XML preset file` is not filled").format( - unique_name) + "`XML preset file` {} is not found").format( + unique_name, preset_file) ) - # resolve xml preset dir if not filled - if preset_dir == "": - preset_dir = opfapi.get_preset_path_by_xml_name( - preset_file) + # create preset path + preset_orig_xml_path = str(os.path.join( + preset_dir, preset_file + )) - if not preset_dir: - raise 
ValueError( - ("Check Settings for {} preset: " - "`XML preset file` {} is not found").format( - unique_name, preset_file) - ) + preset_path = opfapi.modify_preset_file( + preset_orig_xml_path, staging_dir, modify_xml_data) - # create preset path - preset_orig_xml_path = str(os.path.join( - preset_dir, preset_file - )) + # define kwargs based on preset type + if "thumbnail" in unique_name: + export_kwargs["thumb_frame_number"] = int(in_mark + ( + source_duration_handles / 2)) + else: + export_kwargs.update({ + "in_mark": in_mark, + "out_mark": out_mark + }) - preset_path = opfapi.modify_preset_file( - preset_orig_xml_path, staging_dir, modify_xml_data) + # get and make export dir paths + export_dir_path = str(os.path.join( + staging_dir, unique_name + )) + os.makedirs(export_dir_path) - # define kwargs based on preset type - if "thumbnail" in unique_name: - kwargs["thumb_frame_number"] = in_mark + ( - source_duration_handles / 2) - else: - kwargs.update({ - "in_mark": in_mark, - "out_mark": out_mark - }) + # export + opfapi.export_clip( + export_dir_path, exporting_clip, preset_path, **export_kwargs) - # get and make export dir paths - export_dir_path = str(os.path.join( - staging_dir, unique_name - )) - os.makedirs(export_dir_path) + # create representation data + representation_data = { + "name": unique_name, + "outputName": unique_name, + "ext": extension, + "stagingDir": export_dir_path, + "tags": repre_tags, + "data": { + "colorspace": color_out + }, + "load_to_batch_group": load_to_batch_group, + "batch_group_loader_name": batch_group_loader_name + } - # export - opfapi.export_clip( - export_dir_path, duplclip, preset_path, **kwargs) + # collect all available content of export dir + files = os.listdir(export_dir_path) - extension = preset_config["ext"] + # make sure no nested folders inside + n_stage_dir, n_files = self._unfolds_nested_folders( + export_dir_path, files, extension) - # create representation data - representation_data = { - "name": unique_name, 
- "outputName": unique_name, - "ext": extension, - "stagingDir": export_dir_path, - "tags": repre_tags, - "data": { - "colorspace": color_out - } - } + # fix representation in case of nested folders + if n_stage_dir: + representation_data["stagingDir"] = n_stage_dir + files = n_files - # collect all available content of export dir - files = os.listdir(export_dir_path) + # add files to represetation but add + # imagesequence as list + if ( + # first check if path in files is not mov extension + [ + f for f in files + if os.path.splitext(f)[-1] == ".mov" + ] + # then try if thumbnail is not in unique name + or unique_name == "thumbnail" + ): + representation_data["files"] = files.pop() + else: + representation_data["files"] = files - # make sure no nested folders inside - n_stage_dir, n_files = self._unfolds_nested_folders( - export_dir_path, files, extension) + # add frame range + if preset_config["representation_add_range"]: + representation_data.update({ + "frameStart": frame_start_handle, + "frameEnd": ( + frame_start_handle + source_duration_handles), + "fps": instance.data["fps"] + }) - # fix representation in case of nested folders - if n_stage_dir: - representation_data["stagingDir"] = n_stage_dir - files = n_files + instance.data["representations"].append(representation_data) - # add files to represetation but add - # imagesequence as list - if ( - # first check if path in files is not mov extension - [ - f for f in files - if os.path.splitext(f)[-1] == ".mov" - ] - # then try if thumbnail is not in unique name - or unique_name == "thumbnail" - ): - representation_data["files"] = files.pop() - else: - representation_data["files"] = files + # add review family if found in tags + if "review" in repre_tags: + instance.data["families"].append("review") - # add frame range - if preset_config["representation_add_range"]: - representation_data.update({ - "frameStart": frame_start_handle, - "frameEnd": ( - frame_start_handle + source_duration_handles), - "fps": 
instance.data["fps"] - }) + self.log.info("Added representation: {}".format( + representation_data)) - instance.data["representations"].append(representation_data) - - # add review family if found in tags - if "review" in repre_tags: - instance.data["families"].append("review") - - self.log.info("Added representation: {}".format( - representation_data)) + if export_type == "Sequence Publish": + # at the end remove the duplicated clip + flame.delete(exporting_clip) self.log.debug("All representations: {}".format( pformat(instance.data["representations"]))) @@ -322,18 +380,41 @@ class ExtractSubsetResources(openpype.api.Extractor): return new_stage_dir, new_files_list - def hide_other_tracks(self, sequence_clip, track_name): + def hide_others(self, sequence_clip, segment_name, track_name): """Helper method used only if sequence clip is used Args: sequence_clip (flame.Clip): sequence clip + segment_name (str): segment name track_name (str): track name """ # create otio tracks and clips for ver in sequence_clip.versions: for track in ver.tracks: - if len(track.segments) == 0 and track.hidden: + if len(track.segments) == 0 and track.hidden.get_value(): continue + # hide tracks which are not parent track if track.name.get_value() != track_name: track.hidden = True + continue + + # hidde all other segments + for segment in track.segments: + if segment.name.get_value() != segment_name: + segment.hidden = True + + def import_clip(self, path): + """ + Import clip from path + """ + clips = flame.import_clips(path) + self.log.info("Clips [{}] imported from `{}`".format(clips, path)) + if not clips: + self.log.warning("Path `{}` is not having any clips".format(path)) + return None + elif len(clips) > 1: + self.log.warning( + "Path `{}` is containing more that one clip".format(path) + ) + return clips[0] diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py new file mode 100644 index 
0000000000..da9553cc2a --- /dev/null +++ b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py @@ -0,0 +1,328 @@ +import os +import copy +from collections import OrderedDict +from pprint import pformat +import pyblish +from openpype.lib import get_workdir +import openpype.hosts.flame.api as opfapi +import openpype.pipeline as op_pipeline + + +class IntegrateBatchGroup(pyblish.api.InstancePlugin): + """Integrate published shot to batch group""" + + order = pyblish.api.IntegratorOrder + 0.45 + label = "Integrate Batch Groups" + hosts = ["flame"] + families = ["clip"] + + # settings + default_loader = "LoadClip" + + def process(self, instance): + add_tasks = instance.data["flameAddTasks"] + + # iterate all tasks from settings + for task_data in add_tasks: + # exclude batch group + if not task_data["create_batch_group"]: + continue + + # create or get already created batch group + bgroup = self._get_batch_group(instance, task_data) + + # add batch group content + all_batch_nodes = self._add_nodes_to_batch_with_links( + instance, task_data, bgroup) + + for name, node in all_batch_nodes.items(): + self.log.debug("name: {}, dir: {}".format( + name, dir(node) + )) + self.log.debug("__ node.attributes: {}".format( + node.attributes + )) + + # load plate to batch group + self.log.info("Loading subset `{}` into batch `{}`".format( + instance.data["subset"], bgroup.name.get_value() + )) + self._load_clip_to_context(instance, bgroup) + + def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group): + # get write file node properties > OrederDict because order does mater + write_pref_data = self._get_write_prefs(instance, task_data) + + batch_nodes = [ + { + "type": "comp", + "properties": {}, + "id": "comp_node01" + }, + { + "type": "Write File", + "properties": write_pref_data, + "id": "write_file_node01" + } + ] + batch_links = [ + { + "from_node": { + "id": "comp_node01", + "connector": "Result" + }, + "to_node": { + "id": "write_file_node01", + 
"connector": "Front" + } + } + ] + + # add nodes into batch group + return opfapi.create_batch_group_conent( + batch_nodes, batch_links, batch_group) + + def _load_clip_to_context(self, instance, bgroup): + # get all loaders for host + loaders_by_name = { + loader.__name__: loader + for loader in op_pipeline.discover_loader_plugins() + } + + # get all published representations + published_representations = instance.data["published_representations"] + repres_db_id_by_name = { + repre_info["representation"]["name"]: repre_id + for repre_id, repre_info in published_representations.items() + } + + # get all loadable representations + repres_by_name = { + repre["name"]: repre for repre in instance.data["representations"] + } + + # get repre_id for the loadable representations + loader_name_by_repre_id = { + repres_db_id_by_name[repr_name]: { + "loader": repr_data["batch_group_loader_name"], + # add repre data for exception logging + "_repre_data": repr_data + } + for repr_name, repr_data in repres_by_name.items() + if repr_data.get("load_to_batch_group") + } + + self.log.debug("__ loader_name_by_repre_id: {}".format(pformat( + loader_name_by_repre_id))) + + # get representation context from the repre_id + repre_contexts = op_pipeline.load.get_repres_contexts( + loader_name_by_repre_id.keys()) + + self.log.debug("__ repre_contexts: {}".format(pformat( + repre_contexts))) + + # loop all returned repres from repre_context dict + for repre_id, repre_context in repre_contexts.items(): + self.log.debug("__ repre_id: {}".format(repre_id)) + # get loader name by representation id + loader_name = ( + loader_name_by_repre_id[repre_id]["loader"] + # if nothing was added to settings fallback to default + or self.default_loader + ) + + # get loader plugin + loader_plugin = loaders_by_name.get(loader_name) + if loader_plugin: + # load to flame by representation context + try: + op_pipeline.load.load_with_repre_context( + loader_plugin, repre_context, **{ + "data": { + "workdir": 
self.task_workdir, + "batch": bgroup + } + }) + except op_pipeline.load.IncompatibleLoaderError as msg: + self.log.error( + "Check allowed representations for Loader `{}` " + "in settings > error: {}".format( + loader_plugin.__name__, msg)) + self.log.error( + "Representaton context >>{}<< is not compatible " + "with loader `{}`".format( + pformat(repre_context), loader_plugin.__name__ + ) + ) + else: + self.log.warning( + "Something got wrong and there is not Loader found for " + "following data: {}".format( + pformat(loader_name_by_repre_id)) + ) + + def _get_batch_group(self, instance, task_data): + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + frame_duration = (frame_end - frame_start) + 1 + asset_name = instance.data["asset"] + + task_name = task_data["name"] + batchgroup_name = "{}_{}".format(asset_name, task_name) + + batch_data = { + "shematic_reels": [ + "OP_LoadedReel" + ], + "handleStart": handle_start, + "handleEnd": handle_end + } + self.log.debug( + "__ batch_data: {}".format(pformat(batch_data))) + + # check if the batch group already exists + bgroup = opfapi.get_batch_group_from_desktop(batchgroup_name) + + if not bgroup: + self.log.info( + "Creating new batch group: {}".format(batchgroup_name)) + # create batch with utils + bgroup = opfapi.create_batch_group( + batchgroup_name, + frame_start, + frame_duration, + **batch_data + ) + + else: + self.log.info( + "Updating batch group: {}".format(batchgroup_name)) + # update already created batch group + bgroup = opfapi.create_batch_group( + batchgroup_name, + frame_start, + frame_duration, + update_batch_group=bgroup, + **batch_data + ) + + return bgroup + + def _get_anamoty_data_with_current_task(self, instance, task_data): + anatomy_data = copy.deepcopy(instance.data["anatomyData"]) + task_name = task_data["name"] + task_type = task_data["type"] + anatomy_obj = 
instance.context.data["anatomy"] + + # update task data in anatomy data + project_task_types = anatomy_obj["tasks"] + task_code = project_task_types.get(task_type, {}).get("short_name") + anatomy_data.update({ + "task": { + "name": task_name, + "type": task_type, + "short": task_code + } + }) + return anatomy_data + + def _get_write_prefs(self, instance, task_data): + # update task in anatomy data + anatomy_data = self._get_anamoty_data_with_current_task( + instance, task_data) + + self.task_workdir = self._get_shot_task_dir_path( + instance, task_data) + self.log.debug("__ task_workdir: {}".format( + self.task_workdir)) + + # TODO: this might be done with template in settings + render_dir_path = os.path.join( + self.task_workdir, "render", "flame") + + if not os.path.exists(render_dir_path): + os.makedirs(render_dir_path, mode=0o777) + + # TODO: add most of these to `imageio/flame/batch/write_node` + name = "{project[code]}_{asset}_{task[name]}".format( + **anatomy_data + ) + + # The path attribute where the rendered clip is exported + # /path/to/file.[0001-0010].exr + media_path = render_dir_path + # name of file represented by tokens + media_path_pattern = ( + "_v/_v.") + # The Create Open Clip attribute of the Write File node. \ + # Determines if an Open Clip is created by the Write File node. + create_clip = True + # The Include Setup attribute of the Write File node. + # Determines if a Batch Setup file is created by the Write File node. + include_setup = True + # The path attribute where the Open Clip file is exported by + # the Write File node. + create_clip_path = "" + # The path attribute where the Batch setup file + # is exported by the Write File node. + include_setup_path = "./_v" + # The file type for the files written by the Write File node. + # Setting this attribute also overwrites format_extension, + # bit_depth and compress_mode to match the defaults for + # this file type. 
+ file_type = "OpenEXR" + # The file extension for the files written by the Write File node. + # This attribute resets to match file_type whenever file_type + # is set. If you require a specific extension, you must + # set format_extension after setting file_type. + format_extension = "exr" + # The bit depth for the files written by the Write File node. + # This attribute resets to match file_type whenever file_type is set. + bit_depth = "16" + # The compressing attribute for the files exported by the Write + # File node. Only relevant when file_type in 'OpenEXR', 'Sgi', 'Tiff' + compress = True + # The compression format attribute for the specific File Types + # export by the Write File node. You must set compress_mode + # after setting file_type. + compress_mode = "DWAB" + # The frame index mode attribute of the Write File node. + # Value range: `Use Timecode` or `Use Start Frame` + frame_index_mode = "Use Start Frame" + frame_padding = 6 + # The versioning mode of the Open Clip exported by the Write File node. + # Only available if create_clip = True. 
+ version_mode = "Follow Iteration" + version_name = "v" + version_padding = 3 + + # need to make sure the order of keys is correct + return OrderedDict(( + ("name", name), + ("media_path", media_path), + ("media_path_pattern", media_path_pattern), + ("create_clip", create_clip), + ("include_setup", include_setup), + ("create_clip_path", create_clip_path), + ("include_setup_path", include_setup_path), + ("file_type", file_type), + ("format_extension", format_extension), + ("bit_depth", bit_depth), + ("compress", compress), + ("compress_mode", compress_mode), + ("frame_index_mode", frame_index_mode), + ("frame_padding", frame_padding), + ("version_mode", version_mode), + ("version_name", version_name), + ("version_padding", version_padding) + )) + + def _get_shot_task_dir_path(self, instance, task_data): + project_doc = instance.data["projectEntity"] + asset_entity = instance.data["assetEntity"] + + return get_workdir( + project_doc, asset_entity, task_data["name"], "flame") diff --git a/openpype/hosts/flame/plugins/publish/validate_source_clip.py b/openpype/hosts/flame/plugins/publish/validate_source_clip.py deleted file mode 100644 index 9ff015f628..0000000000 --- a/openpype/hosts/flame/plugins/publish/validate_source_clip.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyblish - - -@pyblish.api.log -class ValidateSourceClip(pyblish.api.InstancePlugin): - """Validate instance is not having empty `flameSourceClip`""" - - order = pyblish.api.ValidatorOrder - label = "Validate Source Clip" - hosts = ["flame"] - families = ["clip"] - - def process(self, instance): - flame_source_clip = instance.data["flameSourceClip"] - - self.log.debug("_ flame_source_clip: {}".format(flame_source_clip)) - - if flame_source_clip is None: - raise AttributeError(( - "Timeline segment `{}` is not having " - "relative clip in reels. 
Please make sure " - "you push `Save Sources` button in Conform Tab").format( - instance.data["asset"] - )) diff --git a/openpype/hosts/flame/startup/openpype_in_flame.py b/openpype/hosts/flame/startup/openpype_in_flame.py index 931c5a1b79..f2ac23b19e 100644 --- a/openpype/hosts/flame/startup/openpype_in_flame.py +++ b/openpype/hosts/flame/startup/openpype_in_flame.py @@ -3,18 +3,19 @@ import sys from Qt import QtWidgets from pprint import pformat import atexit -import openpype -import avalon + import openpype.hosts.flame.api as opfapi +from openpype.pipeline import ( + install_host, + registered_host, +) def openpype_install(): """Registering OpenPype in context """ - openpype.install() - avalon.api.install(opfapi) - print("Avalon registered hosts: {}".format( - avalon.api.registered_host())) + install_host(opfapi) + print("Registered host: {}".format(registered_host())) # Exception handler diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py index f7a2360bfa..29f3a3a3eb 100644 --- a/openpype/hosts/fusion/api/lib.py +++ b/openpype/hosts/fusion/api/lib.py @@ -6,8 +6,10 @@ import contextlib from bson.objectid import ObjectId from Qt import QtGui -from avalon import io -from openpype.pipeline import switch_container +from openpype.pipeline import ( + switch_container, + legacy_io, +) from .pipeline import get_current_comp, comp_lock_and_undo_chunk self = sys.modules[__name__] @@ -94,8 +96,10 @@ def switch_item(container, # so we can use the original name from those. 
if any(not x for x in [asset_name, subset_name, representation_name]): _id = ObjectId(container["representation"]) - representation = io.find_one({"type": "representation", "_id": _id}) - version, subset, asset, project = io.parenthood(representation) + representation = legacy_io.find_one({ + "type": "representation", "_id": _id + }) + version, subset, asset, project = legacy_io.parenthood(representation) if asset_name is None: asset_name = asset["name"] @@ -107,14 +111,14 @@ def switch_item(container, representation_name = representation["name"] # Find the new one - asset = io.find_one({ + asset = legacy_io.find_one({ "name": asset_name, "type": "asset" }) assert asset, ("Could not find asset in the database with the name " "'%s'" % asset_name) - subset = io.find_one({ + subset = legacy_io.find_one({ "name": subset_name, "type": "subset", "parent": asset["_id"] @@ -122,7 +126,7 @@ def switch_item(container, assert subset, ("Could not find subset in the database with the name " "'%s'" % subset_name) - version = io.find_one( + version = legacy_io.find_one( { "type": "version", "parent": subset["_id"] @@ -134,7 +138,7 @@ def switch_item(container, asset_name, subset_name ) - representation = io.find_one({ + representation = legacy_io.find_one({ "name": representation_name, "type": "representation", "parent": version["_id"]} diff --git a/openpype/hosts/fusion/api/pipeline.py b/openpype/hosts/fusion/api/pipeline.py index 0867b464d5..54002f9f51 100644 --- a/openpype/hosts/fusion/api/pipeline.py +++ b/openpype/hosts/fusion/api/pipeline.py @@ -45,7 +45,8 @@ def install(): This is where you install menus and register families, data and loaders into fusion. - It is called automatically when installing via `api.install(avalon.fusion)` + It is called automatically when installing via + `openpype.pipeline.install_host(openpype.hosts.fusion.api)` See the Maya equivalent for inspiration on how to implement this. 
diff --git a/openpype/hosts/fusion/plugins/load/actions.py b/openpype/hosts/fusion/plugins/load/actions.py index bc59cec77f..819c9272fd 100644 --- a/openpype/hosts/fusion/plugins/load/actions.py +++ b/openpype/hosts/fusion/plugins/load/actions.py @@ -6,7 +6,7 @@ from openpype.pipeline import load class FusionSetFrameRangeLoader(load.LoaderPlugin): - """Specific loader of Alembic for the avalon.animation family""" + """Set frame range excluding pre- and post-handles""" families = ["animation", "camera", @@ -40,7 +40,7 @@ class FusionSetFrameRangeLoader(load.LoaderPlugin): class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin): - """Specific loader of Alembic for the avalon.animation family""" + """Set frame range including pre- and post-handles""" families = ["animation", "camera", diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py index 075820de35..b860abd88b 100644 --- a/openpype/hosts/fusion/plugins/load/load_sequence.py +++ b/openpype/hosts/fusion/plugins/load/load_sequence.py @@ -1,10 +1,9 @@ import os import contextlib -from avalon import io - from openpype.pipeline import ( load, + legacy_io, get_representation_path, ) from openpype.hosts.fusion.api import ( @@ -212,8 +211,10 @@ class FusionLoadSequence(load.LoaderPlugin): path = self._get_first_image(root) # Get start frame from version data - version = io.find_one({"type": "version", - "_id": representation["parent"]}) + version = legacy_io.find_one({ + "type": "version", + "_id": representation["parent"] + }) start = version["data"].get("frameStart") if start is None: self.log.warning("Missing start frame for updated version" diff --git a/openpype/hosts/fusion/plugins/publish/submit_deadline.py b/openpype/hosts/fusion/plugins/publish/submit_deadline.py index 9da99dd9e2..8570c759bc 100644 --- a/openpype/hosts/fusion/plugins/publish/submit_deadline.py +++ b/openpype/hosts/fusion/plugins/publish/submit_deadline.py @@ -4,10 +4,10 @@ 
import getpass import requests -from avalon import api - import pyblish.api +from openpype.pipeline import legacy_io + class FusionSubmitDeadline(pyblish.api.InstancePlugin): """Submit current Comp to Deadline @@ -133,7 +133,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin): "FUSION9_MasterPrefs" ] environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) payload["JobInfo"].update({ "EnvironmentKeyValue%d" % index: "{key}={value}".format( @@ -146,7 +146,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin): self.log.info(json.dumps(payload, indent=4, sort_keys=True)) # E.g. http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(DEADLINE_REST_URL) + url = "{}/api/jobs".format(deadline_url) response = requests.post(url, json=payload) if not response.ok: raise Exception(response.text) diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py index ca7efb9136..704f420796 100644 --- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py +++ b/openpype/hosts/fusion/scripts/fusion_switch_shot.py @@ -4,9 +4,11 @@ import sys import logging # Pipeline imports -import avalon.api -from avalon import io - +from openpype.pipeline import ( + legacy_io, + install_host, + registered_host, +) from openpype.lib import version_up from openpype.hosts.fusion import api from openpype.hosts.fusion.api import lib @@ -163,7 +165,7 @@ def update_frame_range(comp, representations): """ version_ids = [r["parent"] for r in representations] - versions = io.find({"type": "version", "_id": {"$in": version_ids}}) + versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}}) versions = list(versions) versions = [v for v in versions @@ -201,12 +203,11 @@ def switch(asset_name, filepath=None, new=True): # Assert asset name exists # It is better to do this here then to wait till switch_shot does it - asset = 
io.find_one({"type": "asset", "name": asset_name}) + asset = legacy_io.find_one({"type": "asset", "name": asset_name}) assert asset, "Could not find '%s' in the database" % asset_name # Get current project - self._project = io.find_one({"type": "project", - "name": avalon.api.Session["AVALON_PROJECT"]}) + self._project = legacy_io.find_one({"type": "project"}) # Go to comp if not filepath: @@ -218,7 +219,7 @@ def switch(asset_name, filepath=None, new=True): assert current_comp is not None, ( "Fusion could not load '{}'").format(filepath) - host = avalon.api.registered_host() + host = registered_host() containers = list(host.ls()) assert containers, "Nothing to update" @@ -237,7 +238,7 @@ def switch(asset_name, filepath=None, new=True): current_comp.Print(message) # Build the session to switch to - switch_to_session = avalon.api.Session.copy() + switch_to_session = legacy_io.Session.copy() switch_to_session["AVALON_ASSET"] = asset['name'] if new: @@ -279,7 +280,7 @@ if __name__ == '__main__': args, unknown = parser.parse_args() - avalon.api.install(api) + install_host(api) switch(args.asset_name, args.file_path) sys.exit(0) diff --git a/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py b/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py index 4b5e8f91a0..de8fc4b3b4 100644 --- a/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py +++ b/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py @@ -1,24 +1,23 @@ import os import sys -import openpype from openpype.api import Logger +from openpype.pipeline import ( + install_host, + registered_host, +) log = Logger().get_logger(__name__) def main(env): - import avalon.api from openpype.hosts.fusion import api from openpype.hosts.fusion.api import menu - # Registers pype's Global pyblish plugins - openpype.install() - # activate resolve from pype - avalon.api.install(api) + install_host(api) - log.info(f"Avalon registered hosts: {avalon.api.registered_host()}") + log.info(f"Registered host: 
{registered_host()}") menu.launch_openpype_menu() diff --git a/openpype/hosts/fusion/utility_scripts/switch_ui.py b/openpype/hosts/fusion/utility_scripts/switch_ui.py index d9eeae25ea..70eb3d0a19 100644 --- a/openpype/hosts/fusion/utility_scripts/switch_ui.py +++ b/openpype/hosts/fusion/utility_scripts/switch_ui.py @@ -1,14 +1,17 @@ import os +import sys import glob import logging from Qt import QtWidgets, QtCore -import avalon.api -from avalon import io import qtawesome as qta from openpype import style +from openpype.pipeline import ( + install_host, + legacy_io, +) from openpype.hosts.fusion import api from openpype.lib.avalon_context import get_workdir_from_session @@ -163,7 +166,7 @@ class App(QtWidgets.QWidget): return items def collect_assets(self): - return list(io.find({"type": "asset"}, {"name": True})) + return list(legacy_io.find({"type": "asset"}, {"name": True})) def populate_comp_box(self, files): """Ensure we display the filename only but the path is stored as well @@ -181,8 +184,7 @@ class App(QtWidgets.QWidget): if __name__ == '__main__': - import sys - avalon.api.install(api) + install_host(api) app = QtWidgets.QApplication(sys.argv) window = App() diff --git a/openpype/hosts/harmony/api/README.md b/openpype/hosts/harmony/api/README.md index e8d354e1e6..dd45eb14dd 100644 --- a/openpype/hosts/harmony/api/README.md +++ b/openpype/hosts/harmony/api/README.md @@ -419,7 +419,6 @@ class ExtractImage(pyblish.api.InstancePlugin): ```python import os -from avalon import api, io import openpype.hosts.harmony.api as harmony signature = str(uuid4()).replace("-", "_") @@ -611,7 +610,7 @@ class ImageSequenceLoader(load.LoaderPlugin): def update(self, container, representation): node = container.pop("node") - version = io.find_one({"_id": representation["parent"]}) + version = legacy_io.find_one({"_id": representation["parent"]}) files = [] for f in version["data"]["files"]: files.append( diff --git a/openpype/hosts/harmony/api/lib.py 
b/openpype/hosts/harmony/api/lib.py index 66eeac1e3a..e5e7ad1b7e 100644 --- a/openpype/hosts/harmony/api/lib.py +++ b/openpype/hosts/harmony/api/lib.py @@ -183,10 +183,10 @@ def launch(application_path, *args): application_path (str): Path to Harmony. """ - from avalon import api + from openpype.pipeline import install_host from openpype.hosts.harmony import api as harmony - api.install(harmony) + install_host(harmony) ProcessContext.port = random.randrange(49152, 65535) os.environ["AVALON_HARMONY_PORT"] = str(ProcessContext.port) @@ -463,7 +463,7 @@ def imprint(node_id, data, remove=False): remove (bool): Removes the data from the scene. Example: - >>> from avalon.harmony import lib + >>> from openpype.hosts.harmony.api import lib >>> node = "Top/Display" >>> data = {"str": "someting", "int": 1, "float": 0.32, "bool": True} >>> lib.imprint(layer, data) diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index 88f11dd16f..b953d0e984 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -5,11 +5,10 @@ import logging from bson.objectid import ObjectId import pyblish.api -from avalon import io - from openpype import lib from openpype.lib import register_event_callback from openpype.pipeline import ( + legacy_io, register_loader_plugin_path, register_creator_plugin_path, deregister_loader_plugin_path, @@ -111,7 +110,7 @@ def check_inventory(): outdated_containers = [] for container in ls(): representation = container['representation'] - representation_doc = io.find_one( + representation_doc = legacy_io.find_one( { "_id": ObjectId(representation), "type": "representation" diff --git a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py index 35b123f97d..3e9e680efd 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py @@ 
-3,13 +3,13 @@ from pathlib import Path import attr -from avalon import api -from openpype.lib import get_formatted_current_time -import openpype.lib.abstract_collect_render -import openpype.hosts.harmony.api as harmony -from openpype.lib.abstract_collect_render import RenderInstance import openpype.lib +import openpype.lib.abstract_collect_render +from openpype.lib.abstract_collect_render import RenderInstance +from openpype.lib import get_formatted_current_time +from openpype.pipeline import legacy_io +import openpype.hosts.harmony.api as harmony @attr.s @@ -143,7 +143,8 @@ class CollectFarmRender(openpype.lib.abstract_collect_render. source=context.data["currentFile"], label=node.split("/")[1], subset=subset_name, - asset=api.Session["AVALON_ASSET"], + asset=legacy_io.Session["AVALON_ASSET"], + task=task_name, attachTo=False, setMembers=[node], publish=info[4], diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index df3b24ff2c..2a4cd03b76 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -12,8 +12,7 @@ import hiero from Qt import QtWidgets from bson.objectid import ObjectId -import avalon.api as avalon -import avalon.io +from openpype.pipeline import legacy_io from openpype.api import (Logger, Anatomy, get_anatomy_settings) from . 
import tags @@ -38,8 +37,6 @@ self.pype_tag_name = "openpypeData" self.default_sequence_name = "openpypeSequence" self.default_bin_name = "openpypeBin" -AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") - def flatten(_list): for item in _list: @@ -49,6 +46,7 @@ def flatten(_list): else: yield item + def get_current_project(remove_untitled=False): projects = flatten(hiero.core.projects()) if not remove_untitled: @@ -384,7 +382,7 @@ def get_publish_attribute(tag): def sync_avalon_data_to_workfile(): # import session to get project dir - project_name = avalon.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] anatomy = Anatomy(project_name) work_template = anatomy.templates["work"]["path"] @@ -409,7 +407,7 @@ def sync_avalon_data_to_workfile(): project.setProjectRoot(active_project_root) # get project data from avalon db - project_doc = avalon.io.find_one({"type": "project"}) + project_doc = legacy_io.find_one({"type": "project"}) project_data = project_doc["data"] log.debug("project_data: {}".format(project_data)) @@ -555,10 +553,10 @@ class PublishAction(QtWidgets.QAction): # # ''' # import hiero.core -# from avalon.nuke import imprint -# from pype.hosts.nuke import ( -# lib as nklib -# ) +# from openpype.hosts.nuke.api.lib import ( +# BuildWorkfile, +# imprint +# ) # # # check if the file exists if does then Raise "File exists!" # if os.path.exists(filepath): @@ -585,8 +583,7 @@ class PublishAction(QtWidgets.QAction): # # nuke_script.addNode(root_node) # -# # here to call pype.hosts.nuke.lib.BuildWorkfile -# script_builder = nklib.BuildWorkfile( +# script_builder = BuildWorkfile( # root_node=root_node, # root_path=root_path, # nodes=nuke_script.getNodes(), @@ -995,7 +992,6 @@ def check_inventory_versions(): it to red. """ from . 
import parse_container - from avalon import io # presets clip_color_last = "green" @@ -1007,19 +1003,19 @@ def check_inventory_versions(): if container: # get representation from io - representation = io.find_one({ + representation = legacy_io.find_one({ "type": "representation", "_id": ObjectId(container["representation"]) }) # Get start frame from version data - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py index de20b86f30..e262abec00 100644 --- a/openpype/hosts/hiero/api/menu.py +++ b/openpype/hosts/hiero/api/menu.py @@ -1,14 +1,16 @@ import os import sys + import hiero.core -from openpype.api import Logger -from openpype.tools.utils import host_tools -from avalon.api import Session from hiero.ui import findMenuAction +from openpype.api import Logger +from openpype.pipeline import legacy_io +from openpype.tools.utils import host_tools + from . 
import tags -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) self = sys.modules[__name__] self._change_context_menu = None @@ -24,8 +26,10 @@ def update_menu_task_label(): log.warning("Can't find menuItem: {}".format(object_name)) return - label = "{}, {}".format(Session["AVALON_ASSET"], - Session["AVALON_TASK"]) + label = "{}, {}".format( + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"] + ) menu = found_menu.menu() self._change_context_menu = label @@ -51,7 +55,8 @@ def menu_install(): menu_name = os.environ['AVALON_LABEL'] context_label = "{0}, {1}".format( - Session["AVALON_ASSET"], Session["AVALON_TASK"] + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"] ) self._change_context_menu = context_label diff --git a/openpype/hosts/hiero/api/otio/hiero_export.py b/openpype/hosts/hiero/api/otio/hiero_export.py index 1e4088d9c0..64fb81aed4 100644 --- a/openpype/hosts/hiero/api/otio/hiero_export.py +++ b/openpype/hosts/hiero/api/otio/hiero_export.py @@ -151,7 +151,7 @@ def create_otio_reference(clip): padding = media_source.filenamePadding() file_head = media_source.filenameHead() is_sequence = not media_source.singleFile() - frame_duration = media_source.duration() + frame_duration = media_source.duration() - 1 fps = utils.get_rate(clip) or self.project_fps extension = os.path.splitext(path)[-1] diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index b334102129..8025ebff05 100644 --- a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -5,10 +5,10 @@ import os import contextlib from collections import OrderedDict -from avalon import schema from pyblish import api as pyblish from openpype.api import Logger from openpype.pipeline import ( + schema, register_creator_plugin_path, register_loader_plugin_path, deregister_creator_plugin_path, @@ -20,8 +20,6 @@ from . 
import lib, menu, events log = Logger().get_logger(__name__) -AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") - # plugin paths API_DIR = os.path.dirname(os.path.abspath(__file__)) HOST_DIR = os.path.dirname(API_DIR) @@ -34,14 +32,7 @@ AVALON_CONTAINERS = ":AVALON_CONTAINERS" def install(): - """ - Installing Hiero integration for avalon - - Args: - config (obj): avalon config module `pype` in our case, it is not - used but required by avalon.api.install() - - """ + """Installing Hiero integration.""" # adding all events events.register_events() @@ -254,15 +245,10 @@ def reload_config(): import importlib for module in ( - "avalon", - "avalon.lib", - "avalon.pipeline", - "pyblish", - "pypeapp", - "{}.api".format(AVALON_CONFIG), - "{}.hosts.hiero.lib".format(AVALON_CONFIG), - "{}.hosts.hiero.menu".format(AVALON_CONFIG), - "{}.hosts.hiero.tags".format(AVALON_CONFIG) + "openpype.api", + "openpype.hosts.hiero.lib", + "openpype.hosts.hiero.menu", + "openpype.hosts.hiero.tags" ): log.info("Reloading module: {}...".format(module)) try: diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py b/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py index 21c21cd7c3..2e638c2088 100644 --- a/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py +++ b/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py @@ -1,9 +1,9 @@ import traceback # activate hiero from pype -import avalon.api +from openpype.pipeline import install_host import openpype.hosts.hiero.api as phiero -avalon.api.install(phiero) +install_host(phiero) try: __import__("openpype.hosts.hiero.api") diff --git a/openpype/hosts/hiero/api/tags.py b/openpype/hosts/hiero/api/tags.py index fe5c0d5257..8877b92b9d 100644 --- a/openpype/hosts/hiero/api/tags.py +++ b/openpype/hosts/hiero/api/tags.py @@ -3,23 +3,13 @@ import os import hiero from openpype.api import Logger -from avalon import io +from openpype.pipeline import legacy_io -log = Logger().get_logger(__name__) +log = 
Logger.get_logger(__name__) def tag_data(): return { - # "Retiming": { - # "editable": "1", - # "note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)", # noqa - # "icon": "retiming.png", - # "metadata": { - # "family": "retiming", - # "marginIn": 1, - # "marginOut": 1 - # } - # }, "[Lenses]": { "Set lense here": { "editable": "1", @@ -48,6 +38,16 @@ def tag_data(): "family": "comment", "subset": "main" } + }, + "FrameMain": { + "editable": "1", + "note": "Publishing a frame subset.", + "icon": "z_layer_main.png", + "metadata": { + "family": "frame", + "subset": "main", + "format": "png" + } } } @@ -141,7 +141,7 @@ def add_tags_to_workfile(): nks_pres_tags = tag_data() # Get project task types. - tasks = io.find_one({"type": "project"})["config"]["tasks"] + tasks = legacy_io.find_one({"type": "project"})["config"]["tasks"] nks_pres_tags["[Tasks]"] = {} log.debug("__ tasks: {}".format(tasks)) for task_type in tasks.keys(): @@ -159,7 +159,7 @@ def add_tags_to_workfile(): # asset builds and shots. 
if int(os.getenv("TAG_ASSETBUILD_STARTUP", 0)) == 1: nks_pres_tags["[AssetBuilds]"] = {} - for asset in io.find({"type": "asset"}): + for asset in legacy_io.find({"type": "asset"}): if asset["data"]["entityType"] == "AssetBuild": nks_pres_tags["[AssetBuilds]"][asset["name"]] = { "editable": "1", diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py index d3908695a2..da4326c8c1 100644 --- a/openpype/hosts/hiero/plugins/load/load_clip.py +++ b/openpype/hosts/hiero/plugins/load/load_clip.py @@ -1,5 +1,7 @@ -from avalon import io -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + legacy_io, + get_representation_path, +) import openpype.hosts.hiero.api as phiero # from openpype.hosts.hiero.api import plugin, lib # reload(lib) @@ -105,7 +107,7 @@ class LoadClip(phiero.SequenceLoader): namespace = container['namespace'] track_item = phiero.get_track_items( track_item_name=namespace) - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -174,7 +176,7 @@ class LoadClip(phiero.SequenceLoader): # define version name version_name = version.get("name", None) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/hiero/plugins/publish/collect_frame_tag_instances.py b/openpype/hosts/hiero/plugins/publish/collect_frame_tag_instances.py new file mode 100644 index 0000000000..982a34efd6 --- /dev/null +++ b/openpype/hosts/hiero/plugins/publish/collect_frame_tag_instances.py @@ -0,0 +1,142 @@ +from pprint import pformat +import re +import ast +import json + +import pyblish.api + + +class CollectFrameTagInstances(pyblish.api.ContextPlugin): + """Collect frames from tags. 
+ + Tag is expected to have metadata: + { + "family": "frame" + "subset": "main" + } + """ + + order = pyblish.api.CollectorOrder + label = "Collect Frames" + hosts = ["hiero"] + + def process(self, context): + self._context = context + + # collect all sequence tags + subset_data = self._create_frame_subset_data_sequence(context) + + self.log.debug("__ subset_data: {}".format( + pformat(subset_data) + )) + + # create instances + self._create_instances(subset_data) + + def _get_tag_data(self, tag): + data = {} + + # get tag metadata attribute + tag_data = tag.metadata() + + # convert tag metadata to normal keys names and values to correct types + for k, v in dict(tag_data).items(): + key = k.replace("tag.", "") + + try: + # capture exceptions which are related to strings only + if re.match(r"^[\d]+$", v): + value = int(v) + elif re.match(r"^True$", v): + value = True + elif re.match(r"^False$", v): + value = False + elif re.match(r"^None$", v): + value = None + elif re.match(r"^[\w\d_]+$", v): + value = v + else: + value = ast.literal_eval(v) + except (ValueError, SyntaxError): + value = v + + data[key] = value + + return data + + def _create_frame_subset_data_sequence(self, context): + + sequence_tags = [] + sequence = context.data["activeTimeline"] + + # get all publishable sequence frames + publish_frames = range(int(sequence.duration() + 1)) + + self.log.debug("__ publish_frames: {}".format( + pformat(publish_frames) + )) + + # get all sequence tags + for tag in sequence.tags(): + tag_data = self._get_tag_data(tag) + self.log.debug("__ tag_data: {}".format( + pformat(tag_data) + )) + if not tag_data: + continue + + if "family" not in tag_data: + continue + + if tag_data["family"] != "frame": + continue + + sequence_tags.append(tag_data) + + self.log.debug("__ sequence_tags: {}".format( + pformat(sequence_tags) + )) + + # first collect all available subset tag frames + subset_data = {} + for tag_data in sequence_tags: + frame = int(tag_data["start"]) + + if frame 
not in publish_frames: + continue + + subset = tag_data["subset"] + + if subset in subset_data: + # update existing subset key + subset_data[subset]["frames"].append(frame) + else: + # create new subset key + subset_data[subset] = { + "frames": [frame], + "format": tag_data["format"], + "asset": context.data["assetEntity"]["name"] + } + return subset_data + + def _create_instances(self, subset_data): + # create instance per subset + for subset_name, subset_data in subset_data.items(): + name = "frame" + subset_name.title() + data = { + "name": name, + "label": "{} {}".format(name, subset_data["frames"]), + "family": "image", + "families": ["frame"], + "asset": subset_data["asset"], + "subset": name, + "format": subset_data["format"], + "frames": subset_data["frames"] + } + self._context.create_instance(**data) + + self.log.info( + "Created instance: {}".format( + json.dumps(data, sort_keys=True, indent=4) + ) + ) diff --git a/openpype/hosts/hiero/plugins/publish/extract_frames.py b/openpype/hosts/hiero/plugins/publish/extract_frames.py new file mode 100644 index 0000000000..aa3eda2e9f --- /dev/null +++ b/openpype/hosts/hiero/plugins/publish/extract_frames.py @@ -0,0 +1,82 @@ +import os +import pyblish.api +import openpype + + +class ExtractFrames(openpype.api.Extractor): + """Extracts frames""" + + order = pyblish.api.ExtractorOrder + label = "Extract Frames" + hosts = ["hiero"] + families = ["frame"] + movie_extensions = ["mov", "mp4"] + + def process(self, instance): + oiio_tool_path = openpype.lib.get_oiio_tools_path() + staging_dir = self.staging_dir(instance) + output_template = os.path.join(staging_dir, instance.data["name"]) + sequence = instance.context.data["activeTimeline"] + + files = [] + for frame in instance.data["frames"]: + track_item = sequence.trackItemAt(frame) + media_source = track_item.source().mediaSource() + input_path = media_source.fileinfos()[0].filename() + input_frame = ( + track_item.mapTimelineToSource(frame) + + 
track_item.source().mediaSource().startTime() + ) + output_ext = instance.data["format"] + output_path = output_template + output_path += ".{:04d}.{}".format(int(frame), output_ext) + + args = [oiio_tool_path] + + ext = os.path.splitext(input_path)[1][1:] + if ext in self.movie_extensions: + args.extend(["--subimage", str(int(input_frame))]) + else: + args.extend(["--frames", str(int(input_frame))]) + + if ext == "exr": + args.extend(["--powc", "0.45,0.45,0.45,1.0"]) + + args.extend([input_path, "-o", output_path]) + output = openpype.api.run_subprocess(args) + + failed_output = "oiiotool produced no output." + if failed_output in output: + raise ValueError( + "oiiotool processing failed. Args: {}".format(args) + ) + + files.append(output_path) + + # Feedback to user because "oiiotool" can make the publishing + # appear unresponsive. + self.log.info( + "Processed {} of {} frames".format( + instance.data["frames"].index(frame) + 1, + len(instance.data["frames"]) + ) + ) + + if len(files) == 1: + instance.data["representations"] = [ + { + "name": output_ext, + "ext": output_ext, + "files": os.path.basename(files[0]), + "stagingDir": staging_dir + } + ] + else: + instance.data["representations"] = [ + { + "name": output_ext, + "ext": output_ext, + "files": [os.path.basename(x) for x in files], + "stagingDir": staging_dir + } + ] diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py index 4eac6a008a..46f0b2440e 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py @@ -296,6 +296,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin): continue if otio_clip.name not in track_item.name(): continue + self.log.debug("__ parent_range: {}".format(parent_range)) + self.log.debug("__ timeline_range: {}".format(timeline_range)) if openpype.lib.is_overlapping_otio_ranges( parent_range, timeline_range, 
strict=True): diff --git a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py index d48d6949bd..b9f58c15f6 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py @@ -1,12 +1,15 @@ import os -import pyblish.api -import hiero.ui -from openpype.hosts.hiero import api as phiero -from avalon import api as avalon -from pprint import pformat -from openpype.hosts.hiero.api.otio import hiero_export -from Qt.QtGui import QPixmap import tempfile +from pprint import pformat + +import pyblish.api +from Qt.QtGui import QPixmap + +import hiero.ui + +from openpype.pipeline import legacy_io +from openpype.hosts.hiero import api as phiero +from openpype.hosts.hiero.api.otio import hiero_export class PrecollectWorkfile(pyblish.api.ContextPlugin): @@ -17,7 +20,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): def process(self, context): - asset = avalon.Session["AVALON_ASSET"] + asset = legacy_io.Session["AVALON_ASSET"] subset = "workfile" project = phiero.get_current_project() active_timeline = hiero.ui.activeSequence() @@ -65,6 +68,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): "subset": "{}{}".format(asset, subset.capitalize()), "item": project, "family": "workfile", + "families": [], "representations": [workfile_representation, thumb_representation] } @@ -74,6 +78,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): # update context with main project attributes context_data = { "activeProject": project, + "activeTimeline": active_timeline, "otioTimeline": otio_timeline, "currentFile": curent_file, "colorspace": self.get_colorspace(project), diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py index a90856c6fd..10baf25803 100644 --- 
a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py @@ -1,5 +1,5 @@ from pyblish import api -from avalon import io +from openpype.pipeline import legacy_io class CollectAssetBuilds(api.ContextPlugin): @@ -18,7 +18,7 @@ class CollectAssetBuilds(api.ContextPlugin): def process(self, context): asset_builds = {} - for asset in io.find({"type": "asset"}): + for asset in legacy_io.find({"type": "asset"}): if asset["data"]["entityType"] == "AssetBuild": self.log.debug("Found \"{}\" in database.".format(asset)) asset_builds[asset["name"]] = asset diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py deleted file mode 100644 index 1d0727d0af..0000000000 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py +++ /dev/null @@ -1,38 +0,0 @@ -import pyblish.api - - -class CollectClipResolution(pyblish.api.InstancePlugin): - """Collect clip geometry resolution""" - - order = pyblish.api.CollectorOrder - 0.1 - label = "Collect Clip Resolution" - hosts = ["hiero"] - families = ["clip"] - - def process(self, instance): - sequence = instance.context.data['activeSequence'] - item = instance.data["item"] - source_resolution = instance.data.get("sourceResolution", None) - - resolution_width = int(sequence.format().width()) - resolution_height = int(sequence.format().height()) - pixel_aspect = sequence.format().pixelAspect() - - # source exception - if source_resolution: - resolution_width = int(item.source().mediaSource().width()) - resolution_height = int(item.source().mediaSource().height()) - pixel_aspect = item.source().mediaSource().pixelAspect() - - resolution_data = { - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "pixelAspect": pixel_aspect - } - # add to instacne data - 
instance.data.update(resolution_data) - - self.log.info("Resolution of instance '{}' is: {}".format( - instance, - resolution_data - )) diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_host_version.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_host_version.py deleted file mode 100644 index 76e5bd11d5..0000000000 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_host_version.py +++ /dev/null @@ -1,15 +0,0 @@ -import pyblish.api - - -class CollectHostVersion(pyblish.api.ContextPlugin): - """Inject the hosts version into context""" - - label = "Collect Host and HostVersion" - order = pyblish.api.CollectorOrder - 0.5 - - def process(self, context): - import nuke - import pyblish.api - - context.set_data("host", pyblish.api.current_host()) - context.set_data('hostVersion', value=nuke.NUKE_VERSION_STRING) diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_retime.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_retime.py deleted file mode 100644 index 0634130976..0000000000 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_retime.py +++ /dev/null @@ -1,32 +0,0 @@ -from pyblish import api - - -class CollectTagRetime(api.InstancePlugin): - """Collect Retiming from Tags of selected track items.""" - - order = api.CollectorOrder + 0.014 - label = "Collect Retiming Tag" - hosts = ["hiero"] - families = ['clip'] - - def process(self, instance): - # gets tags - tags = instance.data["tags"] - - for t in tags: - t_metadata = dict(t["metadata"]) - t_family = t_metadata.get("tag.family", "") - - # gets only task family tags and collect labels - if "retiming" in t_family: - margin_in = t_metadata.get("tag.marginIn", "") - margin_out = t_metadata.get("tag.marginOut", "") - - instance.data["retimeMarginIn"] = int(margin_in) - instance.data["retimeMarginOut"] = int(margin_out) - instance.data["retime"] = True - - self.log.info("retimeMarginIn: `{}`".format(margin_in)) - 
self.log.info("retimeMarginOut: `{}`".format(margin_out)) - - instance.data["families"] += ["retime"] diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_instances.py b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_instances.py deleted file mode 100644 index f9cc158e79..0000000000 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_instances.py +++ /dev/null @@ -1,223 +0,0 @@ -from compiler.ast import flatten -from pyblish import api -from openpype.hosts.hiero import api as phiero -import hiero -# from openpype.hosts.hiero.api import lib -# reload(lib) -# reload(phiero) - - -class PreCollectInstances(api.ContextPlugin): - """Collect all Track items selection.""" - - order = api.CollectorOrder - 0.509 - label = "Pre-collect Instances" - hosts = ["hiero"] - - def process(self, context): - track_items = phiero.get_track_items( - selected=True, check_tagged=True, check_enabled=True) - # only return enabled track items - if not track_items: - track_items = phiero.get_track_items( - check_enabled=True, check_tagged=True) - # get sequence and video tracks - sequence = context.data["activeSequence"] - tracks = sequence.videoTracks() - - # add collection to context - tracks_effect_items = self.collect_sub_track_items(tracks) - - context.data["tracksEffectItems"] = tracks_effect_items - - self.log.info( - "Processing enabled track items: {}".format(len(track_items))) - - for _ti in track_items: - data = {} - clip = _ti.source() - - # get clips subtracks and anotations - annotations = self.clip_annotations(clip) - subtracks = self.clip_subtrack(_ti) - self.log.debug("Annotations: {}".format(annotations)) - self.log.debug(">> Subtracks: {}".format(subtracks)) - - # get pype tag data - tag_parsed_data = phiero.get_track_item_pype_data(_ti) - # self.log.debug(pformat(tag_parsed_data)) - - if not tag_parsed_data: - continue - - if tag_parsed_data.get("id") != "pyblish.avalon.instance": - continue - # add tag data to instance 
data - data.update({ - k: v for k, v in tag_parsed_data.items() - if k not in ("id", "applieswhole", "label") - }) - - asset = tag_parsed_data["asset"] - subset = tag_parsed_data["subset"] - review_track = tag_parsed_data.get("reviewTrack") - hiero_track = tag_parsed_data.get("heroTrack") - audio = tag_parsed_data.get("audio") - - # remove audio attribute from data - data.pop("audio") - - # insert family into families - family = tag_parsed_data["family"] - families = [str(f) for f in tag_parsed_data["families"]] - families.insert(0, str(family)) - - track = _ti.parent() - media_source = _ti.source().mediaSource() - source_path = media_source.firstpath() - file_head = media_source.filenameHead() - file_info = media_source.fileinfos().pop() - source_first_frame = int(file_info.startFrame()) - - # apply only for review and master track instance - if review_track and hiero_track: - families += ["review", "ftrack"] - - data.update({ - "name": "{} {} {}".format(asset, subset, families), - "asset": asset, - "item": _ti, - "families": families, - - # tags - "tags": _ti.tags(), - - # track item attributes - "track": track.name(), - "trackItem": track, - "reviewTrack": review_track, - - # version data - "versionData": { - "colorspace": _ti.sourceMediaColourTransform() - }, - - # source attribute - "source": source_path, - "sourceMedia": media_source, - "sourcePath": source_path, - "sourceFileHead": file_head, - "sourceFirst": source_first_frame, - - # clip's effect - "clipEffectItems": subtracks - }) - - instance = context.create_instance(**data) - - self.log.info("Creating instance.data: {}".format(instance.data)) - - if audio: - a_data = dict() - - # add tag data to instance data - a_data.update({ - k: v for k, v in tag_parsed_data.items() - if k not in ("id", "applieswhole", "label") - }) - - # create main attributes - subset = "audioMain" - family = "audio" - families = ["clip", "ftrack"] - families.insert(0, str(family)) - - name = "{} {} {}".format(asset, subset, 
families) - - a_data.update({ - "name": name, - "subset": subset, - "asset": asset, - "family": family, - "families": families, - "item": _ti, - - # tags - "tags": _ti.tags(), - }) - - a_instance = context.create_instance(**a_data) - self.log.info("Creating audio instance: {}".format(a_instance)) - - @staticmethod - def clip_annotations(clip): - """ - Returns list of Clip's hiero.core.Annotation - """ - annotations = [] - subTrackItems = flatten(clip.subTrackItems()) - annotations += [item for item in subTrackItems if isinstance( - item, hiero.core.Annotation)] - return annotations - - @staticmethod - def clip_subtrack(clip): - """ - Returns list of Clip's hiero.core.SubTrackItem - """ - subtracks = [] - subTrackItems = flatten(clip.parent().subTrackItems()) - for item in subTrackItems: - # avoid all anotation - if isinstance(item, hiero.core.Annotation): - continue - # # avoid all not anaibled - if not item.isEnabled(): - continue - subtracks.append(item) - return subtracks - - @staticmethod - def collect_sub_track_items(tracks): - """ - Returns dictionary with track index as key and list of subtracks - """ - # collect all subtrack items - sub_track_items = dict() - for track in tracks: - items = track.items() - - # skip if no clips on track > need track with effect only - if items: - continue - - # skip all disabled tracks - if not track.isEnabled(): - continue - - track_index = track.trackIndex() - _sub_track_items = flatten(track.subTrackItems()) - - # continue only if any subtrack items are collected - if len(_sub_track_items) < 1: - continue - - enabled_sti = list() - # loop all found subtrack items and check if they are enabled - for _sti in _sub_track_items: - # checking if not enabled - if not _sti.isEnabled(): - continue - if isinstance(_sti, hiero.core.Annotation): - continue - # collect the subtrack item - enabled_sti.append(_sti) - - # continue only if any subtrack items are collected - if len(enabled_sti) < 1: - continue - - # add collection of 
subtrackitems to dict - sub_track_items[track_index] = enabled_sti - - return sub_track_items diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py deleted file mode 100644 index ef7d07421b..0000000000 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -import pyblish.api -from openpype.hosts.hiero import api as phiero -from avalon import api as avalon - - -class PreCollectWorkfile(pyblish.api.ContextPlugin): - """Inject the current working file into context""" - - label = "Pre-collect Workfile" - order = pyblish.api.CollectorOrder - 0.51 - - def process(self, context): - asset = avalon.Session["AVALON_ASSET"] - subset = "workfile" - - project = phiero.get_current_project() - active_sequence = phiero.get_current_sequence() - video_tracks = active_sequence.videoTracks() - audio_tracks = active_sequence.audioTracks() - current_file = project.path() - staging_dir = os.path.dirname(current_file) - base_name = os.path.basename(current_file) - - # get workfile's colorspace properties - _clrs = {} - _clrs["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride() # noqa - _clrs["lutSetting16Bit"] = project.lutSetting16Bit() - _clrs["lutSetting8Bit"] = project.lutSetting8Bit() - _clrs["lutSettingFloat"] = project.lutSettingFloat() - _clrs["lutSettingLog"] = project.lutSettingLog() - _clrs["lutSettingViewer"] = project.lutSettingViewer() - _clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace() - _clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport() - _clrs["ocioConfigName"] = project.ocioConfigName() - _clrs["ocioConfigPath"] = project.ocioConfigPath() - - # set main project attributes to context - context.data["activeProject"] = project - context.data["activeSequence"] = active_sequence - context.data["videoTracks"] = video_tracks - context.data["audioTracks"] = 
audio_tracks - context.data["currentFile"] = current_file - context.data["colorspace"] = _clrs - - self.log.info("currentFile: {}".format(current_file)) - - # creating workfile representation - representation = { - 'name': 'hrox', - 'ext': 'hrox', - 'files': base_name, - "stagingDir": staging_dir, - } - - instance_data = { - "name": "{}_{}".format(asset, subset), - "asset": asset, - "subset": "{}{}".format(asset, subset.capitalize()), - "item": project, - "family": "workfile", - - # version data - "versionData": { - "colorspace": _clrs - }, - - # source attribute - "sourcePath": current_file, - "representations": [representation] - } - - instance = context.create_instance(**instance_data) - self.log.info("Creating instance: {}".format(instance)) diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index bd41618856..603519069a 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -4,8 +4,8 @@ from contextlib import contextmanager import six -from avalon import api, io from openpype.api import get_asset +from openpype.pipeline import legacy_io import hou @@ -75,9 +75,13 @@ def generate_ids(nodes, asset_id=None): if asset_id is None: # Get the asset ID from the database for the asset of current context - asset_data = io.find_one({"type": "asset", - "name": api.Session["AVALON_ASSET"]}, - projection={"_id": True}) + asset_data = legacy_io.find_one( + { + "type": "asset", + "name": legacy_io.Session["AVALON_ASSET"] + }, + projection={"_id": True} + ) assert asset_data, "No current asset found in Session" asset_id = asset_data['_id'] @@ -155,7 +159,7 @@ def validate_fps(): if parent is None: pass else: - dialog = popup.Popup(parent=parent) + dialog = popup.PopupUpdateKeys(parent=parent) dialog.setModal(True) dialog.setWindowTitle("Houdini scene does not match project FPS") dialog.setMessage("Scene %i FPS does not match project %i FPS" % @@ -163,7 +167,7 @@ def validate_fps(): dialog.setButtonText("Fix") # 
on_show is the Fix button clicked callback - dialog.on_clicked.connect(lambda: set_scene_fps(fps)) + dialog.on_clicked_state.connect(lambda: set_scene_fps(fps)) dialog.show() @@ -424,8 +428,8 @@ def maintained_selection(): def reset_framerange(): """Set frame range to current asset""" - asset_name = api.Session["AVALON_ASSET"] - asset = io.find_one({"name": asset_name, "type": "asset"}) + asset_name = legacy_io.Session["AVALON_ASSET"] + asset = legacy_io.find_one({"name": asset_name, "type": "asset"}) frame_start = asset["data"].get("frameStart") frame_end = asset["data"].get("frameEnd") diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py index 8e093a89bc..7048accceb 100644 --- a/openpype/hosts/houdini/api/pipeline.py +++ b/openpype/hosts/houdini/api/pipeline.py @@ -4,11 +4,8 @@ import logging import contextlib import hou -import hdefereval import pyblish.api -import avalon.api -from avalon.lib import find_submodule from openpype.pipeline import ( register_creator_plugin_path, @@ -215,24 +212,12 @@ def ls(): "pyblish.mindbender.container"): containers += lib.lsattr("id", identifier) - has_metadata_collector = False - config_host = find_submodule(avalon.api.registered_config(), "houdini") - if hasattr(config_host, "collect_container_metadata"): - has_metadata_collector = True - for container in sorted(containers, # Hou 19+ Python 3 hou.ObjNode are not # sortable due to not supporting greater # than comparisons key=lambda node: node.path()): - data = parse_container(container) - - # Collect custom data if attribute is present - if has_metadata_collector: - metadata = config_host.collect_container_metadata(container) - data.update(metadata) - - yield data + yield parse_container(container) def before_save(): @@ -305,7 +290,13 @@ def on_new(): start = hou.playbar.playbackRange()[0] hou.setFrame(start) - hdefereval.executeDeferred(_enforce_start_frame) + if hou.isUIAvailable(): + import hdefereval + 
hdefereval.executeDeferred(_enforce_start_frame) + else: + # Run without execute deferred when no UI is available because + # without UI `hdefereval` is not available to import + _enforce_start_frame() def _set_context_settings(): diff --git a/openpype/hosts/houdini/api/usd.py b/openpype/hosts/houdini/api/usd.py index a992f1d082..e9991e38ec 100644 --- a/openpype/hosts/houdini/api/usd.py +++ b/openpype/hosts/houdini/api/usd.py @@ -1,11 +1,12 @@ """Houdini-specific USD Library functions.""" import contextlib - import logging + from Qt import QtWidgets, QtCore, QtGui -from avalon import io + from openpype import style +from openpype.pipeline import legacy_io from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from pxr import Sdf @@ -20,11 +21,12 @@ class SelectAssetDialog(QtWidgets.QWidget): Args: parm: Parameter where selected asset name is set. """ + def __init__(self, parm): self.setWindowTitle("Pick Asset") self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup) - assets_widget = SingleSelectAssetsWidget(io, parent=self) + assets_widget = SingleSelectAssetsWidget(legacy_io, parent=self) layout = QtWidgets.QHBoxLayout(self) layout.addWidget(assets_widget) @@ -44,7 +46,7 @@ class SelectAssetDialog(QtWidgets.QWidget): select_id = None name = self._parm.eval() if name: - db_asset = io.find_one( + db_asset = legacy_io.find_one( {"name": name, "type": "asset"}, {"_id": True} ) diff --git a/openpype/hosts/houdini/plugins/create/create_hda.py b/openpype/hosts/houdini/plugins/create/create_hda.py index 0a9c1bad1e..5fc78c7539 100644 --- a/openpype/hosts/houdini/plugins/create/create_hda.py +++ b/openpype/hosts/houdini/plugins/create/create_hda.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import hou -from avalon import io + +from openpype.pipeline import legacy_io from openpype.hosts.houdini.api import lib from openpype.hosts.houdini.api import plugin @@ -22,13 +23,16 @@ class CreateHDA(plugin.Creator): # type: (str) -> bool """Check if 
existing subset name versions already exists.""" # Get all subsets of the current asset - asset_id = io.find_one({"name": self.data["asset"], "type": "asset"}, - projection={"_id": True})['_id'] - subset_docs = io.find( + asset_id = legacy_io.find_one( + {"name": self.data["asset"], "type": "asset"}, + projection={"_id": True} + )['_id'] + subset_docs = legacy_io.find( { "type": "subset", "parent": asset_id - }, {"name": 1} + }, + {"name": 1} ) existing_subset_names = set(subset_docs.distinct("name")) existing_subset_names_low = { diff --git a/openpype/hosts/houdini/plugins/load/actions.py b/openpype/hosts/houdini/plugins/load/actions.py index 63d74c39a5..637be1513d 100644 --- a/openpype/hosts/houdini/plugins/load/actions.py +++ b/openpype/hosts/houdini/plugins/load/actions.py @@ -6,7 +6,7 @@ from openpype.pipeline import load class SetFrameRangeLoader(load.LoaderPlugin): - """Set Houdini frame range""" + """Set frame range excluding pre- and post-handles""" families = [ "animation", @@ -44,7 +44,7 @@ class SetFrameRangeLoader(load.LoaderPlugin): class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): - """Set Maya frame range including pre- and post-handles""" + """Set frame range including pre- and post-handles""" families = [ "animation", diff --git a/openpype/hosts/houdini/plugins/load/load_alembic.py b/openpype/hosts/houdini/plugins/load/load_alembic.py index 0214229d5a..96e666b255 100644 --- a/openpype/hosts/houdini/plugins/load/load_alembic.py +++ b/openpype/hosts/houdini/plugins/load/load_alembic.py @@ -7,7 +7,7 @@ from openpype.hosts.houdini.api import pipeline class AbcLoader(load.LoaderPlugin): - """Specific loader of Alembic for the avalon.animation family""" + """Load Alembic""" families = ["model", "animation", "pointcache", "gpuCache"] label = "Load Alembic" diff --git a/openpype/hosts/houdini/plugins/load/load_alembic_archive.py b/openpype/hosts/houdini/plugins/load/load_alembic_archive.py new file mode 100644 index 0000000000..b960073e12 --- 
/dev/null +++ b/openpype/hosts/houdini/plugins/load/load_alembic_archive.py @@ -0,0 +1,75 @@ +import os +from openpype.pipeline import ( + load, + get_representation_path, +) +from openpype.hosts.houdini.api import pipeline + + +class AbcArchiveLoader(load.LoaderPlugin): + """Load Alembic as full geometry network hierarchy """ + + families = ["model", "animation", "pointcache", "gpuCache"] + label = "Load Alembic as Archive" + representations = ["abc"] + order = -5 + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + + import hou + + # Format file name, Houdini only wants forward slashes + file_path = os.path.normpath(self.fname) + file_path = file_path.replace("\\", "/") + + # Get the root node + obj = hou.node("/obj") + + # Define node name + namespace = namespace if namespace else context["asset"]["name"] + node_name = "{}_{}".format(namespace, name) if namespace else name + + # Create an Alembic archive node + node = obj.createNode("alembicarchive", node_name=node_name) + node.moveToGoodPosition() + + # TODO: add FPS of project / asset + node.setParms({"fileName": file_path, + "channelRef": True}) + + # Apply some magic + node.parm("buildHierarchy").pressButton() + node.moveToGoodPosition() + + nodes = [node] + + self[:] = nodes + + return pipeline.containerise(node_name, + namespace, + nodes, + context, + self.__class__.__name__, + suffix="") + + def update(self, container, representation): + + node = container["node"] + + # Update the file path + file_path = get_representation_path(representation) + file_path = file_path.replace("\\", "/") + + # Update attributes + node.setParms({"fileName": file_path, + "representation": str(representation["_id"])}) + + # Rebuild + node.parm("buildHierarchy").pressButton() + + def remove(self, container): + + node = container["node"] + node.destroy() diff --git a/openpype/hosts/houdini/plugins/load/load_bgeo.py b/openpype/hosts/houdini/plugins/load/load_bgeo.py new file 
mode 100644 index 0000000000..a463d51383 --- /dev/null +++ b/openpype/hosts/houdini/plugins/load/load_bgeo.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- +import os +import re + +from openpype.pipeline import ( + load, + get_representation_path, +) +from openpype.hosts.houdini.api import pipeline + + +class BgeoLoader(load.LoaderPlugin): + """Load bgeo files to Houdini.""" + + label = "Load bgeo" + families = ["model", "pointcache", "bgeo"] + representations = [ + "bgeo", "bgeosc", "bgeogz", + "bgeo.sc", "bgeo.gz", "bgeo.lzma", "bgeo.bz2"] + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + + import hou + + # Get the root node + obj = hou.node("/obj") + + # Define node name + namespace = namespace if namespace else context["asset"]["name"] + node_name = "{}_{}".format(namespace, name) if namespace else name + + # Create a new geo node + container = obj.createNode("geo", node_name=node_name) + is_sequence = bool(context["representation"]["context"].get("frame")) + + # Remove the file node, it only loads static meshes + # Houdini 17 has removed the file node from the geo node + file_node = container.node("file1") + if file_node: + file_node.destroy() + + # Explicitly create a file node + file_node = container.createNode("file", node_name=node_name) + file_node.setParms({"file": self.format_path(self.fname, is_sequence)}) + + # Set display on last node + file_node.setDisplayFlag(True) + + nodes = [container, file_node] + self[:] = nodes + + return pipeline.containerise( + node_name, + namespace, + nodes, + context, + self.__class__.__name__, + suffix="", + ) + + @staticmethod + def format_path(path, is_sequence): + """Format file path correctly for single bgeo or bgeo sequence.""" + if not os.path.exists(path): + raise RuntimeError("Path does not exist: %s" % path) + + # The path is either a single file or sequence in a folder. 
+ if not is_sequence: + filename = path + print("single") + else: + filename = re.sub(r"(.*)\.(\d+)\.(bgeo.*)", "\\1.$F4.\\3", path) + + filename = os.path.join(path, filename) + + filename = os.path.normpath(filename) + filename = filename.replace("\\", "/") + + return filename + + def update(self, container, representation): + + node = container["node"] + try: + file_node = next( + n for n in node.children() if n.type().name() == "file" + ) + except StopIteration: + self.log.error("Could not find node of type `alembic`") + return + + # Update the file path + file_path = get_representation_path(representation) + file_path = self.format_path(file_path) + + file_node.setParms({"fileName": file_path}) + + # Update attribute + node.setParms({"representation": str(representation["_id"])}) + + def remove(self, container): + + node = container["node"] + node.destroy() diff --git a/openpype/hosts/houdini/plugins/load/load_camera.py b/openpype/hosts/houdini/plugins/load/load_camera.py index ef57d115da..059ad11a76 100644 --- a/openpype/hosts/houdini/plugins/load/load_camera.py +++ b/openpype/hosts/houdini/plugins/load/load_camera.py @@ -78,7 +78,7 @@ def transfer_non_default_values(src, dest, ignore=None): class CameraLoader(load.LoaderPlugin): - """Specific loader of Alembic for the avalon.animation family""" + """Load camera from an Alembic file""" families = ["camera"] label = "Load Camera (abc)" diff --git a/openpype/hosts/houdini/plugins/load/load_image.py b/openpype/hosts/houdini/plugins/load/load_image.py index 671f08f18f..928c2ee734 100644 --- a/openpype/hosts/houdini/plugins/load/load_image.py +++ b/openpype/hosts/houdini/plugins/load/load_image.py @@ -42,9 +42,9 @@ def get_image_avalon_container(): class ImageLoader(load.LoaderPlugin): - """Specific loader of Alembic for the avalon.animation family""" + """Load images into COP2""" - families = ["colorbleed.imagesequence"] + families = ["imagesequence"] label = "Load Image (COP2)" representations = ["*"] order = 
-10 diff --git a/openpype/hosts/houdini/plugins/load/load_vdb.py b/openpype/hosts/houdini/plugins/load/load_vdb.py index 06bb9e45e4..bff0f8b0bf 100644 --- a/openpype/hosts/houdini/plugins/load/load_vdb.py +++ b/openpype/hosts/houdini/plugins/load/load_vdb.py @@ -9,7 +9,7 @@ from openpype.hosts.houdini.api import pipeline class VdbLoader(load.LoaderPlugin): - """Specific loader of Alembic for the avalon.animation family""" + """Load VDB""" families = ["vdbcache"] label = "Load VDB" diff --git a/openpype/hosts/houdini/plugins/load/show_usdview.py b/openpype/hosts/houdini/plugins/load/show_usdview.py index 8066615181..2737bc40fa 100644 --- a/openpype/hosts/houdini/plugins/load/show_usdview.py +++ b/openpype/hosts/houdini/plugins/load/show_usdview.py @@ -1,3 +1,7 @@ +import os +import subprocess + +from openpype.lib.vendor_bin_utils import find_executable from openpype.pipeline import load @@ -14,12 +18,7 @@ class ShowInUsdview(load.LoaderPlugin): def load(self, context, name=None, namespace=None, data=None): - import os - import subprocess - - import avalon.lib as lib - - usdview = lib.which("usdview") + usdview = find_executable("usdview") filepath = os.path.normpath(self.fname) filepath = filepath.replace("\\", "/") diff --git a/openpype/hosts/houdini/plugins/publish/collect_inputs.py b/openpype/hosts/houdini/plugins/publish/collect_inputs.py index 39e2737e8c..8c7098c710 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_inputs.py +++ b/openpype/hosts/houdini/plugins/publish/collect_inputs.py @@ -1,6 +1,7 @@ -import avalon.api as api import pyblish.api +from openpype.pipeline import registered_host + def collect_input_containers(nodes): """Collect containers that contain any of the node in `nodes`. 
@@ -18,7 +19,7 @@ def collect_input_containers(nodes): lookup = frozenset(nodes) containers = [] - host = api.registered_host() + host = registered_host() for container in host.ls(): node = container["node"] diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py index 66dfba64df..3f0d10e0ba 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py +++ b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py @@ -1,6 +1,6 @@ import pyblish.api -from avalon import io +from openpype.pipeline import legacy_io import openpype.lib.usdlib as usdlib @@ -50,7 +50,10 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin): self.log.debug("Add bootstrap for: %s" % bootstrap) - asset = io.find_one({"name": instance.data["asset"], "type": "asset"}) + asset = legacy_io.find_one({ + "name": instance.data["asset"], + "type": "asset" + }) assert asset, "Asset must exist: %s" % asset # Check which are not about to be created and don't exist yet @@ -104,7 +107,8 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin): # Or, if they already exist in the database we can # skip them too. 
return bool( - io.find_one( - {"name": subset, "type": "subset", "parent": asset["_id"]} + legacy_io.find_one( + {"name": subset, "type": "subset", "parent": asset["_id"]}, + {"_id": True} ) ) diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py index 3e842ae766..bfcd93c1cb 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py +++ b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py @@ -7,7 +7,10 @@ from collections import deque import pyblish.api import openpype.api -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + legacy_io, +) import openpype.hosts.houdini.api.usd as hou_usdlib from openpype.hosts.houdini.api.lib import render_rop @@ -266,8 +269,6 @@ class ExtractUSDLayered(openpype.api.Extractor): instance.data["files"].append(fname) def _compare_with_latest_publish(self, dependency, new_file): - - from avalon import api, io import filecmp _, ext = os.path.splitext(new_file) @@ -275,10 +276,10 @@ class ExtractUSDLayered(openpype.api.Extractor): # Compare this dependency with the latest published version # to detect whether we should make this into a new publish # version. If not, skip it. 
- asset = io.find_one( + asset = legacy_io.find_one( {"name": dependency.data["asset"], "type": "asset"} ) - subset = io.find_one( + subset = legacy_io.find_one( { "name": dependency.data["subset"], "type": "subset", @@ -290,7 +291,7 @@ class ExtractUSDLayered(openpype.api.Extractor): self.log.debug("No existing subset..") return False - version = io.find_one( + version = legacy_io.find_one( {"type": "version", "parent": subset["_id"], }, sort=[("name", -1)] ) @@ -298,7 +299,7 @@ class ExtractUSDLayered(openpype.api.Extractor): self.log.debug("No existing version..") return False - representation = io.find_one( + representation = legacy_io.find_one( { "name": ext.lstrip("."), "type": "representation", diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py index 31c2954ee7..c5cacd1880 100644 --- a/openpype/hosts/houdini/plugins/publish/increment_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py @@ -1,8 +1,8 @@ import pyblish.api -import avalon.api from openpype.api import version_up from openpype.action import get_errored_plugins_from_data +from openpype.pipeline import registered_host class IncrementCurrentFile(pyblish.api.InstancePlugin): @@ -41,7 +41,7 @@ class IncrementCurrentFile(pyblish.api.InstancePlugin): ) # Filename must not have changed since collecting - host = avalon.api.registered_host() + host = registered_host() current_file = host.current_file() assert ( context.data["currentFile"] == current_file diff --git a/openpype/hosts/houdini/plugins/publish/save_scene.py b/openpype/hosts/houdini/plugins/publish/save_scene.py index fe5962fbd3..6128c7af77 100644 --- a/openpype/hosts/houdini/plugins/publish/save_scene.py +++ b/openpype/hosts/houdini/plugins/publish/save_scene.py @@ -1,5 +1,6 @@ import pyblish.api -import avalon.api + +from openpype.pipeline import registered_host class SaveCurrentScene(pyblish.api.ContextPlugin): @@ 
-12,7 +13,7 @@ class SaveCurrentScene(pyblish.api.ContextPlugin): def process(self, context): # Filename must not have changed since collecting - host = avalon.api.registered_host() + host = registered_host() current_file = host.current_file() assert context.data['currentFile'] == current_file, ( "Collected filename from current scene name." diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py index fcfbf6b22d..44719ae488 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py @@ -1,9 +1,9 @@ import re import pyblish.api -import openpype.api -from avalon import io +import openpype.api +from openpype.pipeline import legacy_io class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin): @@ -23,16 +23,20 @@ class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin): shade_subset = subset.split(".", 1)[0] model_subset = re.sub("^usdShade", "usdModel", shade_subset) - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = legacy_io.find_one( + {"name": asset, "type": "asset"}, + {"_id": True} + ) if not asset_doc: raise RuntimeError("Asset does not exist: %s" % asset) - subset_doc = io.find_one( + subset_doc = legacy_io.find_one( { "name": model_subset, "type": "subset", "parent": asset_doc["_id"], - } + }, + {"_id": True} ) if not subset_doc: raise RuntimeError( diff --git a/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py b/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py index eb33b49759..afadbffd3e 100644 --- a/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py +++ b/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py @@ -1,10 +1,10 @@ -import avalon.api +from openpype.pipeline import install_host from openpype.hosts.houdini import api def main(): print("Installing OpenPype ...") - 
avalon.api.install(api) + install_host(api) main() diff --git a/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py b/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py index eb33b49759..afadbffd3e 100644 --- a/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py +++ b/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py @@ -1,10 +1,10 @@ -import avalon.api +from openpype.pipeline import install_host from openpype.hosts.houdini import api def main(): print("Installing OpenPype ...") - avalon.api.install(api) + install_host(api) main() diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py index 499b733570..01a29472e7 100644 --- a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py +++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py @@ -1,17 +1,21 @@ +import os import hou import husdoutputprocessors.base as base -import os -import re -import logging import colorbleed.usdlib as usdlib +from openpype.pipeline import ( + legacy_io, + registered_root, +) + def _get_project_publish_template(): """Return publish template from database for current project""" - from avalon import io - project = io.find_one({"type": "project"}, - projection={"config.template.publish": True}) + project = legacy_io.find_one( + {"type": "project"}, + projection={"config.template.publish": True} + ) return project["config"]["template"]["publish"] @@ -133,15 +137,15 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase): """ - from avalon import api, io - - PROJECT = api.Session["AVALON_PROJECT"] - asset_doc = io.find_one({"name": asset, - "type": "asset"}) + PROJECT = legacy_io.Session["AVALON_PROJECT"] + asset_doc = legacy_io.find_one({ + "name": asset, + "type": "asset" + }) if not asset_doc: raise RuntimeError("Invalid asset name: '%s'" % asset) - root = api.registered_root() + root = registered_root() path = 
self._template.format(**{ "root": root, "project": PROJECT, diff --git a/openpype/hosts/maya/api/action.py b/openpype/hosts/maya/api/action.py index ab26748c8a..ca1006b6aa 100644 --- a/openpype/hosts/maya/api/action.py +++ b/openpype/hosts/maya/api/action.py @@ -2,8 +2,8 @@ from __future__ import absolute_import import pyblish.api -from avalon import io +from openpype.pipeline import legacy_io from openpype.api import get_errored_instances_from_context @@ -75,8 +75,10 @@ class GenerateUUIDsOnInvalidAction(pyblish.api.Action): from . import lib asset = instance.data['asset'] - asset_id = io.find_one({"name": asset, "type": "asset"}, - projection={"_id": True})['_id'] + asset_id = legacy_io.find_one( + {"name": asset, "type": "asset"}, + projection={"_id": True} + )['_id'] for node, _id in lib.generate_ids(nodes, asset_id=asset_id): lib.set_id(node, _id, overwrite=True) diff --git a/openpype/hosts/maya/api/commands.py b/openpype/hosts/maya/api/commands.py index a1e0be2cfe..dd616b6dd6 100644 --- a/openpype/hosts/maya/api/commands.py +++ b/openpype/hosts/maya/api/commands.py @@ -1,7 +1,8 @@ # -*- coding: utf-8 -*- """OpenPype script commands to be used directly in Maya.""" from maya import cmds -from avalon import api, io + +from openpype.pipeline import legacy_io class ToolWindows: @@ -73,13 +74,13 @@ def reset_frame_range(): 59.94: '59.94fps', 44100: '44100fps', 48000: '48000fps' - }.get(float(api.Session.get("AVALON_FPS", 25)), "pal") + }.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal") cmds.currentUnit(time=fps) # Set frame start/end - asset_name = api.Session["AVALON_ASSET"] - asset = io.find_one({"name": asset_name, "type": "asset"}) + asset_name = legacy_io.Session["AVALON_ASSET"] + asset = legacy_io.find_one({"name": asset_name, "type": "asset"}) frame_start = asset["data"].get("frameStart") frame_end = asset["data"].get("frameEnd") @@ -144,8 +145,8 @@ def reset_resolution(): resolution_height = 1080 # Get resolution from asset - asset_name = 
api.Session["AVALON_ASSET"] - asset_doc = io.find_one({"name": asset_name, "type": "asset"}) + asset_name = legacy_io.Session["AVALON_ASSET"] + asset_doc = legacy_io.find_one({"name": asset_name, "type": "asset"}) resolution = _resolution_from_document(asset_doc) # Try get resolution from project if resolution is None: @@ -154,7 +155,7 @@ def reset_resolution(): "Asset \"{}\" does not have set resolution." " Trying to get resolution from project" ).format(asset_name)) - project_doc = io.find_one({"type": "project"}) + project_doc = legacy_io.find_one({"type": "project"}) resolution = _resolution_from_document(project_doc) if resolution is None: diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 90688423e0..088304ab05 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -17,15 +17,15 @@ import bson from maya import cmds, mel import maya.api.OpenMaya as om -from avalon import api, io - from openpype import lib from openpype.api import get_anatomy_settings from openpype.pipeline import ( + legacy_io, discover_loader_plugins, loaders_from_representation, get_representation_path, load_container, + registered_host, ) from .commands import reset_frame_range @@ -1387,9 +1387,13 @@ def generate_ids(nodes, asset_id=None): if asset_id is None: # Get the asset ID from the database for the asset of current context - asset_data = io.find_one({"type": "asset", - "name": api.Session["AVALON_ASSET"]}, - projection={"_id": True}) + asset_data = legacy_io.find_one( + { + "type": "asset", + "name": legacy_io.Session["AVALON_ASSET"] + }, + projection={"_id": True} + ) assert asset_data, "No current asset found in Session" asset_id = asset_data['_id'] @@ -1544,9 +1548,11 @@ def list_looks(asset_id): # # get all subsets with look leading in # the name associated with the asset - subset = io.find({"parent": bson.ObjectId(asset_id), - "type": "subset", - "name": {"$regex": "look*"}}) + subset = legacy_io.find({ + "parent": 
bson.ObjectId(asset_id), + "type": "subset", + "name": {"$regex": "look*"} + }) return list(subset) @@ -1565,16 +1571,20 @@ def assign_look_by_version(nodes, version_id): """ # Get representations of shader file and relationships - look_representation = io.find_one({"type": "representation", - "parent": version_id, - "name": "ma"}) + look_representation = legacy_io.find_one({ + "type": "representation", + "parent": version_id, + "name": "ma" + }) - json_representation = io.find_one({"type": "representation", - "parent": version_id, - "name": "json"}) + json_representation = legacy_io.find_one({ + "type": "representation", + "parent": version_id, + "name": "json" + }) # See if representation is already loaded, if so reuse it. - host = api.registered_host() + host = registered_host() representation_id = str(look_representation['_id']) for container in host.ls(): if (container['loader'] == "LookLoader" and @@ -1636,9 +1646,11 @@ def assign_look(nodes, subset="lookDefault"): except bson.errors.InvalidId: log.warning("Asset ID is not compatible with bson") continue - subset_data = io.find_one({"type": "subset", - "name": subset, - "parent": asset_id}) + subset_data = legacy_io.find_one({ + "type": "subset", + "name": subset, + "parent": asset_id + }) if not subset_data: log.warning("No subset '{}' found for {}".format(subset, asset_id)) @@ -1646,13 +1658,18 @@ def assign_look(nodes, subset="lookDefault"): # get last version # with backwards compatibility - version = io.find_one({"parent": subset_data['_id'], - "type": "version", - "data.families": - {"$in": ["look"]} - }, - sort=[("name", -1)], - projection={"_id": True, "name": True}) + version = legacy_io.find_one( + { + "parent": subset_data['_id'], + "type": "version", + "data.families": {"$in": ["look"]} + }, + sort=[("name", -1)], + projection={ + "_id": True, + "name": True + } + ) log.debug("Assigning look '{}' ".format(subset, version["name"])) @@ -2135,7 +2152,7 @@ def reset_scene_resolution(): None """ - 
project_doc = io.find_one({"type": "project"}) + project_doc = legacy_io.find_one({"type": "project"}) project_data = project_doc["data"] asset_data = lib.get_asset()["data"] @@ -2168,13 +2185,13 @@ def set_context_settings(): """ # Todo (Wijnand): apply renderer and resolution of project - project_doc = io.find_one({"type": "project"}) + project_doc = legacy_io.find_one({"type": "project"}) project_data = project_doc["data"] asset_data = lib.get_asset()["data"] # Set project fps fps = asset_data.get("fps", project_data.get("fps", 25)) - api.Session["AVALON_FPS"] = str(fps) + legacy_io.Session["AVALON_FPS"] = str(fps) set_scene_fps(fps) reset_scene_resolution() @@ -2209,15 +2226,17 @@ def validate_fps(): parent = get_main_window() - dialog = popup.Popup2(parent=parent) + dialog = popup.PopupUpdateKeys(parent=parent) dialog.setModal(True) - dialog.setWindowTitle("Maya scene not in line with project") - dialog.setMessage("The FPS is out of sync, please fix") + dialog.setWindowTitle("Maya scene does not match project FPS") + dialog.setMessage("Scene %i FPS does not match project %i FPS" % + (current_fps, fps)) + dialog.setButtonText("Fix") # Set new text for button (add optional argument for the popup?) 
toggle = dialog.widgets["toggle"] update = toggle.isChecked() - dialog.on_show.connect(lambda: set_scene_fps(fps, update)) + dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update)) dialog.show() @@ -2612,7 +2631,7 @@ def get_attr_in_layer(attr, layer): def fix_incompatible_containers(): """Backwards compatibility: old containers to use new ReferenceLoader""" - host = api.registered_host() + host = registered_host() for container in host.ls(): loader = container['loader'] @@ -2934,7 +2953,7 @@ def update_content_on_context_change(): This will update scene content to match new asset on context change """ scene_sets = cmds.listSets(allSets=True) - new_asset = api.Session["AVALON_ASSET"] + new_asset = legacy_io.Session["AVALON_ASSET"] new_data = lib.get_asset()["data"] for s in scene_sets: try: diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py index 0c34998874..ff04fa7aa2 100644 --- a/openpype/hosts/maya/api/lib_renderproducts.py +++ b/openpype/hosts/maya/api/lib_renderproducts.py @@ -77,8 +77,10 @@ IMAGE_PREFIXES = { "arnold": "defaultRenderGlobals.imageFilePrefix", "renderman": "rmanGlobals.imageFileFormat", "redshift": "defaultRenderGlobals.imageFilePrefix", + "mayahardware2": "defaultRenderGlobals.imageFilePrefix" } +RENDERMAN_IMAGE_DIR = "maya//" @attr.s class LayerMetadata(object): @@ -154,7 +156,8 @@ def get(layer, render_instance=None): "arnold": RenderProductsArnold, "vray": RenderProductsVray, "redshift": RenderProductsRedshift, - "renderman": RenderProductsRenderman + "renderman": RenderProductsRenderman, + "mayahardware2": RenderProductsMayaHardware }.get(renderer_name.lower(), None) if renderer is None: raise UnsupportedRendererException( @@ -1054,6 +1057,8 @@ class RenderProductsRenderman(ARenderProducts): :func:`ARenderProducts.get_render_products()` """ + from rfm2.api.displays import get_displays # noqa + cameras = [ self.sanitize_camera_name(c) for c in self.get_renderable_cameras() 
@@ -1066,47 +1071,122 @@ class RenderProductsRenderman(ARenderProducts): ] products = [] - default_ext = "exr" - displays = cmds.listConnections("rmanGlobals.displays") - for aov in displays: - enabled = self._get_attr(aov, "enabled") + # NOTE: This is guessing extensions from renderman display types. + # Some of them are just framebuffers, d_texture format can be + # set in display setting. We set those now to None, but it + # should be handled more gracefully. + display_types = { + "d_deepexr": "exr", + "d_it": None, + "d_null": None, + "d_openexr": "exr", + "d_png": "png", + "d_pointcloud": "ptc", + "d_targa": "tga", + "d_texture": None, + "d_tiff": "tif" + } + + displays = get_displays()["displays"] + for name, display in displays.items(): + enabled = display["params"]["enable"]["value"] if not enabled: continue - aov_name = str(aov) + aov_name = name if aov_name == "rmanDefaultDisplay": aov_name = "beauty" + extensions = display_types.get( + display["driverNode"]["type"], "exr") + for camera in cameras: product = RenderProduct(productName=aov_name, - ext=default_ext, + ext=extensions, camera=camera) products.append(product) return products - def get_files(self, product, camera): + def get_files(self, product): """Get expected files. - In renderman we hack it with prepending path. This path would - normally be translated from `rmanGlobals.imageOutputDir`. We skip - this and hardcode prepend path we expect. There is no place for user - to mess around with this settings anyway and it is enforced in - render settings validator. 
""" - files = super(RenderProductsRenderman, self).get_files(product, camera) + files = super(RenderProductsRenderman, self).get_files(product) layer_data = self.layer_data new_files = [] + + resolved_image_dir = re.sub("", layer_data.sceneName, RENDERMAN_IMAGE_DIR, flags=re.IGNORECASE) # noqa: E501 + resolved_image_dir = re.sub("", layer_data.layerName, resolved_image_dir, flags=re.IGNORECASE) # noqa: E501 for file in files: - new_file = "{}/{}/{}".format( - layer_data["sceneName"], layer_data["layerName"], file - ) + new_file = "{}/{}".format(resolved_image_dir, file) new_files.append(new_file) return new_files +class RenderProductsMayaHardware(ARenderProducts): + """Expected files for MayaHardware renderer.""" + + renderer = "mayahardware2" + + extensions = [ + {"label": "JPEG", "index": 8, "extension": "jpg"}, + {"label": "PNG", "index": 32, "extension": "png"}, + {"label": "EXR(exr)", "index": 40, "extension": "exr"} + ] + + def _get_extension(self, value): + result = None + if isinstance(value, int): + extensions = { + extension["index"]: extension["extension"] + for extension in self.extensions + } + try: + result = extensions[value] + except KeyError: + raise NotImplementedError( + "Could not find extension for {}".format(value) + ) + + if isinstance(value, six.string_types): + extensions = { + extension["label"]: extension["extension"] + for extension in self.extensions + } + try: + result = extensions[value] + except KeyError: + raise NotImplementedError( + "Could not find extension for {}".format(value) + ) + + if not result: + raise NotImplementedError( + "Could not find extension for {}".format(value) + ) + + return result + + def get_render_products(self): + """Get all AOVs. 
+ See Also: + :func:`ARenderProducts.get_render_products()` + """ + ext = self._get_extension( + self._get_attr("defaultRenderGlobals.imageFormat") + ) + + products = [] + for cam in self.get_renderable_cameras(): + product = RenderProduct(productName="beauty", ext=ext, camera=cam) + products.append(product) + + return products + + class AOVError(Exception): """Custom exception for determining AOVs.""" diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py index 5f0fc39bf3..97f06c43af 100644 --- a/openpype/hosts/maya/api/menu.py +++ b/openpype/hosts/maya/api/menu.py @@ -6,10 +6,9 @@ from Qt import QtWidgets, QtGui import maya.utils import maya.cmds as cmds -import avalon.api - from openpype.api import BuildWorkfile from openpype.settings import get_project_settings +from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools from openpype.hosts.maya.api import lib from .lib import get_main_window, IS_HEADLESS @@ -40,15 +39,15 @@ def install(): parent_widget = get_main_window() cmds.menu( MENU_NAME, - label=avalon.api.Session["AVALON_LABEL"], + label=legacy_io.Session["AVALON_LABEL"], tearOff=True, parent="MayaWindow" ) # Create context menu context_label = "{}, {}".format( - avalon.api.Session["AVALON_ASSET"], - avalon.api.Session["AVALON_TASK"] + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"] ) cmds.menuItem( "currentContext", @@ -211,7 +210,7 @@ def update_menu_task_label(): return label = "{}, {}".format( - avalon.api.Session["AVALON_ASSET"], - avalon.api.Session["AVALON_TASK"] + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"] ) cmds.menuItem(object_name, edit=True, label=label) diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index f6f3472eef..b0e8fac635 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -7,7 +7,6 @@ from maya import utils, cmds, OpenMaya import maya.api.OpenMaya as om 
import pyblish.api -import avalon.api import openpype.hosts.maya from openpype.tools.utils import host_tools @@ -18,6 +17,7 @@ from openpype.lib import ( ) from openpype.lib.path_tools import HostDirmap from openpype.pipeline import ( + legacy_io, register_loader_plugin_path, register_inventory_action_path, register_creator_plugin_path, @@ -93,7 +93,7 @@ def _set_project(): None """ - workdir = avalon.api.Session["AVALON_WORKDIR"] + workdir = legacy_io.Session["AVALON_WORKDIR"] try: os.makedirs(workdir) @@ -448,7 +448,7 @@ def on_open(): dialog.setWindowTitle("Maya scene has outdated content") dialog.setMessage("There are outdated containers in " "your Maya scene.") - dialog.on_show.connect(_on_show_inventory) + dialog.on_clicked.connect(_on_show_inventory) dialog.show() @@ -473,7 +473,7 @@ def on_task_changed(): # Run menu.update_menu_task_label() - workdir = avalon.api.Session["AVALON_WORKDIR"] + workdir = legacy_io.Session["AVALON_WORKDIR"] if os.path.exists(workdir): log.info("Updating Maya workspace for task change to %s", workdir) @@ -494,9 +494,9 @@ def on_task_changed(): lib.update_content_on_context_change() msg = " project: {}\n asset: {}\n task:{}".format( - avalon.api.Session["AVALON_PROJECT"], - avalon.api.Session["AVALON_ASSET"], - avalon.api.Session["AVALON_TASK"] + legacy_io.Session["AVALON_PROJECT"], + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"] ) lib.show_message( diff --git a/openpype/hosts/maya/api/setdress.py b/openpype/hosts/maya/api/setdress.py index 0b60564e5e..f8d3ed79b8 100644 --- a/openpype/hosts/maya/api/setdress.py +++ b/openpype/hosts/maya/api/setdress.py @@ -10,8 +10,9 @@ from bson.objectid import ObjectId from maya import cmds -from avalon import io from openpype.pipeline import ( + schema, + legacy_io, discover_loader_plugins, loaders_from_representation, load_container, @@ -253,7 +254,6 @@ def get_contained_containers(container): """ - import avalon.schema from .pipeline import parse_container # Get avalon 
containers in this package setdress container @@ -263,7 +263,7 @@ def get_contained_containers(container): try: member_container = parse_container(node) containers.append(member_container) - except avalon.schema.ValidationError: + except schema.ValidationError: pass return containers @@ -283,21 +283,23 @@ def update_package_version(container, version): """ # Versioning (from `core.maya.pipeline`) - current_representation = io.find_one({ + current_representation = legacy_io.find_one({ "_id": ObjectId(container["representation"]) }) assert current_representation is not None, "This is a bug" - version_, subset, asset, project = io.parenthood(current_representation) + version_, subset, asset, project = legacy_io.parenthood( + current_representation + ) if version == -1: - new_version = io.find_one({ + new_version = legacy_io.find_one({ "type": "version", "parent": subset["_id"] }, sort=[("name", -1)]) else: - new_version = io.find_one({ + new_version = legacy_io.find_one({ "type": "version", "parent": subset["_id"], "name": version, @@ -306,7 +308,7 @@ def update_package_version(container, version): assert new_version is not None, "This is a bug" # Get the new representation (new file) - new_representation = io.find_one({ + new_representation = legacy_io.find_one({ "type": "representation", "parent": new_version["_id"], "name": current_representation["name"] @@ -328,7 +330,7 @@ def update_package(set_container, representation): """ # Load the original package data - current_representation = io.find_one({ + current_representation = legacy_io.find_one({ "_id": ObjectId(set_container['representation']), "type": "representation" }) @@ -479,10 +481,10 @@ def update_scene(set_container, containers, current_data, new_data, new_file): # Check whether the conversion can be done by the Loader. # They *must* use the same asset, subset and Loader for # `update_container` to make sense. 
- old = io.find_one({ + old = legacy_io.find_one({ "_id": ObjectId(representation_current) }) - new = io.find_one({ + new = legacy_io.find_one({ "_id": ObjectId(representation_new) }) is_valid = compare_representations(old=old, new=new) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 4f0a394f85..93ee6679e5 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -4,8 +4,6 @@ import os import json import appdirs import requests -import six -import sys from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup @@ -14,14 +12,16 @@ from openpype.hosts.maya.api import ( lib, plugin ) +from openpype.lib import requests_get from openpype.api import ( get_system_settings, get_project_settings, get_asset) from openpype.modules import ModulesManager -from openpype.pipeline import CreatorError - -from avalon.api import Session +from openpype.pipeline import ( + CreatorError, + legacy_io, +) class CreateRender(plugin.Creator): @@ -76,16 +76,20 @@ class CreateRender(plugin.Creator): 'mentalray': 'defaultRenderGlobals.imageFilePrefix', 'vray': 'vraySettings.fileNamePrefix', 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'defaultRenderGlobals.imageFilePrefix', - 'redshift': 'defaultRenderGlobals.imageFilePrefix' + 'renderman': 'rmanGlobals.imageFileFormat', + 'redshift': 'defaultRenderGlobals.imageFilePrefix', + 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', } _image_prefixes = { 'mentalray': 'maya///{aov_separator}', # noqa 'vray': 'maya///', 'arnold': 'maya///{aov_separator}', # noqa - 'renderman': 'maya///{aov_separator}', - 'redshift': 'maya///' # noqa + # this needs `imageOutputDir` + # (/renders/maya/) set separately + 'renderman': '_..', + 'redshift': 'maya///', # noqa + 'mayahardware2': 'maya///', # noqa } _aov_chars = { @@ -104,7 +108,7 @@ class CreateRender(plugin.Creator): 
self.deadline_servers = {} return self._project_settings = get_project_settings( - Session["AVALON_PROJECT"]) + legacy_io.Session["AVALON_PROJECT"]) # project_settings/maya/create/CreateRender/aov_separator try: @@ -117,6 +121,8 @@ class CreateRender(plugin.Creator): except KeyError: self.aov_separator = "_" + manager = ModulesManager() + self.deadline_module = manager.modules_by_name["deadline"] try: default_servers = deadline_settings["deadline_urls"] project_servers = ( @@ -133,10 +139,8 @@ class CreateRender(plugin.Creator): except AttributeError: # Handle situation were we had only one url for deadline. - manager = ModulesManager() - deadline_module = manager.modules_by_name["deadline"] # get default deadline webservice url from deadline module - self.deadline_servers = deadline_module.deadline_urls + self.deadline_servers = self.deadline_module.deadline_urls def process(self): """Entry point.""" @@ -205,48 +209,31 @@ class CreateRender(plugin.Creator): def _deadline_webservice_changed(self): """Refresh Deadline server dependent options.""" # get selected server - from maya import cmds webservice = self.deadline_servers[ self.server_aliases[ cmds.getAttr("{}.deadlineServers".format(self.instance)) ] ] - pools = self._get_deadline_pools(webservice) + pools = self.deadline_module.get_deadline_pools(webservice, self.log) cmds.deleteAttr("{}.primaryPool".format(self.instance)) cmds.deleteAttr("{}.secondaryPool".format(self.instance)) + + pool_setting = (self._project_settings["deadline"] + ["publish"] + ["CollectDeadlinePools"]) + + primary_pool = pool_setting["primary_pool"] + sorted_pools = self._set_default_pool(list(pools), primary_pool) cmds.addAttr(self.instance, longName="primaryPool", attributeType="enum", - enumName=":".join(pools)) - cmds.addAttr(self.instance, longName="secondaryPool", + enumName=":".join(sorted_pools)) + + pools = ["-"] + pools + secondary_pool = pool_setting["secondary_pool"] + sorted_pools = self._set_default_pool(list(pools), 
secondary_pool) + cmds.addAttr("{}.secondaryPool".format(self.instance), attributeType="enum", - enumName=":".join(["-"] + pools)) - - def _get_deadline_pools(self, webservice): - # type: (str) -> list - """Get pools from Deadline. - Args: - webservice (str): Server url. - Returns: - list: Pools. - Throws: - RuntimeError: If deadline webservice is unreachable. - - """ - argument = "{}/api/pools?NamesOnly=true".format(webservice) - try: - response = self._requests_get(argument) - except requests.exceptions.ConnectionError as exc: - msg = 'Cannot connect to deadline web service' - self.log.error(msg) - six.reraise( - RuntimeError, - RuntimeError('{} - {}'.format(msg, exc)), - sys.exc_info()[2]) - if not response.ok: - self.log.warning("No pools retrieved") - return [] - - return response.json() + enumName=":".join(sorted_pools)) def _create_render_settings(self): """Create instance settings.""" @@ -295,7 +282,8 @@ class CreateRender(plugin.Creator): # use first one for initial list of pools. 
deadline_url = next(iter(self.deadline_servers.values())) - pool_names = self._get_deadline_pools(deadline_url) + pool_names = self.deadline_module.get_deadline_pools(deadline_url, + self.log) maya_submit_dl = self._project_settings.get( "deadline", {}).get( "publish", {}).get( @@ -326,12 +314,27 @@ class CreateRender(plugin.Creator): self.log.info(" - pool: {}".format(pool["name"])) pool_names.append(pool["name"]) - self.data["primaryPool"] = pool_names + pool_setting = (self._project_settings["deadline"] + ["publish"] + ["CollectDeadlinePools"]) + primary_pool = pool_setting["primary_pool"] + self.data["primaryPool"] = self._set_default_pool(pool_names, + primary_pool) # We add a string "-" to allow the user to not # set any secondary pools - self.data["secondaryPool"] = ["-"] + pool_names + pool_names = ["-"] + pool_names + secondary_pool = pool_setting["secondary_pool"] + self.data["secondaryPool"] = self._set_default_pool(pool_names, + secondary_pool) self.options = {"useSelection": False} # Force no content + def _set_default_pool(self, pool_names, pool_value): + """Reorder pool names, default should come first""" + if pool_value and pool_value in pool_names: + pool_names.remove(pool_value) + pool_names = [pool_value] + pool_names + return pool_names + def _load_credentials(self): """Load Muster credentials. 
@@ -366,7 +369,7 @@ class CreateRender(plugin.Creator): """ params = {"authToken": self._token} api_entry = "/api/pools/list" - response = self._requests_get(self.MUSTER_REST_URL + api_entry, + response = requests_get(self.MUSTER_REST_URL + api_entry, params=params) if response.status_code != 200: if response.status_code == 401: @@ -392,45 +395,11 @@ class CreateRender(plugin.Creator): api_url = "{}/muster/show_login".format( os.environ["OPENPYPE_WEBSERVER_URL"]) self.log.debug(api_url) - login_response = self._requests_get(api_url, timeout=1) + login_response = requests_get(api_url, timeout=1) if login_response.status_code != 200: self.log.error("Cannot show login form to Muster") raise Exception("Cannot show login form to Muster") - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if "verify" not in kwargs: - kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """Wrap request get method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. 
- - """ - if "verify" not in kwargs: - kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - return requests.get(*args, **kwargs) - def _set_default_renderer_settings(self, renderer): """Set basic settings based on renderer. @@ -475,6 +444,10 @@ class CreateRender(plugin.Creator): self._set_global_output_settings() + if renderer == "renderman": + cmds.setAttr("rmanGlobals.imageOutputDir", + "maya//", type="string") + def _set_vray_settings(self, asset): # type: (dict) -> None """Sets important settings for Vray.""" diff --git a/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py b/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py index a6deeeee2e..1a8e84c80d 100644 --- a/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py +++ b/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Creator for Unreal Skeletal Meshes.""" from openpype.hosts.maya.api import plugin, lib -from avalon.api import Session +from openpype.pipeline import legacy_io from maya import cmds # noqa @@ -26,7 +26,7 @@ class CreateUnrealSkeletalMesh(plugin.Creator): dynamic_data = super(CreateUnrealSkeletalMesh, cls).get_dynamic_data( variant, task_name, asset_id, project_name, host_name ) - dynamic_data["asset"] = Session.get("AVALON_ASSET") + dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET") return dynamic_data def process(self): diff --git a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py b/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py index f62d15fe62..4e4417ff34 100644 --- a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py +++ b/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """Creator for Unreal Static Meshes.""" from openpype.hosts.maya.api import plugin, lib -from avalon.api import Session from openpype.api import get_project_settings +from openpype.pipeline import 
legacy_io from maya import cmds # noqa @@ -18,7 +18,7 @@ class CreateUnrealStaticMesh(plugin.Creator): """Constructor.""" super(CreateUnrealStaticMesh, self).__init__(*args, **kwargs) self._project_settings = get_project_settings( - Session["AVALON_PROJECT"]) + legacy_io.Session["AVALON_PROJECT"]) @classmethod def get_dynamic_data( @@ -27,7 +27,7 @@ class CreateUnrealStaticMesh(plugin.Creator): dynamic_data = super(CreateUnrealStaticMesh, cls).get_dynamic_data( variant, task_name, asset_id, project_name, host_name ) - dynamic_data["asset"] = Session.get("AVALON_ASSET") + dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET") return dynamic_data def process(self): diff --git a/openpype/hosts/maya/plugins/create/create_vrayscene.py b/openpype/hosts/maya/plugins/create/create_vrayscene.py index fa9c59e016..45c4b7e443 100644 --- a/openpype/hosts/maya/plugins/create/create_vrayscene.py +++ b/openpype/hosts/maya/plugins/create/create_vrayscene.py @@ -4,8 +4,6 @@ import os import json import appdirs import requests -import six -import sys from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup @@ -19,11 +17,13 @@ from openpype.api import ( get_project_settings ) -from openpype.pipeline import CreatorError +from openpype.lib import requests_get +from openpype.pipeline import ( + CreatorError, + legacy_io, +) from openpype.modules import ModulesManager -from avalon.api import Session - class CreateVRayScene(plugin.Creator): """Create Vray Scene.""" @@ -40,11 +40,15 @@ class CreateVRayScene(plugin.Creator): self._rs = renderSetup.instance() self.data["exportOnFarm"] = False deadline_settings = get_system_settings()["modules"]["deadline"] + + manager = ModulesManager() + self.deadline_module = manager.modules_by_name["deadline"] + if not deadline_settings["enabled"]: self.deadline_servers = {} return self._project_settings = get_project_settings( - Session["AVALON_PROJECT"]) + legacy_io.Session["AVALON_PROJECT"]) try: default_servers = 
deadline_settings["deadline_urls"] @@ -62,10 +66,8 @@ class CreateVRayScene(plugin.Creator): except AttributeError: # Handle situation were we had only one url for deadline. - manager = ModulesManager() - deadline_module = manager.modules_by_name["deadline"] # get default deadline webservice url from deadline module - self.deadline_servers = deadline_module.deadline_urls + self.deadline_servers = self.deadline_module.deadline_urls def process(self): """Entry point.""" @@ -128,7 +130,7 @@ class CreateVRayScene(plugin.Creator): cmds.getAttr("{}.deadlineServers".format(self.instance)) ] ] - pools = self._get_deadline_pools(webservice) + pools = self.deadline_module.get_deadline_pools(webservice) cmds.deleteAttr("{}.primaryPool".format(self.instance)) cmds.deleteAttr("{}.secondaryPool".format(self.instance)) cmds.addAttr(self.instance, longName="primaryPool", @@ -138,33 +140,6 @@ class CreateVRayScene(plugin.Creator): attributeType="enum", enumName=":".join(["-"] + pools)) - def _get_deadline_pools(self, webservice): - # type: (str) -> list - """Get pools from Deadline. - Args: - webservice (str): Server url. - Returns: - list: Pools. - Throws: - RuntimeError: If deadline webservice is unreachable. 
- - """ - argument = "{}/api/pools?NamesOnly=true".format(webservice) - try: - response = self._requests_get(argument) - except requests.exceptions.ConnectionError as exc: - msg = 'Cannot connect to deadline web service' - self.log.error(msg) - six.reraise( - CreatorError, - CreatorError('{} - {}'.format(msg, exc)), - sys.exc_info()[2]) - if not response.ok: - self.log.warning("No pools retrieved") - return [] - - return response.json() - def _create_vray_instance_settings(self): # get pools pools = [] @@ -195,7 +170,7 @@ class CreateVRayScene(plugin.Creator): for k in self.deadline_servers.keys() ][0] - pool_names = self._get_deadline_pools(deadline_url) + pool_names = self.deadline_module.get_deadline_pools(deadline_url) if muster_enabled: self.log.info(">>> Loading Muster credentials ...") @@ -259,8 +234,8 @@ class CreateVRayScene(plugin.Creator): """ params = {"authToken": self._token} api_entry = "/api/pools/list" - response = self._requests_get(self.MUSTER_REST_URL + api_entry, - params=params) + response = requests_get(self.MUSTER_REST_URL + api_entry, + params=params) if response.status_code != 200: if response.status_code == 401: self.log.warning("Authentication token expired.") @@ -285,45 +260,7 @@ class CreateVRayScene(plugin.Creator): api_url = "{}/muster/show_login".format( os.environ["OPENPYPE_WEBSERVER_URL"]) self.log.debug(api_url) - login_response = self._requests_get(api_url, timeout=1) + login_response = requests_get(api_url, timeout=1) if login_response.status_code != 200: self.log.error("Cannot show login form to Muster") raise CreatorError("Cannot show login form to Muster") - - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. 
- - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if "verify" not in kwargs: - kwargs["verify"] = ( - False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True - ) # noqa - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """Wrap request get method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if "verify" not in kwargs: - kwargs["verify"] = ( - False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True - ) # noqa - return requests.get(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/inventory/import_modelrender.py b/openpype/hosts/maya/plugins/inventory/import_modelrender.py index c2e43f196f..a5367f16e5 100644 --- a/openpype/hosts/maya/plugins/inventory/import_modelrender.py +++ b/openpype/hosts/maya/plugins/inventory/import_modelrender.py @@ -1,9 +1,10 @@ import json -from avalon import io from bson.objectid import ObjectId + from openpype.pipeline import ( InventoryAction, get_representation_context, + legacy_io, ) from openpype.hosts.maya.api.lib import ( maintained_selection, @@ -39,7 +40,7 @@ class ImportModelRender(InventoryAction): else: nodes.append(n) - repr_doc = io.find_one({ + repr_doc = legacy_io.find_one({ "_id": ObjectId(container["representation"]), }) version_id = repr_doc["parent"] @@ -63,7 +64,7 @@ class ImportModelRender(InventoryAction): from maya import cmds # Get representations of shader file and relationships - look_repr = io.find_one({ + look_repr = legacy_io.find_one({ "type": "representation", "parent": version_id, "name": {"$regex": 
self.scene_type_regex}, @@ -72,7 +73,7 @@ class ImportModelRender(InventoryAction): print("No model render sets for this model version..") return - json_repr = io.find_one({ + json_repr = legacy_io.find_one({ "type": "representation", "parent": version_id, "name": self.look_data_type, diff --git a/openpype/hosts/maya/plugins/load/_load_animation.py b/openpype/hosts/maya/plugins/load/_load_animation.py index bce1f0fc67..9c37e498ef 100644 --- a/openpype/hosts/maya/plugins/load/_load_animation.py +++ b/openpype/hosts/maya/plugins/load/_load_animation.py @@ -2,7 +2,7 @@ import openpype.hosts.maya.api.plugin class AbcLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): - """Specific loader of Alembic for the avalon.animation family""" + """Loader to reference an Alembic file""" families = ["animation", "camera", diff --git a/openpype/hosts/maya/plugins/load/actions.py b/openpype/hosts/maya/plugins/load/actions.py index 483ad32402..253dae1e43 100644 --- a/openpype/hosts/maya/plugins/load/actions.py +++ b/openpype/hosts/maya/plugins/load/actions.py @@ -1,7 +1,7 @@ """A module containing generic loader actions that will display in the Loader. 
""" - +import qargparse from openpype.pipeline import load from openpype.hosts.maya.api.lib import ( maintained_selection, @@ -10,7 +10,7 @@ from openpype.hosts.maya.api.lib import ( class SetFrameRangeLoader(load.LoaderPlugin): - """Specific loader of Alembic for the avalon.animation family""" + """Set frame range excluding pre- and post-handles""" families = ["animation", "camera", @@ -44,7 +44,7 @@ class SetFrameRangeLoader(load.LoaderPlugin): class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): - """Specific loader of Alembic for the avalon.animation family""" + """Set frame range including pre- and post-handles""" families = ["animation", "camera", @@ -98,6 +98,15 @@ class ImportMayaLoader(load.LoaderPlugin): icon = "arrow-circle-down" color = "#775555" + options = [ + qargparse.Boolean( + "clean_import", + label="Clean import", + default=False, + help="Should all occurences of cbId be purged?" + ) + ] + def load(self, context, name=None, namespace=None, data=None): import maya.cmds as cmds @@ -114,13 +123,22 @@ class ImportMayaLoader(load.LoaderPlugin): ) with maintained_selection(): - cmds.file(self.fname, - i=True, - preserveReferences=True, - namespace=namespace, - returnNewNodes=True, - groupReference=True, - groupName="{}:{}".format(namespace, name)) + nodes = cmds.file(self.fname, + i=True, + preserveReferences=True, + namespace=namespace, + returnNewNodes=True, + groupReference=True, + groupName="{}:{}".format(namespace, name)) + + if data.get("clean_import", False): + remove_attributes = ["cbId"] + for node in nodes: + for attr in remove_attributes: + if cmds.attributeQuery(attr, node=node, exists=True): + full_attr = "{}.{}".format(node, attr) + print("Removing {}".format(full_attr)) + cmds.deleteAttr(full_attr) # We do not containerize imported content, it remains unmanaged return diff --git a/openpype/hosts/maya/plugins/load/load_ass.py b/openpype/hosts/maya/plugins/load/load_ass.py index 18de4df3b1..a284b7ec1f 100644 --- 
a/openpype/hosts/maya/plugins/load/load_ass.py +++ b/openpype/hosts/maya/plugins/load/load_ass.py @@ -16,7 +16,7 @@ from openpype.hosts.maya.api.pipeline import containerise class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): - """Load the Proxy""" + """Load Arnold Proxy as reference""" families = ["ass"] representations = ["ass"] diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py index d8844ffea6..ce814e1299 100644 --- a/openpype/hosts/maya/plugins/load/load_audio.py +++ b/openpype/hosts/maya/plugins/load/load_audio.py @@ -1,8 +1,9 @@ from maya import cmds, mel -from avalon import io + from openpype.pipeline import ( + legacy_io, load, - get_representation_path + get_representation_path, ) from openpype.hosts.maya.api.pipeline import containerise from openpype.hosts.maya.api.lib import unique_namespace @@ -64,9 +65,9 @@ class AudioLoader(load.LoaderPlugin): ) # Set frame range. - version = io.find_one({"_id": representation["parent"]}) - subset = io.find_one({"_id": version["parent"]}) - asset = io.find_one({"_id": subset["parent"]}) + version = legacy_io.find_one({"_id": representation["parent"]}) + subset = legacy_io.find_one({"_id": version["parent"]}) + asset = legacy_io.find_one({"_id": subset["parent"]}) audio_node.sourceStart.set(1 - asset["data"]["frameStart"]) audio_node.sourceEnd.set(asset["data"]["frameEnd"]) diff --git a/openpype/hosts/maya/plugins/load/load_gpucache.py b/openpype/hosts/maya/plugins/load/load_gpucache.py index 591e568e4c..6d5e945508 100644 --- a/openpype/hosts/maya/plugins/load/load_gpucache.py +++ b/openpype/hosts/maya/plugins/load/load_gpucache.py @@ -8,7 +8,7 @@ from openpype.api import get_project_settings class GpuCacheLoader(load.LoaderPlugin): - """Load model Alembic as gpuCache""" + """Load Alembic as gpuCache""" families = ["model"] representations = ["abc"] diff --git a/openpype/hosts/maya/plugins/load/load_image_plane.py 
b/openpype/hosts/maya/plugins/load/load_image_plane.py index b250986489..5e44917f28 100644 --- a/openpype/hosts/maya/plugins/load/load_image_plane.py +++ b/openpype/hosts/maya/plugins/load/load_image_plane.py @@ -1,7 +1,7 @@ from Qt import QtWidgets, QtCore -from avalon import io from openpype.pipeline import ( + legacy_io, load, get_representation_path ) @@ -83,7 +83,7 @@ class ImagePlaneLoader(load.LoaderPlugin): families = ["image", "plate", "render"] label = "Load imagePlane" - representations = ["mov", "exr", "preview", "png"] + representations = ["mov", "exr", "preview", "png", "jpg"] icon = "image" color = "orange" @@ -216,9 +216,9 @@ class ImagePlaneLoader(load.LoaderPlugin): ) # Set frame range. - version = io.find_one({"_id": representation["parent"]}) - subset = io.find_one({"_id": version["parent"]}) - asset = io.find_one({"_id": subset["parent"]}) + version = legacy_io.find_one({"_id": representation["parent"]}) + subset = legacy_io.find_one({"_id": version["parent"]}) + asset = legacy_io.find_one({"_id": subset["parent"]}) start_frame = asset["data"]["frameStart"] end_frame = asset["data"]["frameEnd"] image_plane_shape.frameOffset.set(1 - start_frame) diff --git a/openpype/hosts/maya/plugins/load/load_look.py b/openpype/hosts/maya/plugins/load/load_look.py index 8f02ed59b8..80eac8e0b5 100644 --- a/openpype/hosts/maya/plugins/load/load_look.py +++ b/openpype/hosts/maya/plugins/load/load_look.py @@ -5,8 +5,10 @@ from collections import defaultdict from Qt import QtWidgets -from avalon import io -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + legacy_io, + get_representation_path, +) import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api import lib from openpype.widgets.message_window import ScrollMessageBox @@ -71,7 +73,7 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): shader_nodes = cmds.ls(members, type='shadingEngine') nodes = set(self._get_nodes_with_shader(shader_nodes)) - 
json_representation = io.find_one({ + json_representation = legacy_io.find_one({ "type": "representation", "parent": representation['parent'], "name": "json" diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index a7222edfd4..d65b5a2c1e 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ b/openpype/hosts/maya/plugins/load/load_reference.py @@ -1,16 +1,18 @@ import os from maya import cmds -from avalon import api from openpype.api import get_project_settings from openpype.lib import get_creator_by_name -from openpype.pipeline import legacy_create +from openpype.pipeline import ( + legacy_io, + legacy_create, +) import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api.lib import maintained_selection class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): - """Load the model""" + """Reference file""" families = ["model", "pointcache", @@ -143,7 +145,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): roots = cmds.ls(self[:], assemblies=True, long=True) assert roots, "No root nodes in rig, this is a bug." 
- asset = api.Session["AVALON_ASSET"] + asset = legacy_io.Session["AVALON_ASSET"] dependency = str(context["representation"]["_id"]) self.log.info("Creating subset: {}".format(namespace)) diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py index 4f14235bfb..3a16264ec0 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py +++ b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py @@ -74,6 +74,7 @@ def _fix_duplicate_vvg_callbacks(): class LoadVDBtoVRay(load.LoaderPlugin): + """Load OpenVDB in a V-Ray Volume Grid""" families = ["vdbcache"] representations = ["vdb"] diff --git a/openpype/hosts/maya/plugins/load/load_vrayproxy.py b/openpype/hosts/maya/plugins/load/load_vrayproxy.py index 69d54df62b..22d56139f6 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayproxy.py +++ b/openpype/hosts/maya/plugins/load/load_vrayproxy.py @@ -11,9 +11,9 @@ from bson.objectid import ObjectId import maya.cmds as cmds -from avalon import io from openpype.api import get_project_settings from openpype.pipeline import ( + legacy_io, load, get_representation_path ) @@ -185,12 +185,11 @@ class VRayProxyLoader(load.LoaderPlugin): """ self.log.debug( "Looking for abc in published representations of this version.") - abc_rep = io.find_one( - { - "type": "representation", - "parent": ObjectId(version_id), - "name": "abc" - }) + abc_rep = legacy_io.find_one({ + "type": "representation", + "parent": ObjectId(version_id), + "name": "abc" + }) if abc_rep: self.log.debug("Found, we'll link alembic to vray proxy.") diff --git a/openpype/hosts/maya/plugins/load/load_yeti_cache.py b/openpype/hosts/maya/plugins/load/load_yeti_cache.py index c64e1c540b..fb903785ae 100644 --- a/openpype/hosts/maya/plugins/load/load_yeti_cache.py +++ b/openpype/hosts/maya/plugins/load/load_yeti_cache.py @@ -7,9 +7,9 @@ from pprint import pprint from maya import cmds -from avalon import io from openpype.api import get_project_settings from 
openpype.pipeline import ( + legacy_io, load, get_representation_path ) @@ -111,11 +111,11 @@ class YetiCacheLoader(load.LoaderPlugin): def update(self, container, representation): - io.install() + legacy_io.install() namespace = container["namespace"] container_node = container["objectName"] - fur_settings = io.find_one( + fur_settings = legacy_io.find_one( {"parent": representation["parent"], "name": "fursettings"} ) diff --git a/openpype/hosts/maya/plugins/publish/collect_ass.py b/openpype/hosts/maya/plugins/publish/collect_ass.py index 8e6691120a..7c9a1b76fb 100644 --- a/openpype/hosts/maya/plugins/publish/collect_ass.py +++ b/openpype/hosts/maya/plugins/publish/collect_ass.py @@ -1,23 +1,16 @@ from maya import cmds -import pymel.core as pm import pyblish.api -import avalon.api + class CollectAssData(pyblish.api.InstancePlugin): - """Collect Ass data - - """ + """Collect Ass data.""" order = pyblish.api.CollectorOrder + 0.2 label = 'Collect Ass' families = ["ass"] def process(self, instance): - - - context = instance.context - objsets = instance.data['setMembers'] for objset in objsets: diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index a525b562f3..e66983780e 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -49,8 +49,8 @@ import maya.app.renderSetup.model.renderSetup as renderSetup import pyblish.api -from avalon import api from openpype.lib import get_formatted_current_time +from openpype.pipeline import legacy_io from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products # noqa: E501 from openpype.hosts.maya.api import lib @@ -93,7 +93,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): render_globals = render_instance collected_render_layers = render_instance.data["setMembers"] filepath = context.data["currentFile"].replace("\\", "/") - asset = 
api.Session["AVALON_ASSET"] + asset = legacy_io.Session["AVALON_ASSET"] workspace = context.data["workspaceDir"] deadline_settings = ( @@ -208,6 +208,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin): product) }) + has_cameras = any(product.camera for product in render_products) + assert has_cameras, "No render cameras found." + self.log.info("multipart: {}".format( multipart)) assert exp_files, "no file names were generated, this is bug" @@ -323,8 +326,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "byFrameStep": int( self.get_render_attribute("byFrameStep", layer=layer_name)), - "renderer": self.get_render_attribute("currentRenderer", - layer=layer_name), + "renderer": self.get_render_attribute( + "currentRenderer", layer=layer_name).lower(), # instance subset "family": "renderlayer", "families": ["renderlayer"], @@ -386,6 +389,12 @@ class CollectMayaRender(pyblish.api.ContextPlugin): overrides = self.parse_options(str(render_globals)) data.update(**overrides) + # get string values for pools + primary_pool = overrides["renderGlobals"]["Pool"] + secondary_pool = overrides["renderGlobals"].get("SecondaryPool") + data["primaryPool"] = primary_pool + data["secondaryPool"] = secondary_pool + # Define nice label label = "{0} ({1})".format(expected_layer_name, data["asset"]) label += " [{0}-{1}]".format( diff --git a/openpype/hosts/maya/plugins/publish/collect_review.py b/openpype/hosts/maya/plugins/publish/collect_review.py index 60183341f9..1af92c3bfc 100644 --- a/openpype/hosts/maya/plugins/publish/collect_review.py +++ b/openpype/hosts/maya/plugins/publish/collect_review.py @@ -2,7 +2,8 @@ from maya import cmds, mel import pymel.core as pm import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io class CollectReview(pyblish.api.InstancePlugin): @@ -19,7 +20,7 @@ class CollectReview(pyblish.api.InstancePlugin): self.log.debug('instance: {}'.format(instance)) - task = avalon.api.Session["AVALON_TASK"] + task = 
legacy_io.Session["AVALON_TASK"] # get cameras members = instance.data['setMembers'] diff --git a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py index 327fc836dc..afdb570cbc 100644 --- a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py +++ b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py @@ -6,7 +6,8 @@ import maya.app.renderSetup.model.renderSetup as renderSetup from maya import cmds import pyblish.api -from avalon import api + +from openpype.pipeline import legacy_io from openpype.lib import get_formatted_current_time from openpype.hosts.maya.api import lib @@ -117,7 +118,7 @@ class CollectVrayScene(pyblish.api.InstancePlugin): # instance subset "family": "vrayscene_layer", "families": ["vrayscene_layer"], - "asset": api.Session["AVALON_ASSET"], + "asset": legacy_io.Session["AVALON_ASSET"], "time": get_formatted_current_time(), "author": context.data["user"], # Add source to allow tracing back to the scene from diff --git a/openpype/hosts/maya/plugins/publish/collect_workfile.py b/openpype/hosts/maya/plugins/publish/collect_workfile.py index ee676f50d0..12d86869ea 100644 --- a/openpype/hosts/maya/plugins/publish/collect_workfile.py +++ b/openpype/hosts/maya/plugins/publish/collect_workfile.py @@ -1,7 +1,8 @@ -import pyblish.api -import avalon.api import os +import pyblish.api + from maya import cmds +from openpype.pipeline import legacy_io class CollectWorkfile(pyblish.api.ContextPlugin): @@ -19,7 +20,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): folder, file = os.path.split(current_file) filename, ext = os.path.splitext(file) - task = avalon.api.Session["AVALON_TASK"] + task = legacy_io.Session["AVALON_TASK"] data = {} diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index 6fcc308f78..881705b92c 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ 
b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -12,9 +12,9 @@ from collections import OrderedDict from maya import cmds # noqa import pyblish.api -from avalon import io import openpype.api +from openpype.pipeline import legacy_io from openpype.hosts.maya.api import lib # Modes for transfer @@ -40,7 +40,7 @@ def find_paths_by_hash(texture_hash): """ key = "data.sourceHashes.{0}".format(texture_hash) - return io.distinct(key, {"type": "version"}) + return legacy_io.distinct(key, {"type": "version"}) def maketx(source, destination, *args): diff --git a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py index f852904580..c4250a20bd 100644 --- a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py +++ b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py @@ -4,14 +4,13 @@ import getpass import platform import appdirs -import requests from maya import cmds -from avalon import api - import pyblish.api +from openpype.lib import requests_post from openpype.hosts.maya.api import lib +from openpype.pipeline import legacy_io from openpype.api import get_system_settings @@ -184,7 +183,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "select": "name" } api_entry = '/api/templates/list' - response = self._requests_post( + response = requests_post( self.MUSTER_REST_URL + api_entry, params=params) if response.status_code != 200: self.log.error( @@ -235,7 +234,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "name": "submit" } api_entry = '/api/queue/actions' - response = self._requests_post( + response = requests_post( self.MUSTER_REST_URL + api_entry, params=params, json=payload) if response.status_code != 200: @@ -489,7 +488,6 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "MAYA_RENDER_DESC_PATH", "MAYA_MODULE_PATH", "ARNOLD_PLUGIN_PATH", - "AVALON_SCHEMA", "FTRACK_API_KEY", "FTRACK_API_USER", "FTRACK_SERVER", @@ -503,7 +501,7 @@ class 
MayaSubmitMuster(pyblish.api.InstancePlugin): "TOOL_ENV" ] environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) # self.log.debug("enviro: {}".format(pprint(environment))) for path in os.environ: if path.lower().startswith('pype_'): @@ -548,17 +546,3 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "%f=%d was rounded off to nearest integer" % (value, int(value)) ) - - def _requests_post(self, *args, **kwargs): - """ Wrapper for requests, disabling SSL certificate validation if - DONT_VERIFY_SSL environment variable is found. This is useful when - Deadline or Muster server are running with self-signed certificates - and their certificate is not added to trusted certificates on - client machines. - - WARNING: disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa - return requests.post(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/publish/validate_model_name.py b/openpype/hosts/maya/plugins/publish/validate_model_name.py index 3757e13a9b..50acf2b8b7 100644 --- a/openpype/hosts/maya/plugins/publish/validate_model_name.py +++ b/openpype/hosts/maya/plugins/publish/validate_model_name.py @@ -1,16 +1,17 @@ # -*- coding: utf-8 -*- """Validate model nodes names.""" +import os +import re from maya import cmds import pyblish.api + import openpype.api -import avalon.api +from openpype.pipeline import legacy_io import openpype.hosts.maya.api.action from openpype.hosts.maya.api.shader_definition_editor import ( DEFINITION_FILENAME) from openpype.lib.mongo import OpenPypeMongoConnection import gridfs -import re -import os class ValidateModelName(pyblish.api.InstancePlugin): @@ -68,7 +69,7 @@ class ValidateModelName(pyblish.api.InstancePlugin): invalid.append(top_group) else: if "asset" in 
r.groupindex: - if m.group("asset") != avalon.api.Session["AVALON_ASSET"]: + if m.group("asset") != legacy_io.Session["AVALON_ASSET"]: cls.log.error("Invalid asset name in top level group.") return top_group if "subset" in r.groupindex: @@ -76,7 +77,7 @@ class ValidateModelName(pyblish.api.InstancePlugin): cls.log.error("Invalid subset name in top level group.") return top_group if "project" in r.groupindex: - if m.group("project") != avalon.api.Session["AVALON_PROJECT"]: + if m.group("project") != legacy_io.Session["AVALON_PROJECT"]: cls.log.error("Invalid project name in top level group.") return top_group diff --git a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py b/openpype/hosts/maya/plugins/publish/validate_muster_connection.py index af32c82f97..6dc7bd3bc4 100644 --- a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py +++ b/openpype/hosts/maya/plugins/publish/validate_muster_connection.py @@ -2,9 +2,9 @@ import os import json import appdirs -import requests import pyblish.api +from openpype.lib import requests_get from openpype.plugin import contextplugin_should_run import openpype.hosts.maya.api.action @@ -51,7 +51,7 @@ class ValidateMusterConnection(pyblish.api.ContextPlugin): 'authToken': self._token } api_entry = '/api/pools/list' - response = self._requests_get( + response = requests_get( MUSTER_REST_URL + api_entry, params=params) assert response.status_code == 200, "invalid response from server" assert response.json()['ResponseData'], "invalid data in response" @@ -88,35 +88,7 @@ class ValidateMusterConnection(pyblish.api.ContextPlugin): api_url = "{}/muster/show_login".format( os.environ["OPENPYPE_WEBSERVER_URL"]) cls.log.debug(api_url) - response = cls._requests_get(api_url, timeout=1) + response = requests_get(api_url, timeout=1) if response.status_code != 200: cls.log.error('Cannot show login form to Muster') raise Exception('Cannot show login form to Muster') - - def _requests_post(self, *args, **kwargs): - """ 
Wrapper for requests, disabling SSL certificate validation if - DONT_VERIFY_SSL environment variable is found. This is useful when - Deadline or Muster server are running with self-signed certificates - and their certificate is not added to trusted certificates on - client machines. - - WARNING: disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """ Wrapper for requests, disabling SSL certificate validation if - DONT_VERIFY_SSL environment variable is found. This is useful when - Deadline or Muster server are running with self-signed certificates - and their certificate is not added to trusted certificates on - client machines. - - WARNING: disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. 
- """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa - return requests.get(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py index c5f675c8ca..068d6b38a1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py @@ -1,8 +1,7 @@ import pyblish.api -from avalon import io - import openpype.api +from openpype.pipeline import legacy_io import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib @@ -43,7 +42,7 @@ class ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin): nodes=instance[:]) # check ids against database ids - db_asset_ids = io.find({"type": "asset"}).distinct("_id") + db_asset_ids = legacy_io.find({"type": "asset"}).distinct("_id") db_asset_ids = set(str(i) for i in db_asset_ids) # Get all asset IDs diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py index 276b6713f4..38407e4176 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py @@ -1,9 +1,8 @@ import pyblish.api import openpype.api -from avalon import io +from openpype.pipeline import legacy_io import openpype.hosts.maya.api.action - from openpype.hosts.maya.api import lib @@ -38,7 +37,7 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin): invalid = list() asset = instance.data['asset'] - asset_data = io.find_one( + asset_data = legacy_io.find_one( { "name": asset, "type": "asset" diff --git a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py b/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py index 0838b4fbf8..e6c6ef6c9e 100644 --- 
a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py +++ b/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py @@ -12,7 +12,8 @@ ImagePrefixes = { 'vray': 'vraySettings.fileNamePrefix', 'arnold': 'defaultRenderGlobals.imageFilePrefix', 'renderman': 'defaultRenderGlobals.imageFilePrefix', - 'redshift': 'defaultRenderGlobals.imageFilePrefix' + 'redshift': 'defaultRenderGlobals.imageFilePrefix', + 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', } diff --git a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py b/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py index 4eb445ac68..e65150eb0f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py +++ b/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py @@ -1,7 +1,7 @@ import pyblish.api import openpype.hosts.maya.api.action -from avalon import io +from openpype.pipeline import legacy_io import openpype.api @@ -48,8 +48,8 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin): def validate_subset_registered(self, asset_name, subset_name): """Check if subset is registered in the database under the asset""" - asset = io.find_one({"type": "asset", "name": asset_name}) - is_valid = io.find_one({ + asset = legacy_io.find_one({"type": "asset", "name": asset_name}) + is_valid = legacy_io.find_one({ "type": "subset", "name": subset_name, "parent": asset["_id"] diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index e24e88cab7..ba6c1397ab 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -50,15 +50,17 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): 'vray': 'vraySettings.fileNamePrefix', 'arnold': 'defaultRenderGlobals.imageFilePrefix', 'renderman': 'rmanGlobals.imageFileFormat', - 'redshift': 
'defaultRenderGlobals.imageFilePrefix' + 'redshift': 'defaultRenderGlobals.imageFilePrefix', + 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix', } ImagePrefixTokens = { - - 'arnold': 'maya///{aov_separator}', # noqa + 'mentalray': 'maya///{aov_separator}', # noqa: E501 + 'arnold': 'maya///{aov_separator}', # noqa: E501 'redshift': 'maya///', 'vray': 'maya///', - 'renderman': '{aov_separator}..' # noqa + 'renderman': '{aov_separator}..', + 'mayahardware2': 'maya///', } _aov_chars = { @@ -69,14 +71,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): redshift_AOV_prefix = "/{aov_separator}" # noqa: E501 - # WARNING: There is bug? in renderman, translating token - # to something left behind mayas default image prefix. So instead - # `SceneName_v01` it translates to: - # `SceneName_v01//` that means - # for example: - # `SceneName_v01/Main/Main_`. Possible solution is to define - # custom token like to point to determined scene name. - RendermanDirPrefix = "/renders/maya//" + renderman_dir_prefix = "maya//" R_AOV_TOKEN = re.compile( r'%a||', re.IGNORECASE) @@ -116,15 +111,22 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): prefix = prefix.replace( "{aov_separator}", instance.data.get("aovSeparator", "_")) + + required_prefix = "maya/" + if not anim_override: invalid = True cls.log.error("Animation needs to be enabled. 
Use the same " "frame for start and end to render single frame") - if not prefix.lower().startswith("maya/"): + if renderer != "renderman" and not prefix.lower().startswith( + required_prefix): invalid = True - cls.log.error("Wrong image prefix [ {} ] - " - "doesn't start with: 'maya/'".format(prefix)) + cls.log.error( + ("Wrong image prefix [ {} ] " + " - doesn't start with: '{}'").format( + prefix, required_prefix) + ) if not re.search(cls.R_LAYER_TOKEN, prefix): invalid = True @@ -198,7 +200,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): invalid = True cls.log.error("Wrong image prefix [ {} ]".format(file_prefix)) - if dir_prefix.lower() != cls.RendermanDirPrefix.lower(): + if dir_prefix.lower() != cls.renderman_dir_prefix.lower(): invalid = True cls.log.error("Wrong directory prefix [ {} ]".format( dir_prefix)) @@ -234,7 +236,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): # load validation definitions from settings validation_settings = ( instance.context.data["project_settings"]["maya"]["publish"]["ValidateRenderSettings"].get( # noqa: E501 - "{}_render_attributes".format(renderer)) + "{}_render_attributes".format(renderer)) or [] ) # go through definitions and test if such node.attribute exists. 
@@ -304,7 +306,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): default_prefix, type="string") cmds.setAttr("rmanGlobals.imageOutputDir", - cls.RendermanDirPrefix, + cls.renderman_dir_prefix, type="string") if renderer == "vray": diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py index 43f6c85827..33788d1835 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- """Validator for correct naming of Static Meshes.""" -from maya import cmds # noqa +import re + import pyblish.api import openpype.api import openpype.hosts.maya.api.action -from avalon.api import Session +from openpype.pipeline import legacy_io from openpype.api import get_project_settings -import re class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): @@ -63,7 +63,9 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): invalid = [] - project_settings = get_project_settings(Session["AVALON_PROJECT"]) + project_settings = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) collision_prefixes = ( project_settings ["maya"] diff --git a/openpype/hosts/maya/startup/userSetup.py b/openpype/hosts/maya/startup/userSetup.py index b89244817a..a3ab483add 100644 --- a/openpype/hosts/maya/startup/userSetup.py +++ b/openpype/hosts/maya/startup/userSetup.py @@ -1,11 +1,10 @@ import os -import avalon.api from openpype.api import get_project_settings +from openpype.pipeline import install_host from openpype.hosts.maya import api -import openpype.hosts.maya.api.lib as mlib from maya import cmds -avalon.api.install(api) +install_host(api) print("starting OpenPype usersetup") diff --git a/openpype/hosts/nuke/api/command.py b/openpype/hosts/nuke/api/command.py index 6f74c08e97..c756c48a12 100644 --- 
a/openpype/hosts/nuke/api/command.py +++ b/openpype/hosts/nuke/api/command.py @@ -3,8 +3,7 @@ import contextlib import nuke from bson.objectid import ObjectId -from avalon import api, io - +from openpype.pipeline import legacy_io log = logging.getLogger(__name__) @@ -15,11 +14,11 @@ def reset_frame_range(): displayed handles """ - fps = float(api.Session.get("AVALON_FPS", 25)) + fps = float(legacy_io.Session.get("AVALON_FPS", 25)) nuke.root()["fps"].setValue(fps) - name = api.Session["AVALON_ASSET"] - asset = io.find_one({"name": name, "type": "asset"}) + name = legacy_io.Session["AVALON_ASSET"] + asset = legacy_io.find_one({"name": name, "type": "asset"}) asset_data = asset["data"] handles = get_handles(asset) @@ -71,10 +70,10 @@ def get_handles(asset): if "visualParent" in data: vp = data["visualParent"] if vp is not None: - parent_asset = io.find_one({"_id": ObjectId(vp)}) + parent_asset = legacy_io.find_one({"_id": ObjectId(vp)}) if parent_asset is None: - parent_asset = io.find_one({"_id": ObjectId(asset["parent"])}) + parent_asset = legacy_io.find_one({"_id": ObjectId(asset["parent"])}) if parent_asset is not None: return get_handles(parent_asset) @@ -84,7 +83,7 @@ def get_handles(asset): def reset_resolution(): """Set resolution to project resolution.""" - project = io.find_one({"type": "project"}) + project = legacy_io.find_one({"type": "project"}) p_data = project["data"] width = p_data.get("resolution_width", diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index e05c6aecbd..ba8aa7a8db 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -1,4 +1,5 @@ import os +from pprint import pformat import re import six import platform @@ -10,8 +11,6 @@ from bson.objectid import ObjectId import nuke -from avalon import api, io - from openpype.api import ( Logger, Anatomy, @@ -26,7 +25,10 @@ from openpype.tools.utils import host_tools from openpype.lib.path_tools import HostDirmap from openpype.settings import 
get_project_settings from openpype.modules import ModulesManager -from openpype.pipeline import discover_legacy_creator_plugins +from openpype.pipeline import ( + discover_legacy_creator_plugins, + legacy_io, +) from .workio import ( save_file, @@ -192,7 +194,7 @@ def imprint(node, data, tab=None): Examples: ``` import nuke - from avalon.nuke import lib + from openpype.hosts.nuke.api import lib node = nuke.createNode("NoOp") data = { @@ -363,17 +365,15 @@ def fix_data_for_node_create(data): return data -def add_write_node(name, **kwarg): +def add_write_node_legacy(name, **kwarg): """Adding nuke write node - Arguments: name (str): nuke node name kwarg (attrs): data for nuke knobs - Returns: node (obj): nuke write node """ - frame_range = kwarg.get("frame_range", None) + frame_range = kwarg.get("use_range_limit", None) w = nuke.createNode( "Write", @@ -399,7 +399,36 @@ def add_write_node(name, **kwarg): return w -def read(node): +def add_write_node(name, file_path, knobs, **kwarg): + """Adding nuke write node + + Arguments: + name (str): nuke node name + kwarg (attrs): data for nuke knobs + + Returns: + node (obj): nuke write node + """ + frame_range = kwarg.get("use_range_limit", None) + + w = nuke.createNode( + "Write", + "name {}".format(name)) + + w["file"].setValue(file_path) + + # finally add knob overrides + set_node_knobs_from_settings(w, knobs, **kwarg) + + if frame_range: + w["use_limit"].setValue(True) + w["first"].setValue(frame_range[0]) + w["last"].setValue(frame_range[1]) + + return w + + +def read_avalon_data(node): """Return user-defined knobs from given `node` Args: @@ -414,8 +443,6 @@ def read(node): return knob_name[len("avalon:"):] elif knob_name.startswith("ak:"): return knob_name[len("ak:"):] - else: - return knob_name data = dict() @@ -444,7 +471,8 @@ def read(node): (knob_type == 26 and value) ): key = compat_prefixed(knob_name) - data[key] = value + if key is not None: + data[key] = value if knob_name == first_user_knob: break @@ -500,30 
+528,171 @@ def get_nuke_imageio_settings(): return get_anatomy_settings(Context.project_name)["imageio"]["nuke"] -def get_created_node_imageio_setting(**kwarg): +def get_created_node_imageio_setting_legacy(nodeclass, creator, subset): ''' Get preset data for dataflow (fileType, compression, bitDepth) ''' - log.debug(kwarg) - nodeclass = kwarg.get("nodeclass", None) - creator = kwarg.get("creator", None) assert any([creator, nodeclass]), nuke.message( "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)) - imageio_nodes = get_nuke_imageio_settings()["nodes"]["requiredNodes"] + imageio_nodes = get_nuke_imageio_settings()["nodes"] + required_nodes = imageio_nodes["requiredNodes"] + override_nodes = imageio_nodes["overrideNodes"] imageio_node = None - for node in imageio_nodes: + for node in required_nodes: log.info(node) - if (nodeclass in node["nukeNodeClass"]) and ( - creator in node["plugins"]): + if ( + nodeclass in node["nukeNodeClass"] + and creator in node["plugins"] + ): imageio_node = node break + log.debug("__ imageio_node: {}".format(imageio_node)) + + # find matching override node + override_imageio_node = None + for onode in override_nodes: + log.info(onode) + if nodeclass not in node["nukeNodeClass"]: + continue + + if creator not in node["plugins"]: + continue + + if ( + onode["subsets"] + and not any(re.search(s, subset) for s in onode["subsets"]) + ): + continue + + override_imageio_node = onode + break + + log.debug("__ override_imageio_node: {}".format(override_imageio_node)) + # add overrides to imageio_node + if override_imageio_node: + # get all knob names in imageio_node + knob_names = [k["name"] for k in imageio_node["knobs"]] + + for oknob in override_imageio_node["knobs"]: + for knob in imageio_node["knobs"]: + # override matching knob name + if oknob["name"] == knob["name"]: + log.debug( + "_ overriding knob: `{}` > `{}`".format( + knob, oknob + )) + if not oknob["value"]: + # remove original knob if no value found in oknob + 
imageio_node["knobs"].remove(knob) + else: + # override knob value with oknob's + knob["value"] = oknob["value"] + + # add missing knobs into imageio_node + if oknob["name"] not in knob_names: + log.debug( + "_ adding knob: `{}`".format(oknob)) + imageio_node["knobs"].append(oknob) + knob_names.append(oknob["name"]) + log.info("ImageIO node: {}".format(imageio_node)) return imageio_node +def get_imageio_node_setting(node_class, plugin_name, subset): + ''' Get preset data for dataflow (fileType, compression, bitDepth) + ''' + imageio_nodes = get_nuke_imageio_settings()["nodes"] + required_nodes = imageio_nodes["requiredNodes"] + + imageio_node = None + for node in required_nodes: + log.info(node) + if ( + node_class in node["nukeNodeClass"] + and plugin_name in node["plugins"] + ): + imageio_node = node + break + + log.debug("__ imageio_node: {}".format(imageio_node)) + + if not imageio_node: + return + + # find overrides and update knobs with them + get_imageio_node_override_setting( + node_class, + plugin_name, + subset, + imageio_node["knobs"] + ) + + log.info("ImageIO node: {}".format(imageio_node)) + return imageio_node + + +def get_imageio_node_override_setting( + node_class, plugin_name, subset, knobs_settings +): + ''' Get imageio node overrides from settings + ''' + imageio_nodes = get_nuke_imageio_settings()["nodes"] + override_nodes = imageio_nodes["overrideNodes"] + + # find matching override node + override_imageio_node = None + for onode in override_nodes: + log.info(onode) + if node_class not in onode["nukeNodeClass"]: + continue + + if plugin_name not in onode["plugins"]: + continue + + if ( + onode["subsets"] + and not any(re.search(s, subset) for s in onode["subsets"]) + ): + continue + + override_imageio_node = onode + break + + log.debug("__ override_imageio_node: {}".format(override_imageio_node)) + # add overrides to imageio_node + if override_imageio_node: + # get all knob names in imageio_node + knob_names = [k["name"] for k in 
knobs_settings] + + for oknob in override_imageio_node["knobs"]: + for knob in knobs_settings: + # override matching knob name + if oknob["name"] == knob["name"]: + log.debug( + "_ overriding knob: `{}` > `{}`".format( + knob, oknob + )) + if not oknob["value"]: + # remove original knob if no value found in oknob + knobs_settings.remove(knob) + else: + # override knob value with oknob's + knob["value"] = oknob["value"] + + # add missing knobs into imageio_node + if oknob["name"] not in knob_names: + log.debug( + "_ adding knob: `{}`".format(oknob)) + knobs_settings.append(oknob) + knob_names.append(oknob["name"]) + + return knobs_settings + + def get_imageio_input_colorspace(filename): ''' Get input file colorspace based on regex in settings. ''' @@ -541,7 +710,7 @@ def get_imageio_input_colorspace(filename): def on_script_load(): ''' Callback for ffmpeg support ''' - if nuke.env['LINUX']: + if nuke.env["LINUX"]: nuke.tcl('load ffmpegReader') nuke.tcl('load ffmpegWriter') else: @@ -566,10 +735,10 @@ def check_inventory_versions(): if container: node = nuke.toNode(container["objectName"]) - avalon_knob_data = read(node) + avalon_knob_data = read_avalon_data(node) # get representation from io - representation = io.find_one({ + representation = legacy_io.find_one({ "type": "representation", "_id": ObjectId(avalon_knob_data["representation"]) }) @@ -583,16 +752,16 @@ def check_inventory_versions(): continue # Get start frame from version data - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] - }).distinct('name') + }).distinct("name") max_version = max(versions) @@ -622,20 +791,20 @@ def writes_version_sync(): if _NODE_TAB_NAME not in each.knobs(): continue - avalon_knob_data = read(each) + avalon_knob_data = read_avalon_data(each) try: - if avalon_knob_data['families'] not in 
["render"]: - log.debug(avalon_knob_data['families']) + if avalon_knob_data["families"] not in ["render"]: + log.debug(avalon_knob_data["families"]) continue - node_file = each['file'].value() + node_file = each["file"].value() node_version = "v" + get_version_from_path(node_file) log.debug("node_version: {}".format(node_version)) node_new_file = node_file.replace(node_version, new_version) - each['file'].setValue(node_new_file) + each["file"].setValue(node_new_file) if not os.path.isdir(os.path.dirname(node_new_file)): log.warning("Path does not exist! I am creating it.") os.makedirs(os.path.dirname(node_new_file)) @@ -664,21 +833,21 @@ def check_subsetname_exists(nodes, subset_name): bool: True of False """ return next((True for n in nodes - if subset_name in read(n).get("subset", "")), + if subset_name in read_avalon_data(n).get("subset", "")), False) def get_render_path(node): ''' Generate Render path from presets regarding avalon knob data ''' - data = {'avalon': read(node)} - data_preset = { - "nodeclass": data['avalon']['family'], - "families": [data['avalon']['families']], - "creator": data['avalon']['creator'] - } + avalon_knob_data = read_avalon_data(node) + data = {'avalon': avalon_knob_data} - nuke_imageio_writes = get_created_node_imageio_setting(**data_preset) + nuke_imageio_writes = get_imageio_node_setting( + node_class=avalon_knob_data["family"], + plugin_name=avalon_knob_data["creator"], + subset=avalon_knob_data["subset"] + ) host_name = os.environ.get("AVALON_APP") data.update({ @@ -726,8 +895,8 @@ def format_anatomy(data): file = script_name() data["version"] = get_version_from_path(file) - project_doc = io.find_one({"type": "project"}) - asset_doc = io.find_one({ + project_doc = legacy_io.find_one({"type": "project"}) + asset_doc = legacy_io.find_one({ "type": "asset", "name": data["avalon"]["asset"] }) @@ -748,7 +917,7 @@ def format_anatomy(data): def script_name(): ''' Returns nuke script path ''' - return nuke.root().knob('name').value() + 
return nuke.root().knob("name").value() def add_button_write_to_read(node): @@ -770,8 +939,282 @@ def add_button_clear_rendered(node, path): node.addKnob(knob) -def create_write_node(name, data, input=None, prenodes=None, - review=True, linked_knobs=None, farm=True): +def create_prenodes( + prev_node, + nodes_setting, + plugin_name=None, + subset=None, + **kwargs +): + last_node = None + for_dependency = {} + for name, node in nodes_setting.items(): + # get attributes + nodeclass = node["nodeclass"] + knobs = node["knobs"] + + # create node + now_node = nuke.createNode( + nodeclass, "name {}".format(name)) + now_node.hideControlPanel() + + # add for dependency linking + for_dependency[name] = { + "node": now_node, + "dependent": node["dependent"] + } + + if all([plugin_name, subset]): + # find imageio overrides + get_imageio_node_override_setting( + now_node.Class(), + plugin_name, + subset, + knobs + ) + + # add data to knob + set_node_knobs_from_settings(now_node, knobs, **kwargs) + + # switch actual node to previous + last_node = now_node + + for _node_name, node_prop in for_dependency.items(): + if not node_prop["dependent"]: + node_prop["node"].setInput( + 0, prev_node) + elif node_prop["dependent"] in for_dependency: + _prev_node = for_dependency[node_prop["dependent"]]["node"] + node_prop["node"].setInput( + 0, _prev_node) + else: + log.warning("Dependency has wrong name of node: {}".format( + node_prop + )) + + return last_node + + +def create_write_node( + name, + data, + input=None, + prenodes=None, + review=True, + farm=True, + linked_knobs=None, + **kwargs +): + ''' Creating write node which is group node + + Arguments: + name (str): name of node + data (dict): creator write instance data + input (node)[optional]: selected node to connect to + prenodes (dict)[optional]: + nodes to be created before write with dependency + review (bool)[optional]: adding review knob + farm (bool)[optional]: rendering workflow target + kwargs (dict)[optional]: additional 
key arguments for formating + + Example: + prenodes = { + "nodeName": { + "nodeclass": "Reformat", + "dependent": [ + following_node_01, + ... + ], + "knobs": [ + { + "type": "text", + "name": "knobname", + "value": "knob value" + }, + ... + ] + }, + ... + } + + + Return: + node (obj): group node with avalon data as Knobs + ''' + prenodes = prenodes or {} + + # group node knob overrides + knob_overrides = data.pop("knobs", []) + + # filtering variables + plugin_name = data["creator"] + subset = data["subset"] + + # get knob settings for write node + imageio_writes = get_imageio_node_setting( + node_class=data["nodeclass"], + plugin_name=plugin_name, + subset=subset + ) + + for knob in imageio_writes["knobs"]: + if knob["name"] == "file_type": + representation = knob["value"] + + host_name = os.environ.get("AVALON_APP") + try: + data.update({ + "app": host_name, + "imageio_writes": imageio_writes, + "representation": representation, + }) + anatomy_filled = format_anatomy(data) + + except Exception as e: + msg = "problem with resolving anatomy template: {}".format(e) + log.error(msg) + nuke.message(msg) + + # build file path to workfiles + fdir = str(anatomy_filled["work"]["folder"]).replace("\\", "/") + fpath = data["fpath_template"].format( + work=fdir, + version=data["version"], + subset=data["subset"], + frame=data["frame"], + ext=representation + ) + + # create directory + if not os.path.isdir(os.path.dirname(fpath)): + log.warning("Path does not exist! 
I am creating it.") + os.makedirs(os.path.dirname(fpath)) + + GN = nuke.createNode("Group", "name {}".format(name)) + + prev_node = None + with GN: + if input: + input_name = str(input.name()).replace(" ", "") + # if connected input node was defined + prev_node = nuke.createNode( + "Input", "name {}".format(input_name)) + else: + # generic input node connected to nothing + prev_node = nuke.createNode( + "Input", "name {}".format("rgba")) + prev_node.hideControlPanel() + + # creating pre-write nodes `prenodes` + last_prenode = create_prenodes( + prev_node, + prenodes, + plugin_name, + subset, + **kwargs + ) + if last_prenode: + prev_node = last_prenode + + # creating write node + write_node = now_node = add_write_node( + "inside_{}".format(name), + fpath, + imageio_writes["knobs"], + **data + ) + write_node.hideControlPanel() + # connect to previous node + now_node.setInput(0, prev_node) + + # switch actual node to previous + prev_node = now_node + + now_node = nuke.createNode("Output", "name Output1") + now_node.hideControlPanel() + + # connect to previous node + now_node.setInput(0, prev_node) + + # imprinting group node + set_avalon_knob_data(GN, data["avalon"]) + add_publish_knob(GN) + add_rendering_knobs(GN, farm) + + if review: + add_review_knob(GN) + + # add divider + GN.addKnob(nuke.Text_Knob('', 'Rendering')) + + # Add linked knobs. 
+ linked_knob_names = [] + + # add input linked knobs and create group only if any input + if linked_knobs: + linked_knob_names.append("_grp-start_") + linked_knob_names.extend(linked_knobs) + linked_knob_names.append("_grp-end_") + + linked_knob_names.append("Render") + + for _k_name in linked_knob_names: + if "_grp-start_" in _k_name: + knob = nuke.Tab_Knob( + "rnd_attr", "Rendering attributes", nuke.TABBEGINCLOSEDGROUP) + GN.addKnob(knob) + elif "_grp-end_" in _k_name: + knob = nuke.Tab_Knob( + "rnd_attr_end", "Rendering attributes", nuke.TABENDGROUP) + GN.addKnob(knob) + else: + if "___" in _k_name: + # add divider + GN.addKnob(nuke.Text_Knob("")) + else: + # add linked knob by _k_name + link = nuke.Link_Knob("") + link.makeLink(write_node.name(), _k_name) + link.setName(_k_name) + + # make render + if "Render" in _k_name: + link.setLabel("Render Local") + link.setFlag(0x1000) + GN.addKnob(link) + + # adding write to read button + add_button_write_to_read(GN) + + # adding write to read button + add_button_clear_rendered(GN, os.path.dirname(fpath)) + + # Deadline tab. 
+ add_deadline_tab(GN) + + # open the our Tab as default + GN[_NODE_TAB_NAME].setFlag(0) + + # set tile color + tile_color = next( + iter( + k["value"] for k in imageio_writes["knobs"] + if "tile_color" in k["name"] + ), [255, 0, 0, 255] + ) + GN["tile_color"].setValue( + color_gui_to_int(tile_color)) + + # finally add knob overrides + set_node_knobs_from_settings(GN, knob_overrides, **kwargs) + + return GN + + +def create_write_node_legacy( + name, data, input=None, prenodes=None, + review=True, linked_knobs=None, farm=True +): ''' Creating write node which is group node Arguments: @@ -803,8 +1246,14 @@ def create_write_node(name, data, input=None, prenodes=None, Return: node (obj): group node with avalon data as Knobs ''' + knob_overrides = data.get("knobs", []) + nodeclass = data["nodeclass"] + creator = data["creator"] + subset = data["subset"] - imageio_writes = get_created_node_imageio_setting(**data) + imageio_writes = get_created_node_imageio_setting_legacy( + nodeclass, creator, subset + ) for knob in imageio_writes["knobs"]: if knob["name"] == "file_type": representation = knob["value"] @@ -843,7 +1292,7 @@ def create_write_node(name, data, input=None, prenodes=None, # adding dataflow template log.debug("imageio_writes: `{}`".format(imageio_writes)) for knob in imageio_writes["knobs"]: - _data.update({knob["name"]: knob["value"]}) + _data[knob["name"]] = knob["value"] _data = fix_data_for_node_create(_data) @@ -926,7 +1375,8 @@ def create_write_node(name, data, input=None, prenodes=None, prev_node = now_node # creating write node - write_node = now_node = add_write_node( + + write_node = now_node = add_write_node_legacy( "inside_{}".format(name), **_data ) @@ -1006,9 +1456,106 @@ def create_write_node(name, data, input=None, prenodes=None, tile_color = _data.get("tile_color", "0xff0000ff") GN["tile_color"].setValue(tile_color) + # overrie knob values from settings + for knob in knob_overrides: + knob_type = knob["type"] + knob_name = knob["name"] + 
knob_value = knob["value"] + if knob_name not in GN.knobs(): + continue + if not knob_value: + continue + + # set correctly knob types + if knob_type == "string": + knob_value = str(knob_value) + if knob_type == "number": + knob_value = int(knob_value) + if knob_type == "decimal_number": + knob_value = float(knob_value) + if knob_type == "bool": + knob_value = bool(knob_value) + if knob_type in ["2d_vector", "3d_vector"]: + knob_value = list(knob_value) + + GN[knob_name].setValue(knob_value) + return GN +def set_node_knobs_from_settings(node, knob_settings, **kwargs): + """ Overriding knob values from settings + + Using `schema_nuke_knob_inputs` for knob type definitions. + + Args: + node (nuke.Node): nuke node + knob_settings (list): list of dict. Keys are `type`, `name`, `value` + kwargs (dict)[optional]: keys for formatable knob settings + """ + for knob in knob_settings: + log.debug("__ knob: {}".format(pformat(knob))) + knob_type = knob["type"] + knob_name = knob["name"] + + if knob_name not in node.knobs(): + continue + + # first deal with formatable knob settings + if knob_type == "formatable": + template = knob["template"] + to_type = knob["to_type"] + try: + _knob_value = template.format( + **kwargs + ) + log.debug("__ knob_value0: {}".format(_knob_value)) + except KeyError as msg: + log.warning("__ msg: {}".format(msg)) + raise KeyError(msg) + + # convert value to correct type + if to_type == "2d_vector": + knob_value = _knob_value.split(";").split(",") + else: + knob_value = _knob_value + + knob_type = to_type + + else: + knob_value = knob["value"] + + if not knob_value: + continue + + # first convert string types to string + # just to ditch unicode + if isinstance(knob_value, six.text_type): + knob_value = str(knob_value) + + # set correctly knob types + if knob_type == "bool": + knob_value = bool(knob_value) + elif knob_type == "decimal_number": + knob_value = float(knob_value) + elif knob_type == "number": + knob_value = int(knob_value) + elif 
knob_type == "text": + knob_value = knob_value + elif knob_type == "color_gui": + knob_value = color_gui_to_int(knob_value) + elif knob_type in ["2d_vector", "3d_vector", "color"]: + knob_value = [float(v) for v in knob_value] + + node[knob_name].setValue(knob_value) + + +def color_gui_to_int(color_gui): + hex_value = ( + "0x{0:0>2x}{1:0>2x}{2:0>2x}{3:0>2x}").format(*color_gui) + return int(hex_value, 16) + + def add_rendering_knobs(node, farm=True): ''' Adds additional rendering knobs to given node @@ -1062,6 +1609,14 @@ def add_deadline_tab(node): knob.setValue(0) node.addKnob(knob) + knob = nuke.Text_Knob("divd", '') + knob.setValue('') + node.addKnob(knob) + + knob = nuke.Boolean_Knob("suspend_publish", "Suspend publish") + knob.setValue(False) + node.addKnob(knob) + def get_deadline_knob_names(): return [ @@ -1138,8 +1693,11 @@ class WorkfileSettings(object): nodes=None, **kwargs): Context._project_doc = kwargs.get( - "project") or io.find_one({"type": "project"}) - self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"] + "project") or legacy_io.find_one({"type": "project"}) + self._asset = ( + kwargs.get("asset_name") + or legacy_io.Session["AVALON_ASSET"] + ) self._asset_entity = get_asset(self._asset) self._root_node = root_node or nuke.root() self._nodes = self.get_nodes(nodes=nodes) @@ -1181,15 +1739,19 @@ class WorkfileSettings(object): erased_viewers = [] for v in nuke.allNodes(filter="Viewer"): - v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"])) + # set viewProcess to preset from settings + v["viewerProcess"].setValue( + str(viewer_dict["viewerProcess"]) + ) + if str(viewer_dict["viewerProcess"]) \ - not in v['viewerProcess'].value(): + not in v["viewerProcess"].value(): copy_inputs = v.dependencies() copy_knobs = {k: v[k].value() for k in v.knobs() if k not in filter_knobs} # delete viewer with wrong settings - erased_viewers.append(v['name'].value()) + erased_viewers.append(v["name"].value()) nuke.delete(v) # create new 
viewer @@ -1205,7 +1767,7 @@ class WorkfileSettings(object): nv[k].setValue(v) # set viewerProcess - nv['viewerProcess'].setValue(str(viewer_dict["viewerProcess"])) + nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"])) if erased_viewers: log.warning( @@ -1281,12 +1843,12 @@ class WorkfileSettings(object): for node in nuke.allNodes(filter="Group"): # get data from avalon knob - avalon_knob_data = read(node) + avalon_knob_data = read_avalon_data(node) - if not avalon_knob_data: + if avalon_knob_data.get("id") != "pyblish.avalon.instance": continue - if avalon_knob_data["id"] != "pyblish.avalon.instance": + if "creator" not in avalon_knob_data: continue # establish families @@ -1294,14 +1856,11 @@ class WorkfileSettings(object): if avalon_knob_data.get("families"): families.append(avalon_knob_data.get("families")) - data_preset = { - "nodeclass": avalon_knob_data["family"], - "families": families, - "creator": avalon_knob_data['creator'] - } - - nuke_imageio_writes = get_created_node_imageio_setting( - **data_preset) + nuke_imageio_writes = get_imageio_node_setting( + node_class=avalon_knob_data["family"], + plugin_name=avalon_knob_data["creator"], + subset=avalon_knob_data["subset"] + ) log.debug("nuke_imageio_writes: `{}`".format(nuke_imageio_writes)) @@ -1330,7 +1889,6 @@ class WorkfileSettings(object): write_node[knob["name"]].setValue(value) - def set_reads_colorspace(self, read_clrs_inputs): """ Setting colorspace to Read nodes @@ -1356,17 +1914,16 @@ class WorkfileSettings(object): current = n["colorspace"].value() future = str(preset_clrsp) if current != future: - changes.update({ - n.name(): { - "from": current, - "to": future - } - }) + changes[n.name()] = { + "from": current, + "to": future + } + log.debug(changes) if changes: msg = "Read nodes are not set to correct colospace:\n\n" for nname, knobs in changes.items(): - msg += str( + msg += ( " - node: '{0}' is now '{1}' but should be '{2}'\n" ).format(nname, knobs["from"], knobs["to"]) @@ 
-1486,9 +2043,9 @@ class WorkfileSettings(object): def reset_resolution(self): """Set resolution to project resolution.""" log.info("Resetting resolution") - project = io.find_one({"type": "project"}) - asset = api.Session["AVALON_ASSET"] - asset = io.find_one({"name": asset, "type": "asset"}) + project = legacy_io.find_one({"type": "project"}) + asset = legacy_io.Session["AVALON_ASSET"] + asset = legacy_io.find_one({"name": asset, "type": "asset"}) asset_data = asset.get('data', {}) data = { @@ -1598,17 +2155,17 @@ def get_hierarchical_attr(entity, attr, default=None): if not value: break - if value or entity['type'].lower() == 'project': + if value or entity["type"].lower() == "project": return value - parent_id = entity['parent'] + parent_id = entity["parent"] if ( - entity['type'].lower() == 'asset' - and entity.get('data', {}).get('visualParent') + entity["type"].lower() == "asset" + and entity.get("data", {}).get("visualParent") ): - parent_id = entity['data']['visualParent'] + parent_id = entity["data"]["visualParent"] - parent = io.find_one({'_id': parent_id}) + parent = legacy_io.find_one({"_id": parent_id}) return get_hierarchical_attr(parent, attr) @@ -1618,26 +2175,24 @@ def get_write_node_template_attr(node): ''' # get avalon data from node - data = dict() - data['avalon'] = read(node) - data_preset = { - "nodeclass": data['avalon']['family'], - "families": [data['avalon']['families']], - "creator": data['avalon']['creator'] - } - + avalon_knob_data = read_avalon_data(node) # get template data - nuke_imageio_writes = get_created_node_imageio_setting(**data_preset) + nuke_imageio_writes = get_imageio_node_setting( + node_class=avalon_knob_data["family"], + plugin_name=avalon_knob_data["creator"], + subset=avalon_knob_data["subset"] + ) # collecting correct data correct_data = OrderedDict({ "file": get_render_path(node) }) - # adding imageio template - {correct_data.update({k: v}) - for k, v in nuke_imageio_writes.items() - if k not in ["_id", 
"_previous"]} + # adding imageio knob presets + for k, v in nuke_imageio_writes.items(): + if k in ["_id", "_previous"]: + continue + correct_data[k] = v # fix badly encoded data return fix_data_for_node_create(correct_data) @@ -1753,8 +2308,8 @@ def maintained_selection(): Example: >>> with maintained_selection(): - ... node['selected'].setValue(True) - >>> print(node['selected'].value()) + ... node["selected"].setValue(True) + >>> print(node["selected"].value()) False """ previous_selection = nuke.selectedNodes() @@ -1762,11 +2317,11 @@ def maintained_selection(): yield finally: # unselect all selection in case there is some - current_seletion = nuke.selectedNodes() - [n['selected'].setValue(False) for n in current_seletion] + reset_selection() + # and select all previously selected nodes if previous_selection: - [n['selected'].setValue(True) for n in previous_selection] + select_nodes(previous_selection) def reset_selection(): diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py index 6ee3d2ce05..2785eb65cd 100644 --- a/openpype/hosts/nuke/api/pipeline.py +++ b/openpype/hosts/nuke/api/pipeline.py @@ -32,13 +32,12 @@ from .lib import ( launch_workfiles_app, check_inventory_versions, set_avalon_knob_data, - read, + read_avalon_data, Context ) log = Logger.get_logger(__name__) -AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.nuke.__file__)) PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") @@ -79,11 +78,11 @@ def reload_config(): """ for module in ( - "{}.api".format(AVALON_CONFIG), - "{}.hosts.nuke.api.actions".format(AVALON_CONFIG), - "{}.hosts.nuke.api.menu".format(AVALON_CONFIG), - "{}.hosts.nuke.api.plugin".format(AVALON_CONFIG), - "{}.hosts.nuke.api.lib".format(AVALON_CONFIG), + "openpype.api", + "openpype.hosts.nuke.api.actions", + "openpype.hosts.nuke.api.menu", + "openpype.hosts.nuke.api.plugin", + 
"openpype.hosts.nuke.api.lib", ): log.info("Reloading module: {}...".format(module)) @@ -360,7 +359,7 @@ def parse_container(node): dict: The container schema data for this container node. """ - data = read(node) + data = read_avalon_data(node) # (TODO) Remove key validation when `ls` has re-implemented. # diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index 3ac750a48f..2bad6f2c78 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -1,6 +1,8 @@ import os import random import string +from collections import OrderedDict +from abc import abstractmethod import nuke @@ -15,7 +17,8 @@ from .lib import ( reset_selection, maintained_selection, set_avalon_knob_data, - add_publish_knob + add_publish_knob, + get_nuke_imageio_settings ) @@ -25,9 +28,6 @@ class OpenPypeCreator(LegacyCreator): def __init__(self, *args, **kwargs): super(OpenPypeCreator, self).__init__(*args, **kwargs) - self.presets = get_current_project_settings()["nuke"]["create"].get( - self.__class__.__name__, {} - ) if check_subsetname_exists( nuke.allNodes(), self.data["subset"]): @@ -258,8 +258,6 @@ class ExporterReview(object): return nuke_imageio["viewer"]["viewerProcess"] - - class ExporterReviewLut(ExporterReview): """ Generator object for review lut from Nuke @@ -594,3 +592,156 @@ class ExporterReviewMov(ExporterReview): nuke.scriptSave() return self.data + + +class AbstractWriteRender(OpenPypeCreator): + """Abstract creator to gather similar implementation for Write creators""" + name = "" + label = "" + hosts = ["nuke"] + n_class = "Write" + family = "render" + icon = "sign-out" + defaults = ["Main", "Mask"] + knobs = [] + prenodes = {} + + def __init__(self, *args, **kwargs): + super(AbstractWriteRender, self).__init__(*args, **kwargs) + + data = OrderedDict() + + data["family"] = self.family + data["families"] = self.n_class + + for k, v in self.data.items(): + if k not in data.keys(): + data.update({k: v}) + + self.data = 
data + self.nodes = nuke.selectedNodes() + self.log.debug("_ self.data: '{}'".format(self.data)) + + def process(self): + + inputs = [] + outputs = [] + instance = nuke.toNode(self.data["subset"]) + selected_node = None + + # use selection + if (self.options or {}).get("useSelection"): + nodes = self.nodes + + if not (len(nodes) < 2): + msg = ("Select only one node. " + "The node you want to connect to, " + "or tick off `Use selection`") + self.log.error(msg) + nuke.message(msg) + return + + if len(nodes) == 0: + msg = ( + "No nodes selected. Please select a single node to connect" + " to or tick off `Use selection`" + ) + self.log.error(msg) + nuke.message(msg) + return + + selected_node = nodes[0] + inputs = [selected_node] + outputs = selected_node.dependent() + + if instance: + if (instance.name() in selected_node.name()): + selected_node = instance.dependencies()[0] + + # if node already exist + if instance: + # collect input / outputs + inputs = instance.dependencies() + outputs = instance.dependent() + selected_node = inputs[0] + # remove old one + nuke.delete(instance) + + # recreate new + write_data = { + "nodeclass": self.n_class, + "families": [self.family], + "avalon": self.data, + "subset": self.data["subset"], + "knobs": self.knobs + } + + # add creator data + creator_data = {"creator": self.__class__.__name__} + self.data.update(creator_data) + write_data.update(creator_data) + + write_node = self._create_write_node( + selected_node, + inputs, + outputs, + write_data + ) + + # relinking to collected connections + for i, input in enumerate(inputs): + write_node.setInput(i, input) + + write_node.autoplace() + + for output in outputs: + output.setInput(0, write_node) + + write_node = self._modify_write_node(write_node) + + return write_node + + def is_legacy(self): + """Check if it needs to run legacy code + + In case where `type` key is missing in singe + knob it is legacy project anatomy. 
+ + Returns: + bool: True if legacy + """ + imageio_nodes = get_nuke_imageio_settings()["nodes"] + node = imageio_nodes["requiredNodes"][0] + if "type" not in node["knobs"][0]: + # if type is not yet in project anatomy + return True + elif next(iter( + _k for _k in node["knobs"] + if _k.get("type") == "__legacy__" + ), None): + # in case someone re-saved anatomy + # with old configuration + return True + + @abstractmethod + def _create_write_node(self, selected_node, inputs, outputs, write_data): + """Family dependent implementation of Write node creation + + Args: + selected_node (nuke.Node) + inputs (list of nuke.Node) - input dependencies (what is connected) + outputs (list of nuke.Node) - output dependencies + write_data (dict) - values used to fill Knobs + Returns: + node (nuke.Node): group node with data as Knobs + """ + pass + + @abstractmethod + def _modify_write_node(self, write_node): + """Family dependent modification of created 'write_node' + + Returns: + node (nuke.Node): group node with data as Knobs + """ + pass diff --git a/openpype/hosts/nuke/plugins/__init__.py b/openpype/hosts/nuke/plugins/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/plugins/create/__init__.py b/openpype/hosts/nuke/plugins/create/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/plugins/create/create_read.py b/openpype/hosts/nuke/plugins/create/create_read.py index bdc67add42..87a9dff0f8 100644 --- a/openpype/hosts/nuke/plugins/create/create_read.py +++ b/openpype/hosts/nuke/plugins/create/create_read.py @@ -2,8 +2,6 @@ from collections import OrderedDict import nuke -import avalon.api -from openpype import api as pype from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api.lib import ( set_avalon_knob_data diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/openpype/hosts/nuke/plugins/create/create_write_prerender.py index 761439fdb2..32ee1fd86f 
100644 --- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py +++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py @@ -1,12 +1,11 @@ -from collections import OrderedDict - import nuke from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import create_write_node +from openpype.hosts.nuke.api.lib import ( + create_write_node, create_write_node_legacy) -class CreateWritePrerender(plugin.OpenPypeCreator): +class CreateWritePrerender(plugin.AbstractWriteRender): # change this to template preset name = "WritePrerender" label = "Create Write Prerender" @@ -14,108 +13,39 @@ class CreateWritePrerender(plugin.OpenPypeCreator): n_class = "Write" family = "prerender" icon = "sign-out" + + # settings + fpath_template = "{work}/render/nuke/{subset}/{subset}.{frame}.{ext}" defaults = ["Key01", "Bg01", "Fg01", "Branch01", "Part01"] + reviewable = False + use_range_limit = True def __init__(self, *args, **kwargs): super(CreateWritePrerender, self).__init__(*args, **kwargs) - data = OrderedDict() + def _create_write_node(self, selected_node, inputs, outputs, write_data): + # add fpath_template + write_data["fpath_template"] = self.fpath_template + write_data["use_range_limit"] = self.use_range_limit - data["family"] = self.family - data["families"] = self.n_class - - for k, v in self.data.items(): - if k not in data.keys(): - data.update({k: v}) - - self.data = data - self.nodes = nuke.selectedNodes() - self.log.debug("_ self.data: '{}'".format(self.data)) - - def process(self): - inputs = [] - outputs = [] - instance = nuke.toNode(self.data["subset"]) - selected_node = None - - # use selection - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if not (len(nodes) < 2): - msg = ("Select only one node. The node " - "you want to connect to, " - "or tick off `Use selection`") - self.log.error(msg) - nuke.message(msg) - - if len(nodes) == 0: - msg = ( - "No nodes selected. 
Please select a single node to connect" - " to or tick off `Use selection`" - ) - self.log.error(msg) - nuke.message(msg) - - selected_node = nodes[0] - inputs = [selected_node] - outputs = selected_node.dependent() - - if instance: - if (instance.name() in selected_node.name()): - selected_node = instance.dependencies()[0] - - # if node already exist - if instance: - # collect input / outputs - inputs = instance.dependencies() - outputs = instance.dependent() - selected_node = inputs[0] - # remove old one - nuke.delete(instance) - - # recreate new - write_data = { - "nodeclass": self.n_class, - "families": [self.family], - "avalon": self.data - } - - # add creator data - creator_data = {"creator": self.__class__.__name__} - self.data.update(creator_data) - write_data.update(creator_data) - - if self.presets.get('fpath_template'): - self.log.info("Adding template path from preset") - write_data.update( - {"fpath_template": self.presets["fpath_template"]} + if not self.is_legacy(): + return create_write_node( + self.data["subset"], + write_data, + input=selected_node, + review=self.reviewable, + linked_knobs=["channels", "___", "first", "last", "use_limit"] ) else: - self.log.info("Adding template path from plugin") - write_data.update({ - "fpath_template": ("{work}/prerenders/nuke/{subset}" - "/{subset}.{frame}.{ext}")}) - - self.log.info("write_data: {}".format(write_data)) - reviewable = self.presets.get("reviewable") - write_node = create_write_node( - self.data["subset"], - write_data, - input=selected_node, - prenodes=[], - review=reviewable, - linked_knobs=["channels", "___", "first", "last", "use_limit"]) - - # relinking to collected connections - for i, input in enumerate(inputs): - write_node.setInput(i, input) - - write_node.autoplace() - - for output in outputs: - output.setInput(0, write_node) + return create_write_node_legacy( + self.data["subset"], + write_data, + input=selected_node, + review=self.reviewable, + linked_knobs=["channels", "___", 
"first", "last", "use_limit"] + ) + def _modify_write_node(self, write_node): # open group node write_node.begin() for n in nuke.allNodes(): @@ -124,7 +54,7 @@ class CreateWritePrerender(plugin.OpenPypeCreator): w_node = n write_node.end() - if self.presets.get("use_range_limit"): + if self.use_range_limit: w_node["use_limit"].setValue(True) w_node["first"].setValue(nuke.root()["first_frame"].value()) w_node["last"].setValue(nuke.root()["last_frame"].value()) diff --git a/openpype/hosts/nuke/plugins/create/create_write_render.py b/openpype/hosts/nuke/plugins/create/create_write_render.py index a9c4b5341e..23846c0332 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_render.py +++ b/openpype/hosts/nuke/plugins/create/create_write_render.py @@ -1,12 +1,11 @@ -from collections import OrderedDict - import nuke from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import create_write_node +from openpype.hosts.nuke.api.lib import ( + create_write_node, create_write_node_legacy) -class CreateWriteRender(plugin.OpenPypeCreator): +class CreateWriteRender(plugin.AbstractWriteRender): # change this to template preset name = "WriteRender" label = "Create Write Render" @@ -14,91 +13,35 @@ class CreateWriteRender(plugin.OpenPypeCreator): n_class = "Write" family = "render" icon = "sign-out" + + # settings + fpath_template = "{work}/render/nuke/{subset}/{subset}.{frame}.{ext}" defaults = ["Main", "Mask"] + prenodes = { + "Reformat01": { + "nodeclass": "Reformat", + "dependent": None, + "knobs": [ + { + "type": "text", + "name": "resize", + "value": "none" + }, + { + "type": "bool", + "name": "black_outside", + "value": True + } + ] + } + } def __init__(self, *args, **kwargs): super(CreateWriteRender, self).__init__(*args, **kwargs) - data = OrderedDict() - - data["family"] = self.family - data["families"] = self.n_class - - for k, v in self.data.items(): - if k not in data.keys(): - data.update({k: v}) - - self.data = data - self.nodes = 
nuke.selectedNodes() - self.log.debug("_ self.data: '{}'".format(self.data)) - - def process(self): - - inputs = [] - outputs = [] - instance = nuke.toNode(self.data["subset"]) - selected_node = None - - # use selection - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if not (len(nodes) < 2): - msg = ("Select only one node. " - "The node you want to connect to, " - "or tick off `Use selection`") - self.log.error(msg) - nuke.message(msg) - return - - if len(nodes) == 0: - msg = ( - "No nodes selected. Please select a single node to connect" - " to or tick off `Use selection`" - ) - self.log.error(msg) - nuke.message(msg) - return - - selected_node = nodes[0] - inputs = [selected_node] - outputs = selected_node.dependent() - - if instance: - if (instance.name() in selected_node.name()): - selected_node = instance.dependencies()[0] - - # if node already exist - if instance: - # collect input / outputs - inputs = instance.dependencies() - outputs = instance.dependent() - selected_node = inputs[0] - # remove old one - nuke.delete(instance) - - # recreate new - write_data = { - "nodeclass": self.n_class, - "families": [self.family], - "avalon": self.data - } - - # add creator data - creator_data = {"creator": self.__class__.__name__} - self.data.update(creator_data) - write_data.update(creator_data) - - if self.presets.get('fpath_template'): - self.log.info("Adding template path from preset") - write_data.update( - {"fpath_template": self.presets["fpath_template"]} - ) - else: - self.log.info("Adding template path from plugin") - write_data.update({ - "fpath_template": ("{work}/renders/nuke/{subset}" - "/{subset}.{frame}.{ext}")}) + def _create_write_node(self, selected_node, inputs, outputs, write_data): + # add fpath_template + write_data["fpath_template"] = self.fpath_template # add reformat node to cut off all outside of format bounding box # get width and height @@ -108,31 +51,36 @@ class CreateWriteRender(plugin.OpenPypeCreator): 
actual_format = nuke.root().knob('format').value() width, height = (actual_format.width(), actual_format.height()) - _prenodes = [ - { - "name": "Reformat01", - "class": "Reformat", - "knobs": [ - ("resize", 0), - ("black_outside", 1), - ], - "dependent": None - } - ] + if not self.is_legacy(): + return create_write_node( + self.data["subset"], + write_data, + input=selected_node, + prenodes=self.prenodes, + **{ + "width": width, + "height": height + } + ) + else: + _prenodes = [ + { + "name": "Reformat01", + "class": "Reformat", + "knobs": [ + ("resize", 0), + ("black_outside", 1), + ], + "dependent": None + } + ] - write_node = create_write_node( - self.data["subset"], - write_data, - input=selected_node, - prenodes=_prenodes) - - # relinking to collected connections - for i, input in enumerate(inputs): - write_node.setInput(i, input) - - write_node.autoplace() - - for output in outputs: - output.setInput(0, write_node) + return create_write_node_legacy( + self.data["subset"], + write_data, + input=selected_node, + prenodes=_prenodes + ) + def _modify_write_node(self, write_node): return write_node diff --git a/openpype/hosts/nuke/plugins/create/create_write_still.py b/openpype/hosts/nuke/plugins/create/create_write_still.py index 0037b64ce3..4007ccf51e 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_still.py +++ b/openpype/hosts/nuke/plugins/create/create_write_still.py @@ -1,12 +1,11 @@ -from collections import OrderedDict - import nuke from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import create_write_node +from openpype.hosts.nuke.api.lib import ( + create_write_node, create_write_node_legacy) -class CreateWriteStill(plugin.OpenPypeCreator): +class CreateWriteStill(plugin.AbstractWriteRender): # change this to template preset name = "WriteStillFrame" label = "Create Write Still Image" @@ -14,120 +13,71 @@ class CreateWriteStill(plugin.OpenPypeCreator): n_class = "Write" family = "still" icon = "image" + + # settings 
+ fpath_template = "{work}/render/nuke/{subset}/{subset}.{ext}" defaults = [ - "ImageFrame{:0>4}".format(nuke.frame()), - "MPFrame{:0>4}".format(nuke.frame()), - "LayoutFrame{:0>4}".format(nuke.frame()) + "ImageFrame", + "MPFrame", + "LayoutFrame" ] + prenodes = { + "FrameHold01": { + "nodeclass": "FrameHold", + "dependent": None, + "knobs": [ + { + "type": "formatable", + "name": "first_frame", + "template": "{frame}", + "to_type": "number" + } + ] + } + } def __init__(self, *args, **kwargs): super(CreateWriteStill, self).__init__(*args, **kwargs) - data = OrderedDict() + def _create_write_node(self, selected_node, inputs, outputs, write_data): + # add fpath_template + write_data["fpath_template"] = self.fpath_template - data["family"] = self.family - data["families"] = self.n_class + if not self.is_legacy(): + return create_write_node( + self.name, + write_data, + input=selected_node, + review=False, + prenodes=self.prenodes, + farm=False, + linked_knobs=["channels", "___", "first", "last", "use_limit"], + **{ + "frame": nuke.frame() + } + ) + else: + _prenodes = [ + { + "name": "FrameHold01", + "class": "FrameHold", + "knobs": [ + ("first_frame", nuke.frame()) + ], + "dependent": None + } + ] + return create_write_node_legacy( + self.name, + write_data, + input=selected_node, + review=False, + prenodes=_prenodes, + farm=False, + linked_knobs=["channels", "___", "first", "last", "use_limit"] + ) - for k, v in self.data.items(): - if k not in data.keys(): - data.update({k: v}) - - self.data = data - self.nodes = nuke.selectedNodes() - self.log.debug("_ self.data: '{}'".format(self.data)) - - def process(self): - - inputs = [] - outputs = [] - instance = nuke.toNode(self.data["subset"]) - selected_node = None - - # use selection - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if not (len(nodes) < 2): - msg = ("Select only one node. 
" - "The node you want to connect to, " - "or tick off `Use selection`") - self.log.error(msg) - nuke.message(msg) - return - - if len(nodes) == 0: - msg = ( - "No nodes selected. Please select a single node to connect" - " to or tick off `Use selection`" - ) - self.log.error(msg) - nuke.message(msg) - return - - selected_node = nodes[0] - inputs = [selected_node] - outputs = selected_node.dependent() - - if instance: - if (instance.name() in selected_node.name()): - selected_node = instance.dependencies()[0] - - # if node already exist - if instance: - # collect input / outputs - inputs = instance.dependencies() - outputs = instance.dependent() - selected_node = inputs[0] - # remove old one - nuke.delete(instance) - - # recreate new - write_data = { - "nodeclass": self.n_class, - "families": [self.family], - "avalon": self.data - } - - # add creator data - creator_data = {"creator": self.__class__.__name__} - self.data.update(creator_data) - write_data.update(creator_data) - - self.log.info("Adding template path from plugin") - write_data.update({ - "fpath_template": ( - "{work}/renders/nuke/{subset}/{subset}.{ext}")}) - - _prenodes = [ - { - "name": "FrameHold01", - "class": "FrameHold", - "knobs": [ - ("first_frame", nuke.frame()) - ], - "dependent": None - } - ] - - write_node = create_write_node( - self.name, - write_data, - input=selected_node, - review=False, - prenodes=_prenodes, - farm=False, - linked_knobs=["channels", "___", "first", "last", "use_limit"]) - - # relinking to collected connections - for i, input in enumerate(inputs): - write_node.setInput(i, input) - - write_node.autoplace() - - for output in outputs: - output.setInput(0, write_node) - - # link frame hold to group node + def _modify_write_node(self, write_node): write_node.begin() for n in nuke.allNodes(): # get write node diff --git a/openpype/hosts/nuke/plugins/inventory/select_containers.py b/openpype/hosts/nuke/plugins/inventory/select_containers.py index d7d5f00b87..4e7a20fb26 100644 
--- a/openpype/hosts/nuke/plugins/inventory/select_containers.py +++ b/openpype/hosts/nuke/plugins/inventory/select_containers.py @@ -1,5 +1,5 @@ from openpype.pipeline import InventoryAction -from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop +from openpype.hosts.nuke.api.command import viewer_update_and_undo_stop class SelectContainers(InventoryAction): diff --git a/openpype/hosts/nuke/plugins/load/actions.py b/openpype/hosts/nuke/plugins/load/actions.py index 81840b3a38..d364a4f3a1 100644 --- a/openpype/hosts/nuke/plugins/load/actions.py +++ b/openpype/hosts/nuke/plugins/load/actions.py @@ -9,7 +9,7 @@ log = Logger().get_logger(__name__) class SetFrameRangeLoader(load.LoaderPlugin): - """Specific loader of Alembic for the avalon.animation family""" + """Set frame range excluding pre- and post-handles""" families = ["animation", "camera", @@ -43,7 +43,7 @@ class SetFrameRangeLoader(load.LoaderPlugin): class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): - """Specific loader of Alembic for the avalon.animation family""" + """Set frame range including pre- and post-handles""" families = ["animation", "camera", diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index 36cec6f4c5..143fdf1f30 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -1,8 +1,8 @@ -from avalon import io import nuke import nukescripts from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -14,7 +14,7 @@ from openpype.hosts.nuke.api.lib import ( get_avalon_knob_data, set_avalon_knob_data ) -from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop +from openpype.hosts.nuke.api.command import viewer_update_and_undo_stop from openpype.hosts.nuke.api import containerise, update_container @@ -188,7 +188,7 @@ class LoadBackdropNodes(load.LoaderPlugin): # get main variables # Get version from io - 
version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -237,7 +237,7 @@ class LoadBackdropNodes(load.LoaderPlugin): GN["name"].setValue(object_name) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_camera_abc.py b/openpype/hosts/nuke/plugins/load/load_camera_abc.py index fb5f7f8ede..964ca5ec90 100644 --- a/openpype/hosts/nuke/plugins/load/load_camera_abc.py +++ b/openpype/hosts/nuke/plugins/load/load_camera_abc.py @@ -1,7 +1,7 @@ import nuke -from avalon import io from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -102,7 +102,7 @@ class AlembicCameraLoader(load.LoaderPlugin): None """ # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -175,7 +175,7 @@ class AlembicCameraLoader(load.LoaderPlugin): """ Coloring a node by correct color by actual version """ # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py index 9b0588feac..681561e303 100644 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ b/openpype/hosts/nuke/plugins/load/load_clip.py @@ -1,8 +1,10 @@ import nuke import qargparse -from avalon import io -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + legacy_io, + get_representation_path, +) from openpype.hosts.nuke.api.lib import ( get_imageio_input_colorspace, maintained_selection @@ -194,7 +196,7 @@ class LoadClip(plugin.NukeLoader): start_at_workfile = bool("start at" in read_node['frame_mode'].value()) - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": 
representation["parent"] }) @@ -264,7 +266,7 @@ class LoadClip(plugin.NukeLoader): # change color of read_node # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py index 56c5acbb0a..6a30330ed0 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects.py +++ b/openpype/hosts/nuke/plugins/load/load_effects.py @@ -3,9 +3,8 @@ from collections import OrderedDict import nuke import six -from avalon import io - from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -149,7 +148,7 @@ class LoadEffects(load.LoaderPlugin): """ # get main variables # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -245,7 +244,7 @@ class LoadEffects(load.LoaderPlugin): self.connect_read_node(GN, namespace, json_f["assignTo"]) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py index 0bc5f5a514..eaf151b3b8 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py @@ -3,9 +3,8 @@ from collections import OrderedDict import six import nuke -from avalon import io - from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -154,7 +153,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin): # get main variables # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -252,7 +251,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin): # return # get all versions in list - versions = io.find({ + versions = 
legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo.py b/openpype/hosts/nuke/plugins/load/load_gizmo.py index 6f2b191be9..4ea9d64d7d 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo.py @@ -1,8 +1,7 @@ import nuke -from avalon import io - from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -102,7 +101,7 @@ class LoadGizmo(load.LoaderPlugin): # get main variables # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -150,7 +149,7 @@ class LoadGizmo(load.LoaderPlugin): GN["name"].setValue(object_name) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py index 46134afcf0..38dd70935e 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -1,8 +1,8 @@ import nuke import six -from avalon import io from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -108,7 +108,7 @@ class LoadGizmoInputProcess(load.LoaderPlugin): # get main variables # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -156,7 +156,7 @@ class LoadGizmoInputProcess(load.LoaderPlugin): GN["name"].setValue(object_name) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py index 9a175a0cba..6df286a4f7 100644 --- a/openpype/hosts/nuke/plugins/load/load_image.py +++ 
b/openpype/hosts/nuke/plugins/load/load_image.py @@ -1,9 +1,9 @@ import nuke import qargparse -from avalon import io from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -186,13 +186,13 @@ class LoadImage(load.LoaderPlugin): format(frame_number, "0{}".format(padding))) # Get start frame from version data - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py index e445beca05..9788bb25d2 100644 --- a/openpype/hosts/nuke/plugins/load/load_model.py +++ b/openpype/hosts/nuke/plugins/load/load_model.py @@ -1,6 +1,7 @@ import nuke -from avalon import io + from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -99,7 +100,7 @@ class AlembicModelLoader(load.LoaderPlugin): None """ # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -172,7 +173,7 @@ class AlembicModelLoader(load.LoaderPlugin): """ Coloring a node by correct color by actual version """ # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py index 779f101682..bd351ad785 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py @@ -1,8 +1,7 @@ import nuke -from avalon import io - from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -117,13 +116,13 @@ class LinkAsGroup(load.LoaderPlugin): root = get_representation_path(representation).replace("\\", "/") 
# Get start frame from version data - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/publish/collect_reads.py b/openpype/hosts/nuke/plugins/publish/collect_reads.py index 45e9969eb9..4d6944f523 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_reads.py +++ b/openpype/hosts/nuke/plugins/publish/collect_reads.py @@ -2,7 +2,8 @@ import os import re import nuke import pyblish.api -from avalon import io, api + +from openpype.pipeline import legacy_io @pyblish.api.log @@ -15,8 +16,10 @@ class CollectNukeReads(pyblish.api.InstancePlugin): families = ["source"] def process(self, instance): - asset_data = io.find_one({"type": "asset", - "name": api.Session["AVALON_ASSET"]}) + asset_data = legacy_io.find_one({ + "type": "asset", + "name": legacy_io.Session["AVALON_ASSET"] + }) self.log.debug("asset_data: {}".format(asset_data["data"])) diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py index 2e8843d2e0..2a79d600ba 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py @@ -52,7 +52,7 @@ class ExtractReviewDataMov(openpype.api.Extractor): for o_name, o_data in self.outputs.items(): f_families = o_data["filter"]["families"] f_task_types = o_data["filter"]["task_types"] - f_subsets = o_data["filter"]["sebsets"] + f_subsets = o_data["filter"]["subsets"] self.log.debug( "f_families `{}` > families: {}".format( diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py index e917a28046..fb52fc18b4 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py +++ 
b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py @@ -1,6 +1,9 @@ import os import nuke +import copy + import pyblish.api + import openpype from openpype.hosts.nuke.api.lib import maintained_selection @@ -18,6 +21,13 @@ class ExtractSlateFrame(openpype.api.Extractor): families = ["slate"] hosts = ["nuke"] + # Settings values + # - can be extended by other attributes from node in the future + key_value_mapping = { + "f_submission_note": [True, "{comment}"], + "f_submitting_for": [True, "{intent[value]}"], + "f_vfx_scope_of_work": [False, ""] + } def process(self, instance): if hasattr(self, "viewer_lut_raw"): @@ -129,9 +139,7 @@ class ExtractSlateFrame(openpype.api.Extractor): for node in temporary_nodes: nuke.delete(node) - def get_view_process_node(self): - # Select only the target node if nuke.selectedNodes(): [n.setSelected(False) for n in nuke.selectedNodes()] @@ -162,13 +170,56 @@ class ExtractSlateFrame(openpype.api.Extractor): return comment = instance.context.data.get("comment") - intent_value = instance.context.data.get("intent") - if intent_value and isinstance(intent_value, dict): - intent_value = intent_value.get("value") + intent = instance.context.data.get("intent") + if not isinstance(intent, dict): + intent = { + "label": intent, + "value": intent + } - try: - node["f_submission_note"].setValue(comment) - node["f_submitting_for"].setValue(intent_value or "") - except NameError: - return - instance.data.pop("slateNode") + fill_data = copy.deepcopy(instance.data["anatomyData"]) + fill_data.update({ + "custom": copy.deepcopy( + instance.data.get("customData") or {} + ), + "comment": comment, + "intent": intent + }) + + for key, value in self.key_value_mapping.items(): + enabled, template = value + if not enabled: + self.log.debug("Key \"{}\" is disabled".format(key)) + continue + + try: + value = template.format(**fill_data) + + except ValueError: + self.log.warning( + "Couldn't fill template \"{}\" with data: {}".format( + template, 
fill_data + ), + exc_info=True + ) + continue + + except KeyError: + self.log.warning( + ( + "Template contains unknown key." + " Template \"{}\" Data: {}" + ).format(template, fill_data), + exc_info=True + ) + continue + + try: + node[key].setValue(value) + self.log.info("Change key \"{}\" to value \"{}\"".format( + key, value + )) + except NameError: + self.log.warning(( + "Failed to set value \"{}\" on node attribute \"{}\"" + ).format(value)) diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py index 29c706f302..1a8fa3e6ad 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py @@ -1,6 +1,7 @@ import nuke import pyblish.api -from avalon import io, api + +from openpype.pipeline import legacy_io from openpype.hosts.nuke.api.lib import ( add_publish_knob, get_avalon_knob_data @@ -19,9 +20,9 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): sync_workfile_version_on_families = [] def process(self, context): - asset_data = io.find_one({ + asset_data = legacy_io.find_one({ "type": "asset", - "name": api.Session["AVALON_ASSET"] + "name": legacy_io.Session["AVALON_ASSET"] }) self.log.debug("asset_data: {}".format(asset_data["data"])) @@ -69,6 +70,11 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): instance = context.create_instance(subset) instance.append(node) + suspend_publish = False + if "suspend_publish" in node.knobs(): + suspend_publish = node["suspend_publish"].value() + instance.data["suspend_publish"] = suspend_publish + # get review knob value review = False if "review" in node.knobs(): diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py index 4826b2788f..8669f4f485 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py @@ 
-3,9 +3,12 @@ import re from pprint import pformat import nuke import pyblish.api -from avalon import io + import openpype.api as pype -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + legacy_io, + get_representation_path, +) @pyblish.api.log @@ -180,7 +183,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): repre_doc = None if version_doc: # Try to find it's representation (Expected there is only one) - repre_doc = io.find_one( + repre_doc = legacy_io.find_one( {"type": "representation", "parent": version_doc["_id"]} ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_script.py b/openpype/hosts/nuke/plugins/publish/validate_script.py index c35d09dcde..10c9e93f8b 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_script.py +++ b/openpype/hosts/nuke/plugins/publish/validate_script.py @@ -1,6 +1,7 @@ import pyblish.api -from avalon import io + from openpype import lib +from openpype.pipeline import legacy_io @pyblish.api.log @@ -115,7 +116,7 @@ class ValidateScript(pyblish.api.InstancePlugin): def check_parent_hierarchical(self, entityId, attr): if entityId is None: return None - entity = io.find_one({"_id": entityId}) + entity = legacy_io.find_one({"_id": entityId}) if attr in entity['data']: self.log.info(attr) return entity['data'][attr] diff --git a/openpype/hosts/nuke/startup/menu.py b/openpype/hosts/nuke/startup/menu.py index 2cac6d09e7..9ed43b2110 100644 --- a/openpype/hosts/nuke/startup/menu.py +++ b/openpype/hosts/nuke/startup/menu.py @@ -1,7 +1,7 @@ import nuke -import avalon.api from openpype.api import Logger +from openpype.pipeline import install_host from openpype.hosts.nuke import api from openpype.hosts.nuke.api.lib import ( on_script_load, @@ -13,7 +13,7 @@ from openpype.hosts.nuke.api.lib import ( log = Logger.get_logger(__name__) -avalon.api.install(api) +install_host(api) # fix ffmpeg settings on script nuke.addOnScriptLoad(on_script_load) diff --git 
a/openpype/hosts/photoshop/api/__init__.py b/openpype/hosts/photoshop/api/__init__.py index 17ea957066..94152b5706 100644 --- a/openpype/hosts/photoshop/api/__init__.py +++ b/openpype/hosts/photoshop/api/__init__.py @@ -12,7 +12,10 @@ from .pipeline import ( remove_instance, install, uninstall, - containerise + containerise, + get_context_data, + update_context_data, + get_context_title ) from .plugin import ( PhotoshopLoader, @@ -43,6 +46,9 @@ __all__ = [ "install", "uninstall", "containerise", + "get_context_data", + "update_context_data", + "get_context_title", # Plugin "PhotoshopLoader", diff --git a/openpype/hosts/photoshop/api/launch_logic.py b/openpype/hosts/photoshop/api/launch_logic.py index 0021905cb5..0bbb19523d 100644 --- a/openpype/hosts/photoshop/api/launch_logic.py +++ b/openpype/hosts/photoshop/api/launch_logic.py @@ -11,9 +11,8 @@ from wsrpc_aiohttp import ( from Qt import QtCore from openpype.api import Logger +from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools - -from avalon import api from openpype.tools.adobe_webserver.app import WebServerTool from .ws_stub import PhotoshopServerStub @@ -320,13 +319,13 @@ class PhotoshopRoute(WebSocketRoute): log.info("Setting context change") log.info("project {} asset {} ".format(project, asset)) if project: - api.Session["AVALON_PROJECT"] = project + legacy_io.Session["AVALON_PROJECT"] = project os.environ["AVALON_PROJECT"] = project if asset: - api.Session["AVALON_ASSET"] = asset + legacy_io.Session["AVALON_ASSET"] = asset os.environ["AVALON_ASSET"] = asset if task: - api.Session["AVALON_TASK"] = task + legacy_io.Session["AVALON_TASK"] = task os.environ["AVALON_TASK"] = task async def read(self): diff --git a/openpype/hosts/photoshop/api/lib.py b/openpype/hosts/photoshop/api/lib.py index 6d2a493a94..2f57d64464 100644 --- a/openpype/hosts/photoshop/api/lib.py +++ b/openpype/hosts/photoshop/api/lib.py @@ -5,9 +5,8 @@ import traceback from Qt import QtWidgets -import avalon.api 
- from openpype.api import Logger +from openpype.pipeline import install_host from openpype.tools.utils import host_tools from openpype.lib.remote_publish import headless_publish from openpype.lib import env_value_to_bool @@ -24,7 +23,7 @@ def safe_excepthook(*args): def main(*subprocess_args): from openpype.hosts.photoshop import api - avalon.api.install(api) + install_host(api) sys.excepthook = safe_excepthook # coloring in StdOutBroker diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py index 7fdaa61b40..20a6e3169f 100644 --- a/openpype/hosts/photoshop/api/pipeline.py +++ b/openpype/hosts/photoshop/api/pipeline.py @@ -3,17 +3,17 @@ from Qt import QtWidgets from bson.objectid import ObjectId import pyblish.api -import avalon.api -from avalon import io from openpype.api import Logger from openpype.lib import register_event_callback from openpype.pipeline import ( + legacy_io, register_loader_plugin_path, register_creator_plugin_path, deregister_loader_plugin_path, deregister_creator_plugin_path, AVALON_CONTAINER_ID, + registered_host, ) import openpype.hosts.photoshop @@ -33,22 +33,11 @@ def check_inventory(): if not lib.any_outdated(): return - host = avalon.api.registered_host() - outdated_containers = [] - for container in host.ls(): - representation = container['representation'] - representation_doc = io.find_one( - { - "_id": ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} - ) - if representation_doc and not lib.is_latest(representation_doc): - outdated_containers.append(container) - # Warn about outdated containers. 
- print("Starting new QApplication..") + _app = QtWidgets.QApplication.instance() + if not _app: + print("Starting new QApplication..") + _app = QtWidgets.QApplication([]) message_box = QtWidgets.QMessageBox() message_box.setIcon(QtWidgets.QMessageBox.Warning) @@ -149,13 +138,9 @@ def list_instances(): instances = [] layers_meta = stub.get_layers_metadata() if layers_meta: - for key, instance in layers_meta.items(): - schema = instance.get("schema") - if schema and "container" in schema: - continue - - instance['uuid'] = key - instances.append(instance) + for instance in layers_meta: + if instance.get("id") == "pyblish.avalon.instance": + instances.append(instance) return instances @@ -176,11 +161,18 @@ def remove_instance(instance): if not stub: return - stub.remove_instance(instance.get("uuid")) - layer = stub.get_layer(instance.get("uuid")) - if layer: - stub.rename_layer(instance.get("uuid"), - layer.name.replace(stub.PUBLISH_ICON, '')) + inst_id = instance.get("instance_id") or instance.get("uuid") # legacy + if not inst_id: + log.warning("No instance identifier for {}".format(instance)) + return + + stub.remove_instance(inst_id) + + if instance.get("members"): + item = stub.get_layer(instance["members"][0]) + if item: + stub.rename_layer(item.id, + item.name.replace(stub.PUBLISH_ICON, '')) def _get_stub(): @@ -232,6 +224,33 @@ def containerise( "members": [str(layer.id)] } stub = lib.stub() - stub.imprint(layer, data) + stub.imprint(layer.id, data) return layer + + +def get_context_data(): + """Get stored values for context (validation enable/disable etc)""" + meta = _get_stub().get_layers_metadata() + for item in meta: + if item.get("id") == "publish_context": + item.pop("id") + return item + + return {} + + +def update_context_data(data, changes): + """Store value needed for context""" + item = data + item["id"] = "publish_context" + _get_stub().imprint(item["id"], item) + + +def get_context_title(): + """Returns title for Creator window""" + + project_name 
= legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + return "{}/{}/{}".format(project_name, asset_name, task_name) diff --git a/openpype/hosts/photoshop/api/ws_stub.py b/openpype/hosts/photoshop/api/ws_stub.py index 64d89f5420..b49bf1c73f 100644 --- a/openpype/hosts/photoshop/api/ws_stub.py +++ b/openpype/hosts/photoshop/api/ws_stub.py @@ -27,6 +27,17 @@ class PSItem(object): members = attr.ib(factory=list) long_name = attr.ib(default=None) color_code = attr.ib(default=None) # color code of layer + instance_id = attr.ib(default=None) + + @property + def clean_name(self): + """Returns layer name without publish icon highlight + + Returns: + (str) + """ + return (self.name.replace(PhotoshopServerStub.PUBLISH_ICON, '') + .replace(PhotoshopServerStub.LOADED_ICON, '')) class PhotoshopServerStub: @@ -76,13 +87,31 @@ class PhotoshopServerStub: layer: (PSItem) layers_meta: full list from Headline (for performance in loops) Returns: + (dict) of layer metadata stored in PS file + + Example: + { + 'id': 'pyblish.avalon.container', + 'loader': 'ImageLoader', + 'members': ['64'], + 'name': 'imageMainMiddle', + 'namespace': 'Hero_imageMainMiddle_001', + 'representation': '6203dc91e80934d9f6ee7d96', + 'schema': 'openpype:container-2.0' + } """ if layers_meta is None: layers_meta = self.get_layers_metadata() - return layers_meta.get(str(layer.id)) + for layer_meta in layers_meta: + layer_id = layer_meta.get("uuid") # legacy + if layer_meta.get("members"): + layer_id = layer_meta["members"][0] + if str(layer.id) == str(layer_id): + return layer_meta + print("Unable to find layer metadata for {}".format(layer.id)) - def imprint(self, layer, data, all_layers=None, layers_meta=None): + def imprint(self, item_id, data, all_layers=None, items_meta=None): """Save layer metadata to Headline field of active document Stores metadata in format: @@ -108,28 +137,37 @@ class PhotoshopServerStub: }] - for loaded 
instances Args: - layer (PSItem): + item_id (str): data(string): json representation for single layer all_layers (list of PSItem): for performance, could be injected for usage in loop, if not, single call will be triggered - layers_meta(string): json representation from Headline + items_meta(string): json representation from Headline (for performance - provide only if imprint is in loop - value should be same) Returns: None """ - if not layers_meta: - layers_meta = self.get_layers_metadata() + if not items_meta: + items_meta = self.get_layers_metadata() # json.dumps writes integer values in a dictionary to string, so # anticipating it here. - if str(layer.id) in layers_meta and layers_meta[str(layer.id)]: - if data: - layers_meta[str(layer.id)].update(data) + item_id = str(item_id) + is_new = True + result_meta = [] + for item_meta in items_meta: + if ((item_meta.get('members') and + item_id == str(item_meta.get('members')[0])) or + item_meta.get("instance_id") == item_id): + is_new = False + if data: + item_meta.update(data) + result_meta.append(item_meta) else: - layers_meta.pop(str(layer.id)) - else: - layers_meta[str(layer.id)] = data + result_meta.append(item_meta) + + if is_new: + result_meta.append(data) # Ensure only valid ids are stored. 
if not all_layers: @@ -137,12 +175,14 @@ class PhotoshopServerStub: layer_ids = [layer.id for layer in all_layers] cleaned_data = [] - for layer_id in layers_meta: - if int(layer_id) in layer_ids: - cleaned_data.append(layers_meta[layer_id]) + for item in result_meta: + if item.get("members"): + if int(item["members"][0]) not in layer_ids: + continue + + cleaned_data.append(item) payload = json.dumps(cleaned_data, indent=4) - self.websocketserver.call( self.client.call('Photoshop.imprint', payload=payload) ) @@ -370,38 +410,27 @@ class PhotoshopServerStub: (Headline accessible by File > File Info) Returns: - (string): - json documents + (list) example: {"8":{"active":true,"subset":"imageBG", "family":"image","id":"pyblish.avalon.instance", "asset":"Town"}} 8 is layer(group) id - used for deletion, update etc. """ - layers_data = {} res = self.websocketserver.call(self.client.call('Photoshop.read')) + layers_data = [] try: - layers_data = json.loads(res) + if res: + layers_data = json.loads(res) except json.decoder.JSONDecodeError: - pass + raise ValueError("{} cannot be parsed, recreate meta".format(res)) # format of metadata changed from {} to [] because of standardization # keep current implementation logic as its working - if not isinstance(layers_data, dict): - temp_layers_meta = {} - for layer_meta in layers_data: - layer_id = layer_meta.get("uuid") - if not layer_id: - layer_id = layer_meta.get("members")[0] - - temp_layers_meta[layer_id] = layer_meta - layers_data = temp_layers_meta - else: - # legacy version of metadata + if isinstance(layers_data, dict): for layer_id, layer_meta in layers_data.items(): if layer_meta.get("schema") != "openpype:container-2.0": - layer_meta["uuid"] = str(layer_id) - else: layer_meta["members"] = [str(layer_id)] - + layers_data = list(layers_data.values()) return layers_data def import_smart_object(self, path, layer_name, as_reference=False): @@ -472,11 +501,12 @@ class PhotoshopServerStub: ) def remove_instance(self, 
instance_id): - cleaned_data = {} + cleaned_data = [] - for key, instance in self.get_layers_metadata().items(): - if key != instance_id: - cleaned_data[key] = instance + for item in self.get_layers_metadata(): + inst_id = item.get("instance_id") or item.get("uuid") + if inst_id != instance_id: + cleaned_data.append(item) payload = json.dumps(cleaned_data, indent=4) @@ -528,6 +558,7 @@ class PhotoshopServerStub: d.get('type'), d.get('members'), d.get('long_name'), - d.get("color_code") + d.get("color_code"), + d.get("instance_id") )) return ret diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index 5078cbb587..f15068b031 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -1,99 +1,145 @@ -from Qt import QtWidgets -from openpype.pipeline import LegacyCreator -from openpype.hosts.photoshop import api as photoshop +from openpype.hosts.photoshop import api +from openpype.lib import BoolDef +from openpype.pipeline import ( + Creator, + CreatedInstance, + legacy_io +) -class CreateImage(LegacyCreator): - """Image folder for publish.""" - - name = "imageDefault" +class ImageCreator(Creator): + """Creates image instance for publishing.""" + identifier = "image" label = "Image" family = "image" - defaults = ["Main"] + description = "Image creator" - def process(self): - groups = [] - layers = [] - create_group = False + def collect_instances(self): + for instance_data in api.list_instances(): + # legacy instances have family=='image' + creator_id = (instance_data.get("creator_identifier") or + instance_data.get("family")) - stub = photoshop.stub() - if (self.options or {}).get("useSelection"): - multiple_instances = False - selection = stub.get_selected_layers() - self.log.info("selection {}".format(selection)) - if len(selection) > 1: - # Ask user whether to create one image or image per selected - # item. 
- msg_box = QtWidgets.QMessageBox() - msg_box.setIcon(QtWidgets.QMessageBox.Warning) - msg_box.setText( - "Multiple layers selected." - "\nDo you want to make one image per layer?" + if creator_id == self.identifier: + instance_data = self._handle_legacy(instance_data) + layer = api.stub().get_layer(instance_data["members"][0]) + instance_data["layer"] = layer + instance = CreatedInstance.from_existing( + instance_data, self ) - msg_box.setStandardButtons( - QtWidgets.QMessageBox.Yes | - QtWidgets.QMessageBox.No | - QtWidgets.QMessageBox.Cancel - ) - ret = msg_box.exec_() - if ret == QtWidgets.QMessageBox.Yes: - multiple_instances = True - elif ret == QtWidgets.QMessageBox.Cancel: - return + self._add_instance_to_context(instance) - if multiple_instances: - for item in selection: - if item.group: - groups.append(item) - else: - layers.append(item) + def create(self, subset_name_from_ui, data, pre_create_data): + groups_to_create = [] + top_layers_to_wrap = [] + create_empty_group = False + + stub = api.stub() # only after PS is up + top_level_selected_items = stub.get_selected_layers() + if pre_create_data.get("use_selection"): + only_single_item_selected = len(top_level_selected_items) == 1 + for selected_item in top_level_selected_items: + if ( + only_single_item_selected or + pre_create_data.get("create_multiple")): + if selected_item.group: + groups_to_create.append(selected_item) + else: + top_layers_to_wrap.append(selected_item) else: - group = stub.group_selected_layers(self.name) - groups.append(group) + group = stub.group_selected_layers(subset_name_from_ui) + groups_to_create.append(group) - elif len(selection) == 1: - # One selected item. Use group if its a LayerSet (group), else - # create a new group. - if selection[0].group: - groups.append(selection[0]) - else: - layers.append(selection[0]) - elif len(selection) == 0: - # No selection creates an empty group. 
- create_group = True - else: - group = stub.create_group(self.name) - groups.append(group) + if not groups_to_create and not top_layers_to_wrap: + group = stub.create_group(subset_name_from_ui) + groups_to_create.append(group) - if create_group: - group = stub.create_group(self.name) - groups.append(group) - - for layer in layers: + # wrap each top level layer into separate new group + for layer in top_layers_to_wrap: stub.select_layers([layer]) group = stub.group_selected_layers(layer.name) - groups.append(group) + groups_to_create.append(group) - creator_subset_name = self.data["subset"] - for group in groups: - long_names = [] - group.name = group.name.replace(stub.PUBLISH_ICON, ''). \ - replace(stub.LOADED_ICON, '') + creating_multiple_groups = len(groups_to_create) > 1 + for group in groups_to_create: + subset_name = subset_name_from_ui # reset to name from creator UI + layer_names_in_hierarchy = [] + created_group_name = self._clean_highlights(stub, group.name) - subset_name = creator_subset_name - if len(groups) > 1: + if creating_multiple_groups: + # concatenate with layer name to differentiate subsets subset_name += group.name.title().replace(" ", "") if group.long_name: for directory in group.long_name[::-1]: - name = directory.replace(stub.PUBLISH_ICON, '').\ - replace(stub.LOADED_ICON, '') - long_names.append(name) + name = self._clean_highlights(stub, directory) + layer_names_in_hierarchy.append(name) - self.data.update({"subset": subset_name}) - self.data.update({"uuid": str(group.id)}) - self.data.update({"long_name": "_".join(long_names)}) - stub.imprint(group, self.data) + data.update({"subset": subset_name}) + data.update({"members": [str(group.id)]}) + data.update({"long_name": "_".join(layer_names_in_hierarchy)}) + + new_instance = CreatedInstance(self.family, subset_name, data, + self) + + stub.imprint(new_instance.get("instance_id"), + new_instance.data_to_store()) + self._add_instance_to_context(new_instance) # reusing existing group, need 
to rename afterwards - if not create_group: - stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name) + if not create_empty_group: + stub.rename_layer(group.id, + stub.PUBLISH_ICON + created_group_name) + + def update_instances(self, update_list): + self.log.debug("update_list:: {}".format(update_list)) + for created_inst, _changes in update_list: + if created_inst.get("layer"): + # not storing PSItem layer to metadata + created_inst.pop("layer") + api.stub().imprint(created_inst.get("instance_id"), + created_inst.data_to_store()) + + def remove_instances(self, instances): + for instance in instances: + api.remove_instance(instance) + self._remove_instance_from_context(instance) + + def get_default_variants(self): + return [ + "Main" + ] + + def get_pre_create_attr_defs(self): + output = [ + BoolDef("use_selection", default=True, + label="Create only for selected"), + BoolDef("create_multiple", + default=True, + label="Create separate instance for each selected") + ] + return output + + def get_detail_description(self): + return """Creator for Image instances""" + + def _handle_legacy(self, instance_data): + """Converts old instances to new format.""" + if not instance_data.get("members"): + instance_data["members"] = [instance_data.get("uuid")] + + if instance_data.get("uuid"): + # uuid not needed, replaced with unique instance_id + api.stub().remove_instance(instance_data.get("uuid")) + instance_data.pop("uuid") + + if not instance_data.get("task"): + instance_data["task"] = legacy_io.Session.get("AVALON_TASK") + + if not instance_data.get("variant"): + instance_data["variant"] = '' + + return instance_data + + def _clean_highlights(self, stub, item): + return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON, + '') diff --git a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py new file mode 100644 index 0000000000..9736471a26 --- /dev/null +++ 
b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py @@ -0,0 +1,100 @@ +from Qt import QtWidgets +from openpype.pipeline import create +from openpype.hosts.photoshop import api as photoshop + + +class CreateImage(create.LegacyCreator): + """Image folder for publish.""" + + name = "imageDefault" + label = "Image" + family = "image" + defaults = ["Main"] + + def process(self): + groups = [] + layers = [] + create_group = False + + stub = photoshop.stub() + if (self.options or {}).get("useSelection"): + multiple_instances = False + selection = stub.get_selected_layers() + self.log.info("selection {}".format(selection)) + if len(selection) > 1: + # Ask user whether to create one image or image per selected + # item. + msg_box = QtWidgets.QMessageBox() + msg_box.setIcon(QtWidgets.QMessageBox.Warning) + msg_box.setText( + "Multiple layers selected." + "\nDo you want to make one image per layer?" + ) + msg_box.setStandardButtons( + QtWidgets.QMessageBox.Yes | + QtWidgets.QMessageBox.No | + QtWidgets.QMessageBox.Cancel + ) + ret = msg_box.exec_() + if ret == QtWidgets.QMessageBox.Yes: + multiple_instances = True + elif ret == QtWidgets.QMessageBox.Cancel: + return + + if multiple_instances: + for item in selection: + if item.group: + groups.append(item) + else: + layers.append(item) + else: + group = stub.group_selected_layers(self.name) + groups.append(group) + + elif len(selection) == 1: + # One selected item. Use group if its a LayerSet (group), else + # create a new group. + if selection[0].group: + groups.append(selection[0]) + else: + layers.append(selection[0]) + elif len(selection) == 0: + # No selection creates an empty group. 
+ create_group = True + else: + group = stub.create_group(self.name) + groups.append(group) + + if create_group: + group = stub.create_group(self.name) + groups.append(group) + + for layer in layers: + stub.select_layers([layer]) + group = stub.group_selected_layers(layer.name) + groups.append(group) + + creator_subset_name = self.data["subset"] + for group in groups: + long_names = [] + group.name = group.name.replace(stub.PUBLISH_ICON, ''). \ + replace(stub.LOADED_ICON, '') + + subset_name = creator_subset_name + if len(groups) > 1: + subset_name += group.name.title().replace(" ", "") + + if group.long_name: + for directory in group.long_name[::-1]: + name = directory.replace(stub.PUBLISH_ICON, '').\ + replace(stub.LOADED_ICON, '') + long_names.append(name) + + self.data.update({"subset": subset_name}) + self.data.update({"uuid": str(group.id)}) + self.data.update({"members": [str(group.id)]}) + self.data.update({"long_name": "_".join(long_names)}) + stub.imprint(group, self.data) + # reusing existing group, need to rename afterwards + if not create_group: + stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name) diff --git a/openpype/hosts/photoshop/plugins/create/workfile_creator.py b/openpype/hosts/photoshop/plugins/create/workfile_creator.py new file mode 100644 index 0000000000..875a9b8a94 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/create/workfile_creator.py @@ -0,0 +1,78 @@ +import openpype.hosts.photoshop.api as api +from openpype.pipeline import ( + AutoCreator, + CreatedInstance, + legacy_io +) + + +class PSWorkfileCreator(AutoCreator): + identifier = "workfile" + family = "workfile" + + def get_instance_attr_defs(self): + return [] + + def collect_instances(self): + for instance_data in api.list_instances(): + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + subset_name = instance_data["subset"] + instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + 
self._add_instance_to_context(instance) + + def update_instances(self, update_list): + # nothing to change on workfiles + pass + + def create(self, options=None): + existing_instance = None + for instance in self.create_context.instances: + if instance.family == self.family: + existing_instance = instance + break + + variant = '' + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] + if existing_instance is None: + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": variant + } + data.update(self.get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name + )) + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(new_instance) + api.stub().imprint(new_instance.get("instance_id"), + new_instance.data_to_store()) + + elif ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name diff --git a/openpype/hosts/photoshop/plugins/load/load_image.py b/openpype/hosts/photoshop/plugins/load/load_image.py index 0a9421b8f2..91a9787781 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image.py +++ b/openpype/hosts/photoshop/plugins/load/load_image.py @@ -61,7 +61,7 @@ class ImageLoader(photoshop.PhotoshopLoader): ) stub.imprint( - layer, {"representation": str(representation["_id"])} + layer.id, {"representation": str(representation["_id"])} ) def remove(self, container): @@ -73,7 
+73,7 @@ class ImageLoader(photoshop.PhotoshopLoader): stub = self.get_stub() layer = container.pop("layer") - stub.imprint(layer, {}) + stub.imprint(layer.id, {}) stub.delete_layer(layer.id) def switch(self, container, representation): diff --git a/openpype/hosts/photoshop/plugins/load/load_reference.py b/openpype/hosts/photoshop/plugins/load/load_reference.py index f5f0545d39..1f32a5d23c 100644 --- a/openpype/hosts/photoshop/plugins/load/load_reference.py +++ b/openpype/hosts/photoshop/plugins/load/load_reference.py @@ -61,7 +61,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader): ) stub.imprint( - layer, {"representation": str(representation["_id"])} + layer.id, {"representation": str(representation["_id"])} ) def remove(self, container): @@ -72,7 +72,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader): """ stub = self.get_stub() layer = container.pop("layer") - stub.imprint(layer, {}) + stub.imprint(layer.id, {}) stub.delete_layer(layer.id) def switch(self, container, representation): diff --git a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py new file mode 100644 index 0000000000..2881ef0ea6 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py @@ -0,0 +1,77 @@ +"""Parses batch context from json and continues in publish process. + +Provides: + context -> Loaded batch file. + - asset + - task (task name) + - taskType + - project_name + - variant + +Code is practically copy of `openype/hosts/webpublish/collect_batch_data` as +webpublisher should be eventually ejected as an addon, eg. mentioned plugin +shouldn't be pushed into general publish plugins. +""" + +import os + +import pyblish.api + +from openpype.lib.plugin_tools import ( + parse_json, + get_batch_asset_task_info +) +from openpype.pipeline import legacy_io + + +class CollectBatchData(pyblish.api.ContextPlugin): + """Collect batch data from json stored in 'OPENPYPE_PUBLISH_DATA' env dir. 
+ + The directory must contain 'manifest.json' file where batch data should be + stored. + """ + # must be really early, context values are only in json file + order = pyblish.api.CollectorOrder - 0.495 + label = "Collect batch data" + hosts = ["photoshop"] + targets = ["remotepublish"] + + def process(self, context): + self.log.info("CollectBatchData") + batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") + if os.environ.get("IS_TEST"): + self.log.debug("Automatic testing, no batch data, skipping") + return + + assert batch_dir, ( + "Missing `OPENPYPE_PUBLISH_DATA`") + + assert os.path.exists(batch_dir), \ + "Folder {} doesn't exist".format(batch_dir) + + project_name = os.environ.get("AVALON_PROJECT") + if project_name is None: + raise AssertionError( + "Environment `AVALON_PROJECT` was not found." + "Could not set project `root` which may cause issues." + ) + + batch_data = parse_json(os.path.join(batch_dir, "manifest.json")) + + context.data["batchDir"] = batch_dir + context.data["batchData"] = batch_data + + asset_name, task_name, task_type = get_batch_asset_task_info( + batch_data["context"] + ) + + os.environ["AVALON_ASSET"] = asset_name + os.environ["AVALON_TASK"] = task_name + legacy_io.Session["AVALON_ASSET"] = asset_name + legacy_io.Session["AVALON_TASK"] = task_name + + context.data["asset"] = asset_name + context.data["task"] = task_name + context.data["taskType"] = task_type + context.data["project_name"] = project_name + context.data["variant"] = batch_data["variant"] diff --git a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py index 7d44d55a80..71bd2cd854 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py @@ -4,8 +4,8 @@ import re import pyblish.api from openpype.lib import prepare_template_data -from openpype.lib.plugin_tools import 
parse_json, get_batch_asset_task_info from openpype.hosts.photoshop import api as photoshop +from openpype.settings import get_project_settings class CollectColorCodedInstances(pyblish.api.ContextPlugin): @@ -46,7 +46,16 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): existing_subset_names = self._get_existing_subset_names(context) - asset_name, task_name, variant = self._parse_batch(batch_dir) + # from CollectBatchData + asset_name = context.data["asset"] + task_name = context.data["task"] + variant = context.data["variant"] + project_name = context.data["projectEntity"]["name"] + + naming_conventions = get_project_settings(project_name).get( + "photoshop", {}).get( + "publish", {}).get( + "ValidateNaming", {}) stub = photoshop.stub() layers = stub.get_layers() @@ -75,12 +84,15 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): "variant": variant, "family": resolved_family, "task": task_name, - "layer": layer.name + "layer": layer.clean_name } subset = resolved_subset_template.format( **prepare_template_data(fill_pairs)) + subset = self._clean_subset_name(stub, naming_conventions, + subset, layer) + if subset in existing_subset_names: self.log.info( "Subset {} already created, skipping.".format(subset)) @@ -130,25 +142,6 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): return existing_subset_names - def _parse_batch(self, batch_dir): - """Parses asset_name, task_name, variant from batch manifest.""" - task_data = None - if batch_dir and os.path.exists(batch_dir): - task_data = parse_json(os.path.join(batch_dir, - "manifest.json")) - if not task_data: - raise ValueError( - "Cannot parse batch meta in {} folder".format(batch_dir)) - variant = task_data["variant"] - - asset, task_name, task_type = get_batch_asset_task_info( - task_data["context"]) - - if not task_name: - task_name = task_type - - return asset, task_name, variant - def _create_instance(self, context, layer, family, asset, subset, task_name): instance = 
context.create_instance(layer.name) @@ -158,6 +151,7 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): instance.data["task"] = task_name instance.data["subset"] = subset instance.data["layer"] = layer + instance.data["families"] = [] return instance @@ -203,3 +197,21 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): self.log.debug("resolved_subset_template {}".format( resolved_subset_template)) return family, resolved_subset_template + + def _clean_subset_name(self, stub, naming_conventions, subset, layer): + """Cleans invalid characters from subset name and layer name.""" + if re.search(naming_conventions["invalid_chars"], subset): + subset = re.sub( + naming_conventions["invalid_chars"], + naming_conventions["replace_char"], + subset + ) + layer_name = re.sub( + naming_conventions["invalid_chars"], + naming_conventions["replace_char"], + layer.clean_name + ) + layer.name = layer_name + stub.rename_layer(layer.id, layer_name) + + return subset diff --git a/openpype/hosts/photoshop/plugins/publish/collect_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_instances.py index 6198ed0156..b466ec8687 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_instances.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_instances.py @@ -1,16 +1,18 @@ -from avalon import api +import pprint + import pyblish.api from openpype.settings import get_project_settings from openpype.hosts.photoshop import api as photoshop from openpype.lib import prepare_template_data +from openpype.pipeline import legacy_io class CollectInstances(pyblish.api.ContextPlugin): """Gather instances by LayerSet and file metadata - This collector takes into account assets that are associated with - an LayerSet and marked with a unique identifier; + Collects publishable instances from file metadata or enhance + already collected by creator (family == "image"). 
If no image instances are explicitly created, it looks if there is value in `flatten_subset_template` (configurable in Settings), in that case it @@ -20,7 +22,7 @@ class CollectInstances(pyblish.api.ContextPlugin): id (str): "pyblish.avalon.instance" """ - label = "Instances" + label = "Collect Instances" order = pyblish.api.CollectorOrder hosts = ["photoshop"] families_mapping = { @@ -30,42 +32,53 @@ class CollectInstances(pyblish.api.ContextPlugin): flatten_subset_template = "" def process(self, context): + instance_by_layer_id = {} + for instance in context: + if ( + instance.data["family"] == "image" and + instance.data.get("members")): + layer_id = str(instance.data["members"][0]) + instance_by_layer_id[layer_id] = instance + stub = photoshop.stub() - layers = stub.get_layers() + layer_items = stub.get_layers() layers_meta = stub.get_layers_metadata() instance_names = [] + all_layer_ids = [] - for layer in layers: - all_layer_ids.append(layer.id) - layer_data = stub.read(layer, layers_meta) + for layer_item in layer_items: + layer_meta_data = stub.read(layer_item, layers_meta) + all_layer_ids.append(layer_item.id) # Skip layers without metadata. - if layer_data is None: + if layer_meta_data is None: continue # Skip containers. - if "container" in layer_data["id"]: + if "container" in layer_meta_data["id"]: continue - # child_layers = [*layer.Layers] - # self.log.debug("child_layers {}".format(child_layers)) - # if not child_layers: - # self.log.info("%s skipped, it was empty." 
% layer.Name) - # continue + # active might not be in legacy meta + if not layer_meta_data.get("active", True): + continue - instance = context.create_instance(layer_data["subset"]) - instance.data["layer"] = layer - instance.data.update(layer_data) + instance = instance_by_layer_id.get(str(layer_item.id)) + if instance is None: + instance = context.create_instance(layer_meta_data["subset"]) + + instance.data["layer"] = layer_item + instance.data.update(layer_meta_data) instance.data["families"] = self.families_mapping[ - layer_data["family"] + layer_meta_data["family"] ] - instance.data["publish"] = layer.visible - instance_names.append(layer_data["subset"]) + instance.data["publish"] = layer_item.visible + instance_names.append(layer_meta_data["subset"]) # Produce diagnostic message for any graphical # user interface interested in visualising it. self.log.info("Found: \"%s\" " % instance.data["name"]) - self.log.info("instance: {} ".format(instance.data)) + self.log.info("instance: {} ".format( + pprint.pformat(instance.data, indent=4))) if len(instance_names) != len(set(instance_names)): self.log.warning("Duplicate instances found. 
" + @@ -79,11 +92,12 @@ class CollectInstances(pyblish.api.ContextPlugin): "CreateImage", {}).get( "defaults", ['']) family = "image" - task_name = api.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] asset_name = context.data["assetEntity"]["name"] + variant = context.data.get("variant") or variants[0] fill_pairs = { - "variant": variants[0], + "variant": variant, "family": family, "task": task_name } diff --git a/openpype/hosts/photoshop/plugins/publish/collect_review.py b/openpype/hosts/photoshop/plugins/publish/collect_review.py index f3842b9ee5..2ea5503f3f 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_review.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_review.py @@ -1,3 +1,11 @@ +""" +Requires: + None + +Provides: + instance -> family ("review") +""" + import os import pyblish.api @@ -6,33 +14,35 @@ from openpype.lib import get_subset_name_with_asset_doc class CollectReview(pyblish.api.ContextPlugin): - """Gather the active document as review instance.""" + """Gather the active document as review instance. + Triggers once even if no 'image' is published as by defaults it creates + flatten image from a workfile. 
+ """ + + label = "Collect Review" label = "Review" - order = pyblish.api.CollectorOrder + 0.1 hosts = ["photoshop"] + order = pyblish.api.CollectorOrder + 0.1 def process(self, context): family = "review" subset = get_subset_name_with_asset_doc( family, - "", + context.data.get("variant", ''), context.data["anatomyData"]["task"]["name"], context.data["assetEntity"], context.data["anatomyData"]["project"]["name"], host_name=context.data["hostName"] ) - file_path = context.data["currentFile"] - base_name = os.path.basename(file_path) - instance = context.create_instance(subset) instance.data.update({ "subset": subset, - "label": base_name, - "name": base_name, + "label": subset, + "name": subset, "family": family, - "families": ["ftrack"], + "families": [], "representations": [], "asset": os.environ["AVALON_ASSET"] }) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py index 0dbe2c6609..e4f0a07b34 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py @@ -12,6 +12,13 @@ class CollectWorkfile(pyblish.api.ContextPlugin): hosts = ["photoshop"] def process(self, context): + existing_instance = None + for instance in context: + if instance.data["family"] == "workfile": + self.log.debug("Workfile instance found, won't create new") + existing_instance = instance + break + family = "workfile" subset = get_subset_name_with_asset_doc( family, @@ -27,16 +34,19 @@ class CollectWorkfile(pyblish.api.ContextPlugin): base_name = os.path.basename(file_path) # Create instance - instance = context.create_instance(subset) - instance.data.update({ - "subset": subset, - "label": base_name, - "name": base_name, - "family": family, - "families": [], - "representations": [], - "asset": os.environ["AVALON_ASSET"] - }) + if existing_instance is None: + instance = context.create_instance(subset) + instance.data.update({ + 
"subset": subset, + "label": base_name, + "name": base_name, + "family": family, + "families": [], + "representations": [], + "asset": os.environ["AVALON_ASSET"] + }) + else: + instance = existing_instance # creating representation _, ext = os.path.splitext(file_path) diff --git a/openpype/hosts/photoshop/plugins/publish/extract_image.py b/openpype/hosts/photoshop/plugins/publish/extract_image.py index b07d0740c1..a133e33409 100644 --- a/openpype/hosts/photoshop/plugins/publish/extract_image.py +++ b/openpype/hosts/photoshop/plugins/publish/extract_image.py @@ -16,7 +16,6 @@ class ExtractImage(openpype.api.Extractor): formats = ["png", "jpg"] def process(self, instance): - staging_dir = self.staging_dir(instance) self.log.info("Outputting image to {}".format(staging_dir)) diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml b/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml new file mode 100644 index 0000000000..5a1e266748 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml @@ -0,0 +1,21 @@ + + + +Subset name + +## Invalid subset or layer name + +Subset or layer name cannot contain specific characters (spaces etc) which could cause issue when subset name is used in a published file name. + {msg} + +### How to repair? + +You can fix this with "repair" button on the right. + + +### __Detailed Info__ (optional) + +Not all characters are available in a file names on all OS. Wrong characters could be configured in Settings. 
+ + + \ No newline at end of file diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_unique_subsets.xml b/openpype/hosts/photoshop/plugins/publish/help/validate_unique_subsets.xml new file mode 100644 index 0000000000..4b47973193 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/help/validate_unique_subsets.xml @@ -0,0 +1,14 @@ + + + +Subset not unique + +## Non unique subset name found + + Non unique subset names: '{non_unique}' +### How to repair? + +Remove offending instance, rename it to have unique name. Maybe layer name wasn't used for multiple instances? + + + \ No newline at end of file diff --git a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py index ebe9cc21ea..b65f9d259f 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py @@ -1,6 +1,7 @@ -from avalon import api import pyblish.api + import openpype.api +from openpype.pipeline import legacy_io from openpype.hosts.photoshop import api as photoshop @@ -26,7 +27,7 @@ class ValidateInstanceAssetRepair(pyblish.api.Action): for instance in instances: data = stub.read(instance[0]) - data["asset"] = api.Session["AVALON_ASSET"] + data["asset"] = legacy_io.Session["AVALON_ASSET"] stub.imprint(instance[0], data) @@ -48,7 +49,7 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin): def process(self, instance): instance_asset = instance.data["asset"] - current_asset = api.Session["AVALON_ASSET"] + current_asset = legacy_io.Session["AVALON_ASSET"] msg = ( f"Instance asset {instance_asset} is not the same " f"as current context {current_asset}. 
PLEASE DO:\n" diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/openpype/hosts/photoshop/plugins/publish/validate_naming.py index 583e9c7a4e..b53f4e8198 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_naming.py @@ -2,6 +2,7 @@ import re import pyblish.api import openpype.api +from openpype.pipeline import PublishXmlValidationError from openpype.hosts.photoshop import api as photoshop @@ -22,33 +23,35 @@ class ValidateNamingRepair(pyblish.api.Action): failed.append(result["instance"]) invalid_chars, replace_char = plugin.get_replace_chars() - self.log.info("{} --- {}".format(invalid_chars, replace_char)) + self.log.debug("{} --- {}".format(invalid_chars, replace_char)) # Apply pyblish.logic to get the instances for the plug-in instances = pyblish.api.instances_by_plugin(failed, plugin) stub = photoshop.stub() for instance in instances: - self.log.info("validate_naming instance {}".format(instance)) - layer_item = instance.data["layer"] - metadata = stub.read(layer_item) - self.log.info("metadata instance {}".format(metadata)) - layer_name = None - if metadata.get("uuid"): - layer_data = stub.get_layer(metadata["uuid"]) - self.log.info("layer_data {}".format(layer_data)) - if layer_data: - layer_name = re.sub(invalid_chars, - replace_char, - layer_data.name) + self.log.debug("validate_naming instance {}".format(instance)) + current_layer_state = stub.get_layer(instance.data["layer"].id) + self.log.debug("current_layer{}".format(current_layer_state)) - stub.rename_layer(instance.data["uuid"], layer_name) + layer_meta = stub.read(current_layer_state) + instance_id = (layer_meta.get("instance_id") or + layer_meta.get("uuid")) + if not instance_id: + self.log.warning("Unable to repair, cannot find layer") + continue + + layer_name = re.sub(invalid_chars, + replace_char, + current_layer_state.clean_name) + layer_name = stub.PUBLISH_ICON + layer_name + + 
stub.rename_layer(current_layer_state.id, layer_name) subset_name = re.sub(invalid_chars, replace_char, instance.data["subset"]) - layer_item.name = layer_name or subset_name - metadata["subset"] = subset_name - stub.imprint(layer_item, metadata) + layer_meta["subset"] = subset_name + stub.imprint(instance_id, layer_meta) return True @@ -71,13 +74,24 @@ class ValidateNaming(pyblish.api.InstancePlugin): def process(self, instance): help_msg = ' Use Repair action (A) in Pyblish to fix it.' - msg = "Name \"{}\" is not allowed.{}".format(instance.data["name"], - help_msg) - assert not re.search(self.invalid_chars, instance.data["name"]), msg + + layer = instance.data.get("layer") + if layer: + msg = "Name \"{}\" is not allowed.{}".format(layer.clean_name, + help_msg) + + formatting_data = {"msg": msg} + if re.search(self.invalid_chars, layer.clean_name): + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data + ) msg = "Subset \"{}\" is not allowed.{}".format(instance.data["subset"], help_msg) - assert not re.search(self.invalid_chars, instance.data["subset"]), msg + formatting_data = {"msg": msg} + if re.search(self.invalid_chars, instance.data["subset"]): + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) @classmethod def get_replace_chars(cls): diff --git a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py b/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py index 40abfb1bbd..01f2323157 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py @@ -1,6 +1,7 @@ import collections import pyblish.api import openpype.api +from openpype.pipeline import PublishXmlValidationError class ValidateSubsetUniqueness(pyblish.api.ContextPlugin): @@ -27,4 +28,10 @@ class ValidateSubsetUniqueness(pyblish.api.ContextPlugin): if count > 1] msg = ("Instance subset names {} are not unique. 
".format(non_unique) + "Remove duplicates via SubsetManager.") - assert not non_unique, msg + formatting_data = { + "non_unique": ",".join(non_unique) + } + + if non_unique: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py index 636c826a11..4a7d1c5bea 100644 --- a/openpype/hosts/resolve/api/pipeline.py +++ b/openpype/hosts/resolve/api/pipeline.py @@ -7,10 +7,9 @@ from collections import OrderedDict from pyblish import api as pyblish -from avalon import schema - from openpype.api import Logger from openpype.pipeline import ( + schema, register_loader_plugin_path, register_creator_plugin_path, deregister_loader_plugin_path, diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py index 71850d95f6..cf88b14e81 100644 --- a/openpype/hosts/resolve/plugins/load/load_clip.py +++ b/openpype/hosts/resolve/plugins/load/load_clip.py @@ -1,9 +1,11 @@ from copy import deepcopy from importlib import reload -from avalon import io from openpype.hosts import resolve -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + legacy_io, +) from openpype.hosts.resolve.api import lib, plugin reload(plugin) reload(lib) @@ -94,7 +96,7 @@ class LoadClip(resolve.TimelineItemLoader): namespace = container['namespace'] timeline_item_data = resolve.get_pype_timeline_item_by_name(namespace) timeline_item = timeline_item_data["clip"]["item"] - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -140,7 +142,7 @@ class LoadClip(resolve.TimelineItemLoader): # define version name version_name = version.get("name", None) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git 
a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py index 1333516177..a58f288770 100644 --- a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py +++ b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py @@ -1,10 +1,9 @@ import pyblish.api -from openpype.hosts import resolve -from avalon import api as avalon from pprint import pformat - -# dev from importlib import reload + +from openpype.hosts import resolve +from openpype.pipeline import legacy_io from openpype.hosts.resolve.otio import davinci_export reload(davinci_export) @@ -17,7 +16,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): def process(self, context): - asset = avalon.Session["AVALON_ASSET"] + asset = legacy_io.Session["AVALON_ASSET"] subset = "workfile" project = resolve.get_current_project() fps = project.GetSetting("timelineFrameRate") diff --git a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py b/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py index ac66916b91..3a16b9c966 100644 --- a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py +++ b/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py @@ -1,13 +1,14 @@ #!/usr/bin/env python import os import sys -import openpype + +from openpype.pipeline import install_host def main(env): import openpype.hosts.resolve as bmdvr # Registers openpype's Global pyblish plugins - openpype.install() + install_host(bmdvr) bmdvr.setup(env) diff --git a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py index b0cef1838a..89ade9238b 100644 --- a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py +++ b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py @@ -1,8 +1,7 @@ import os import sys -import avalon.api as avalon -import openpype +from openpype.pipeline import install_host from openpype.api import 
Logger log = Logger().get_logger(__name__) @@ -10,13 +9,9 @@ log = Logger().get_logger(__name__) def main(env): import openpype.hosts.resolve as bmdvr - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) - - log.info(f"Avalon registered hosts: {avalon.registered_host()}") + install_host(bmdvr) bmdvr.launch_pype_menu() diff --git a/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py b/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py index 5430ad32df..8433bd9172 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py +++ b/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py @@ -1,9 +1,11 @@ #! python3 import os import sys -import avalon.api as avalon -import openpype + import opentimelineio as otio + +from openpype.pipeline import install_host + from openpype.hosts.resolve import TestGUI import openpype.hosts.resolve as bmdvr from openpype.hosts.resolve.otio import davinci_export as otio_export @@ -14,10 +16,8 @@ class ThisTestGUI(TestGUI): def __init__(self): super(ThisTestGUI, self).__init__() - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) def _open_dir_button_pressed(self, event): # selected_path = self.fu.RequestFile(os.path.expanduser("~")) diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py b/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py index afa311e0b8..477955d527 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py @@ -1,8 +1,8 @@ #! 
python3 import os import sys -import avalon.api as avalon -import openpype + +from openpype.pipeline import install_host from openpype.hosts.resolve import TestGUI import openpype.hosts.resolve as bmdvr import clique @@ -13,10 +13,8 @@ class ThisTestGUI(TestGUI): def __init__(self): super(ThisTestGUI, self).__init__() - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) def _open_dir_button_pressed(self, event): # selected_path = self.fu.RequestFile(os.path.expanduser("~")) diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py index cfdbe890e5..872d620162 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py @@ -1,6 +1,5 @@ #! python3 -import avalon.api as avalon -import openpype +from openpype.pipeline import install_host import openpype.hosts.resolve as bmdvr @@ -15,8 +14,7 @@ def file_processing(fpath): if __name__ == "__main__": path = "C:/CODE/__openpype_projects/jtest03dev/shots/sq01/mainsq01sh030/publish/plate/plateMain/v006/jt3d_mainsq01sh030_plateMain_v006.0996.exr" - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) - file_processing(path) \ No newline at end of file + file_processing(path) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py deleted file mode 100644 index 4ca1f72cc4..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py +++ /dev/null @@ -1,70 +0,0 @@ -import copy -import pyblish.api -from pprint import pformat - - -class CollectBatchInstances(pyblish.api.InstancePlugin): - """Collect all available instances for 
batch publish.""" - - label = "Collect Batch Instances" - order = pyblish.api.CollectorOrder + 0.489 - hosts = ["standalonepublisher"] - families = ["background_batch"] - - # presets - default_subset_task = { - "background_batch": "background" - } - subsets = { - "background_batch": { - "backgroundLayout": { - "task": "background", - "family": "backgroundLayout" - }, - "backgroundComp": { - "task": "background", - "family": "backgroundComp" - }, - "workfileBackground": { - "task": "background", - "family": "workfile" - } - } - } - unchecked_by_default = [] - - def process(self, instance): - context = instance.context - asset_name = instance.data["asset"] - family = instance.data["family"] - - default_task_name = self.default_subset_task.get(family) - for subset_name, subset_data in self.subsets[family].items(): - instance_name = f"{asset_name}_{subset_name}" - task_name = subset_data.get("task") or default_task_name - - # create new instance - new_instance = context.create_instance(instance_name) - - # add original instance data except name key - for key, value in instance.data.items(): - if key not in ["name"]: - # Make sure value is copy since value may be object which - # can be shared across all new created objects - new_instance.data[key] = copy.deepcopy(value) - - # add subset data from preset - new_instance.data.update(subset_data) - - new_instance.data["label"] = instance_name - new_instance.data["subset"] = subset_name - new_instance.data["task"] = task_name - - if subset_name in self.unchecked_by_default: - new_instance.data["publish"] = False - - self.log.info(f"Created new instance: {instance_name}") - self.log.debug(f"_ inst_data: {pformat(new_instance.data)}") - - # delete original instance - context.remove(instance) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py index 9f075d66cf..3e7fb19c00 100644 --- 
a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py @@ -2,8 +2,8 @@ import copy import json import pyblish.api -from avalon import io from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline import legacy_io class CollectBulkMovInstances(pyblish.api.InstancePlugin): @@ -26,7 +26,7 @@ class CollectBulkMovInstances(pyblish.api.InstancePlugin): context = instance.context asset_name = instance.data["asset"] - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -52,7 +52,7 @@ class CollectBulkMovInstances(pyblish.api.InstancePlugin): self.subset_name_variant, task_name, asset_doc, - io.Session["AVALON_PROJECT"] + legacy_io.Session["AVALON_PROJECT"] ) instance_name = f"{asset_name}_{subset_name}" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py index 6913e0836d..2bf3917e2f 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -19,7 +19,8 @@ import copy from pprint import pformat import clique import pyblish.api -from avalon import io + +from openpype.pipeline import legacy_io class CollectContextDataSAPublish(pyblish.api.ContextPlugin): @@ -37,7 +38,7 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): def process(self, context): # get json paths from os and load them - io.install() + legacy_io.install() # get json file context input_json_path = os.environ.get("SAPUBLISH_INPATH") @@ -247,7 +248,8 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): self.log.debug("collecting sequence: {}".format(collections)) instance.data["frameStart"] = int(component["frameStart"]) instance.data["frameEnd"] = int(component["frameEnd"]) - instance.data["fps"] = 
int(component["fps"]) + if component.get("fps"): + instance.data["fps"] = int(component["fps"]) ext = component["ext"] if ext.startswith("."): diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py index b2735f3428..77163651c4 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py @@ -1,8 +1,10 @@ -import pyblish.api -import re import os -from avalon import io +import re from copy import deepcopy +import pyblish.api + +from openpype.pipeline import legacy_io + class CollectHierarchyInstance(pyblish.api.ContextPlugin): """Collecting hierarchy context from `parents` and `hierarchy` data @@ -63,7 +65,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): hierarchy = list() visual_hierarchy = [instance.context.data["assetEntity"]] while True: - visual_parent = io.find_one( + visual_parent = legacy_io.find_one( {"_id": visual_hierarchy[-1]["data"]["visualParent"]} ) if visual_parent: @@ -129,7 +131,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): if self.shot_add_tasks: tasks_to_add = dict() - project_tasks = io.find_one({"type": "project"})["config"]["tasks"] + project_doc = legacy_io.find_one({"type": "project"}) + project_tasks = project_doc["config"]["tasks"] for task_name, task_data in self.shot_add_tasks.items(): _task_data = deepcopy(task_data) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index 0d629b1b44..9d94bfdc91 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -2,9 +2,10 @@ import os import re import collections import pyblish.api -from avalon import io from pprint import pformat 
+from openpype.pipeline import legacy_io + class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): """ @@ -119,7 +120,7 @@ class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): def _asset_docs_by_parent_id(self, instance): # Query all assets for project and store them by parent's id to list asset_docs_by_parent_id = collections.defaultdict(list) - for asset_doc in io.find({"type": "asset"}): + for asset_doc in legacy_io.find({"type": "asset"}): parent_id = asset_doc["data"]["visualParent"] asset_docs_by_parent_id[parent_id].append(asset_doc) return asset_docs_by_parent_id diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_for_compositing.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_for_compositing.py deleted file mode 100644 index f07499c15d..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_for_compositing.py +++ /dev/null @@ -1,242 +0,0 @@ -import os -import json -import copy -import openpype.api -from avalon import io - -PSDImage = None - - -class ExtractBGForComp(openpype.api.Extractor): - label = "Extract Background for Compositing" - families = ["backgroundComp"] - hosts = ["standalonepublisher"] - - new_instance_family = "background" - - # Presetable - allowed_group_names = [ - "OL", "BG", "MG", "FG", "SB", "UL", "SKY", "Field Guide", "Field_Guide", - "ANIM" - ] - - def process(self, instance): - # Check if python module `psd_tools` is installed - try: - global PSDImage - from psd_tools import PSDImage - except Exception: - raise AssertionError( - "BUG: Python module `psd-tools` is not installed!" 
- ) - - self.allowed_group_names = [ - name.lower() - for name in self.allowed_group_names - ] - - self.redo_global_plugins(instance) - - repres = instance.data.get("representations") - if not repres: - self.log.info("There are no representations on instance.") - return - - if not instance.data.get("transfers"): - instance.data["transfers"] = [] - - # Prepare staging dir - staging_dir = self.staging_dir(instance) - if not os.path.exists(staging_dir): - os.makedirs(staging_dir) - - for repre in tuple(repres): - # Skip all files without .psd extension - repre_ext = repre["ext"].lower() - if repre_ext.startswith("."): - repre_ext = repre_ext[1:] - - if repre_ext != "psd": - continue - - # Prepare publish dir for transfers - publish_dir = instance.data["publishDir"] - - # Prepare json filepath where extracted metadata are stored - json_filename = "{}.json".format(instance.name) - json_full_path = os.path.join(staging_dir, json_filename) - - self.log.debug(f"`staging_dir` is \"{staging_dir}\"") - - # Prepare new repre data - new_repre = { - "name": "json", - "ext": "json", - "files": json_filename, - "stagingDir": staging_dir - } - - # TODO add check of list - psd_filename = repre["files"] - psd_folder_path = repre["stagingDir"] - psd_filepath = os.path.join(psd_folder_path, psd_filename) - self.log.debug(f"psd_filepath: \"{psd_filepath}\"") - psd_object = PSDImage.open(psd_filepath) - - json_data, transfers = self.export_compositing_images( - psd_object, staging_dir, publish_dir - ) - self.log.info("Json file path: {}".format(json_full_path)) - with open(json_full_path, "w") as json_filestream: - json.dump(json_data, json_filestream, indent=4) - - instance.data["transfers"].extend(transfers) - instance.data["representations"].remove(repre) - instance.data["representations"].append(new_repre) - - def export_compositing_images(self, psd_object, output_dir, publish_dir): - json_data = { - "__schema_version__": 1, - "children": [] - } - transfers = [] - for main_idx, 
main_layer in enumerate(psd_object): - if ( - not main_layer.is_visible() - or main_layer.name.lower() not in self.allowed_group_names - or not main_layer.is_group - ): - continue - - export_layers = [] - layers_idx = 0 - for layer in main_layer: - # TODO this way may be added also layers next to "ADJ" - if layer.name.lower() == "adj": - for _layer in layer: - export_layers.append((layers_idx, _layer)) - layers_idx += 1 - - else: - export_layers.append((layers_idx, layer)) - layers_idx += 1 - - if not export_layers: - continue - - main_layer_data = { - "index": main_idx, - "name": main_layer.name, - "children": [] - } - - for layer_idx, layer in export_layers: - has_size = layer.width > 0 and layer.height > 0 - if not has_size: - self.log.debug(( - "Skipping layer \"{}\" because does " - "not have any content." - ).format(layer.name)) - continue - - main_layer_name = main_layer.name.replace(" ", "_") - layer_name = layer.name.replace(" ", "_") - - filename = "{:0>2}_{}_{:0>2}_{}.png".format( - main_idx + 1, main_layer_name, layer_idx + 1, layer_name - ) - layer_data = { - "index": layer_idx, - "name": layer.name, - "filename": filename - } - output_filepath = os.path.join(output_dir, filename) - dst_filepath = os.path.join(publish_dir, filename) - transfers.append((output_filepath, dst_filepath)) - - pil_object = layer.composite(viewport=psd_object.viewbox) - pil_object.save(output_filepath, "PNG") - - main_layer_data["children"].append(layer_data) - - if main_layer_data["children"]: - json_data["children"].append(main_layer_data) - - return json_data, transfers - - def redo_global_plugins(self, instance): - # TODO do this in collection phase - # Copy `families` and check if `family` is not in current families - families = instance.data.get("families") or list() - if families: - families = list(set(families)) - - if self.new_instance_family in families: - families.remove(self.new_instance_family) - - self.log.debug( - "Setting new instance families 
{}".format(str(families)) - ) - instance.data["families"] = families - - # Override instance data with new information - instance.data["family"] = self.new_instance_family - - subset_name = instance.data["anatomyData"]["subset"] - asset_doc = instance.data["assetEntity"] - latest_version = self.find_last_version(subset_name, asset_doc) - version_number = 1 - if latest_version is not None: - version_number += latest_version - - instance.data["latestVersion"] = latest_version - instance.data["version"] = version_number - - # Same data apply to anatomy data - instance.data["anatomyData"].update({ - "family": self.new_instance_family, - "version": version_number - }) - - # Redo publish and resources dir - anatomy = instance.context.data["anatomy"] - template_data = copy.deepcopy(instance.data["anatomyData"]) - template_data.update({ - "frame": "FRAME_TEMP", - "representation": "TEMP" - }) - anatomy_filled = anatomy.format(template_data) - if "folder" in anatomy.templates["publish"]: - publish_folder = anatomy_filled["publish"]["folder"] - else: - publish_folder = os.path.dirname(anatomy_filled["publish"]["path"]) - - publish_folder = os.path.normpath(publish_folder) - resources_folder = os.path.join(publish_folder, "resources") - - instance.data["publishDir"] = publish_folder - instance.data["resourcesDir"] = resources_folder - - self.log.debug("publishDir: \"{}\"".format(publish_folder)) - self.log.debug("resourcesDir: \"{}\"".format(resources_folder)) - - def find_last_version(self, subset_name, asset_doc): - subset_doc = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset_doc["_id"] - }) - - if subset_doc is None: - self.log.debug("Subset entity does not exist yet.") - else: - version_doc = io.find_one( - { - "type": "version", - "parent": subset_doc["_id"] - }, - sort=[("name", -1)] - ) - if version_doc: - return int(version_doc["name"]) - return None diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_main_groups.py 
b/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_main_groups.py deleted file mode 100644 index 2c92366ae9..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_main_groups.py +++ /dev/null @@ -1,246 +0,0 @@ -import os -import copy -import json -import openpype.api -import pyblish.api -from avalon import io - -PSDImage = None - - -class ExtractBGMainGroups(openpype.api.Extractor): - label = "Extract Background Layout" - order = pyblish.api.ExtractorOrder + 0.02 - families = ["backgroundLayout"] - hosts = ["standalonepublisher"] - - new_instance_family = "background" - - # Presetable - allowed_group_names = [ - "OL", "BG", "MG", "FG", "UL", "SB", "SKY", "Field Guide", "Field_Guide", - "ANIM" - ] - - def process(self, instance): - # Check if python module `psd_tools` is installed - try: - global PSDImage - from psd_tools import PSDImage - except Exception: - raise AssertionError( - "BUG: Python module `psd-tools` is not installed!" - ) - - self.allowed_group_names = [ - name.lower() - for name in self.allowed_group_names - ] - repres = instance.data.get("representations") - if not repres: - self.log.info("There are no representations on instance.") - return - - self.redo_global_plugins(instance) - - repres = instance.data.get("representations") - if not repres: - self.log.info("There are no representations on instance.") - return - - if not instance.data.get("transfers"): - instance.data["transfers"] = [] - - # Prepare staging dir - staging_dir = self.staging_dir(instance) - if not os.path.exists(staging_dir): - os.makedirs(staging_dir) - - # Prepare publish dir for transfers - publish_dir = instance.data["publishDir"] - - for repre in tuple(repres): - # Skip all files without .psd extension - repre_ext = repre["ext"].lower() - if repre_ext.startswith("."): - repre_ext = repre_ext[1:] - - if repre_ext != "psd": - continue - - # Prepare json filepath where extracted metadata are stored - json_filename = 
"{}.json".format(instance.name) - json_full_path = os.path.join(staging_dir, json_filename) - - self.log.debug(f"`staging_dir` is \"{staging_dir}\"") - - # Prepare new repre data - new_repre = { - "name": "json", - "ext": "json", - "files": json_filename, - "stagingDir": staging_dir - } - - # TODO add check of list - psd_filename = repre["files"] - psd_folder_path = repre["stagingDir"] - psd_filepath = os.path.join(psd_folder_path, psd_filename) - self.log.debug(f"psd_filepath: \"{psd_filepath}\"") - psd_object = PSDImage.open(psd_filepath) - - json_data, transfers = self.export_compositing_images( - psd_object, staging_dir, publish_dir - ) - self.log.info("Json file path: {}".format(json_full_path)) - with open(json_full_path, "w") as json_filestream: - json.dump(json_data, json_filestream, indent=4) - - instance.data["transfers"].extend(transfers) - instance.data["representations"].remove(repre) - instance.data["representations"].append(new_repre) - - def export_compositing_images(self, psd_object, output_dir, publish_dir): - json_data = { - "__schema_version__": 1, - "children": [] - } - output_ext = ".png" - - to_export = [] - for layer_idx, layer in enumerate(psd_object): - layer_name = layer.name.replace(" ", "_") - if ( - not layer.is_visible() - or layer_name.lower() not in self.allowed_group_names - ): - continue - - has_size = layer.width > 0 and layer.height > 0 - if not has_size: - self.log.debug(( - "Skipping layer \"{}\" because does not have any content." 
- ).format(layer.name)) - continue - - filebase = "{:0>2}_{}".format(layer_idx, layer_name) - if layer_name.lower() == "anim": - if not layer.is_group: - self.log.warning("ANIM layer is not a group layer.") - continue - - children = [] - for anim_idx, anim_layer in enumerate(layer): - anim_layer_name = anim_layer.name.replace(" ", "_") - filename = "{}_{:0>2}_{}{}".format( - filebase, anim_idx, anim_layer_name, output_ext - ) - children.append({ - "index": anim_idx, - "name": anim_layer.name, - "filename": filename - }) - to_export.append((anim_layer, filename)) - - json_data["children"].append({ - "index": layer_idx, - "name": layer.name, - "children": children - }) - continue - - filename = filebase + output_ext - json_data["children"].append({ - "index": layer_idx, - "name": layer.name, - "filename": filename - }) - to_export.append((layer, filename)) - - transfers = [] - for layer, filename in to_export: - output_filepath = os.path.join(output_dir, filename) - dst_filepath = os.path.join(publish_dir, filename) - transfers.append((output_filepath, dst_filepath)) - - pil_object = layer.composite(viewport=psd_object.viewbox) - pil_object.save(output_filepath, "PNG") - - return json_data, transfers - - def redo_global_plugins(self, instance): - # TODO do this in collection phase - # Copy `families` and check if `family` is not in current families - families = instance.data.get("families") or list() - if families: - families = list(set(families)) - - if self.new_instance_family in families: - families.remove(self.new_instance_family) - - self.log.debug( - "Setting new instance families {}".format(str(families)) - ) - instance.data["families"] = families - - # Override instance data with new information - instance.data["family"] = self.new_instance_family - - subset_name = instance.data["anatomyData"]["subset"] - asset_doc = instance.data["assetEntity"] - latest_version = self.find_last_version(subset_name, asset_doc) - version_number = 1 - if latest_version is not 
None: - version_number += latest_version - - instance.data["latestVersion"] = latest_version - instance.data["version"] = version_number - - # Same data apply to anatomy data - instance.data["anatomyData"].update({ - "family": self.new_instance_family, - "version": version_number - }) - - # Redo publish and resources dir - anatomy = instance.context.data["anatomy"] - template_data = copy.deepcopy(instance.data["anatomyData"]) - template_data.update({ - "frame": "FRAME_TEMP", - "representation": "TEMP" - }) - anatomy_filled = anatomy.format(template_data) - if "folder" in anatomy.templates["publish"]: - publish_folder = anatomy_filled["publish"]["folder"] - else: - publish_folder = os.path.dirname(anatomy_filled["publish"]["path"]) - - publish_folder = os.path.normpath(publish_folder) - resources_folder = os.path.join(publish_folder, "resources") - - instance.data["publishDir"] = publish_folder - instance.data["resourcesDir"] = resources_folder - - self.log.debug("publishDir: \"{}\"".format(publish_folder)) - self.log.debug("resourcesDir: \"{}\"".format(resources_folder)) - - def find_last_version(self, subset_name, asset_doc): - subset_doc = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset_doc["_id"] - }) - - if subset_doc is None: - self.log.debug("Subset entity does not exist yet.") - else: - version_doc = io.find_one( - { - "type": "version", - "parent": subset_doc["_id"] - }, - sort=[("name", -1)] - ) - if version_doc: - return int(version_doc["name"]) - return None diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_images_from_psd.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_images_from_psd.py deleted file mode 100644 index e3094b2e3f..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_images_from_psd.py +++ /dev/null @@ -1,170 +0,0 @@ -import os -import copy -import openpype.api -import pyblish.api -from avalon import io - -PSDImage = None - - -class 
ExtractImagesFromPSD(openpype.api.Extractor): - # PLUGIN is not currently enabled because was decided to use different - # approach - enabled = False - active = False - label = "Extract Images from PSD" - order = pyblish.api.ExtractorOrder + 0.02 - families = ["backgroundLayout"] - hosts = ["standalonepublisher"] - - new_instance_family = "image" - ignored_instance_data_keys = ("name", "label", "stagingDir", "version") - # Presetable - allowed_group_names = [ - "OL", "BG", "MG", "FG", "UL", "SKY", "Field Guide", "Field_Guide", - "ANIM" - ] - - def process(self, instance): - # Check if python module `psd_tools` is installed - try: - global PSDImage - from psd_tools import PSDImage - except Exception: - raise AssertionError( - "BUG: Python module `psd-tools` is not installed!" - ) - - self.allowed_group_names = [ - name.lower() - for name in self.allowed_group_names - ] - repres = instance.data.get("representations") - if not repres: - self.log.info("There are no representations on instance.") - return - - for repre in tuple(repres): - # Skip all files without .psd extension - repre_ext = repre["ext"].lower() - if repre_ext.startswith("."): - repre_ext = repre_ext[1:] - - if repre_ext != "psd": - continue - - # TODO add check of list of "files" value - psd_filename = repre["files"] - psd_folder_path = repre["stagingDir"] - psd_filepath = os.path.join(psd_folder_path, psd_filename) - self.log.debug(f"psd_filepath: \"{psd_filepath}\"") - psd_object = PSDImage.open(psd_filepath) - - self.create_new_instances(instance, psd_object) - - # Remove the instance from context - instance.context.remove(instance) - - def create_new_instances(self, instance, psd_object): - asset_doc = instance.data["assetEntity"] - for layer in psd_object: - if ( - not layer.is_visible() - or layer.name.lower() not in self.allowed_group_names - ): - continue - - has_size = layer.width > 0 and layer.height > 0 - if not has_size: - self.log.debug(( - "Skipping layer \"{}\" because does " - "not have 
any content." - ).format(layer.name)) - continue - - layer_name = layer.name.replace(" ", "_") - instance_name = subset_name = f"image{layer_name}" - self.log.info( - f"Creating new instance with name \"{instance_name}\"" - ) - new_instance = instance.context.create_instance(instance_name) - for key, value in instance.data.items(): - if key not in self.ignored_instance_data_keys: - new_instance.data[key] = copy.deepcopy(value) - - new_instance.data["label"] = " ".join( - (new_instance.data["asset"], instance_name) - ) - - # Find latest version - latest_version = self.find_last_version(subset_name, asset_doc) - version_number = 1 - if latest_version is not None: - version_number += latest_version - - self.log.info( - "Next version of instance \"{}\" will be {}".format( - instance_name, version_number - ) - ) - - # Set family and subset - new_instance.data["family"] = self.new_instance_family - new_instance.data["subset"] = subset_name - new_instance.data["version"] = version_number - new_instance.data["latestVersion"] = latest_version - - new_instance.data["anatomyData"].update({ - "subset": subset_name, - "family": self.new_instance_family, - "version": version_number - }) - - # Copy `families` and check if `family` is not in current families - families = new_instance.data.get("families") or list() - if families: - families = list(set(families)) - - if self.new_instance_family in families: - families.remove(self.new_instance_family) - new_instance.data["families"] = families - - # Prepare staging dir for new instance - staging_dir = self.staging_dir(new_instance) - - output_filename = "{}.png".format(layer_name) - output_filepath = os.path.join(staging_dir, output_filename) - pil_object = layer.composite(viewport=psd_object.viewbox) - pil_object.save(output_filepath, "PNG") - - new_repre = { - "name": "png", - "ext": "png", - "files": output_filename, - "stagingDir": staging_dir - } - self.log.debug( - "Creating new representation: {}".format(new_repre) - ) - 
new_instance.data["representations"] = [new_repre] - - def find_last_version(self, subset_name, asset_doc): - subset_doc = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset_doc["_id"] - }) - - if subset_doc is None: - self.log.debug("Subset entity does not exist yet.") - else: - version_doc = io.find_one( - { - "type": "version", - "parent": subset_doc["_id"] - }, - sort=[("name", -1)] - ) - if version_doc: - return int(version_doc["name"]) - return None diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py index 23f0b104c8..3ee2f70809 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py @@ -2,7 +2,11 @@ import os import tempfile import pyblish.api import openpype.api -import openpype.lib +from openpype.lib import ( + get_ffmpeg_tool_path, + get_ffprobe_streams, + path_to_subprocess_arg, +) class ExtractThumbnailSP(pyblish.api.InstancePlugin): @@ -34,85 +38,78 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin): if not thumbnail_repre: return + thumbnail_repre.pop("thumbnail") files = thumbnail_repre.get("files") if not files: return if isinstance(files, list): - files_len = len(files) - file = str(files[0]) + first_filename = str(files[0]) else: - files_len = 1 - file = files + first_filename = files staging_dir = None - is_jpeg = False - if file.endswith(".jpeg") or file.endswith(".jpg"): - is_jpeg = True - if is_jpeg and files_len == 1: - # skip if already is single jpeg file - return + # Convert to jpeg if not yet + full_input_path = os.path.join( + thumbnail_repre["stagingDir"], first_filename + ) + self.log.info("input {}".format(full_input_path)) + with tempfile.NamedTemporaryFile(suffix=".jpg") as tmp: + full_thumbnail_path = tmp.name - elif is_jpeg: - # use first frame as thumbnail if is sequence of jpegs - 
full_thumbnail_path = os.path.join( - thumbnail_repre["stagingDir"], file - ) - self.log.info( - "For thumbnail is used file: {}".format(full_thumbnail_path) - ) + self.log.info("output {}".format(full_thumbnail_path)) - else: - # Convert to jpeg if not yet - full_input_path = os.path.join(thumbnail_repre["stagingDir"], file) - self.log.info("input {}".format(full_input_path)) + instance.context.data["cleanupFullPaths"].append(full_thumbnail_path) - full_thumbnail_path = tempfile.mkstemp(suffix=".jpg")[1] - self.log.info("output {}".format(full_thumbnail_path)) + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_args = self.ffmpeg_args or {} - ffmpeg_args = self.ffmpeg_args or {} + jpeg_items = [ + path_to_subprocess_arg(ffmpeg_path), + # override file if already exists + "-y" + ] - jpeg_items = [ - "\"{}\"".format(ffmpeg_path), - # override file if already exists - "-y" - ] - - # add input filters from peresets - jpeg_items.extend(ffmpeg_args.get("input") or []) - # input file - jpeg_items.append("-i \"{}\"".format(full_input_path)) + # add input filters from peresets + jpeg_items.extend(ffmpeg_args.get("input") or []) + # input file + jpeg_items.extend([ + "-i", path_to_subprocess_arg(full_input_path), # extract only single file - jpeg_items.append("-frames:v 1") + "-frames:v", "1", # Add black background for transparent images - jpeg_items.append(( - "-filter_complex" - " \"color=black,format=rgb24[c]" + "-filter_complex", ( + "\"color=black,format=rgb24[c]" ";[c][0]scale2ref[c][i]" ";[c][i]overlay=format=auto:shortest=1,setsar=1\"" - )) + ), + ]) - jpeg_items.extend(ffmpeg_args.get("output") or []) + jpeg_items.extend(ffmpeg_args.get("output") or []) - # output file - jpeg_items.append("\"{}\"".format(full_thumbnail_path)) + # output file + jpeg_items.append(path_to_subprocess_arg(full_thumbnail_path)) - subprocess_jpeg = " ".join(jpeg_items) + subprocess_jpeg = " ".join(jpeg_items) - # run 
subprocess - self.log.debug("Executing: {}".format(subprocess_jpeg)) - openpype.api.run_subprocess( - subprocess_jpeg, shell=True, logger=self.log - ) + # run subprocess + self.log.debug("Executing: {}".format(subprocess_jpeg)) + openpype.api.run_subprocess( + subprocess_jpeg, shell=True, logger=self.log + ) # remove thumbnail key from origin repre - thumbnail_repre.pop("thumbnail") + streams = get_ffprobe_streams(full_thumbnail_path) + width = height = None + for stream in streams: + if "width" in stream and "height" in stream: + width = stream["width"] + height = stream["height"] + break - filename = os.path.basename(full_thumbnail_path) - staging_dir = staging_dir or os.path.dirname(full_thumbnail_path) + staging_dir, filename = os.path.split(full_thumbnail_path) # create new thumbnail representation representation = { @@ -120,12 +117,11 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin): 'ext': 'jpg', 'files': filename, "stagingDir": staging_dir, - "tags": ["thumbnail"], + "tags": ["thumbnail", "delete"], } - - # # add Delete tag when temp file was rendered - if not is_jpeg: - representation["tags"].append("delete") + if width and height: + representation["width"] = width + representation["height"] = height self.log.info(f"New representation {representation}") instance.data["representations"].append(representation) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py index 825092c81b..4c761c7a4c 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py @@ -1,7 +1,9 @@ import pyblish.api -from avalon import io -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + legacy_io, +) class ValidateTaskExistence(pyblish.api.ContextPlugin): @@ -18,7 +20,7 @@ class 
ValidateTaskExistence(pyblish.api.ContextPlugin): for instance in context: asset_names.add(instance.data["asset"]) - asset_docs = io.find( + asset_docs = legacy_io.find( { "type": "asset", "name": {"$in": list(asset_names)} diff --git a/openpype/hosts/testhost/api/__init__.py b/openpype/hosts/testhost/api/__init__.py index 7840b25892..a929a891aa 100644 --- a/openpype/hosts/testhost/api/__init__.py +++ b/openpype/hosts/testhost/api/__init__.py @@ -1,8 +1,8 @@ import os import logging import pyblish.api -import avalon.api -from openpype.pipeline import BaseCreator + +from openpype.pipeline import register_creator_plugin_path from .pipeline import ( ls, @@ -27,7 +27,7 @@ def install(): log.info("OpenPype - Installing TestHost integration") pyblish.api.register_host("testhost") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(BaseCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) __all__ = ( diff --git a/openpype/hosts/testhost/api/pipeline.py b/openpype/hosts/testhost/api/pipeline.py index 1f5d680705..285fe8f8d6 100644 --- a/openpype/hosts/testhost/api/pipeline.py +++ b/openpype/hosts/testhost/api/pipeline.py @@ -1,5 +1,6 @@ import os import json +from openpype.pipeline import legacy_io class HostContext: @@ -16,9 +17,7 @@ class HostContext: if not asset_name: return project_name - from avalon import io - - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": asset_name}, {"data.parents": 1} ) diff --git a/openpype/hosts/testhost/plugins/create/auto_creator.py b/openpype/hosts/testhost/plugins/create/auto_creator.py index 4c22eea9dd..06b95375b1 100644 --- a/openpype/hosts/testhost/plugins/create/auto_creator.py +++ b/openpype/hosts/testhost/plugins/create/auto_creator.py @@ -1,7 +1,7 @@ -from avalon import io from openpype.lib import NumberDef from openpype.hosts.testhost.api import pipeline from openpype.pipeline import ( + legacy_io, AutoCreator, CreatedInstance, ) @@ -38,13 +38,16 @@ 
class MyAutoCreator(AutoCreator): break variant = "Main" - project_name = io.Session["AVALON_PROJECT"] - asset_name = io.Session["AVALON_ASSET"] - task_name = io.Session["AVALON_TASK"] - host_name = io.Session["AVALON_APP"] + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] if existing_instance is None: - asset_doc = io.find_one({"type": "asset", "name": asset_name}) + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) subset_name = self.get_subset_name( variant, task_name, asset_doc, project_name, host_name ) @@ -66,7 +69,10 @@ class MyAutoCreator(AutoCreator): existing_instance["asset"] != asset_name or existing_instance["task"] != task_name ): - asset_doc = io.find_one({"type": "asset", "name": asset_name}) + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) subset_name = self.get_subset_name( variant, task_name, asset_doc, project_name, host_name ) diff --git a/openpype/hosts/testhost/run_publish.py b/openpype/hosts/testhost/run_publish.py index 44860a30e4..c7ad63aafd 100644 --- a/openpype/hosts/testhost/run_publish.py +++ b/openpype/hosts/testhost/run_publish.py @@ -22,13 +22,11 @@ openpype_dir = multi_dirname(current_file, 4) os.environ["OPENPYPE_MONGO"] = mongo_url os.environ["OPENPYPE_ROOT"] = openpype_dir -os.environ["AVALON_MONGO"] = mongo_url os.environ["AVALON_PROJECT"] = project_name os.environ["AVALON_ASSET"] = asset_name os.environ["AVALON_TASK"] = task_name os.environ["AVALON_APP"] = host_name os.environ["OPENPYPE_DATABASE_NAME"] = "openpype" -os.environ["AVALON_CONFIG"] = "openpype" os.environ["AVALON_TIMEOUT"] = "1000" os.environ["AVALON_DB"] = "avalon" os.environ["FTRACK_SERVER"] = ftrack_url @@ -48,8 +46,8 @@ from openpype.tools.publisher.window import PublisherWindow def main(): """Main function for testing purposes.""" - import avalon.api import 
pyblish.api + from openpype.pipeline import install_host from openpype.modules import ModulesManager from openpype.hosts.testhost import api as testhost @@ -57,7 +55,7 @@ def main(): for plugin_path in manager.collect_plugin_paths()["publish"]: pyblish.api.register_plugin_path(plugin_path) - avalon.api.install(testhost) + install_host(testhost) QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling) app = QtWidgets.QApplication([]) diff --git a/openpype/hosts/traypublisher/api/pipeline.py b/openpype/hosts/traypublisher/api/pipeline.py index 24175883d9..954a0bae47 100644 --- a/openpype/hosts/traypublisher/api/pipeline.py +++ b/openpype/hosts/traypublisher/api/pipeline.py @@ -3,11 +3,12 @@ import json import tempfile import atexit -from avalon import io -import avalon.api import pyblish.api -from openpype.pipeline import register_creator_plugin_path +from openpype.pipeline import ( + register_creator_plugin_path, + legacy_io, +) ROOT_DIR = os.path.dirname(os.path.dirname( os.path.abspath(__file__) @@ -175,6 +176,6 @@ def install(): def set_project_name(project_name): # TODO Deregister project specific plugins and register new project plugins os.environ["AVALON_PROJECT"] = project_name - avalon.api.Session["AVALON_PROJECT"] = project_name - io.install() + legacy_io.Session["AVALON_PROJECT"] = project_name + legacy_io.install() HostContext.set_project_name(project_name) diff --git a/openpype/hosts/traypublisher/api/plugin.py b/openpype/hosts/traypublisher/api/plugin.py new file mode 100644 index 0000000000..202664cfc6 --- /dev/null +++ b/openpype/hosts/traypublisher/api/plugin.py @@ -0,0 +1,97 @@ +from openpype.pipeline import ( + Creator, + CreatedInstance +) +from openpype.lib import FileDef + +from .pipeline import ( + list_instances, + update_instances, + remove_instances, + HostContext, +) + + +class TrayPublishCreator(Creator): + create_allow_context_change = True + host_name = "traypublisher" + + def collect_instances(self): + for instance_data in 
list_instances(): + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + instance = CreatedInstance.from_existing( + instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + update_instances(update_list) + + def remove_instances(self, instances): + remove_instances(instances) + for instance in instances: + self._remove_instance_from_context(instance) + + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attrobites + return self.get_instance_attr_defs() + + +class SettingsCreator(TrayPublishCreator): + create_allow_context_change = True + + extensions = [] + + def collect_instances(self): + for instance_data in list_instances(): + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + instance = CreatedInstance.from_existing( + instance_data, self + ) + self._add_instance_to_context(instance) + + def create(self, subset_name, data, pre_create_data): + # Pass precreate data to creator attributes + data["creator_attributes"] = pre_create_data + data["settings_creator"] = True + # Create new instance + new_instance = CreatedInstance(self.family, subset_name, data, self) + # Host implementation of storing metadata about instance + HostContext.add_instance(new_instance.data_to_store()) + # Add instance to current context + self._add_instance_to_context(new_instance) + + def get_instance_attr_defs(self): + return [ + FileDef( + "filepath", + folders=False, + extensions=self.extensions, + allow_sequences=self.allow_sequences, + label="Filepath", + ) + ] + + @classmethod + def from_settings(cls, item_data): + identifier = item_data["identifier"] + family = item_data["family"] + if not identifier: + identifier = "settings_{}".format(family) + return type( + "{}{}".format(cls.__name__, identifier), + (cls, ), + { + "family": family, + "identifier": identifier, + "label": item_data["label"].strip(), + "icon": 
item_data["icon"], + "description": item_data["description"], + "detailed_description": item_data["detailed_description"], + "extensions": item_data["extensions"], + "allow_sequences": item_data["allow_sequences"], + "default_variants": item_data["default_variants"] + } + ) diff --git a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py new file mode 100644 index 0000000000..baca274ea6 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py @@ -0,0 +1,20 @@ +import os + +from openpype.api import get_project_settings + + +def initialize(): + from openpype.hosts.traypublisher.api.plugin import SettingsCreator + + project_name = os.environ["AVALON_PROJECT"] + project_settings = get_project_settings(project_name) + + simple_creators = project_settings["traypublisher"]["simple_creators"] + + global_variables = globals() + for item in simple_creators: + dynamic_plugin = SettingsCreator.from_settings(item) + global_variables[dynamic_plugin.__name__] = dynamic_plugin + + +initialize() diff --git a/openpype/hosts/traypublisher/plugins/create/create_workfile.py b/openpype/hosts/traypublisher/plugins/create/create_workfile.py deleted file mode 100644 index 5e0af350f0..0000000000 --- a/openpype/hosts/traypublisher/plugins/create/create_workfile.py +++ /dev/null @@ -1,97 +0,0 @@ -from openpype.hosts.traypublisher.api import pipeline -from openpype.lib import FileDef -from openpype.pipeline import ( - Creator, - CreatedInstance -) - - -class WorkfileCreator(Creator): - identifier = "workfile" - label = "Workfile" - family = "workfile" - description = "Publish backup of workfile" - - create_allow_context_change = True - - extensions = [ - # Maya - ".ma", ".mb", - # Nuke - ".nk", - # Hiero - ".hrox", - # Houdini - ".hip", ".hiplc", ".hipnc", - # Blender - ".blend", - # Celaction - ".scn", - # TVPaint - ".tvpp", - # Fusion - ".comp", - # Harmony - ".zip", - # 
Premiere - ".prproj", - # Resolve - ".drp", - # Photoshop - ".psd", ".psb", - # Aftereffects - ".aep" - ] - - def get_icon(self): - return "fa.file" - - def collect_instances(self): - for instance_data in pipeline.list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - pipeline.update_instances(update_list) - - def remove_instances(self, instances): - pipeline.remove_instances(instances) - for instance in instances: - self._remove_instance_from_context(instance) - - def create(self, subset_name, data, pre_create_data): - # Pass precreate data to creator attributes - data["creator_attributes"] = pre_create_data - # Create new instance - new_instance = CreatedInstance(self.family, subset_name, data, self) - # Host implementation of storing metadata about instance - pipeline.HostContext.add_instance(new_instance.data_to_store()) - # Add instance to current context - self._add_instance_to_context(new_instance) - - def get_default_variants(self): - return [ - "Main" - ] - - def get_instance_attr_defs(self): - output = [ - FileDef( - "filepath", - folders=False, - extensions=self.extensions, - label="Filepath" - ) - ] - return output - - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attrobites - return self.get_instance_attr_defs() - - def get_detail_description(self): - return """# Publish workfile backup""" diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py b/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py new file mode 100644 index 0000000000..965e251527 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_review_family.py @@ -0,0 +1,31 @@ +import pyblish.api +from openpype.lib import BoolDef +from openpype.pipeline import OpenPypePyblishPluginMixin + + 
+class CollectReviewFamily( + pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin +): + """Add review family.""" + + label = "Collect Review Family" + order = pyblish.api.CollectorOrder - 0.49 + + hosts = ["traypublisher"] + families = [ + "image", + "render", + "plate", + "review" + ] + + def process(self, instance): + values = self.get_attr_values_from_data(instance.data) + if values.get("add_review_family"): + instance.data["families"].append("review") + + @classmethod + def get_attribute_defs(cls): + return [ + BoolDef("add_review_family", label="Review", default=True) + ] diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py new file mode 100644 index 0000000000..b2be43c701 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py @@ -0,0 +1,50 @@ +import os +import pyblish.api + + +class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): + """Collect data for instances created by settings creators.""" + + label = "Collect Settings Simple Instances" + order = pyblish.api.CollectorOrder - 0.49 + + hosts = ["traypublisher"] + + def process(self, instance): + if not instance.data.get("settings_creator"): + return + + if "families" not in instance.data: + instance.data["families"] = [] + + if "representations" not in instance.data: + instance.data["representations"] = [] + repres = instance.data["representations"] + + creator_attributes = instance.data["creator_attributes"] + filepath_item = creator_attributes["filepath"] + self.log.info(filepath_item) + filepaths = [ + os.path.join(filepath_item["directory"], filename) + for filename in filepath_item["filenames"] + ] + + instance.data["sourceFilepaths"] = filepaths + instance.data["stagingDir"] = filepath_item["directory"] + + filenames = filepath_item["filenames"] + _, ext = os.path.splitext(filenames[0]) + ext = ext[1:] + if len(filenames) == 1: + filenames = 
filenames[0] + + repres.append({ + "ext": ext, + "name": ext, + "stagingDir": filepath_item["directory"], + "files": filenames + }) + + self.log.debug("Created Simple Settings instance {}".format( + instance.data + )) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_workfile.py b/openpype/hosts/traypublisher/plugins/publish/collect_workfile.py deleted file mode 100644 index d48bace047..0000000000 --- a/openpype/hosts/traypublisher/plugins/publish/collect_workfile.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import pyblish.api - - -class CollectWorkfile(pyblish.api.InstancePlugin): - """Collect representation of workfile instances.""" - - label = "Collect Workfile" - order = pyblish.api.CollectorOrder - 0.49 - families = ["workfile"] - hosts = ["traypublisher"] - - def process(self, instance): - if "representations" not in instance.data: - instance.data["representations"] = [] - repres = instance.data["representations"] - - creator_attributes = instance.data["creator_attributes"] - filepath = creator_attributes["filepath"] - instance.data["sourceFilepath"] = filepath - - staging_dir = os.path.dirname(filepath) - filename = os.path.basename(filepath) - ext = os.path.splitext(filename)[-1] - - repres.append({ - "ext": ext, - "name": ext, - "stagingDir": staging_dir, - "files": filename - }) diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py new file mode 100644 index 0000000000..c7302b1005 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py @@ -0,0 +1,47 @@ +import os +import pyblish.api +from openpype.pipeline import PublishValidationError + + +class ValidateWorkfilePath(pyblish.api.InstancePlugin): + """Validate existence of workfile instance existence.""" + + label = "Validate Workfile" + order = pyblish.api.ValidatorOrder - 0.49 + + hosts = ["traypublisher"] + + def process(self, instance): + if "sourceFilepaths" 
not in instance.data: + self.log.info(( + "Can't validate source filepaths existence." + " Instance does not have collected 'sourceFilepaths'" + )) + return + + filepaths = instance.data.get("sourceFilepaths") + + not_found_files = [ + filepath + for filepath in filepaths + if not os.path.exists(filepath) + ] + if not_found_files: + joined_paths = "\n".join([ + "- {}".format(filepath) + for filepath in not_found_files + ]) + raise PublishValidationError( + ( + "Filepath of '{}' instance \"{}\" does not exist:\n{}" + ).format( + instance.data["family"], + instance.data["name"], + joined_paths + ), + "File not found", + ( + "## Files were not found\nFiles\n{}" + "\n\nCheck if the path is still available." + ).format(joined_paths) + ) diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py b/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py deleted file mode 100644 index 7501051669..0000000000 --- a/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -import pyblish.api -from openpype.pipeline import PublishValidationError - - -class ValidateWorkfilePath(pyblish.api.InstancePlugin): - """Validate existence of workfile instance existence.""" - - label = "Validate Workfile" - order = pyblish.api.ValidatorOrder - 0.49 - families = ["workfile"] - hosts = ["traypublisher"] - - def process(self, instance): - filepath = instance.data["sourceFilepath"] - if not filepath: - raise PublishValidationError( - ( - "Filepath of 'workfile' instance \"{}\" is not set" - ).format(instance.data["name"]), - "File not filled", - "## Missing file\nYou are supposed to fill the path." - ) - - if not os.path.exists(filepath): - raise PublishValidationError( - ( - "Filepath of 'workfile' instance \"{}\" does not exist: {}" - ).format(instance.data["name"], filepath), - "File not found", - ( - "## File was not found\nFile \"{}\" was not found." - " Check if the path is still available." 
- ).format(filepath) - ) diff --git a/openpype/hosts/tvpaint/api/launch_script.py b/openpype/hosts/tvpaint/api/launch_script.py index e66bf61df6..0b25027fc6 100644 --- a/openpype/hosts/tvpaint/api/launch_script.py +++ b/openpype/hosts/tvpaint/api/launch_script.py @@ -8,8 +8,8 @@ import logging from Qt import QtWidgets, QtCore, QtGui -from avalon import api from openpype import style +from openpype.pipeline import install_host from openpype.hosts.tvpaint.api.communication_server import ( CommunicationWrapper ) @@ -31,7 +31,7 @@ def main(launch_args): qt_app = QtWidgets.QApplication([]) # Execute pipeline installation - api.install(tvpaint_host) + install_host(tvpaint_host) # Create Communicator object and trigger launch # - this must be done before anything is processed diff --git a/openpype/hosts/tvpaint/api/lib.py b/openpype/hosts/tvpaint/api/lib.py index 9e6404e72f..a341f48859 100644 --- a/openpype/hosts/tvpaint/api/lib.py +++ b/openpype/hosts/tvpaint/api/lib.py @@ -2,8 +2,6 @@ import os import logging import tempfile -import avalon.io - from . 
import CommunicationWrapper log = logging.getLogger(__name__) @@ -167,12 +165,12 @@ def parse_group_data(data): if not group_raw: continue - parts = group_raw.split(" ") + parts = group_raw.split("|") # Check for length and concatenate 2 last items until length match # - this happens if name contain spaces while len(parts) > 6: last_item = parts.pop(-1) - parts[-1] = " ".join([parts[-1], last_item]) + parts[-1] = "|".join([parts[-1], last_item]) clip_id, group_id, red, green, blue, name = parts group = { @@ -203,11 +201,16 @@ def get_groups_data(communicator=None): george_script_lines = ( # Variable containing full path to output file "output_path = \"{}\"".format(output_filepath), - "loop = 1", - "FOR idx = 1 TO 12", + "empty = 0", + # Loop over 100 groups + "FOR idx = 1 TO 100", + # Receive information about groups "tv_layercolor \"getcolor\" 0 idx", - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' result", - "END" + "PARSE result clip_id group_index c_red c_green c_blue group_name", + # Create and add line to output file + "line = clip_id'|'group_index'|'c_red'|'c_green'|'c_blue'|'group_name", + "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line", + "END", ) george_script = "\n".join(george_script_lines) execute_george_through_file(george_script, communicator) diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index cafdf0701d..f473f51457 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -7,14 +7,12 @@ import logging import requests import pyblish.api -import avalon.api - -from avalon import io from openpype.hosts import tvpaint from openpype.api import get_current_project_settings from openpype.lib import register_event_callback from openpype.pipeline import ( + legacy_io, register_loader_plugin_path, register_creator_plugin_path, deregister_loader_plugin_path, @@ -67,16 +65,13 @@ instances=2 def install(): - """Install Maya-specific 
functionality of avalon-core. + """Install TVPaint-specific functionality.""" - This function is called automatically on calling `api.install(maya)`. - - """ log.info("OpenPype - Installing TVPaint integration") - io.install() + legacy_io.install() # Create workdir folder if does not exist yet - workdir = io.Session["AVALON_WORKDIR"] + workdir = legacy_io.Session["AVALON_WORKDIR"] if not os.path.exists(workdir): os.makedirs(workdir) @@ -96,11 +91,11 @@ def install(): def uninstall(): - """Uninstall TVPaint-specific functionality of avalon-core. - - This function is called automatically on calling `api.uninstall()`. + """Uninstall TVPaint-specific functionality. + This function is called automatically on calling `uninstall_host()`. """ + log.info("OpenPype - Uninstalling TVPaint integration") pyblish.api.deregister_host("tvpaint") pyblish.api.deregister_plugin_path(PUBLISH_PATH) @@ -449,12 +444,12 @@ def set_context_settings(asset_doc=None): """ if asset_doc is None: # Use current session asset if not passed - asset_doc = avalon.io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", - "name": avalon.io.Session["AVALON_ASSET"] + "name": legacy_io.Session["AVALON_ASSET"] }) - project_doc = avalon.io.find_one({"type": "project"}) + project_doc = legacy_io.find_one({"type": "project"}) framerate = asset_doc["data"].get("fps") if framerate is None: diff --git a/openpype/hosts/tvpaint/api/workio.py b/openpype/hosts/tvpaint/api/workio.py index 88bdd7117e..1a5ad00ca8 100644 --- a/openpype/hosts/tvpaint/api/workio.py +++ b/openpype/hosts/tvpaint/api/workio.py @@ -3,8 +3,10 @@ has_unsaved_changes """ -from avalon import api -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS +from openpype.pipeline import ( + HOST_WORKFILE_EXTENSIONS, + legacy_io, +) from .lib import ( execute_george, execute_george_through_file @@ -24,9 +26,9 @@ def save_file(filepath): """Save the open scene file.""" # Store context to workfile before save context = { - "project": 
api.Session["AVALON_PROJECT"], - "asset": api.Session["AVALON_ASSET"], - "task": api.Session["AVALON_TASK"] + "project": legacy_io.Session["AVALON_PROJECT"], + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] } save_current_workfile_context(context) diff --git a/openpype/hosts/tvpaint/hooks/pre_launch_args.py b/openpype/hosts/tvpaint/hooks/pre_launch_args.py index 2a8f49d5b0..c31403437a 100644 --- a/openpype/hosts/tvpaint/hooks/pre_launch_args.py +++ b/openpype/hosts/tvpaint/hooks/pre_launch_args.py @@ -1,14 +1,8 @@ -import os -import shutil - -from openpype.hosts import tvpaint from openpype.lib import ( PreLaunchHook, get_openpype_execute_args ) -import avalon - class TvpaintPrelaunchHook(PreLaunchHook): """Launch arguments preparation. diff --git a/openpype/hosts/tvpaint/lib.py b/openpype/hosts/tvpaint/lib.py index 715ebb4a9d..c67ab1e4fb 100644 --- a/openpype/hosts/tvpaint/lib.py +++ b/openpype/hosts/tvpaint/lib.py @@ -573,7 +573,7 @@ def composite_rendered_layers( layer_ids_by_position[layer_position] = layer["layer_id"] # Sort layer positions - sorted_positions = tuple(sorted(layer_ids_by_position.keys())) + sorted_positions = tuple(reversed(sorted(layer_ids_by_position.keys()))) # Prepare variable where filepaths without any rendered content # - transparent will be created transparent_filepaths = set() diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py index c1af9632b1..3b5bd47189 100644 --- a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py +++ b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py @@ -24,7 +24,9 @@ class CreateRenderlayer(plugin.Creator): " {clip_id} {group_id} {r} {g} {b} \"{name}\"" ) - dynamic_subset_keys = ["render_pass", "render_layer", "group"] + dynamic_subset_keys = [ + "renderpass", "renderlayer", "render_pass", "render_layer", "group" + ] @classmethod def get_dynamic_data( @@ -34,12 +36,17 
@@ class CreateRenderlayer(plugin.Creator): variant, task_name, asset_id, project_name, host_name ) # Use render pass name from creator's plugin - dynamic_data["render_pass"] = cls.render_pass + dynamic_data["renderpass"] = cls.render_pass # Add variant to render layer - dynamic_data["render_layer"] = variant + dynamic_data["renderlayer"] = variant # Change family for subset name fill dynamic_data["family"] = "render" + # TODO remove - Backwards compatibility for old subset name templates + # - added 2022/04/28 + dynamic_data["render_pass"] = dynamic_data["renderpass"] + dynamic_data["render_layer"] = dynamic_data["renderlayer"] + return dynamic_data @classmethod diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py index a7f717ccec..26fa8ac51a 100644 --- a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py +++ b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py @@ -20,7 +20,9 @@ class CreateRenderPass(plugin.Creator): icon = "cube" defaults = ["Main"] - dynamic_subset_keys = ["render_pass", "render_layer"] + dynamic_subset_keys = [ + "renderpass", "renderlayer", "render_pass", "render_layer" + ] @classmethod def get_dynamic_data( @@ -29,9 +31,13 @@ class CreateRenderPass(plugin.Creator): dynamic_data = super(CreateRenderPass, cls).get_dynamic_data( variant, task_name, asset_id, project_name, host_name ) - dynamic_data["render_pass"] = variant + dynamic_data["renderpass"] = variant dynamic_data["family"] = "render" + # TODO remove - Backwards compatibility for old subset name templates + # - added 2022/04/28 + dynamic_data["render_pass"] = dynamic_data["renderpass"] + return dynamic_data @classmethod @@ -115,6 +121,7 @@ class CreateRenderPass(plugin.Creator): else: render_layer = beauty_instance["variant"] + subset_name_fill_data["renderlayer"] = render_layer subset_name_fill_data["render_layer"] = render_layer # Format dynamic keys in subset name @@ -129,7 +136,7 @@ 
class CreateRenderPass(plugin.Creator): self.data["group_id"] = group_id self.data["pass"] = variant - self.data["render_layer"] = render_layer + self.data["renderlayer"] = render_layer # Collect selected layer ids to be stored into instance layer_names = [layer["name"] for layer in selected_layers] diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py index d224cfc390..0eab083c22 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_workfile.py +++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py @@ -1,12 +1,15 @@ import os -from avalon import api, io from openpype.lib import ( StringTemplate, get_workfile_template_key_from_context, get_workdir_data, get_last_workfile_with_version, ) +from openpype.pipeline import ( + registered_host, + legacy_io, +) from openpype.api import Anatomy from openpype.hosts.tvpaint.api import lib, pipeline, plugin @@ -22,7 +25,7 @@ class LoadWorkfile(plugin.Loader): def load(self, context, name, namespace, options): # Load context of current workfile as first thing # - which context and extension has - host = api.registered_host() + host = registered_host() current_file = host.current_file() context = pipeline.get_current_workfile_context() @@ -45,13 +48,13 @@ class LoadWorkfile(plugin.Loader): task_name = context.get("task") # Far cases when there is workfile without context if not asset_name: - asset_name = io.Session["AVALON_ASSET"] - task_name = io.Session["AVALON_TASK"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] - project_doc = io.find_one({ + project_doc = legacy_io.find_one({ "type": "project" }) - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -62,7 +65,7 @@ class LoadWorkfile(plugin.Loader): task_name, host_name, project_name=project_name, - dbcon=io + dbcon=legacy_io ) anatomy = Anatomy(project_name) diff --git 
a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py index 5e8d13592c..782907b65d 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py @@ -1,10 +1,9 @@ -import os import json import copy import pyblish.api -from avalon import io from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline import legacy_io class CollectInstances(pyblish.api.ContextPlugin): @@ -46,6 +45,21 @@ class CollectInstances(pyblish.api.ContextPlugin): for instance_data in filtered_instance_data: instance_data["fps"] = context.data["sceneFps"] + # Conversion from older instances + # - change 'render_layer' to 'renderlayer' + render_layer = instance_data.get("instance_data") + if not render_layer: + # Render Layer has only variant + if instance_data["family"] == "renderLayer": + render_layer = instance_data.get("variant") + + # Backwards compatibility for renderPasses + elif "render_layer" in instance_data: + render_layer = instance_data["render_layer"] + + if render_layer: + instance_data["renderlayer"] = render_layer + # Store workfile instance data to instance data instance_data["originData"] = copy.deepcopy(instance_data) # Global instance data modifications @@ -82,7 +96,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # - not sure if it's good idea to require asset id in # get_subset_name? 
asset_name = context.data["workfile_context"]["asset"] - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -93,7 +107,7 @@ class CollectInstances(pyblish.api.ContextPlugin): host_name = context.data["hostName"] # Use empty variant value variant = "" - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] new_subset_name = get_subset_name_with_asset_doc( family, variant, @@ -157,7 +171,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # Change subset name # Final family of an instance will be `render` new_family = "render" - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] new_subset_name = "{}{}_{}_Beauty".format( new_family, task_name.capitalize(), name ) @@ -192,7 +206,7 @@ class CollectInstances(pyblish.api.ContextPlugin): "Creating render pass instance. \"{}\"".format(pass_name) ) # Change label - render_layer = instance_data["render_layer"] + render_layer = instance_data["renderlayer"] # Backwards compatibility # - subset names were not stored as final subset names during creation @@ -202,7 +216,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # Final family of an instance will be `render` new_family = "render" old_subset_name = instance_data["subset"] - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] new_subset_name = "{}{}_{}_{}".format( new_family, task_name.capitalize(), render_layer, pass_name ) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py index 0af9a9a400..2b8dbdc5b4 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py @@ -1,9 +1,9 @@ import json import copy import pyblish.api -from avalon import io from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline import legacy_io class 
CollectRenderScene(pyblish.api.ContextPlugin): @@ -57,7 +57,7 @@ class CollectRenderScene(pyblish.api.ContextPlugin): # get_subset_name? workfile_context = context.data["workfile_context"] asset_name = workfile_context["asset"] - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -69,9 +69,13 @@ class CollectRenderScene(pyblish.api.ContextPlugin): # Variant is using render pass name variant = self.render_layer dynamic_data = { - "render_layer": self.render_layer, - "render_pass": self.render_pass + "renderlayer": self.render_layer, + "renderpass": self.render_pass, } + # TODO remove - Backwards compatibility for old subset name templates + # - added 2022/04/28 + dynamic_data["render_layer"] = dynamic_data["renderlayer"] + dynamic_data["render_pass"] = dynamic_data["renderpass"] task_name = workfile_context["task"] subset_name = get_subset_name_with_asset_doc( @@ -100,7 +104,9 @@ class CollectRenderScene(pyblish.api.ContextPlugin): "representations": [], "layers": copy.deepcopy(context.data["layersData"]), "asset": asset_name, - "task": task_name + "task": task_name, + # Add render layer to instance data + "renderlayer": self.render_layer } instance = context.create_instance(**instance_data) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py index 89348037d3..70d92f82e9 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py @@ -1,9 +1,9 @@ import os import json import pyblish.api -from avalon import io from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline import legacy_io class CollectWorkfile(pyblish.api.ContextPlugin): @@ -28,7 +28,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): # get_subset_name? 
family = "workfile" asset_name = context.data["workfile_context"]["asset"] - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -39,7 +39,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): host_name = os.environ["AVALON_APP"] # Use empty variant value variant = "" - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] subset_name = get_subset_name_with_asset_doc( family, variant, diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py index f5c86c613b..c59ef82f85 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py @@ -3,7 +3,8 @@ import json import tempfile import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io from openpype.hosts.tvpaint.api import pipeline, lib @@ -49,9 +50,9 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect and store current context to have reference current_context = { - "project": avalon.api.Session["AVALON_PROJECT"], - "asset": avalon.api.Session["AVALON_ASSET"], - "task": avalon.api.Session["AVALON_TASK"] + "project": legacy_io.Session["AVALON_PROJECT"], + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] } context.data["previous_context"] = current_context self.log.debug("Current context is: {}".format(current_context)) @@ -69,7 +70,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): ("AVALON_TASK", "task") ) for env_key, key in key_map: - avalon.api.Session[env_key] = workfile_context[key] + legacy_io.Session[env_key] = workfile_context[key] os.environ[env_key] = workfile_context[key] self.log.info("Context changed to: {}".format(workfile_context)) diff --git a/openpype/hosts/tvpaint/worker/init_file.tvpp b/openpype/hosts/tvpaint/worker/init_file.tvpp new file mode 100644 index 
0000000000..572d278fdb Binary files /dev/null and b/openpype/hosts/tvpaint/worker/init_file.tvpp differ diff --git a/openpype/hosts/tvpaint/worker/worker.py b/openpype/hosts/tvpaint/worker/worker.py index cfd40bc7ba..9295c8afb4 100644 --- a/openpype/hosts/tvpaint/worker/worker.py +++ b/openpype/hosts/tvpaint/worker/worker.py @@ -1,5 +1,8 @@ +import os import signal import time +import tempfile +import shutil import asyncio from openpype.hosts.tvpaint.api.communication_server import ( @@ -36,8 +39,28 @@ class TVPaintWorkerCommunicator(BaseCommunicator): super()._start_webserver() + def _open_init_file(self): + """Open init TVPaint file. + + File triggers dialog missing path to audio file which must be closed + once and is ignored for rest of running process. + """ + current_dir = os.path.dirname(os.path.abspath(__file__)) + init_filepath = os.path.join(current_dir, "init_file.tvpp") + with tempfile.NamedTemporaryFile( + mode="w", prefix="a_tvp_", suffix=".tvpp" + ) as tmp_file: + tmp_filepath = tmp_file.name.replace("\\", "/") + + shutil.copy(init_filepath, tmp_filepath) + george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(tmp_filepath) + self.execute_george_through_file(george_script) + self.execute_george("tv_projectclose") + os.remove(tmp_filepath) + def _on_client_connect(self, *args, **kwargs): super()._on_client_connect(*args, **kwargs) + self._open_init_file() # Register as "ready to work" worker self._worker_connection.register_as_worker() diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py index f2c264e5a4..bbca7916d3 100644 --- a/openpype/hosts/unreal/api/pipeline.py +++ b/openpype/hosts/unreal/api/pipeline.py @@ -47,6 +47,7 @@ def install(): print("installing OpenPype for Unreal ...") print("-=" * 40) logger.info("installing OpenPype for Unreal") + pyblish.api.register_host("unreal") pyblish.api.register_plugin_path(str(PUBLISH_PATH)) register_loader_plugin_path(str(LOAD_PATH)) 
register_creator_plugin_path(str(CREATE_PATH)) @@ -392,3 +393,24 @@ def cast_map_to_str_dict(umap) -> dict: """ return {str(key): str(value) for (key, value) in umap.items()} + + +def get_subsequences(sequence: unreal.LevelSequence): + """Get list of subsequences from sequence. + + Args: + sequence (unreal.LevelSequence): Sequence + + Returns: + list(unreal.LevelSequence): List of subsequences + + """ + tracks = sequence.get_master_tracks() + subscene_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + break + if subscene_track is not None and subscene_track.get_sections(): + return subscene_track.get_sections() + return [] diff --git a/openpype/hosts/unreal/api/plugin.py b/openpype/hosts/unreal/api/plugin.py index b24bab831d..d8d2f2420d 100644 --- a/openpype/hosts/unreal/api/plugin.py +++ b/openpype/hosts/unreal/api/plugin.py @@ -10,6 +10,7 @@ from openpype.pipeline import ( class Creator(LegacyCreator): """This serves as skeleton for future OpenPype specific functionality""" defaults = ['Main'] + maintain_selection = False class Loader(LoaderPlugin, ABC): diff --git a/openpype/hosts/unreal/api/rendering.py b/openpype/hosts/unreal/api/rendering.py new file mode 100644 index 0000000000..b2732506fc --- /dev/null +++ b/openpype/hosts/unreal/api/rendering.py @@ -0,0 +1,137 @@ +import os + +import unreal + +from openpype.api import Anatomy +from openpype.hosts.unreal.api import pipeline + + +queue = None +executor = None + + +def _queue_finish_callback(exec, success): + unreal.log("Render completed. Success: " + str(success)) + + # Delete our reference so we don't keep it alive. + global executor + global queue + del executor + del queue + + +def _job_finish_callback(job, success): + # You can make any edits you want to the editor world here, and the world + # will be duplicated when the next render happens. 
Make sure you undo your + # edits in OnQueueFinishedCallback if you don't want to leak state changes + # into the editor world. + unreal.log("Individual job completed.") + + +def start_rendering(): + """ + Start the rendering process. + """ + print("Starting rendering...") + + # Get selected sequences + assets = unreal.EditorUtilityLibrary.get_selected_assets() + + # instances = pipeline.ls_inst() + instances = [ + a for a in assets + if a.get_class().get_name() == "OpenPypePublishInstance"] + + inst_data = [] + + for i in instances: + data = pipeline.parse_container(i.get_path_name()) + if data["family"] == "render": + inst_data.append(data) + + try: + project = os.environ.get("AVALON_PROJECT") + anatomy = Anatomy(project) + root = anatomy.roots['renders'] + except Exception: + raise Exception("Could not find render root in anatomy settings.") + + render_dir = f"{root}/{project}" + + # subsystem = unreal.get_editor_subsystem( + # unreal.MoviePipelineQueueSubsystem) + # queue = subsystem.get_queue() + global queue + queue = unreal.MoviePipelineQueue() + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for i in inst_data: + sequence = ar.get_asset_by_object_path(i["sequence"]).get_asset() + + sequences = [{ + "sequence": sequence, + "output": f"{i['output']}", + "frame_range": ( + int(float(i["frameStart"])), + int(float(i["frameEnd"])) + 1) + }] + render_list = [] + + # Get all the sequences to render. If there are subsequences, + # add them and their frame ranges to the render list. We also + # use the names for the output paths. 
+ for s in sequences: + subscenes = pipeline.get_subsequences(s.get('sequence')) + + if subscenes: + for ss in subscenes: + sequences.append({ + "sequence": ss.get_sequence(), + "output": (f"{s.get('output')}/" + f"{ss.get_sequence().get_name()}"), + "frame_range": ( + ss.get_start_frame(), ss.get_end_frame()) + }) + else: + # Avoid rendering camera sequences + if "_camera" not in s.get('sequence').get_name(): + render_list.append(s) + + # Create the rendering jobs and add them to the queue. + for r in render_list: + job = queue.allocate_new_job(unreal.MoviePipelineExecutorJob) + job.sequence = unreal.SoftObjectPath(i["master_sequence"]) + job.map = unreal.SoftObjectPath(i["master_level"]) + job.author = "OpenPype" + + # User data could be used to pass data to the job, that can be + # read in the job's OnJobFinished callback. We could, + # for instance, pass the AvalonPublishInstance's path to the job. + # job.user_data = "" + + settings = job.get_configuration().find_or_add_setting_by_class( + unreal.MoviePipelineOutputSetting) + settings.output_resolution = unreal.IntPoint(1920, 1080) + settings.custom_start_frame = r.get("frame_range")[0] + settings.custom_end_frame = r.get("frame_range")[1] + settings.use_custom_playback_range = True + settings.file_name_format = "{sequence_name}.{frame_number}" + settings.output_directory.path = f"{render_dir}/{r.get('output')}" + + renderPass = job.get_configuration().find_or_add_setting_by_class( + unreal.MoviePipelineDeferredPassBase) + renderPass.disable_multisample_effects = True + + job.get_configuration().find_or_add_setting_by_class( + unreal.MoviePipelineImageSequenceOutput_PNG) + + # If there are jobs in the queue, start the rendering process. 
+ if queue.get_jobs(): + global executor + executor = unreal.MoviePipelinePIEExecutor() + executor.on_executor_finished_delegate.add_callable_unique( + _queue_finish_callback) + executor.on_individual_job_finished_delegate.add_callable_unique( + _job_finish_callback) # Only available on PIE Executor + executor.execute(queue) diff --git a/openpype/hosts/unreal/api/tools_ui.py b/openpype/hosts/unreal/api/tools_ui.py index 93361c3574..2500f8495f 100644 --- a/openpype/hosts/unreal/api/tools_ui.py +++ b/openpype/hosts/unreal/api/tools_ui.py @@ -7,6 +7,7 @@ from openpype import ( ) from openpype.tools.utils import host_tools from openpype.tools.utils.lib import qt_app_context +from openpype.hosts.unreal.api import rendering class ToolsBtnsWidget(QtWidgets.QWidget): @@ -20,6 +21,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget): load_btn = QtWidgets.QPushButton("Load...", self) publish_btn = QtWidgets.QPushButton("Publish...", self) manage_btn = QtWidgets.QPushButton("Manage...", self) + render_btn = QtWidgets.QPushButton("Render...", self) experimental_tools_btn = QtWidgets.QPushButton( "Experimental tools...", self ) @@ -30,6 +32,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget): layout.addWidget(load_btn, 0) layout.addWidget(publish_btn, 0) layout.addWidget(manage_btn, 0) + layout.addWidget(render_btn, 0) layout.addWidget(experimental_tools_btn, 0) layout.addStretch(1) @@ -37,6 +40,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget): load_btn.clicked.connect(self._on_load) publish_btn.clicked.connect(self._on_publish) manage_btn.clicked.connect(self._on_manage) + render_btn.clicked.connect(self._on_render) experimental_tools_btn.clicked.connect(self._on_experimental) def _on_create(self): @@ -51,6 +55,9 @@ class ToolsBtnsWidget(QtWidgets.QWidget): def _on_manage(self): self.tool_required.emit("sceneinventory") + def _on_render(self): + rendering.start_rendering() + def _on_experimental(self): self.tool_required.emit("experimental_tools") diff --git 
a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py b/openpype/hosts/unreal/integration/Content/Python/init_unreal.py index 2ecd301c25..4bb03b07ed 100644 --- a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py +++ b/openpype/hosts/unreal/integration/Content/Python/init_unreal.py @@ -2,13 +2,7 @@ import unreal openpype_detected = True try: - from avalon import api -except ImportError as exc: - api = None - openpype_detected = False - unreal.log_error("Avalon: cannot load Avalon [ {} ]".format(exc)) - -try: + from openpype.pipeline import install_host from openpype.hosts.unreal import api as openpype_host except ImportError as exc: openpype_host = None @@ -16,7 +10,7 @@ except ImportError as exc: unreal.log_error("OpenPype: cannot load OpenPype [ {} ]".format(exc)) if openpype_detected: - api.install(openpype_host) + install_host(openpype_host) @unreal.uclass() diff --git a/openpype/hosts/unreal/lib.py b/openpype/hosts/unreal/lib.py index d4a776e892..805e883c64 100644 --- a/openpype/hosts/unreal/lib.py +++ b/openpype/hosts/unreal/lib.py @@ -254,6 +254,7 @@ def create_unreal_project(project_name: str, {"Name": "PythonScriptPlugin", "Enabled": True}, {"Name": "EditorScriptingUtilities", "Enabled": True}, {"Name": "SequencerScripting", "Enabled": True}, + {"Name": "MovieRenderPipeline", "Enabled": True}, {"Name": "OpenPype", "Enabled": True} ] } diff --git a/openpype/hosts/unreal/plugins/create/create_layout.py b/openpype/hosts/unreal/plugins/create/create_layout.py index 751bece167..5fef08ce2a 100644 --- a/openpype/hosts/unreal/plugins/create/create_layout.py +++ b/openpype/hosts/unreal/plugins/create/create_layout.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -from unreal import EditorLevelLibrary as ell +from unreal import EditorLevelLibrary + from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api.pipeline import instantiate @@ -28,13 +29,13 @@ class CreateLayout(plugin.Creator): # sel_objects = 
unreal.EditorUtilityLibrary.get_selected_assets() # selection = [a.get_path_name() for a in sel_objects] - data["level"] = ell.get_editor_world().get_path_name() + data["level"] = EditorLevelLibrary.get_editor_world().get_path_name() data["members"] = [] if (self.options or {}).get("useSelection"): # Set as members the selected actors - for actor in ell.get_selected_level_actors(): + for actor in EditorLevelLibrary.get_selected_level_actors(): data["members"].append("{}.{}".format( actor.get_outer().get_name(), actor.get_name())) diff --git a/openpype/hosts/unreal/plugins/create/create_render.py b/openpype/hosts/unreal/plugins/create/create_render.py new file mode 100644 index 0000000000..3b6c7a9f1e --- /dev/null +++ b/openpype/hosts/unreal/plugins/create/create_render.py @@ -0,0 +1,111 @@ +import unreal + +from openpype.pipeline import legacy_io +from openpype.hosts.unreal.api import pipeline +from openpype.hosts.unreal.api.plugin import Creator + + +class CreateRender(Creator): + """Create instance for sequence for rendering""" + + name = "unrealRender" + label = "Unreal - Render" + family = "render" + icon = "cube" + asset_types = ["LevelSequence"] + + root = "/Game/OpenPype/PublishInstances" + suffix = "_INS" + + def process(self): + subset = self.data["subset"] + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + # Get the master sequence and the master level. + # There should be only one sequence and one level in the directory. 
+ filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"/Game/OpenPype/{self.data['asset']}"], + recursive_paths=False) + sequences = ar.get_assets(filter) + ms = sequences[0].get_editor_property('object_path') + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"/Game/OpenPype/{self.data['asset']}"], + recursive_paths=False) + levels = ar.get_assets(filter) + ml = levels[0].get_editor_property('object_path') + + selection = [] + if (self.options or {}).get("useSelection"): + sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() + selection = [ + a.get_path_name() for a in sel_objects + if a.get_class().get_name() in self.asset_types] + else: + selection.append(self.data['sequence']) + + unreal.log(f"selection: {selection}") + + path = f"{self.root}" + unreal.EditorAssetLibrary.make_directory(path) + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for a in selection: + ms_obj = ar.get_asset_by_object_path(ms).get_asset() + + seq_data = None + + if a == ms: + seq_data = { + "sequence": ms_obj, + "output": f"{ms_obj.get_name()}", + "frame_range": ( + ms_obj.get_playback_start(), ms_obj.get_playback_end()) + } + else: + seq_data_list = [{ + "sequence": ms_obj, + "output": f"{ms_obj.get_name()}", + "frame_range": ( + ms_obj.get_playback_start(), ms_obj.get_playback_end()) + }] + + for s in seq_data_list: + subscenes = pipeline.get_subsequences(s.get('sequence')) + + for ss in subscenes: + curr_data = { + "sequence": ss.get_sequence(), + "output": (f"{s.get('output')}/" + f"{ss.get_sequence().get_name()}"), + "frame_range": ( + ss.get_start_frame(), ss.get_end_frame() - 1) + } + + if ss.get_sequence().get_path_name() == a: + seq_data = curr_data + break + seq_data_list.append(curr_data) + + if seq_data is not None: + break + + if not seq_data: + continue + + d = self.data.copy() + d["members"] = [a] + d["sequence"] = a + d["master_sequence"] = ms + d["master_level"] = ml + d["output"] = 
seq_data.get('output') + d["frameStart"] = seq_data.get('frame_range')[0] + d["frameEnd"] = seq_data.get('frame_range')[1] + + container_name = f"{subset}{self.suffix}" + pipeline.create_publish_instance( + instance=container_name, path=path) + pipeline.imprint(f"{path}/{container_name}", d) diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py index c9a1633031..60c1526d3d 100644 --- a/openpype/hosts/unreal/plugins/load/load_animation.py +++ b/openpype/hosts/unreal/plugins/load/load_animation.py @@ -3,13 +3,17 @@ import os import json +import unreal +from unreal import EditorAssetLibrary +from unreal import MovieSceneSkeletalAnimationTrack +from unreal import MovieSceneSkeletalAnimationSection + from openpype.pipeline import ( get_representation_path, AVALON_CONTAINER_ID ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline -import unreal # noqa class AnimationFBXLoader(plugin.Loader): @@ -21,59 +25,13 @@ class AnimationFBXLoader(plugin.Loader): icon = "cube" color = "orange" - def load(self, context, name, namespace, options=None): - """ - Load and containerise representation into Content Browser. - - This is two step process. First, import FBX to temporary path and - then call `containerise()` on it - this moves all content to new - directory and then it will create AssetContainer there and imprint it - with metadata. This will mark this path as container. - - Args: - context (dict): application context - name (str): subset name - namespace (str): in Unreal this is basically path to container. - This is not passed here, so namespace is set - by `containerise()` because only then we know - real path. - data (dict): Those would be data to be imprinted. This is not used - now, data are imprinted by `containerise()`. 
- - Returns: - list(str): list of container content - - """ - # Create directory for asset and OpenPype container - root = "/Game/OpenPype/Assets" - asset = context.get('asset').get('name') - suffix = "_CON" - if asset: - asset_name = "{}_{}".format(asset, name) - else: - asset_name = "{}".format(name) - - tools = unreal.AssetToolsHelpers().get_asset_tools() - asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") - - container_name += suffix - - unreal.EditorAssetLibrary.make_directory(asset_dir) - + def _process(self, asset_dir, asset_name, instance_name): automated = False actor = None task = unreal.AssetImportTask() task.options = unreal.FbxImportUI() - lib_path = self.fname.replace("fbx", "json") - - with open(lib_path, "r") as fp: - data = json.load(fp) - - instance_name = data.get("instance_name") - if instance_name: automated = True # Old method to get the actor @@ -131,6 +89,134 @@ class AnimationFBXLoader(plugin.Loader): unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + asset_content = EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + animation = None + + for a in asset_content: + imported_asset_data = EditorAssetLibrary.find_asset_data(a) + imported_asset = unreal.AssetRegistryHelpers.get_asset( + imported_asset_data) + if imported_asset.__class__ == unreal.AnimSequence: + animation = imported_asset + break + + if animation: + animation.set_editor_property('enable_root_motion', True) + actor.skeletal_mesh_component.set_editor_property( + 'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE) + actor.skeletal_mesh_component.animation_data.set_editor_property( + 'anim_to_play', animation) + + return animation + + def load(self, context, name, namespace, options=None): + """ + Load and containerise representation into Content Browser. + + This is two step process. 
First, import FBX to temporary path and + then call `containerise()` on it - this moves all content to new + directory and then it will create AssetContainer there and imprint it + with metadata. This will mark this path as container. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. + This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. + data (dict): Those would be data to be imprinted. This is not used + now, data are imprinted by `containerise()`. + + Returns: + list(str): list of container content + """ + # Create directory for asset and avalon container + hierarchy = context.get('asset').get('data').get('parents') + root = "/Game/OpenPype" + asset = context.get('asset').get('name') + suffix = "_CON" + if asset: + asset_name = "{}_{}".format(asset, name) + else: + asset_name = "{}".format(name) + + tools = unreal.AssetToolsHelpers().get_asset_tools() + asset_dir, container_name = tools.create_unique_asset_name( + f"{root}/Animations/{asset}/{name}", suffix="") + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{hierarchy[0]}"], + recursive_paths=False) + levels = ar.get_assets(filter) + master_level = levels[0].get_editor_property('object_path') + + hierarchy_dir = root + for h in hierarchy: + hierarchy_dir = f"{hierarchy_dir}/{h}" + hierarchy_dir = f"{hierarchy_dir}/{asset}" + + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{hierarchy_dir}/"], + recursive_paths=True) + levels = ar.get_assets(filter) + level = levels[0].get_editor_property('object_path') + + unreal.EditorLevelLibrary.save_all_dirty_levels() + unreal.EditorLevelLibrary.load_level(level) + + container_name += suffix + + EditorAssetLibrary.make_directory(asset_dir) + + libpath = self.fname.replace("fbx", "json") + + with open(libpath, "r") as fp: 
+ data = json.load(fp) + + instance_name = data.get("instance_name") + + animation = self._process(asset_dir, asset_name, instance_name) + + asset_content = EditorAssetLibrary.list_assets( + hierarchy_dir, recursive=True, include_folder=False) + + # Get the sequence for the layout, excluding the camera one. + sequences = [a for a in asset_content + if (EditorAssetLibrary.find_asset_data(a).get_class() == + unreal.LevelSequence.static_class() and + "_camera" not in a.split("/")[-1])] + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for s in sequences: + sequence = ar.get_asset_by_object_path(s).get_asset() + possessables = [ + p for p in sequence.get_possessables() + if p.get_display_name() == instance_name] + + for p in possessables: + tracks = [ + t for t in p.get_tracks() + if (t.get_class() == + MovieSceneSkeletalAnimationTrack.static_class())] + + for t in tracks: + sections = [ + s for s in t.get_sections() + if (s.get_class() == + MovieSceneSkeletalAnimationSection.static_class())] + + for s in sections: + s.params.set_editor_property('animation', animation) + # Create Asset Container unreal_pipeline.create_container( container=container_name, path=asset_dir) @@ -150,29 +236,14 @@ class AnimationFBXLoader(plugin.Loader): unreal_pipeline.imprint( "{}/{}".format(asset_dir, container_name), data) - asset_content = unreal.EditorAssetLibrary.list_assets( - asset_dir, recursive=True, include_folder=True - ) + imported_content = EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=False) - animation = None + for a in imported_content: + EditorAssetLibrary.save_asset(a) - for a in asset_content: - unreal.EditorAssetLibrary.save_asset(a) - imported_asset_data = unreal.EditorAssetLibrary.find_asset_data(a) - imported_asset = unreal.AssetRegistryHelpers.get_asset( - imported_asset_data) - if imported_asset.__class__ == unreal.AnimSequence: - animation = imported_asset - break - - if animation: - 
animation.set_editor_property('enable_root_motion', True) - actor.skeletal_mesh_component.set_editor_property( - 'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE) - actor.skeletal_mesh_component.animation_data.set_editor_property( - 'anim_to_play', animation) - - return asset_content + unreal.EditorLevelLibrary.save_current_level() + unreal.EditorLevelLibrary.load_level(master_level) def update(self, container, representation): name = container["asset_name"] @@ -218,7 +289,7 @@ class AnimationFBXLoader(plugin.Loader): task.options.anim_sequence_import_data.set_editor_property( 'convert_scene', True) - skeletal_mesh = unreal.EditorAssetLibrary.load_asset( + skeletal_mesh = EditorAssetLibrary.load_asset( container.get('namespace') + "/" + container.get('asset_name')) skeleton = skeletal_mesh.get_editor_property('skeleton') task.options.set_editor_property('skeleton', skeleton) @@ -235,22 +306,22 @@ class AnimationFBXLoader(plugin.Loader): "parent": str(representation["parent"]) }) - asset_content = unreal.EditorAssetLibrary.list_assets( + asset_content = EditorAssetLibrary.list_assets( destination_path, recursive=True, include_folder=True ) for a in asset_content: - unreal.EditorAssetLibrary.save_asset(a) + EditorAssetLibrary.save_asset(a) def remove(self, container): path = container["namespace"] parent_path = os.path.dirname(path) - unreal.EditorAssetLibrary.delete_directory(path) + EditorAssetLibrary.delete_directory(path) - asset_content = unreal.EditorAssetLibrary.list_assets( + asset_content = EditorAssetLibrary.list_assets( parent_path, recursive=False, include_folder=True ) if len(asset_content) == 0: - unreal.EditorAssetLibrary.delete_directory(parent_path) + EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_camera.py b/openpype/hosts/unreal/plugins/load/load_camera.py index 40bca0b0c7..b33e45b6e9 100644 --- a/openpype/hosts/unreal/plugins/load/load_camera.py +++ 
b/openpype/hosts/unreal/plugins/load/load_camera.py @@ -1,12 +1,17 @@ # -*- coding: utf-8 -*- """Load camera from FBX.""" -import os +from pathlib import Path -from avalon import io -from openpype.pipeline import AVALON_CONTAINER_ID +import unreal +from unreal import EditorAssetLibrary +from unreal import EditorLevelLibrary + +from openpype.pipeline import ( + AVALON_CONTAINER_ID, + legacy_io, +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline -import unreal # noqa class CameraLoader(plugin.Loader): @@ -18,6 +23,40 @@ class CameraLoader(plugin.Loader): icon = "cube" color = "orange" + def _get_data(self, asset_name): + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + + return asset_doc.get("data") + + def _set_sequence_hierarchy( + self, seq_i, seq_j, min_frame_j, max_frame_j + ): + tracks = seq_i.get_master_tracks() + track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + track = t + break + if not track: + track = seq_i.add_master_track(unreal.MovieSceneSubTrack) + + subscenes = track.get_sections() + subscene = None + for s in subscenes: + if s.get_editor_property('sub_sequence') == seq_j: + subscene = s + break + if not subscene: + subscene = track.add_section() + subscene.set_row_index(len(track.get_sections())) + subscene.set_editor_property('sub_sequence', seq_j) + subscene.set_range( + min_frame_j, + max_frame_j + 1) + def load(self, context, name, namespace, data): """ Load and containerise representation into Content Browser. 
@@ -41,8 +80,14 @@ class CameraLoader(plugin.Loader): list(str): list of container content """ - # Create directory for asset and OpenPype container - root = "/Game/OpenPype/Assets" + # Create directory for asset and avalon container + hierarchy = context.get('asset').get('data').get('parents') + root = "/Game/OpenPype" + hierarchy_dir = root + hierarchy_list = [] + for h in hierarchy: + hierarchy_dir = f"{hierarchy_dir}/{h}" + hierarchy_list.append(hierarchy_dir) asset = context.get('asset').get('name') suffix = "_CON" if asset: @@ -52,10 +97,10 @@ class CameraLoader(plugin.Loader): tools = unreal.AssetToolsHelpers().get_asset_tools() + # Create a unique name for the camera directory unique_number = 1 - - if unreal.EditorAssetLibrary.does_directory_exist(f"{root}/{asset}"): - asset_content = unreal.EditorAssetLibrary.list_assets( + if EditorAssetLibrary.does_directory_exist(f"{hierarchy_dir}/{asset}"): + asset_content = EditorAssetLibrary.list_assets( f"{root}/{asset}", recursive=False, include_folder=True ) @@ -74,42 +119,122 @@ class CameraLoader(plugin.Loader): unique_number = f_numbers[-1] + 1 asset_dir, container_name = tools.create_unique_asset_name( - f"{root}/{asset}/{name}_{unique_number:02d}", suffix="") + f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="") container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + current_level = EditorLevelLibrary.get_editor_world().get_full_name() + EditorLevelLibrary.save_all_dirty_levels() - sequence = tools.create_asset( - asset_name=asset_name, + ar = unreal.AssetRegistryHelpers.get_asset_registry() + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{hierarchy_dir}/{asset}/"], + recursive_paths=True) + maps = ar.get_assets(filter) + + # There should be only one map in the list + EditorLevelLibrary.load_level(maps[0].get_full_name()) + + # Get all the sequences in the hierarchy. It will create them, if + # they don't exist. 
+ sequences = [] + frame_ranges = [] + i = 0 + for h in hierarchy_list: + root_content = EditorAssetLibrary.list_assets( + h, recursive=False, include_folder=False) + + existing_sequences = [ + EditorAssetLibrary.find_asset_data(asset) + for asset in root_content + if EditorAssetLibrary.find_asset_data( + asset).get_class().get_name() == 'LevelSequence' + ] + + if not existing_sequences: + scene = tools.create_asset( + asset_name=hierarchy[i], + package_path=h, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + asset_data = legacy_io.find_one({ + "type": "asset", + "name": h.split('/')[-1] + }) + + id = asset_data.get('_id') + + start_frames = [] + end_frames = [] + + elements = list( + legacy_io.find({"type": "asset", "data.visualParent": id})) + for e in elements: + start_frames.append(e.get('data').get('clipIn')) + end_frames.append(e.get('data').get('clipOut')) + + elements.extend(legacy_io.find({ + "type": "asset", + "data.visualParent": e.get('_id') + })) + + min_frame = min(start_frames) + max_frame = max(end_frames) + + scene.set_display_rate( + unreal.FrameRate(asset_data.get('data').get("fps"), 1.0)) + scene.set_playback_start(min_frame) + scene.set_playback_end(max_frame) + + sequences.append(scene) + frame_ranges.append((min_frame, max_frame)) + else: + for e in existing_sequences: + sequences.append(e.get_asset()) + frame_ranges.append(( + e.get_asset().get_playback_start(), + e.get_asset().get_playback_end())) + + i += 1 + + EditorAssetLibrary.make_directory(asset_dir) + + cam_seq = tools.create_asset( + asset_name=f"{asset}_camera", package_path=asset_dir, asset_class=unreal.LevelSequence, factory=unreal.LevelSequenceFactoryNew() ) - io_asset = io.Session["AVALON_ASSET"] - asset_doc = io.find_one({ - "type": "asset", - "name": io_asset - }) + # Add sequences data to hierarchy + for i in range(0, len(sequences) - 1): + self._set_sequence_hierarchy( + sequences[i], sequences[i + 1], + frame_ranges[i + 1][0], 
frame_ranges[i + 1][1]) - data = asset_doc.get("data") - - if data: - sequence.set_display_rate(unreal.FrameRate(data.get("fps"), 1.0)) - sequence.set_playback_start(data.get("frameStart")) - sequence.set_playback_end(data.get("frameEnd")) + data = self._get_data(asset) + cam_seq.set_display_rate( + unreal.FrameRate(data.get("fps"), 1.0)) + cam_seq.set_playback_start(0) + cam_seq.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1) + self._set_sequence_hierarchy( + sequences[-1], cam_seq, + data.get('clipIn'), data.get('clipOut')) settings = unreal.MovieSceneUserImportFBXSettings() settings.set_editor_property('reduce_keys', False) - unreal.SequencerTools.import_fbx( - unreal.EditorLevelLibrary.get_editor_world(), - sequence, - sequence.get_bindings(), - settings, - self.fname - ) + if cam_seq: + unreal.SequencerTools.import_fbx( + EditorLevelLibrary.get_editor_world(), + cam_seq, + cam_seq.get_bindings(), + settings, + self.fname + ) # Create Asset Container unreal_pipeline.create_container( @@ -130,81 +255,258 @@ class CameraLoader(plugin.Loader): unreal_pipeline.imprint( "{}/{}".format(asset_dir, container_name), data) - asset_content = unreal.EditorAssetLibrary.list_assets( + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(current_level) + + asset_content = EditorAssetLibrary.list_assets( asset_dir, recursive=True, include_folder=True ) for a in asset_content: - unreal.EditorAssetLibrary.save_asset(a) + EditorAssetLibrary.save_asset(a) return asset_content def update(self, container, representation): - path = container["namespace"] - ar = unreal.AssetRegistryHelpers.get_asset_registry() - tools = unreal.AssetToolsHelpers().get_asset_tools() - asset_content = unreal.EditorAssetLibrary.list_assets( - path, recursive=False, include_folder=False - ) - asset_name = "" - for a in asset_content: - asset = ar.get_asset_by_object_path(a) - if a.endswith("_CON"): - loaded_asset = unreal.EditorAssetLibrary.load_asset(a) - 
unreal.EditorAssetLibrary.set_metadata_tag( - loaded_asset, "representation", str(representation["_id"]) - ) - unreal.EditorAssetLibrary.set_metadata_tag( - loaded_asset, "parent", str(representation["parent"]) - ) - asset_name = unreal.EditorAssetLibrary.get_metadata_tag( - loaded_asset, "asset_name" - ) - elif asset.asset_class == "LevelSequence": - unreal.EditorAssetLibrary.delete_asset(a) + root = "/Game/OpenPype" - sequence = tools.create_asset( - asset_name=asset_name, - package_path=path, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() + asset_dir = container.get('namespace') + + context = representation.get("context") + + hierarchy = context.get('hierarchy').split("/") + h_dir = f"{root}/{hierarchy[0]}" + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + + EditorLevelLibrary.save_current_level() + + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[asset_dir], + recursive_paths=False) + sequences = ar.get_assets(filter) + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[str(Path(asset_dir).parent.as_posix())], + recursive_paths=True) + maps = ar.get_assets(filter) + + # There should be only one map in the list + EditorLevelLibrary.load_level(maps[0].get_full_name()) + + level_sequence = sequences[0].get_asset() + + display_rate = level_sequence.get_display_rate() + playback_start = level_sequence.get_playback_start() + playback_end = level_sequence.get_playback_end() + + sequence_name = f"{container.get('asset')}_camera" + + # Get the actors in the level sequence. 
+ objs = unreal.SequencerTools.get_bound_objects( + unreal.EditorLevelLibrary.get_editor_world(), + level_sequence, + level_sequence.get_bindings(), + unreal.SequencerScriptingRange( + has_start_value=True, + has_end_value=True, + inclusive_start=level_sequence.get_playback_start(), + exclusive_end=level_sequence.get_playback_end() + ) ) - io_asset = io.Session["AVALON_ASSET"] - asset_doc = io.find_one({ - "type": "asset", - "name": io_asset - }) + # Delete actors from the map + for o in objs: + if o.bound_objects[0].get_class().get_name() == "CineCameraActor": + actor_path = o.bound_objects[0].get_path_name().split(":")[-1] + actor = EditorLevelLibrary.get_actor_reference(actor_path) + EditorLevelLibrary.destroy_actor(actor) - data = asset_doc.get("data") + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to find + # the level sequence. + root = "/Game/OpenPype" + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(filter) + master_sequence = sequences[0].get_asset() - if data: - sequence.set_display_rate(unreal.FrameRate(data.get("fps"), 1.0)) - sequence.set_playback_start(data.get("frameStart")) - sequence.set_playback_end(data.get("frameEnd")) + sequences = [master_sequence] + + parent = None + sub_scene = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + break + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if ss.get_sequence().get_name() == sequence_name: + parent = s + sub_scene = ss + # subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. 
+ i = 0 + for ss in sections: + ss.set_row_index(i) + i += 1 + + if parent: + break + + assert parent, "Could not find the parent sequence" + + EditorAssetLibrary.delete_asset(level_sequence.get_path_name()) settings = unreal.MovieSceneUserImportFBXSettings() settings.set_editor_property('reduce_keys', False) + tools = unreal.AssetToolsHelpers().get_asset_tools() + new_sequence = tools.create_asset( + asset_name=sequence_name, + package_path=asset_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + new_sequence.set_display_rate(display_rate) + new_sequence.set_playback_start(playback_start) + new_sequence.set_playback_end(playback_end) + + sub_scene.set_sequence(new_sequence) + unreal.SequencerTools.import_fbx( - unreal.EditorLevelLibrary.get_editor_world(), - sequence, - sequence.get_bindings(), + EditorLevelLibrary.get_editor_world(), + new_sequence, + new_sequence.get_bindings(), settings, str(representation["data"]["path"]) ) + data = { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + } + unreal_pipeline.imprint( + "{}/{}".format(asset_dir, container.get('container_name')), data) + + EditorLevelLibrary.save_current_level() + + asset_content = EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=False) + + for a in asset_content: + EditorAssetLibrary.save_asset(a) + + EditorLevelLibrary.load_level(master_level) + def remove(self, container): - path = container["namespace"] - parent_path = os.path.dirname(path) + path = Path(container.get("namespace")) + parent_path = str(path.parent.as_posix()) - unreal.EditorAssetLibrary.delete_directory(path) + ar = unreal.AssetRegistryHelpers.get_asset_registry() + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{str(path.as_posix())}"], + recursive_paths=False) + sequences = ar.get_assets(filter) - asset_content = unreal.EditorAssetLibrary.list_assets( + if not sequences: + raise 
Exception("Could not find sequence.") + + world = ar.get_asset_by_object_path( + EditorLevelLibrary.get_editor_world().get_path_name()) + + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{parent_path}"], + recursive_paths=True) + maps = ar.get_assets(filter) + + # There should be only one map in the list + if not maps: + raise Exception("Could not find map.") + + map = maps[0] + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(map.get_full_name()) + + # Remove the camera from the level. + actors = EditorLevelLibrary.get_all_level_actors() + + for a in actors: + if a.__class__ == unreal.CineCameraActor: + EditorLevelLibrary.destroy_actor(a) + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(world.get_full_name()) + + # There should be only one sequence in the path. + sequence_name = sequences[0].asset_name + + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to find + # the level sequence. + root = "/Game/OpenPype" + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(filter) + master_sequence = sequences[0].get_asset() + + sequences = [master_sequence] + + parent = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + break + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if ss.get_sequence().get_name() == sequence_name: + parent = s + subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. 
+ i = 0 + for ss in sections: + ss.set_row_index(i) + i += 1 + + if parent: + break + + assert parent, "Could not find the parent sequence" + + EditorAssetLibrary.delete_directory(str(path.as_posix())) + + # Check if there isn't any more assets in the parent folder, and + # delete it if not. + asset_content = EditorAssetLibrary.list_assets( parent_path, recursive=False, include_folder=True ) if len(asset_content) == 0: - unreal.EditorAssetLibrary.delete_directory(parent_path) + EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/openpype/hosts/unreal/plugins/load/load_layout.py index 7f6ce7d822..412f77e3a9 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout.py +++ b/openpype/hosts/unreal/plugins/load/load_layout.py @@ -7,6 +7,7 @@ from pathlib import Path import unreal from unreal import EditorAssetLibrary from unreal import EditorLevelLibrary +from unreal import EditorLevelUtils from unreal import AssetToolsHelpers from unreal import FBXImportType from unreal import MathLibrary as umath @@ -17,6 +18,7 @@ from openpype.pipeline import ( load_container, get_representation_path, AVALON_CONTAINER_ID, + legacy_io, ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -31,7 +33,7 @@ class LayoutLoader(plugin.Loader): label = "Load Layout" icon = "code-fork" color = "orange" - ASSET_ROOT = "/Game/OpenPype/Assets" + ASSET_ROOT = "/Game/OpenPype" def _get_asset_containers(self, path): ar = unreal.AssetRegistryHelpers.get_asset_registry() @@ -85,11 +87,91 @@ class LayoutLoader(plugin.Loader): return None - @staticmethod - def _process_family(assets, class_name, transform, inst_name=None): + def _get_data(self, asset_name): + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + + return asset_doc.get("data") + + def _set_sequence_hierarchy( + self, seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths + ): + # 
Get existing sequencer tracks or create them if they don't exist + tracks = seq_i.get_master_tracks() + subscene_track = None + visibility_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + if (t.get_class() == + unreal.MovieSceneLevelVisibilityTrack.static_class()): + visibility_track = t + if not subscene_track: + subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack) + if not visibility_track: + visibility_track = seq_i.add_master_track( + unreal.MovieSceneLevelVisibilityTrack) + + # Create the sub-scene section + subscenes = subscene_track.get_sections() + subscene = None + for s in subscenes: + if s.get_editor_property('sub_sequence') == seq_j: + subscene = s + break + if not subscene: + subscene = subscene_track.add_section() + subscene.set_row_index(len(subscene_track.get_sections())) + subscene.set_editor_property('sub_sequence', seq_j) + subscene.set_range( + min_frame_j, + max_frame_j + 1) + + # Create the visibility section + ar = unreal.AssetRegistryHelpers.get_asset_registry() + maps = [] + for m in map_paths: + # Unreal requires to load the level to get the map name + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(m) + maps.append(str(ar.get_asset_by_object_path(m).asset_name)) + + vis_section = visibility_track.add_section() + index = len(visibility_track.get_sections()) + + vis_section.set_range( + min_frame_j, + max_frame_j + 1) + vis_section.set_visibility(unreal.LevelVisibility.VISIBLE) + vis_section.set_row_index(index) + vis_section.set_level_names(maps) + + if min_frame_j > 1: + hid_section = visibility_track.add_section() + hid_section.set_range( + 1, + min_frame_j) + hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) + hid_section.set_row_index(index) + hid_section.set_level_names(maps) + if max_frame_j < max_frame_i: + hid_section = visibility_track.add_section() + hid_section.set_range( + max_frame_j + 1, + max_frame_i + 1) + 
hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) + hid_section.set_row_index(index) + hid_section.set_level_names(maps) + + def _process_family( + self, assets, class_name, transform, sequence, inst_name=None + ): ar = unreal.AssetRegistryHelpers.get_asset_registry() actors = [] + bindings = [] for asset in assets: obj = ar.get_asset_by_object_path(asset).get_asset() @@ -119,14 +201,30 @@ class LayoutLoader(plugin.Loader): ), False) actor.set_actor_scale3d(transform.get('scale')) + if class_name == 'SkeletalMesh': + skm_comp = actor.get_editor_property( + 'skeletal_mesh_component') + skm_comp.set_bounds_scale(10.0) + actors.append(actor) - return actors + binding = None + for p in sequence.get_possessables(): + if p.get_name() == actor.get_name(): + binding = p + break + + if not binding: + binding = sequence.add_possessable(actor) + + bindings.append(binding) + + return actors, bindings - @staticmethod def _import_animation( - asset_dir, path, instance_name, skeleton, actors_dict, - animation_file): + self, asset_dir, path, instance_name, skeleton, actors_dict, + animation_file, bindings_dict, sequence + ): anim_file = Path(animation_file) anim_file_name = anim_file.with_suffix('') @@ -205,7 +303,106 @@ class LayoutLoader(plugin.Loader): actor.skeletal_mesh_component.animation_data.set_editor_property( 'anim_to_play', animation) - def _process(self, lib_path, asset_dir, loaded=None): + # Add animation to the sequencer + bindings = bindings_dict.get(instance_name) + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for binding in bindings: + tracks = binding.get_tracks() + track = None + if not tracks: + track = binding.add_track( + unreal.MovieSceneSkeletalAnimationTrack) + else: + track = tracks[0] + + sections = track.get_sections() + section = None + if not sections: + section = track.add_section() + else: + section = sections[0] + + sec_params = section.get_editor_property('params') + curr_anim = sec_params.get_editor_property('animation') + 
+ if curr_anim: + # Checks if the animation path has a container. + # If it does, it means that the animation is already + # in the sequencer. + anim_path = str(Path( + curr_anim.get_path_name()).parent + ).replace('\\', '/') + + filter = unreal.ARFilter( + class_names=["AssetContainer"], + package_paths=[anim_path], + recursive_paths=False) + containers = ar.get_assets(filter) + + if len(containers) > 0: + return + + section.set_range( + sequence.get_playback_start(), + sequence.get_playback_end()) + sec_params = section.get_editor_property('params') + sec_params.set_editor_property('animation', animation) + + def _generate_sequence(self, h, h_dir): + tools = unreal.AssetToolsHelpers().get_asset_tools() + + sequence = tools.create_asset( + asset_name=h, + package_path=h_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + asset_data = legacy_io.find_one({ + "type": "asset", + "name": h_dir.split('/')[-1] + }) + + id = asset_data.get('_id') + + start_frames = [] + end_frames = [] + + elements = list( + legacy_io.find({"type": "asset", "data.visualParent": id})) + for e in elements: + start_frames.append(e.get('data').get('clipIn')) + end_frames.append(e.get('data').get('clipOut')) + + elements.extend(legacy_io.find({ + "type": "asset", + "data.visualParent": e.get('_id') + })) + + min_frame = min(start_frames) + max_frame = max(end_frames) + + sequence.set_display_rate( + unreal.FrameRate(asset_data.get('data').get("fps"), 1.0)) + sequence.set_playback_start(min_frame) + sequence.set_playback_end(max_frame) + + tracks = sequence.get_master_tracks() + track = None + for t in tracks: + if (t.get_class() == + unreal.MovieSceneCameraCutTrack.static_class()): + track = t + break + if not track: + track = sequence.add_master_track( + unreal.MovieSceneCameraCutTrack) + + return sequence, (min_frame, max_frame) + + def _process(self, lib_path, asset_dir, sequence, loaded=None): ar = unreal.AssetRegistryHelpers.get_asset_registry() 
with open(lib_path, "r") as fp: @@ -220,6 +417,9 @@ class LayoutLoader(plugin.Loader): skeleton_dict = {} actors_dict = {} + bindings_dict = {} + + loaded_assets = [] for element in data: reference = None @@ -255,7 +455,7 @@ class LayoutLoader(plugin.Loader): continue options = { - "asset_dir": asset_dir + # "asset_dir": asset_dir } assets = load_container( @@ -265,6 +465,17 @@ class LayoutLoader(plugin.Loader): options=options ) + container = None + + for asset in assets: + obj = ar.get_asset_by_object_path(asset).get_asset() + if obj.get_class().get_name() == 'AssetContainer': + container = obj + if obj.get_class().get_name() == 'Skeleton': + skeleton = obj + + loaded_assets.append(container.get_path_name()) + instances = [ item for item in data if (item.get('reference_fbx') == reference or @@ -277,21 +488,13 @@ class LayoutLoader(plugin.Loader): actors = [] if family == 'model': - actors = self._process_family( - assets, 'StaticMesh', transform, inst) + actors, _ = self._process_family( + assets, 'StaticMesh', transform, sequence, inst) elif family == 'rig': - actors = self._process_family( - assets, 'SkeletalMesh', transform, inst) + actors, bindings = self._process_family( + assets, 'SkeletalMesh', transform, sequence, inst) actors_dict[inst] = actors - - if family == 'rig': - # Finds skeleton among the imported assets - for asset in assets: - obj = ar.get_asset_by_object_path(asset).get_asset() - if obj.get_class().get_name() == 'Skeleton': - skeleton = obj - if skeleton: - break + bindings_dict[inst] = bindings if skeleton: skeleton_dict[reference] = skeleton @@ -302,8 +505,10 @@ class LayoutLoader(plugin.Loader): if animation_file and skeleton: self._import_animation( - asset_dir, path, instance_name, skeleton, - actors_dict, animation_file) + asset_dir, path, instance_name, skeleton, actors_dict, + animation_file, bindings_dict, sequence) + + return loaded_assets @staticmethod def _remove_family(assets, components, class_name, prop_name): @@ -369,7 +574,13 
@@ class LayoutLoader(plugin.Loader): list(str): list of container content """ # Create directory for asset and avalon container + hierarchy = context.get('asset').get('data').get('parents') root = self.ASSET_ROOT + hierarchy_dir = root + hierarchy_dir_list = [] + for h in hierarchy: + hierarchy_dir = f"{hierarchy_dir}/{h}" + hierarchy_dir_list.append(hierarchy_dir) asset = context.get('asset').get('name') suffix = "_CON" if asset: @@ -379,13 +590,93 @@ class LayoutLoader(plugin.Loader): tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + "{}/{}/{}".format(hierarchy_dir, asset, name), suffix="") container_name += suffix EditorAssetLibrary.make_directory(asset_dir) - self._process(self.fname, asset_dir) + # Create map for the shot, and create hierarchy of map. If the maps + # already exist, we will use them. + h_dir = hierarchy_dir_list[0] + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + if not EditorAssetLibrary.does_asset_exist(master_level): + EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map") + + level = f"{asset_dir}/{asset}_map.{asset}_map" + EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map") + + EditorLevelLibrary.load_level(master_level) + EditorLevelUtils.add_level_to_world( + EditorLevelLibrary.get_editor_world(), + level, + unreal.LevelStreamingDynamic + ) + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(level) + + # Get all the sequences in the hierarchy. It will create them, if + # they don't exist. 
+ sequences = [] + frame_ranges = [] + for (h_dir, h) in zip(hierarchy_dir_list, hierarchy): + root_content = EditorAssetLibrary.list_assets( + h_dir, recursive=False, include_folder=False) + + existing_sequences = [ + EditorAssetLibrary.find_asset_data(asset) + for asset in root_content + if EditorAssetLibrary.find_asset_data( + asset).get_class().get_name() == 'LevelSequence' + ] + + if not existing_sequences: + sequence, frame_range = self._generate_sequence(h, h_dir) + + sequences.append(sequence) + frame_ranges.append(frame_range) + else: + for e in existing_sequences: + sequences.append(e.get_asset()) + frame_ranges.append(( + e.get_asset().get_playback_start(), + e.get_asset().get_playback_end())) + + shot = tools.create_asset( + asset_name=asset, + package_path=asset_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + # sequences and frame_ranges have the same length + for i in range(0, len(sequences) - 1): + self._set_sequence_hierarchy( + sequences[i], sequences[i + 1], + frame_ranges[i][1], + frame_ranges[i + 1][0], frame_ranges[i + 1][1], + [level]) + + data = self._get_data(asset) + shot.set_display_rate( + unreal.FrameRate(data.get("fps"), 1.0)) + shot.set_playback_start(0) + shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1) + self._set_sequence_hierarchy( + sequences[-1], shot, + frame_ranges[-1][1], + data.get('clipIn'), data.get('clipOut'), + [level]) + + EditorLevelLibrary.load_level(level) + + loaded_assets = self._process(self.fname, asset_dir, shot) + + for s in sequences: + EditorAssetLibrary.save_asset(s.get_full_name()) + + EditorLevelLibrary.save_current_level() # Create Asset Container unreal_pipeline.create_container( @@ -401,7 +692,8 @@ class LayoutLoader(plugin.Loader): "loader": str(self.__class__.__name__), "representation": context["representation"]["_id"], "parent": context["representation"]["parent"], - "family": context["representation"]["context"]["family"] + "family": 
context["representation"]["context"]["family"], + "loaded_assets": loaded_assets } unreal_pipeline.imprint( "{}/{}".format(asset_dir, container_name), data) @@ -412,146 +704,192 @@ class LayoutLoader(plugin.Loader): for a in asset_content: EditorAssetLibrary.save_asset(a) + EditorLevelLibrary.load_level(master_level) + return asset_content def update(self, container, representation): ar = unreal.AssetRegistryHelpers.get_asset_registry() + root = "/Game/OpenPype" + + asset_dir = container.get('namespace') + + context = representation.get("context") + + hierarchy = context.get('hierarchy').split("/") + h_dir = f"{root}/{hierarchy[0]}" + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + + # # Create a temporary level to delete the layout level. + # EditorLevelLibrary.save_all_dirty_levels() + # EditorAssetLibrary.make_directory(f"{root}/tmp") + # tmp_level = f"{root}/tmp/temp_map" + # if not EditorAssetLibrary.does_asset_exist(f"{tmp_level}.temp_map"): + # EditorLevelLibrary.new_level(tmp_level) + # else: + # EditorLevelLibrary.load_level(tmp_level) + + # Get layout level + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[asset_dir], + recursive_paths=False) + levels = ar.get_assets(filter) + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[asset_dir], + recursive_paths=False) + sequences = ar.get_assets(filter) + + layout_level = levels[0].get_editor_property('object_path') + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(layout_level) + + # Delete all the actors in the level + actors = unreal.EditorLevelLibrary.get_all_level_actors() + for actor in actors: + unreal.EditorLevelLibrary.destroy_actor(actor) + + EditorLevelLibrary.save_current_level() + + EditorAssetLibrary.delete_directory(f"{asset_dir}/animations/") + source_path = get_representation_path(representation) - destination_path = container["namespace"] - lib_path = 
Path(get_representation_path(representation)) - self._remove_actors(destination_path) + loaded_assets = self._process( + source_path, asset_dir, sequences[0].get_asset()) - # Delete old animations - anim_path = f"{destination_path}/animations/" - EditorAssetLibrary.delete_directory(anim_path) - - with open(source_path, "r") as fp: - data = json.load(fp) - - references = [e.get('reference_fbx') for e in data] - asset_containers = self._get_asset_containers(destination_path) - loaded = [] - - # Delete all the assets imported with the previous version of the - # layout, if they're not in the new layout. - for asset_container in asset_containers: - if asset_container.get_editor_property( - 'asset_name') == container["objectName"]: - continue - ref = EditorAssetLibrary.get_metadata_tag( - asset_container.get_asset(), 'representation') - ppath = asset_container.get_editor_property('package_path') - - if ref not in references: - # If the asset is not in the new layout, delete it. - # Also check if the parent directory is empty, and delete that - # as well, if it is. - EditorAssetLibrary.delete_directory(ppath) - - parent = os.path.dirname(str(ppath)) - parent_content = EditorAssetLibrary.list_assets( - parent, recursive=False, include_folder=True - ) - - if len(parent_content) == 0: - EditorAssetLibrary.delete_directory(parent) - else: - # If the asset is in the new layout, search the instances in - # the JSON file, and create actors for them. 
- - actors_dict = {} - skeleton_dict = {} - - for element in data: - reference = element.get('reference_fbx') - instance_name = element.get('instance_name') - - skeleton = None - - if reference == ref and ref not in loaded: - loaded.append(ref) - - family = element.get('family') - - assets = EditorAssetLibrary.list_assets( - ppath, recursive=True, include_folder=False) - - instances = [ - item for item in data - if item.get('reference_fbx') == reference] - - for instance in instances: - transform = instance.get('transform') - inst = instance.get('instance_name') - - actors = [] - - if family == 'model': - actors = self._process_family( - assets, 'StaticMesh', transform, inst) - elif family == 'rig': - actors = self._process_family( - assets, 'SkeletalMesh', transform, inst) - actors_dict[inst] = actors - - if family == 'rig': - # Finds skeleton among the imported assets - for asset in assets: - obj = ar.get_asset_by_object_path( - asset).get_asset() - if obj.get_class().get_name() == 'Skeleton': - skeleton = obj - if skeleton: - break - - if skeleton: - skeleton_dict[reference] = skeleton - else: - skeleton = skeleton_dict.get(reference) - - animation_file = element.get('animation') - - if animation_file and skeleton: - self._import_animation( - destination_path, lib_path, - instance_name, skeleton, - actors_dict, animation_file) - - self._process(source_path, destination_path, loaded) - - container_path = "{}/{}".format(container["namespace"], - container["objectName"]) - # update metadata + data = { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]), + "loaded_assets": loaded_assets + } unreal_pipeline.imprint( - container_path, - { - "representation": str(representation["_id"]), - "parent": str(representation["parent"]) - }) + "{}/{}".format(asset_dir, container.get('container_name')), data) + + EditorLevelLibrary.save_current_level() asset_content = EditorAssetLibrary.list_assets( - destination_path, recursive=True, 
include_folder=False) + asset_dir, recursive=True, include_folder=False) for a in asset_content: EditorAssetLibrary.save_asset(a) + EditorLevelLibrary.load_level(master_level) + def remove(self, container): """ - First, destroy all actors of the assets to be removed. Then, deletes - the asset's directory. + Delete the layout. First, check if the assets loaded with the layout + are used by other layouts. If not, delete the assets. """ - path = container["namespace"] - parent_path = os.path.dirname(path) + path = Path(container.get("namespace")) - self._remove_actors(path) + containers = unreal_pipeline.ls() + layout_containers = [ + c for c in containers + if (c.get('asset_name') != container.get('asset_name') and + c.get('family') == "layout")] - EditorAssetLibrary.delete_directory(path) + # Check if the assets have been loaded by other layouts, and deletes + # them if they haven't. + for asset in container.get('loaded_assets'): + layouts = [ + lc for lc in layout_containers + if asset in lc.get('loaded_assets')] + if len(layouts) == 0: + EditorAssetLibrary.delete_directory(str(Path(asset).parent)) + + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to find + # the level sequence. 
+ root = "/Game/OpenPype" + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + ar = unreal.AssetRegistryHelpers.get_asset_registry() + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(filter) + master_sequence = sequences[0].get_asset() + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + levels = ar.get_assets(filter) + master_level = levels[0].get_editor_property('object_path') + + sequences = [master_sequence] + + parent = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + visibility_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + if (t.get_class() == + unreal.MovieSceneLevelVisibilityTrack.static_class()): + visibility_track = t + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if ss.get_sequence().get_name() == container.get('asset'): + parent = s + subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. + i = 0 + for ss in sections: + ss.set_row_index(i) + i += 1 + + if visibility_track: + sections = visibility_track.get_sections() + for ss in sections: + if (unreal.Name(f"{container.get('asset')}_map") + in ss.get_level_names()): + visibility_track.remove_section(ss) + # Update visibility sections indexes. + i = -1 + prev_name = [] + for ss in sections: + if prev_name != ss.get_level_names(): + i += 1 + ss.set_row_index(i) + prev_name = ss.get_level_names() + if parent: + break + + assert parent, "Could not find the parent sequence" + + # Create a temporary level to delete the layout level. 
+ EditorLevelLibrary.save_all_dirty_levels() + EditorAssetLibrary.make_directory(f"{root}/tmp") + tmp_level = f"{root}/tmp/temp_map" + if not EditorAssetLibrary.does_asset_exist(f"{tmp_level}.temp_map"): + EditorLevelLibrary.new_level(tmp_level) + else: + EditorLevelLibrary.load_level(tmp_level) + + # Delete the layout directory. + EditorAssetLibrary.delete_directory(str(path)) + + EditorLevelLibrary.load_level(master_level) + EditorAssetLibrary.delete_directory(f"{root}/tmp") + + EditorLevelLibrary.save_current_level() + + # Delete the parent folder if there aren't any more layouts in it. asset_content = EditorAssetLibrary.list_assets( - parent_path, recursive=False, include_folder=True + str(path.parent), recursive=False, include_folder=True ) if len(asset_content) == 0: - EditorAssetLibrary.delete_directory(parent_path) + EditorAssetLibrary.delete_directory(str(path.parent)) diff --git a/openpype/hosts/unreal/plugins/load/load_rig.py b/openpype/hosts/unreal/plugins/load/load_rig.py index ff844a5e94..c27bd23aaf 100644 --- a/openpype/hosts/unreal/plugins/load/load_rig.py +++ b/openpype/hosts/unreal/plugins/load/load_rig.py @@ -52,54 +52,55 @@ class SkeletalMeshFBXLoader(plugin.Loader): asset_name = "{}_{}".format(asset, name) else: asset_name = "{}".format(name) + version = context.get('version').get('name') tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}_v{version:03d}", suffix="") container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir): + unreal.EditorAssetLibrary.make_directory(asset_dir) - task = unreal.AssetImportTask() + task = unreal.AssetImportTask() - task.set_editor_property('filename', self.fname) - task.set_editor_property('destination_path', asset_dir) - task.set_editor_property('destination_name', asset_name) - 
task.set_editor_property('replace_existing', False) - task.set_editor_property('automated', True) - task.set_editor_property('save', False) + task.set_editor_property('filename', self.fname) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', False) + task.set_editor_property('automated', True) + task.set_editor_property('save', False) - # set import options here - options = unreal.FbxImportUI() - options.set_editor_property('import_as_skeletal', True) - options.set_editor_property('import_animations', False) - options.set_editor_property('import_mesh', True) - options.set_editor_property('import_materials', True) - options.set_editor_property('import_textures', True) - options.set_editor_property('skeleton', None) - options.set_editor_property('create_physics_asset', False) + # set import options here + options = unreal.FbxImportUI() + options.set_editor_property('import_as_skeletal', True) + options.set_editor_property('import_animations', False) + options.set_editor_property('import_mesh', True) + options.set_editor_property('import_materials', False) + options.set_editor_property('import_textures', False) + options.set_editor_property('skeleton', None) + options.set_editor_property('create_physics_asset', False) - options.set_editor_property('mesh_type_to_import', - unreal.FBXImportType.FBXIT_SKELETAL_MESH) + options.set_editor_property( + 'mesh_type_to_import', + unreal.FBXImportType.FBXIT_SKELETAL_MESH) - options.skeletal_mesh_import_data.set_editor_property( - 'import_content_type', - unreal.FBXImportContentType.FBXICT_ALL - ) - # set to import normals, otherwise Unreal will compute them - # and it will take a long time, depending on the size of the mesh - options.skeletal_mesh_import_data.set_editor_property( - 'normal_import_method', - unreal.FBXNormalImportMethod.FBXNIM_IMPORT_NORMALS - ) + options.skeletal_mesh_import_data.set_editor_property( 
+ 'import_content_type', + unreal.FBXImportContentType.FBXICT_ALL) + # set to import normals, otherwise Unreal will compute them + # and it will take a long time, depending on the size of the mesh + options.skeletal_mesh_import_data.set_editor_property( + 'normal_import_method', + unreal.FBXNormalImportMethod.FBXNIM_IMPORT_NORMALS) - task.options = options - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + task.options = options + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - # Create Asset Container - unreal_pipeline.create_container( - container=container_name, path=asset_dir) + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) data = { "schema": "openpype:container-2.0", diff --git a/openpype/hosts/unreal/plugins/publish/collect_instances.py b/openpype/hosts/unreal/plugins/publish/collect_instances.py index 94e732d728..2f604cb322 100644 --- a/openpype/hosts/unreal/plugins/publish/collect_instances.py +++ b/openpype/hosts/unreal/plugins/publish/collect_instances.py @@ -17,7 +17,7 @@ class CollectInstances(pyblish.api.ContextPlugin): """ label = "Collect Instances" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.1 hosts = ["unreal"] def process(self, context): diff --git a/openpype/hosts/unreal/plugins/publish/collect_remove_marked.py b/openpype/hosts/unreal/plugins/publish/collect_remove_marked.py new file mode 100644 index 0000000000..69e69f6630 --- /dev/null +++ b/openpype/hosts/unreal/plugins/publish/collect_remove_marked.py @@ -0,0 +1,24 @@ +import pyblish.api + + +class CollectRemoveMarked(pyblish.api.ContextPlugin): + """Remove marked data + + Remove instances that have 'remove' in their instance.data + + """ + + order = pyblish.api.CollectorOrder + 0.499 + label = 'Remove Marked Instances' + + def process(self, context): + + self.log.debug(context) + # make ftrack publishable + instances_to_remove = 
[] + for instance in context: + if instance.data.get('remove'): + instances_to_remove.append(instance) + + for instance in instances_to_remove: + context.remove(instance) diff --git a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py new file mode 100644 index 0000000000..9fb45ea7a7 --- /dev/null +++ b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py @@ -0,0 +1,112 @@ +import os +from pathlib import Path + +import unreal + +from openpype.api import Anatomy +from openpype.hosts.unreal.api import pipeline +import pyblish.api + + +class CollectRenderInstances(pyblish.api.InstancePlugin): + """ This collector will try to find all the rendered frames. + + """ + order = pyblish.api.CollectorOrder + hosts = ["unreal"] + families = ["render"] + label = "Collect Render Instances" + + def process(self, instance): + self.log.debug("Preparing Rendering Instances") + + context = instance.context + + data = instance.data + data['remove'] = True + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + sequence = ar.get_asset_by_object_path( + data.get('sequence')).get_asset() + + sequences = [{ + "sequence": sequence, + "output": data.get('output'), + "frame_range": ( + data.get('frameStart'), data.get('frameEnd')) + }] + + for s in sequences: + self.log.debug(f"Processing: {s.get('sequence').get_name()}") + subscenes = pipeline.get_subsequences(s.get('sequence')) + + if subscenes: + for ss in subscenes: + sequences.append({ + "sequence": ss.get_sequence(), + "output": (f"{s.get('output')}/" + f"{ss.get_sequence().get_name()}"), + "frame_range": ( + ss.get_start_frame(), ss.get_end_frame() - 1) + }) + else: + # Avoid creating instances for camera sequences + if "_camera" not in s.get('sequence').get_name(): + seq = s.get('sequence') + seq_name = seq.get_name() + + new_instance = context.create_instance( + f"{data.get('subset')}_" + f"{seq_name}") + new_instance[:] = seq_name 
+ + new_data = new_instance.data + + new_data["asset"] = seq_name + new_data["setMembers"] = seq_name + new_data["family"] = "render" + new_data["families"] = ["render", "review"] + new_data["parent"] = data.get("parent") + new_data["subset"] = f"{data.get('subset')}_{seq_name}" + new_data["level"] = data.get("level") + new_data["output"] = s.get('output') + new_data["fps"] = seq.get_display_rate().numerator + new_data["frameStart"] = s.get('frame_range')[0] + new_data["frameEnd"] = s.get('frame_range')[1] + new_data["sequence"] = seq.get_path_name() + new_data["master_sequence"] = data["master_sequence"] + new_data["master_level"] = data["master_level"] + + self.log.debug(f"new instance data: {new_data}") + + try: + project = os.environ.get("AVALON_PROJECT") + anatomy = Anatomy(project) + root = anatomy.roots['renders'] + except Exception: + raise Exception( + "Could not find render root in anatomy settings.") + + render_dir = f"{root}/{project}/{s.get('output')}" + render_path = Path(render_dir) + + frames = [] + + for x in render_path.iterdir(): + if x.is_file() and x.suffix == '.png': + frames.append(str(x.name)) + + if "representations" not in new_instance.data: + new_instance.data["representations"] = [] + + repr = { + 'frameStart': s.get('frame_range')[0], + 'frameEnd': s.get('frame_range')[1], + 'name': 'png', + 'ext': 'png', + 'files': frames, + 'stagingDir': render_dir, + 'tags': ['review'] + } + new_instance.data["representations"].append(repr) diff --git a/openpype/hosts/unreal/plugins/publish/extract_layout.py b/openpype/hosts/unreal/plugins/publish/extract_layout.py index f34a47b89f..87e6693a97 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_layout.py +++ b/openpype/hosts/unreal/plugins/publish/extract_layout.py @@ -10,7 +10,7 @@ from unreal import EditorLevelLibrary as ell from unreal import EditorAssetLibrary as eal import openpype.api -from avalon import io +from openpype.pipeline import legacy_io class 
ExtractLayout(openpype.api.Extractor): @@ -61,7 +61,7 @@ class ExtractLayout(openpype.api.Extractor): family = eal.get_metadata_tag(asset_container, "family") self.log.info("Parent: {}".format(parent)) - blend = io.find_one( + blend = legacy_io.find_one( { "type": "representation", "parent": ObjectId(parent), diff --git a/openpype/hosts/unreal/plugins/publish/extract_render.py b/openpype/hosts/unreal/plugins/publish/extract_render.py new file mode 100644 index 0000000000..37fe7e916f --- /dev/null +++ b/openpype/hosts/unreal/plugins/publish/extract_render.py @@ -0,0 +1,48 @@ +from pathlib import Path + +import unreal + +import openpype.api + + +class ExtractRender(openpype.api.Extractor): + """Extract render.""" + + label = "Extract Render" + hosts = ["unreal"] + families = ["render"] + optional = True + + def process(self, instance): + # Define extract output file path + stagingdir = self.staging_dir(instance) + + # Perform extraction + self.log.info("Performing extraction..") + + # Get the render output directory + project_dir = unreal.Paths.project_dir() + render_dir = (f"{project_dir}/Saved/MovieRenders/" + f"{instance.data['subset']}") + + assert unreal.Paths.directory_exists(render_dir), \ + "Render directory does not exist" + + render_path = Path(render_dir) + + frames = [] + + for x in render_path.iterdir(): + if x.is_file() and x.suffix == '.png': + frames.append(str(x)) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + render_representation = { + 'name': 'png', + 'ext': 'png', + 'files': frames, + "stagingDir": stagingdir, + } + instance.data["representations"].append(render_representation) diff --git a/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py b/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py new file mode 100644 index 0000000000..87f1338ee8 --- /dev/null +++ b/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py @@ -0,0 +1,41 @@ +import clique + +import 
pyblish.api + + +class ValidateSequenceFrames(pyblish.api.InstancePlugin): + """Ensure the sequence of frames is complete + + The files found in the folder are checked against the frameStart and + frameEnd of the instance. If the first or last file is not + corresponding with the first or last frame it is flagged as invalid. + """ + + order = pyblish.api.ValidatorOrder + label = "Validate Sequence Frames" + families = ["render"] + hosts = ["unreal"] + optional = True + + def process(self, instance): + representations = instance.data.get("representations") + for repr in representations: + patterns = [clique.PATTERNS["frames"]] + collections, remainder = clique.assemble( + repr["files"], minimum_items=1, patterns=patterns) + + assert not remainder, "Must not have remainder" + assert len(collections) == 1, "Must detect single collection" + collection = collections[0] + frames = list(collection.indexes) + + current_range = (frames[0], frames[-1]) + required_range = (instance.data["frameStart"], + instance.data["frameEnd"]) + + if current_range != required_range: + raise ValueError(f"Invalid frame range: {current_range} - " + f"expected: {required_range}") + + missing = collection.holes().indexes + assert not missing, "Missing frames: %s" % (missing,) diff --git a/openpype/hosts/webpublisher/api/__init__.py b/openpype/hosts/webpublisher/api/__init__.py index dbeb628073..18e3a16cf5 100644 --- a/openpype/hosts/webpublisher/api/__init__.py +++ b/openpype/hosts/webpublisher/api/__init__.py @@ -1,10 +1,9 @@ import os import logging -from avalon import api as avalon -from avalon import io from pyblish import api as pyblish import openpype.hosts.webpublisher +from openpype.pipeline import legacy_io log = logging.getLogger("openpype.hosts.webpublisher") @@ -20,7 +19,7 @@ def install(): pyblish.register_plugin_path(PUBLISH_PATH) log.info(PUBLISH_PATH) - io.install() + legacy_io.install() def uninstall(): diff --git 
a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py index ca14538d7d..9ff779636a 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py @@ -1,18 +1,24 @@ -"""Loads batch context from json and continues in publish process. +"""Parses batch context from json and continues in publish process. Provides: context -> Loaded batch file. + - asset + - task (task name) + - taskType + - project_name + - variant """ import os import pyblish.api -from avalon import io + from openpype.lib.plugin_tools import ( parse_json, get_batch_asset_task_info ) from openpype.lib.remote_publish import get_webpublish_conn, IN_PROGRESS_STATUS +from openpype.pipeline import legacy_io class CollectBatchData(pyblish.api.ContextPlugin): @@ -24,7 +30,7 @@ class CollectBatchData(pyblish.api.ContextPlugin): # must be really early, context values are only in json file order = pyblish.api.CollectorOrder - 0.495 label = "Collect batch data" - host = ["webpublisher"] + hosts = ["webpublisher"] def process(self, context): batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") @@ -52,14 +58,15 @@ class CollectBatchData(pyblish.api.ContextPlugin): ) os.environ["AVALON_ASSET"] = asset_name - io.Session["AVALON_ASSET"] = asset_name + legacy_io.Session["AVALON_ASSET"] = asset_name os.environ["AVALON_TASK"] = task_name - io.Session["AVALON_TASK"] = task_name + legacy_io.Session["AVALON_TASK"] = task_name context.data["asset"] = asset_name context.data["task"] = task_name context.data["taskType"] = task_type context.data["project_name"] = project_name + context.data["variant"] = batch_data["variant"] self._set_ctx_path(batch_data) diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py index 65cef14703..bdd3caccfd 100644 --- 
a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py @@ -12,7 +12,6 @@ import clique import tempfile import math -from avalon import io import pyblish.api from openpype.lib import ( prepare_template_data, @@ -24,6 +23,7 @@ from openpype.lib.plugin_tools import ( parse_json, get_subset_name_with_asset_doc ) +from openpype.pipeline import legacy_io class CollectPublishedFiles(pyblish.api.ContextPlugin): @@ -40,7 +40,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): # must be really early, context values are only in json file order = pyblish.api.CollectorOrder - 0.490 label = "Collect rendered frames" - host = ["webpublisher"] + hosts = ["webpublisher"] targets = ["filespublish"] # from Settings @@ -61,6 +61,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): task_name = context.data["task"] task_type = context.data["taskType"] project_name = context.data["project_name"] + variant = context.data["variant"] for task_dir in task_subfolders: task_data = parse_json(os.path.join(task_dir, "manifest.json")) @@ -76,7 +77,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): extension.replace(".", '')) subset_name = get_subset_name_with_asset_doc( - family, task_data["variant"], task_name, asset_doc, + family, variant, task_name, asset_doc, project_name=project_name, host_name="webpublisher" ) version = self._get_last_version(asset_name, subset_name) + 1 @@ -108,15 +109,18 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): instance.data["representations"] = self._get_single_repre( task_dir, task_data["files"], tags ) - file_url = os.path.join(task_dir, task_data["files"][0]) - no_of_frames = self._get_number_of_frames(file_url) - if no_of_frames: + if family != 'workfile': + file_url = os.path.join(task_dir, task_data["files"][0]) try: - frame_end = int(frame_start) + math.ceil(no_of_frames) - instance.data["frameEnd"] = math.ceil(frame_end) - 1 
- self.log.debug("frameEnd:: {}".format( - instance.data["frameEnd"])) - except ValueError: + no_of_frames = self._get_number_of_frames(file_url) + if no_of_frames: + frame_end = int(frame_start) + \ + math.ceil(no_of_frames) + frame_end = math.ceil(frame_end) - 1 + instance.data["frameEnd"] = frame_end + self.log.debug("frameEnd:: {}".format( + instance.data["frameEnd"])) + except Exception: self.log.warning("Unable to count frames " "duration {}".format(no_of_frames)) @@ -209,7 +213,6 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): msg = "No family found for combination of " +\ "task_type: {}, is_sequence:{}, extension: {}".format( task_type, is_sequence, extension) - found_family = "render" assert found_family, msg return (found_family, @@ -259,7 +262,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): } } ] - version = list(io.aggregate(query)) + version = list(legacy_io.aggregate(query)) if version: return version[0].get("version") or 0 diff --git a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py index cb6ed8481c..a56521891b 100644 --- a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py @@ -8,7 +8,7 @@ from openpype.lib import ( run_subprocess, get_transcode_temp_directory, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, should_convert_for_ffmpeg ) @@ -59,11 +59,9 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): if do_convert: convert_dir = get_transcode_temp_directory() filename = os.path.basename(full_input_path) - convert_for_ffmpeg( - full_input_path, + convert_input_paths_for_ffmpeg( + [full_input_path], convert_dir, - None, - None, self.log ) full_input_path = os.path.join(convert_dir, filename) diff --git a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py index 
1f9089aa27..e82ba7f2b8 100644 --- a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py +++ b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py @@ -7,18 +7,20 @@ import collections from aiohttp.web_response import Response import subprocess -from avalon.api import AvalonMongoDB - -from openpype.lib import OpenPypeMongoConnection -from openpype_modules.avalon_apps.rest_api import _RestApiEndpoint -from openpype.settings import get_project_settings - -from openpype.lib import PypeLogger +from openpype.lib import ( + OpenPypeMongoConnection, + PypeLogger, +) from openpype.lib.remote_publish import ( get_task_data, ERROR_STATUS, REPROCESS_STATUS ) +from openpype.pipeline import AvalonMongoDB +from openpype_modules.avalon_apps.rest_api import _RestApiEndpoint +from openpype.settings import get_project_settings + + log = PypeLogger.get_logger("WebServer") diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index e8b6d18f4e..3c1d71ecd5 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -42,12 +42,12 @@ from .attribute_definitions import ( EnumDef, BoolDef, FileDef, + FileDefItem, ) from .env_tools import ( env_value_to_bool, get_paths_from_environ, - get_global_environments ) from .terminal import Terminal @@ -105,6 +105,7 @@ from .transcoding import ( get_transcode_temp_directory, should_convert_for_ffmpeg, convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, get_ffprobe_data, get_ffprobe_streams, get_ffmpeg_codec_args, @@ -221,6 +222,12 @@ from .openpype_version import ( is_current_version_higher_than_expected ) + +from .connections import ( + requests_get, + requests_post +) + terminal = Terminal __all__ = [ @@ -241,7 +248,6 @@ __all__ = [ "env_value_to_bool", "get_paths_from_environ", - "get_global_environments", "get_vendor_bin_path", "get_oiio_tools_path", @@ -260,6 +266,7 @@ __all__ = [ "EnumDef", "BoolDef", "FileDef", + "FileDefItem", "import_filepath", "modules_from_path", @@ -270,6 +277,7 @@ 
__all__ = [ "get_transcode_temp_directory", "should_convert_for_ffmpeg", "convert_for_ffmpeg", + "convert_input_paths_for_ffmpeg", "get_ffprobe_data", "get_ffprobe_streams", "get_ffmpeg_codec_args", @@ -390,4 +398,7 @@ __all__ = [ "is_running_from_build", "is_running_staging", "is_current_version_studio_latest", + + "requests_get", + "requests_post" ] diff --git a/openpype/lib/abstract_collect_render.py b/openpype/lib/abstract_collect_render.py index 7c768e280c..3d81f6d794 100644 --- a/openpype/lib/abstract_collect_render.py +++ b/openpype/lib/abstract_collect_render.py @@ -9,9 +9,10 @@ from abc import abstractmethod import attr import six -from avalon import api import pyblish.api +from openpype.pipeline import legacy_io + from .abstract_metaplugins import AbstractMetaContextPlugin @@ -30,6 +31,7 @@ class RenderInstance(object): source = attr.ib() # path to source scene file label = attr.ib() # label to show in GUI subset = attr.ib() # subset name + task = attr.ib() # task name asset = attr.ib() # asset name (AVALON_ASSET) attachTo = attr.ib() # subset name to attach render to setMembers = attr.ib() # list of nodes/members producing render output @@ -127,7 +129,7 @@ class AbstractCollectRender(pyblish.api.ContextPlugin): """Constructor.""" super(AbstractCollectRender, self).__init__(*args, **kwargs) self._file_path = None - self._asset = api.Session["AVALON_ASSET"] + self._asset = legacy_io.Session["AVALON_ASSET"] self._context = None def process(self, context): @@ -138,7 +140,9 @@ class AbstractCollectRender(pyblish.api.ContextPlugin): try: if "workfile" in instance.data["families"]: instance.data["publish"] = True - if "renderFarm" in instance.data["families"]: + # TODO merge renderFarm and render.farm + if ("renderFarm" in instance.data["families"] or + "render.farm" in instance.data["families"]): instance.data["remove"] = True except KeyError: # be tolerant if 'families' is missing. 
diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index 5821c863d7..6ade33b59c 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -13,7 +13,8 @@ import six from openpype.settings import ( get_system_settings, - get_project_settings + get_project_settings, + get_local_settings ) from openpype.settings.constants import ( METADATA_KEYS, @@ -1011,8 +1012,8 @@ class ApplicationLaunchContext: self.log.debug("Discovery of launch hooks started.") paths = self.paths_to_launch_hooks() - self.log.debug("Paths where will look for launch hooks:{}".format( - "\n- ".join(paths) + self.log.debug("Paths searched for launch hooks:\n{}".format( + "\n".join("- {}".format(path) for path in paths) )) all_classes = { @@ -1022,7 +1023,7 @@ class ApplicationLaunchContext: for path in paths: if not os.path.exists(path): self.log.info( - "Path to launch hooks does not exists: \"{}\"".format(path) + "Path to launch hooks does not exist: \"{}\"".format(path) ) continue @@ -1043,13 +1044,14 @@ class ApplicationLaunchContext: hook = klass(self) if not hook.is_valid: self.log.debug( - "Hook is not valid for current launch context." + "Skipped hook invalid for current launch context: " + "{}".format(klass.__name__) ) continue if inspect.isabstract(hook): self.log.debug("Skipped abstract hook: {}".format( - str(hook) + klass.__name__ )) continue @@ -1061,7 +1063,8 @@ class ApplicationLaunchContext: except Exception: self.log.warning( - "Initialization of hook failed. {}".format(str(klass)), + "Initialization of hook failed: " + "{}".format(klass.__name__), exc_info=True ) @@ -1272,6 +1275,9 @@ class EnvironmentPrepData(dict): if data.get("env") is None: data["env"] = os.environ.copy() + if "system_settings" not in data: + data["system_settings"] = get_system_settings() + super(EnvironmentPrepData, self).__init__(data) @@ -1291,7 +1297,7 @@ def get_app_environments_for_context( Returns: dict: Environments for passed context and application. 
""" - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB # Avalon database connection dbcon = AvalonMongoDB() @@ -1395,8 +1401,27 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): app = data["app"] log = data["log"] + source_env = data["env"].copy() - _add_python_version_paths(app, data["env"], log) + _add_python_version_paths(app, source_env, log) + + # Use environments from local settings + filtered_local_envs = {} + system_settings = data["system_settings"] + whitelist_envs = system_settings["general"].get("local_env_white_list") + if whitelist_envs: + local_settings = get_local_settings() + local_envs = local_settings.get("environments") or {} + filtered_local_envs = { + key: value + for key, value in local_envs.items() + if key in whitelist_envs + } + + # Apply local environment variables for already existing values + for key, value in filtered_local_envs.items(): + if key in source_env: + source_env[key] = value # `added_env_keys` has debug purpose added_env_keys = {app.group.name, app.name} @@ -1441,10 +1466,19 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): # Choose right platform tool_env = parse_environments(_env_values, env_group) + + # Apply local environment variables + # - must happen between all values because they may be used during + # merge + for key, value in filtered_local_envs.items(): + if key in tool_env: + tool_env[key] = value + # Merge dictionaries env_values = _merge_env(tool_env, env_values) - merged_env = _merge_env(env_values, data["env"]) + merged_env = _merge_env(env_values, source_env) + loaded_env = acre.compute(merged_env, cleanup=False) final_env = None @@ -1464,7 +1498,7 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): if final_env is None: final_env = loaded_env - keys_to_remove = set(data["env"].keys()) - set(final_env.keys()) + keys_to_remove = set(source_env.keys()) - set(final_env.keys()) # Update 
env data["env"].update(final_env) @@ -1611,7 +1645,6 @@ def _prepare_last_workfile(data, workdir): result will be stored. workdir (str): Path to folder where workfiles should be stored. """ - import avalon.api from openpype.pipeline import HOST_WORKFILE_EXTENSIONS log = data["log"] diff --git a/openpype/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py index 189a5e7acd..a1f7c1e0f4 100644 --- a/openpype/lib/attribute_definitions.py +++ b/openpype/lib/attribute_definitions.py @@ -1,8 +1,12 @@ +import os import re import collections import uuid +import json from abc import ABCMeta, abstractmethod + import six +import clique class AbstractAttrDefMeta(ABCMeta): @@ -302,12 +306,230 @@ class BoolDef(AbtractAttrDef): return self.default +class FileDefItem(object): + def __init__( + self, directory, filenames, frames=None, template=None + ): + self.directory = directory + + self.filenames = [] + self.is_sequence = False + self.template = None + self.frames = [] + self.is_empty = True + + self.set_filenames(filenames, frames, template) + + def __str__(self): + return json.dumps(self.to_dict()) + + def __repr__(self): + if self.is_empty: + filename = "< empty >" + elif self.is_sequence: + filename = self.template + else: + filename = self.filenames[0] + + return "<{}: \"{}\">".format( + self.__class__.__name__, + os.path.join(self.directory, filename) + ) + + @property + def label(self): + if self.is_empty: + return None + + if not self.is_sequence: + return self.filenames[0] + + frame_start = self.frames[0] + filename_template = os.path.basename(self.template) + if len(self.frames) == 1: + return "{} [{}]".format(filename_template, frame_start) + + frame_end = self.frames[-1] + expected_len = (frame_end - frame_start) + 1 + if expected_len == len(self.frames): + return "{} [{}-{}]".format( + filename_template, frame_start, frame_end + ) + + ranges = [] + _frame_start = None + _frame_end = None + for frame in range(frame_start, frame_end + 1): + if frame 
not in self.frames: + add_to_ranges = _frame_start is not None + elif _frame_start is None: + _frame_start = _frame_end = frame + add_to_ranges = frame == frame_end + else: + _frame_end = frame + add_to_ranges = frame == frame_end + + if add_to_ranges: + if _frame_start != _frame_end: + _range = "{}-{}".format(_frame_start, _frame_end) + else: + _range = str(_frame_start) + ranges.append(_range) + _frame_start = _frame_end = None + return "{} [{}]".format( + filename_template, ",".join(ranges) + ) + + def split_sequence(self): + if not self.is_sequence: + raise ValueError("Cannot split single file item") + + paths = [ + os.path.join(self.directory, filename) + for filename in self.filenames + ] + return self.from_paths(paths, False) + + @property + def ext(self): + if self.is_empty: + return None + _, ext = os.path.splitext(self.filenames[0]) + if ext: + return ext + return None + + @property + def is_dir(self): + if self.is_empty: + return False + + # QUESTION a better way how to define folder (in init argument?) + if self.ext: + return False + return True + + def set_directory(self, directory): + self.directory = directory + + def set_filenames(self, filenames, frames=None, template=None): + if frames is None: + frames = [] + is_sequence = False + if frames: + is_sequence = True + + if is_sequence and not template: + raise ValueError("Missing template for sequence") + + self.is_empty = len(filenames) == 0 + self.filenames = filenames + self.template = template + self.frames = frames + self.is_sequence = is_sequence + + @classmethod + def create_empty_item(cls): + return cls("", "") + + @classmethod + def from_value(cls, value, allow_sequences): + """Convert passed value to FileDefItem objects. + + Returns: + list: Created FileDefItem objects. 
+ """ + + # Convert single item to iterable + if not isinstance(value, (list, tuple, set)): + value = [value] + + output = [] + str_filepaths = [] + for item in value: + if isinstance(item, dict): + item = cls.from_dict(item) + + if isinstance(item, FileDefItem): + if not allow_sequences and item.is_sequence: + output.extend(item.split_sequence()) + else: + output.append(item) + + elif isinstance(item, six.string_types): + str_filepaths.append(item) + else: + raise TypeError( + "Unknown type \"{}\". Can't convert to {}".format( + str(type(item)), cls.__name__ + ) + ) + + if str_filepaths: + output.extend(cls.from_paths(str_filepaths, allow_sequences)) + + return output + + @classmethod + def from_dict(cls, data): + return cls( + data["directory"], + data["filenames"], + data.get("frames"), + data.get("template") + ) + + @classmethod + def from_paths(cls, paths, allow_sequences): + filenames_by_dir = collections.defaultdict(list) + for path in paths: + normalized = os.path.normpath(path) + directory, filename = os.path.split(normalized) + filenames_by_dir[directory].append(filename) + + output = [] + for directory, filenames in filenames_by_dir.items(): + if allow_sequences: + cols, remainders = clique.assemble(filenames) + else: + cols = [] + remainders = filenames + + for remainder in remainders: + output.append(cls(directory, [remainder])) + + for col in cols: + frames = list(col.indexes) + paths = [filename for filename in col] + template = col.format("{head}{padding}{tail}") + + output.append(cls( + directory, paths, frames, template + )) + + return output + + def to_dict(self): + output = { + "is_sequence": self.is_sequence, + "directory": self.directory, + "filenames": list(self.filenames), + } + if self.is_sequence: + output.update({ + "template": self.template, + "frames": list(sorted(self.frames)), + }) + + return output + + class FileDef(AbtractAttrDef): """File definition. 
It is possible to define filters of allowed file extensions and if supports folders. Args: - multipath(bool): Allow multiple path. + single_item(bool): Allow only single path item. folders(bool): Allow folder paths. extensions(list): Allow files with extensions. Empty list will allow all extensions and None will disable files completely. @@ -315,44 +537,47 @@ class FileDef(AbtractAttrDef): """ def __init__( - self, key, multipath=False, folders=None, extensions=None, - default=None, **kwargs + self, key, single_item=True, folders=None, extensions=None, + allow_sequences=True, default=None, **kwargs ): if folders is None and extensions is None: folders = True extensions = [] if default is None: - if multipath: - default = [] + if single_item: + default = FileDefItem.create_empty_item().to_dict() else: - default = "" + default = [] else: - if multipath: + if single_item: + if isinstance(default, dict): + FileDefItem.from_dict(default) + + elif isinstance(default, six.string_types): + default = FileDefItem.from_paths([default.strip()])[0] + + else: + raise TypeError(( + "'default' argument must be 'str' or 'dict' not '{}'" + ).format(type(default))) + + else: if not isinstance(default, (tuple, list, set)): raise TypeError(( "'default' argument must be 'list', 'tuple' or 'set'" ", not '{}'" ).format(type(default))) - else: - if not isinstance(default, six.string_types): - raise TypeError(( - "'default' argument must be 'str' not '{}'" - ).format(type(default))) - default = default.strip() - # Change horizontal label is_label_horizontal = kwargs.get("is_label_horizontal") if is_label_horizontal is None: - is_label_horizontal = True - if multipath: - is_label_horizontal = False - kwargs["is_label_horizontal"] = is_label_horizontal + kwargs["is_label_horizontal"] = False - self.multipath = multipath + self.single_item = single_item self.folders = folders - self.extensions = extensions + self.extensions = set(extensions) + self.allow_sequences = allow_sequences 
super(FileDef, self).__init__(key, default=default, **kwargs) def __eq__(self, other): @@ -360,30 +585,43 @@ class FileDef(AbtractAttrDef): return False return ( - self.multipath == other.multipath + self.single_item == other.single_item and self.folders == other.folders and self.extensions == other.extensions + and self.allow_sequences == other.allow_sequences ) def convert_value(self, value): - if isinstance(value, six.string_types): - if self.multipath: - value = [value.strip()] - else: - value = value.strip() - return value + if isinstance(value, six.string_types) or isinstance(value, dict): + value = [value] if isinstance(value, (tuple, list, set)): - _value = [] + string_paths = [] + dict_items = [] for item in value: if isinstance(item, six.string_types): - _value.append(item.strip()) + string_paths.append(item.strip()) + elif isinstance(item, dict): + try: + FileDefItem.from_dict(item) + dict_items.append(item) + except (ValueError, KeyError): + pass - if self.multipath: - return _value + if string_paths: + file_items = FileDefItem.from_paths(string_paths) + dict_items.extend([ + file_item.to_dict() + for file_item in file_items + ]) - if not _value: + if not self.single_item: + return dict_items + + if not dict_items: return self.default - return _value[0].strip() + return dict_items[0] - return str(value).strip() + if self.single_item: + return FileDefItem.create_empty_item().to_dict() + return [] diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 0348d88be2..9d8a92cfe9 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -20,9 +20,7 @@ from .profiles_filtering import filter_profiles from .events import emit_event from .path_templates import StringTemplate -# avalon module is not imported at the top -# - may not be in path at the time of pype.lib initialization -avalon = None +legacy_io = None log = logging.getLogger("AvalonContext") @@ -64,8 +62,8 @@ def create_project( """ from 
openpype.settings import ProjectSettings, SaveWarningExc - from avalon.api import AvalonMongoDB - from avalon.schema import validate + from openpype.pipeline import AvalonMongoDB + from openpype.pipeline.schema import validate if dbcon is None: dbcon = AvalonMongoDB() @@ -120,17 +118,17 @@ def create_project( return project_doc -def with_avalon(func): +def with_pipeline_io(func): @functools.wraps(func) - def wrap_avalon(*args, **kwargs): - global avalon - if avalon is None: - import avalon + def wrapped(*args, **kwargs): + global legacy_io + if legacy_io is None: + from openpype.pipeline import legacy_io return func(*args, **kwargs) - return wrap_avalon + return wrapped -@with_avalon +@with_pipeline_io def is_latest(representation): """Return whether the representation is from latest version @@ -142,12 +140,12 @@ def is_latest(representation): """ - version = avalon.io.find_one({"_id": representation['parent']}) + version = legacy_io.find_one({"_id": representation['parent']}) if version["type"] == "hero_version": return True # Get highest version under the parent - highest_version = avalon.io.find_one({ + highest_version = legacy_io.find_one({ "type": "version", "parent": version["parent"] }, sort=[("name", -1)], projection={"name": True}) @@ -158,18 +156,19 @@ def is_latest(representation): return False -@with_avalon +@with_pipeline_io def any_outdated(): """Return whether the current scene has any outdated content""" + from openpype.pipeline import registered_host checked = set() - host = avalon.api.registered_host() + host = registered_host() for container in host.ls(): representation = container['representation'] if representation in checked: continue - representation_doc = avalon.io.find_one( + representation_doc = legacy_io.find_one( { "_id": ObjectId(representation), "type": "representation" @@ -188,7 +187,7 @@ def any_outdated(): return False -@with_avalon +@with_pipeline_io def get_asset(asset_name=None): """ Returning asset document from database by its 
name. @@ -201,9 +200,9 @@ def get_asset(asset_name=None): (MongoDB document) """ if not asset_name: - asset_name = avalon.api.Session["AVALON_ASSET"] + asset_name = legacy_io.Session["AVALON_ASSET"] - asset_document = avalon.io.find_one({ + asset_document = legacy_io.find_one({ "name": asset_name, "type": "asset" }) @@ -214,7 +213,7 @@ def get_asset(asset_name=None): return asset_document -@with_avalon +@with_pipeline_io def get_hierarchy(asset_name=None): """ Obtain asset hierarchy path string from mongo db @@ -227,12 +226,12 @@ def get_hierarchy(asset_name=None): """ if not asset_name: - asset_name = avalon.io.Session.get( + asset_name = legacy_io.Session.get( "AVALON_ASSET", os.environ["AVALON_ASSET"] ) - asset_entity = avalon.io.find_one({ + asset_entity = legacy_io.find_one({ "type": 'asset', "name": asset_name }) @@ -251,13 +250,13 @@ def get_hierarchy(asset_name=None): parent_id = entity.get("data", {}).get("visualParent") if not parent_id: break - entity = avalon.io.find_one({"_id": parent_id}) + entity = legacy_io.find_one({"_id": parent_id}) hierarchy_items.append(entity["name"]) # Add parents to entity data for next query entity_data = asset_entity.get("data", {}) entity_data["parents"] = hierarchy_items - avalon.io.update_many( + legacy_io.update_many( {"_id": asset_entity["_id"]}, {"$set": {"data": entity_data}} ) @@ -304,7 +303,7 @@ def get_linked_asset_ids(asset_doc): return output -@with_avalon +@with_pipeline_io def get_linked_assets(asset_doc): """Return linked assets for `asset_doc` from DB @@ -318,10 +317,10 @@ def get_linked_assets(asset_doc): if not link_ids: return [] - return list(avalon.io.find({"_id": {"$in": link_ids}})) + return list(legacy_io.find({"_id": {"$in": link_ids}})) -@with_avalon +@with_pipeline_io def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): """Retrieve latest version from `asset_name`, and `subset_name`. 
@@ -332,8 +331,7 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): Args: asset_name (str): Name of asset. subset_name (str): Name of subset. - dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection - with Session. + dbcon (AvalonMongoDB, optional): Avalon Mongo connection with Session. project_name (str, optional): Find latest version in specific project. Returns: @@ -342,13 +340,13 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): """ if not dbcon: - log.debug("Using `avalon.io` for query.") - dbcon = avalon.io + log.debug("Using `legacy_io` for query.") + dbcon = legacy_io # Make sure is installed dbcon.install() if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"): - # `avalon.io` has only `_database` attribute + # `legacy_io` has only `_database` attribute # but `AvalonMongoDB` has `database` database = getattr(dbcon, "database", dbcon._database) collection = database[project_name] @@ -428,7 +426,7 @@ def get_workfile_template_key_from_context( "`get_workfile_template_key_from_context` requires to pass" " one of 'dbcon' or 'project_name' arguments." )) - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB dbcon = AvalonMongoDB() dbcon.Session["AVALON_PROJECT"] = project_name @@ -648,6 +646,7 @@ def get_workdir( ) +@with_pipeline_io def template_data_from_session(session=None): """ Return dictionary with template from session keys. @@ -657,15 +656,15 @@ def template_data_from_session(session=None): Returns: dict: All available data from session. 
""" - from avalon import io - import avalon.api if session is None: - session = avalon.api.Session + session = legacy_io.Session project_name = session["AVALON_PROJECT"] - project_doc = io._database[project_name].find_one({"type": "project"}) - asset_doc = io._database[project_name].find_one({ + project_doc = legacy_io.database[project_name].find_one({ + "type": "project" + }) + asset_doc = legacy_io.database[project_name].find_one({ "type": "asset", "name": session["AVALON_ASSET"] }) @@ -674,6 +673,7 @@ def template_data_from_session(session=None): return get_workdir_data(project_doc, asset_doc, task_name, host_name) +@with_pipeline_io def compute_session_changes( session, task=None, asset=None, app=None, template_key=None ): @@ -712,10 +712,8 @@ def compute_session_changes( asset = asset["name"] if not asset_document or not asset_tasks: - from avalon import io - # Assume asset name - asset_document = io.find_one( + asset_document = legacy_io.find_one( { "name": asset, "type": "asset" @@ -747,11 +745,10 @@ def compute_session_changes( return changes +@with_pipeline_io def get_workdir_from_session(session=None, template_key=None): - import avalon.api - if session is None: - session = avalon.api.Session + session = legacy_io.Session project_name = session["AVALON_PROJECT"] host_name = session["AVALON_APP"] anatomy = Anatomy(project_name) @@ -768,6 +765,7 @@ def get_workdir_from_session(session=None, template_key=None): return anatomy_filled[template_key]["folder"] +@with_pipeline_io def update_current_task(task=None, asset=None, app=None, template_key=None): """Update active Session to a new task work area. @@ -782,10 +780,8 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): dict: The changed key, values in the current Session. 
""" - import avalon.api - changes = compute_session_changes( - avalon.api.Session, + legacy_io.Session, task=task, asset=asset, app=app, @@ -795,7 +791,7 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): # Update the Session and environments. Pop from environments all keys with # value set to None. for key, value in changes.items(): - avalon.api.Session[key] = value + legacy_io.Session[key] = value if value is None: os.environ.pop(key, None) else: @@ -807,7 +803,7 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): return changes -@with_avalon +@with_pipeline_io def get_workfile_doc(asset_id, task_name, filename, dbcon=None): """Return workfile document for entered context. @@ -819,14 +815,14 @@ def get_workfile_doc(asset_id, task_name, filename, dbcon=None): task_name (str): Name of task under which the workfile belongs. filename (str): Name of a workfile. dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `avalon.io` is used if not entered. + `legacy_io` is used if not entered. Returns: dict: Workfile document or None. """ - # Use avalon.io if dbcon is not entered + # Use legacy_io if dbcon is not entered if not dbcon: - dbcon = avalon.io + dbcon = legacy_io return dbcon.find_one({ "type": "workfile", @@ -836,7 +832,7 @@ def get_workfile_doc(asset_id, task_name, filename, dbcon=None): }) -@with_avalon +@with_pipeline_io def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): """Creates or replace workfile document in mongo. @@ -849,11 +845,11 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): filename (str): Filename of workfile. workdir (str): Path to directory where `filename` is located. dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `avalon.io` is used if not entered. + `legacy_io` is used if not entered. 
""" - # Use avalon.io if dbcon is not entered + # Use legacy_io if dbcon is not entered if not dbcon: - dbcon = avalon.io + dbcon = legacy_io # Filter of workfile document doc_filter = { @@ -898,7 +894,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): ) -@with_avalon +@with_pipeline_io def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): if not workfile_doc: # TODO add log message @@ -907,9 +903,9 @@ def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): if not data: return - # Use avalon.io if dbcon is not entered + # Use legacy_io if dbcon is not entered if not dbcon: - dbcon = avalon.io + dbcon = legacy_io # Convert data to mongo modification keys/values # - this is naive implementation which does not expect nested @@ -959,7 +955,7 @@ class BuildWorkfile: return containers - @with_avalon + @with_pipeline_io def build_workfile(self): """Prepares and load containers into workfile. @@ -986,8 +982,8 @@ class BuildWorkfile: from openpype.pipeline import discover_loader_plugins # Get current asset name and entity - current_asset_name = avalon.io.Session["AVALON_ASSET"] - current_asset_entity = avalon.io.find_one({ + current_asset_name = legacy_io.Session["AVALON_ASSET"] + current_asset_entity = legacy_io.find_one({ "type": "asset", "name": current_asset_name }) @@ -1015,7 +1011,7 @@ class BuildWorkfile: return # Get current task name - current_task_name = avalon.io.Session["AVALON_TASK"] + current_task_name = legacy_io.Session["AVALON_TASK"] # Load workfile presets for task self.build_presets = self.get_build_presets( @@ -1103,7 +1099,7 @@ class BuildWorkfile: # Return list of loaded containers return loaded_containers - @with_avalon + @with_pipeline_io def get_build_presets(self, task_name, asset_doc): """ Returns presets to build workfile for task name. 
@@ -1119,7 +1115,7 @@ class BuildWorkfile: """ host_name = os.environ["AVALON_APP"] project_settings = get_project_settings( - avalon.io.Session["AVALON_PROJECT"] + legacy_io.Session["AVALON_PROJECT"] ) host_settings = project_settings.get(host_name) or {} @@ -1369,7 +1365,7 @@ class BuildWorkfile: "containers": containers } - @with_avalon + @with_pipeline_io def _load_containers( self, repres_by_subset_id, subsets_by_id, profiles_per_subset_id, loaders_by_name @@ -1495,7 +1491,7 @@ class BuildWorkfile: return loaded_containers - @with_avalon + @with_pipeline_io def _collect_last_version_repres(self, asset_entities): """Collect subsets, versions and representations for asset_entities. @@ -1534,15 +1530,15 @@ class BuildWorkfile: asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities} - subsets = list(avalon.io.find({ + subsets = list(legacy_io.find({ "type": "subset", - "parent": {"$in": asset_entity_by_ids.keys()} + "parent": {"$in": list(asset_entity_by_ids.keys())} })) subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} - sorted_versions = list(avalon.io.find({ + sorted_versions = list(legacy_io.find({ "type": "version", - "parent": {"$in": subset_entity_by_ids.keys()} + "parent": {"$in": list(subset_entity_by_ids.keys())} }).sort("name", -1)) subset_id_with_latest_version = [] @@ -1554,9 +1550,9 @@ class BuildWorkfile: subset_id_with_latest_version.append(subset_id) last_versions_by_id[version["_id"]] = version - repres = avalon.io.find({ + repres = legacy_io.find({ "type": "representation", - "parent": {"$in": last_versions_by_id.keys()} + "parent": {"$in": list(last_versions_by_id.keys())} }) output = {} @@ -1592,7 +1588,7 @@ class BuildWorkfile: return output -@with_avalon +@with_pipeline_io def get_creator_by_name(creator_name, case_sensitive=False): """Find creator plugin by name. 
@@ -1622,7 +1618,7 @@ def get_creator_by_name(creator_name, case_sensitive=False): return None -@with_avalon +@with_pipeline_io def change_timer_to_current_context(): """Called after context change to change timers. @@ -1641,9 +1637,9 @@ def change_timer_to_current_context(): log.warning("Couldn't start timer") return data = { - "project_name": avalon.io.Session["AVALON_PROJECT"], - "asset_name": avalon.io.Session["AVALON_ASSET"], - "task_name": avalon.io.Session["AVALON_TASK"] + "project_name": legacy_io.Session["AVALON_PROJECT"], + "asset_name": legacy_io.Session["AVALON_ASSET"], + "task_name": legacy_io.Session["AVALON_TASK"] } requests.post(rest_api_url, json=data) @@ -1793,7 +1789,7 @@ def get_custom_workfile_template_by_string_context( """ if dbcon is None: - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB dbcon = AvalonMongoDB() @@ -1827,10 +1823,11 @@ def get_custom_workfile_template_by_string_context( ) +@with_pipeline_io def get_custom_workfile_template(template_profiles): """Filter and fill workfile template profiles by current context. - Current context is defined by `avalon.api.Session`. That's why this + Current context is defined by `legacy_io.Session`. That's why this function should be used only inside host where context is set and stable. Args: @@ -1840,15 +1837,13 @@ def get_custom_workfile_template(template_profiles): str: Path to template or None if none of profiles match current context. (Existence of formatted path is not validated.) 
""" - # Use `avalon.io` as Mongo connection - from avalon import io return get_custom_workfile_template_by_string_context( template_profiles, - io.Session["AVALON_PROJECT"], - io.Session["AVALON_ASSET"], - io.Session["AVALON_TASK"], - io + legacy_io.Session["AVALON_PROJECT"], + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"], + legacy_io ) @@ -1972,3 +1967,119 @@ def get_last_workfile( return os.path.normpath(os.path.join(workdir, filename)) return filename + + +@with_pipeline_io +def get_linked_ids_for_representations(project_name, repre_ids, dbcon=None, + link_type=None, max_depth=0): + """Returns list of linked ids of particular type (if provided). + + Goes from representations to version, back to representations + Args: + project_name (str) + repre_ids (list) or (ObjectId) + dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection + with Session. + link_type (str): ['reference', '..] + max_depth (int): limit how many levels of recursion + Returns: + (list) of ObjectId - linked representations + """ + # Create new dbcon if not passed and use passed project name + if not dbcon: + from openpype.pipeline import AvalonMongoDB + dbcon = AvalonMongoDB() + dbcon.Session["AVALON_PROJECT"] = project_name + # Validate that passed dbcon has same project + elif dbcon.Session["AVALON_PROJECT"] != project_name: + raise ValueError("Passed connection does not have right project") + + if not isinstance(repre_ids, list): + repre_ids = [repre_ids] + + version_ids = dbcon.distinct("parent", { + "_id": {"$in": repre_ids}, + "type": "representation" + }) + + match = { + "_id": {"$in": version_ids}, + "type": "version" + } + + graph_lookup = { + "from": project_name, + "startWith": "$data.inputLinks.id", + "connectFromField": "data.inputLinks.id", + "connectToField": "_id", + "as": "outputs_recursive", + "depthField": "depth" + } + if max_depth != 0: + # We offset by -1 since 0 basically means no recursion + # but the recursion only happens after 
the initial lookup + # for outputs. + graph_lookup["maxDepth"] = max_depth - 1 + + pipeline_ = [ + # Match + {"$match": match}, + # Recursive graph lookup for inputs + {"$graphLookup": graph_lookup} + ] + + result = dbcon.aggregate(pipeline_) + referenced_version_ids = _process_referenced_pipeline_result(result, + link_type) + + ref_ids = dbcon.distinct( + "_id", + filter={ + "parent": {"$in": list(referenced_version_ids)}, + "type": "representation" + } + ) + + return list(ref_ids) + + +def _process_referenced_pipeline_result(result, link_type): + """Filters result from pipeline for particular link_type. + + Pipeline cannot use link_type directly in a query. + Returns: + (list) + """ + referenced_version_ids = set() + correctly_linked_ids = set() + for item in result: + input_links = item["data"].get("inputLinks", []) + correctly_linked_ids = _filter_input_links(input_links, + link_type, + correctly_linked_ids) + + # outputs_recursive in random order, sort by depth + outputs_recursive = sorted(item.get("outputs_recursive", []), + key=lambda d: d["depth"]) + + for output in outputs_recursive: + if output["_id"] not in correctly_linked_ids: # leaf + continue + + correctly_linked_ids = _filter_input_links( + output["data"].get("inputLinks", []), + link_type, + correctly_linked_ids) + + referenced_version_ids.add(output["_id"]) + + return referenced_version_ids + + +def _filter_input_links(input_links, link_type, correctly_linked_ids): + for input_link in input_links: + if not link_type or input_link["type"] == link_type: + correctly_linked_ids.add(input_link.get("id") or + input_link.get("_id")) # legacy + + return correctly_linked_ids diff --git a/openpype/lib/connections.py b/openpype/lib/connections.py new file mode 100644 index 0000000000..91b745a4c1 --- /dev/null +++ b/openpype/lib/connections.py @@ -0,0 +1,38 @@ +import requests +import os + + +def requests_post(*args, **kwargs): + """Wrap request post method. 
+ + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. + + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. + + """ + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.post(*args, **kwargs) + + +def requests_get(*args, **kwargs): + """Wrap request get method. + + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. + + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. 
+ + """ + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.get(*args, **kwargs) diff --git a/openpype/lib/editorial.py b/openpype/lib/editorial.py index bf868953ea..1ee21deedc 100644 --- a/openpype/lib/editorial.py +++ b/openpype/lib/editorial.py @@ -17,7 +17,7 @@ def otio_range_to_frame_range(otio_range): start = _ot.to_frames( otio_range.start_time, otio_range.start_time.rate) end = start + _ot.to_frames( - otio_range.duration, otio_range.duration.rate) - 1 + otio_range.duration, otio_range.duration.rate) return start, end @@ -254,7 +254,7 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end): media_in + source_in + offset_in) media_out_trimmed = ( media_in + source_in + ( - ((source_range.duration.value - 1) * abs( + (source_range.duration.value * abs( time_scalar)) + offset_out)) # calculate available handles diff --git a/openpype/lib/env_tools.py b/openpype/lib/env_tools.py index 6521d20f1e..25bcbf7c1b 100644 --- a/openpype/lib/env_tools.py +++ b/openpype/lib/env_tools.py @@ -69,57 +69,3 @@ def get_paths_from_environ(env_key=None, env_value=None, return_first=False): return None # Return all existing paths from environment variable return existing_paths - - -def get_global_environments(env=None): - """Load global environments from Pype. - - Return prepared and parsed global environments by pype's settings. Use - combination of "global" environments set in pype's settings and enabled - modules. - - Args: - env (dict, optional): Initial environments. Empty dictionary is used - when not entered. - - Returns; - dict of str: Loaded and processed environments. 
- - """ - import acre - from openpype.modules import ModulesManager - from openpype.settings import get_environments - - if env is None: - env = {} - - # Get global environments from settings - all_settings_env = get_environments() - parsed_global_env = acre.parse(all_settings_env["global"]) - - # Merge with entered environments - merged_env = acre.append(env, parsed_global_env) - - # Get environments from Pype modules - modules_manager = ModulesManager() - - module_envs = modules_manager.collect_global_environments() - publish_plugin_dirs = modules_manager.collect_plugin_paths()["publish"] - - # Set pyblish plugins paths if any module want to register them - if publish_plugin_dirs: - publish_paths_str = os.environ.get("PYBLISHPLUGINPATH") or "" - publish_paths = publish_paths_str.split(os.pathsep) - _publish_paths = { - os.path.normpath(path) for path in publish_paths if path - } - for path in publish_plugin_dirs: - _publish_paths.add(os.path.normpath(path)) - module_envs["PYBLISHPLUGINPATH"] = os.pathsep.join(_publish_paths) - - # Merge environments with current environments and update values - if module_envs: - parsed_envs = acre.parse(module_envs) - merged_env = acre.merge(parsed_envs, merged_env) - - return acre.compute(merged_env, cleanup=True) diff --git a/openpype/lib/log.py b/openpype/lib/log.py index f33385e0ba..2cdb7ec8e4 100644 --- a/openpype/lib/log.py +++ b/openpype/lib/log.py @@ -216,8 +216,8 @@ class PypeLogger: # Collection name under database in Mongo log_collection_name = "logs" - # OPENPYPE_DEBUG - pype_debug = 0 + # Logging level - OPENPYPE_LOG_LEVEL + log_level = None # Data same for all record documents process_data = None @@ -231,10 +231,7 @@ class PypeLogger: logger = logging.getLogger(name or "__main__") - if cls.pype_debug > 0: - logger.setLevel(logging.DEBUG) - else: - logger.setLevel(logging.INFO) + logger.setLevel(cls.log_level) add_mongo_handler = cls.use_mongo_logging add_console_handler = True @@ -333,6 +330,9 @@ class PypeLogger: # 
Define if should logging to mongo be used use_mongo_logging = bool(log4mongo is not None) + if use_mongo_logging: + use_mongo_logging = os.environ.get("OPENPYPE_LOG_TO_SERVER") == "1" + # Set mongo id for process (ONLY ONCE) if use_mongo_logging and cls.mongo_process_id is None: try: @@ -357,8 +357,16 @@ class PypeLogger: # Store result to class definition cls.use_mongo_logging = use_mongo_logging - # Define if is in OPENPYPE_DEBUG mode - cls.pype_debug = int(os.getenv("OPENPYPE_DEBUG") or "0") + # Define what is logging level + log_level = os.getenv("OPENPYPE_LOG_LEVEL") + if not log_level: + # Check OPENPYPE_DEBUG for backwards compatibility + op_debug = os.getenv("OPENPYPE_DEBUG") + if op_debug and int(op_debug) > 0: + log_level = 10 + else: + log_level = 20 + cls.log_level = int(log_level) if not os.environ.get("OPENPYPE_MONGO"): cls.use_mongo_logging = False diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py index 14e5fe59f8..5c40aa4549 100644 --- a/openpype/lib/path_templates.py +++ b/openpype/lib/path_templates.py @@ -365,6 +365,7 @@ class TemplateResult(str): when value of key in data is dictionary but template expect string of number. 
""" + used_values = None solved = None template = None @@ -383,6 +384,12 @@ class TemplateResult(str): new_obj.invalid_types = invalid_types return new_obj + def __copy__(self, *args, **kwargs): + return self.copy() + + def __deepcopy__(self, *args, **kwargs): + return self.copy() + def validate(self): if not self.solved: raise TemplateUnsolved( @@ -391,6 +398,17 @@ class TemplateResult(str): self.invalid_types ) + def copy(self): + cls = self.__class__ + return cls( + str(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + class TemplatesResultDict(dict): """Holds and wrap TemplateResults for easy bug report.""" diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index f11ba56865..bcbf06a0e8 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -72,9 +72,9 @@ def get_subset_name_with_asset_doc( family = family.rsplit(".", 1)[-1] if project_name is None: - import avalon.api + from openpype.pipeline import legacy_io - project_name = avalon.api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] asset_tasks = asset_doc.get("data", {}).get("tasks") or {} task_info = asset_tasks.get(task_name) or {} @@ -136,7 +136,7 @@ def get_subset_name( `get_subset_name_with_asset_doc` where asset document is expected. """ if dbcon is None: - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB dbcon = AvalonMongoDB() dbcon.Session["AVALON_PROJECT"] = project_name diff --git a/openpype/lib/profiles_filtering.py b/openpype/lib/profiles_filtering.py index 0bb901aff8..370703a68b 100644 --- a/openpype/lib/profiles_filtering.py +++ b/openpype/lib/profiles_filtering.py @@ -44,12 +44,6 @@ def _profile_exclusion(matching_profiles, logger): Returns: dict: Most matching profile. """ - - logger.info( - "Search for first most matching profile in match order:" - " Host name -> Task name -> Family." 
- ) - if not matching_profiles: return None @@ -168,6 +162,15 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): _keys_order.append(key) keys_order = tuple(_keys_order) + log_parts = " | ".join([ + "{}: \"{}\"".format(*item) + for item in key_values.items() + ]) + + logger.info( + "Looking for matching profile for: {}".format(log_parts) + ) + matching_profiles = None highest_profile_points = -1 # Each profile get 1 point for each matching filter. Profile with most @@ -205,11 +208,6 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): if profile_points == highest_profile_points: matching_profiles.append((profile, profile_scores)) - log_parts = " | ".join([ - "{}: \"{}\"".format(*item) - for item in key_values.items() - ]) - if not matching_profiles: logger.info( "None of profiles match your setup. {}".format(log_parts) @@ -221,4 +219,9 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): "More than one profile match your setup. 
{}".format(log_parts) ) - return _profile_exclusion(matching_profiles, logger) + profile = _profile_exclusion(matching_profiles, logger) + if profile: + logger.info( + "Profile selected: {}".format(profile) + ) + return profile diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py index 11fd0c0c3e..396479c725 100644 --- a/openpype/lib/project_backpack.py +++ b/openpype/lib/project_backpack.py @@ -25,7 +25,7 @@ from bson.json_util import ( CANONICAL_JSON_OPTIONS ) -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB DOCUMENTS_FILE_NAME = "database" METADATA_FILE_NAME = "metadata" diff --git a/openpype/lib/remote_publish.py b/openpype/lib/remote_publish.py index 9d97671a61..8a42daf4e9 100644 --- a/openpype/lib/remote_publish.py +++ b/openpype/lib/remote_publish.py @@ -1,13 +1,12 @@ import os from datetime import datetime -import sys -from bson.objectid import ObjectId import collections +from bson.objectid import ObjectId + import pyblish.util import pyblish.api -from openpype import uninstall from openpype.lib.mongo import OpenPypeMongoConnection from openpype.lib.plugin_tools import parse_json @@ -81,7 +80,6 @@ def publish(log, close_plugin_name=None): if result["error"]: log.error(error_format.format(**result)) - uninstall() if close_plugin: # close host app explicitly after error context = pyblish.api.Context() close_plugin().process(context) @@ -118,7 +116,6 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): if result["error"]: log.error(error_format.format(**result)) - uninstall() log_lines = [error_format.format(**result)] + log_lines dbcon.update_one( {"_id": _id}, diff --git a/openpype/lib/terminal.py b/openpype/lib/terminal.py index 5121b6ec26..f6072ed209 100644 --- a/openpype/lib/terminal.py +++ b/openpype/lib/terminal.py @@ -98,7 +98,7 @@ class Terminal: r"\*\*\* WRN": _SB + _LY + r"*** WRN" + _RST, r" \- ": _SB + _LY + r" - " + _RST, r"\[ ": _SB + _LG + r"[ " + _RST, - 
r"\]": _SB + _LG + r"]" + _RST, + r" \]": _SB + _LG + r" ]" + _RST, r"{": _LG + r"{", r"}": r"}" + _RST, r"\(": _LY + r"(", diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index 8e79aba0ae..adb9bb2c3a 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -17,6 +17,9 @@ from .vendor_bin_utils import ( # Max length of string that is supported by ffmpeg MAX_FFMPEG_STRING_LEN = 8196 +# Not allowed symbols in attributes for ffmpeg +NOT_ALLOWED_FFMPEG_CHARS = ("\"", ) + # OIIO known xml tags STRING_TAGS = { "format" @@ -367,14 +370,23 @@ def should_convert_for_ffmpeg(src_filepath): return None for attr_value in input_info["attribs"].values(): - if ( - isinstance(attr_value, str) - and len(attr_value) > MAX_FFMPEG_STRING_LEN - ): + if not isinstance(attr_value, str): + continue + + if len(attr_value) > MAX_FFMPEG_STRING_LEN: return True + + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + return True return False +# Deprecated since 2022 4 20 +# - Reason - Doesn't convert sequences right way: Can't handle gaps, reuse +# first frame for all frames and changes filenames when input +# is sequence. +# - use 'convert_input_paths_for_ffmpeg' instead def convert_for_ffmpeg( first_input_path, output_dir, @@ -402,6 +414,12 @@ def convert_for_ffmpeg( if logger is None: logger = logging.getLogger(__name__) + logger.warning(( + "DEPRECATED: 'openpype.lib.transcoding.convert_for_ffmpeg' is" + " deprecated function of conversion for FFMpeg. 
Please replace usage" + " with 'openpype.lib.transcoding.convert_input_paths_for_ffmpeg'" + )) + ext = os.path.splitext(first_input_path)[1].lower() if ext != ".exr": raise ValueError(( @@ -422,7 +440,12 @@ def convert_for_ffmpeg( compression = "none" # Prepare subprocess arguments - oiio_cmd = [get_oiio_tools_path()] + oiio_cmd = [ + get_oiio_tools_path(), + + # Don't add any additional attributes + "--nosoftwareattrib", + ] # Add input compression if available if compression: oiio_cmd.extend(["--compression", compression]) @@ -458,23 +481,34 @@ def convert_for_ffmpeg( "--frames", "{}-{}".format(input_frame_start, input_frame_end) ]) - ignore_attr_changes_added = False for attr_name, attr_value in input_info["attribs"].items(): if not isinstance(attr_value, str): continue # Remove attributes that have string value longer than allowed length - # for ffmpeg + # for ffmpeg or when containt unallowed symbols + erase_reason = "Missing reason" + erase_attribute = False if len(attr_value) > MAX_FFMPEG_STRING_LEN: - if not ignore_attr_changes_added: - # Attrite changes won't be added to attributes itself - ignore_attr_changes_added = True - oiio_cmd.append("--sansattrib") + erase_reason = "has too long value ({} chars).".format( + len(attr_value) + ) + erase_attribute = True + + if not erase_attribute: + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + erase_attribute = True + erase_reason = ( + "contains unsupported character \"{}\"." + ).format(char) + break + + if erase_attribute: # Set attribute to empty string logger.info(( - "Removed attribute \"{}\" from metadata" - " because has too long value ({} chars)." - ).format(attr_name, len(attr_value))) + "Removed attribute \"{}\" from metadata because {}." 
+ ).format(attr_name, erase_reason)) oiio_cmd.extend(["--eraseattrib", attr_name]) # Add last argument - path to output @@ -494,6 +528,131 @@ def convert_for_ffmpeg( run_subprocess(oiio_cmd, logger=logger) +def convert_input_paths_for_ffmpeg( + input_paths, + output_dir, + logger=None +): + """Contert source file to format supported in ffmpeg. + + Currently can convert only exrs. The input filepaths should be files + with same type. Information about input is loaded only from first found + file. + + Filenames of input files are kept so make sure that output directory + is not the same directory as input files have. + - This way it can handle gaps and can keep input filenames without handling + frame template + + Args: + input_paths (str): Paths that should be converted. It is expected that + contains single file or image sequence of samy type. + output_dir (str): Path to directory where output will be rendered. + Must not be same as input's directory. + logger (logging.Logger): Logger used for logging. + + Raises: + ValueError: If input filepath has extension not supported by function. + Currently is supported only ".exr" extension. + """ + if logger is None: + logger = logging.getLogger(__name__) + + first_input_path = input_paths[0] + ext = os.path.splitext(first_input_path)[1].lower() + if ext != ".exr": + raise ValueError(( + "Function 'convert_for_ffmpeg' currently support only" + " \".exr\" extension. Got \"{}\"." + ).format(ext)) + + input_info = get_oiio_info_for_input(first_input_path) + + # Change compression only if source compression is "dwaa" or "dwab" + # - they're not supported in ffmpeg + compression = input_info["attribs"].get("compression") + if compression in ("dwaa", "dwab"): + compression = "none" + + # Collect channels to export + channel_names = input_info["channelnames"] + review_channels = get_convert_rgb_channels(channel_names) + if review_channels is None: + raise ValueError( + "Couldn't find channels that can be used for conversion." 
+ ) + + red, green, blue, alpha = review_channels + input_channels = [red, green, blue] + channels_arg = "R={},G={},B={}".format(red, green, blue) + if alpha is not None: + channels_arg += ",A={}".format(alpha) + input_channels.append(alpha) + input_channels_str = ",".join(input_channels) + + for input_path in input_paths: + # Prepare subprocess arguments + oiio_cmd = [ + get_oiio_tools_path(), + + # Don't add any additional attributes + "--nosoftwareattrib", + ] + # Add input compression if available + if compression: + oiio_cmd.extend(["--compression", compression]) + + oiio_cmd.extend([ + # Tell oiiotool which channels should be loaded + # - other channels are not loaded to memory so helps to + # avoid memory leak issues + "-i:ch={}".format(input_channels_str), input_path, + # Tell oiiotool which channels should be put to top stack + # (and output) + "--ch", channels_arg + ]) + + for attr_name, attr_value in input_info["attribs"].items(): + if not isinstance(attr_value, str): + continue + + # Remove attributes that have string value longer than allowed + # length for ffmpeg or when containt unallowed symbols + erase_reason = "Missing reason" + erase_attribute = False + if len(attr_value) > MAX_FFMPEG_STRING_LEN: + erase_reason = "has too long value ({} chars).".format( + len(attr_value) + ) + erase_attribute = True + + if not erase_attribute: + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + erase_attribute = True + erase_reason = ( + "contains unsupported character \"{}\"." + ).format(char) + break + + if erase_attribute: + # Set attribute to empty string + logger.info(( + "Removed attribute \"{}\" from metadata because {}." 
+ ).format(attr_name, erase_reason)) + oiio_cmd.extend(["--eraseattrib", attr_name]) + + # Add last argument - path to output + base_filename = os.path.basename(input_path) + output_path = os.path.join(output_dir, base_filename) + oiio_cmd.extend([ + "-o", output_path + ]) + + logger.debug("Conversion command: {}".format(" ".join(oiio_cmd))) + run_subprocess(oiio_cmd, logger=logger) + + # FFMPEG functions def get_ffprobe_data(path_to_file, logger=None): """Load data about entered filepath via ffprobe. @@ -570,9 +729,9 @@ def get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd=None): def _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd): input_format = ffprobe_data["format"] format_tags = input_format.get("tags") or {} - product_name = format_tags.get("product_name") or "" + operational_pattern_ul = format_tags.get("operational_pattern_ul") or "" output = [] - if "opatom" in product_name.lower(): + if operational_pattern_ul == "060e2b34.04010102.0d010201.10030000": output.extend(["-f", "mxf_opatom"]) return output diff --git a/openpype/lib/usdlib.py b/openpype/lib/usdlib.py index 89021156b4..86de19b4be 100644 --- a/openpype/lib/usdlib.py +++ b/openpype/lib/usdlib.py @@ -8,7 +8,10 @@ except ImportError: # Allow to fall back on Multiverse 6.3.0+ pxr usd library from mvpxr import Usd, UsdGeom, Sdf, Kind -from avalon import io, api +from openpype.pipeline import ( + registered_root, + legacy_io, +) log = logging.getLogger(__name__) @@ -125,7 +128,7 @@ def create_model(filename, asset, variant_subsets): """ - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -175,7 +178,7 @@ def create_shade(filename, asset, variant_subsets): """ - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ 
-210,7 +213,7 @@ def create_shade_variation(filename, asset, model_variant, shade_variants): """ - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -310,7 +313,7 @@ def get_usd_master_path(asset, subset, representation): """ - project = io.find_one( + project = legacy_io.find_one( {"type": "project"}, projection={"config.template.publish": True} ) template = project["config"]["template"]["publish"] @@ -319,12 +322,12 @@ def get_usd_master_path(asset, subset, representation): # Allow explicitly passing asset document asset_doc = asset else: - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) path = template.format( **{ - "root": api.registered_root(), - "project": api.Session["AVALON_PROJECT"], + "root": registered_root(), + "project": legacy_io.Session["AVALON_PROJECT"], "asset": asset_doc["name"], "subset": subset, "representation": representation, diff --git a/openpype/modules/avalon_apps/avalon_app.py b/openpype/modules/avalon_apps/avalon_app.py index 51a22323f1..1d21de129b 100644 --- a/openpype/modules/avalon_apps/avalon_app.py +++ b/openpype/modules/avalon_apps/avalon_app.py @@ -1,5 +1,5 @@ import os -import openpype + from openpype.modules import OpenPypeModule from openpype_interfaces import ITrayModule @@ -26,7 +26,8 @@ class AvalonModule(OpenPypeModule, ITrayModule): self.avalon_mongo_timeout = avalon_mongo_timeout # Tray attributes - self.libraryloader = None + self._library_loader_imported = None + self._library_loader_window = None self.rest_api_obj = None def get_global_environments(self): @@ -41,21 +42,11 @@ class AvalonModule(OpenPypeModule, ITrayModule): def tray_init(self): # Add library tool + self._library_loader_imported = False try: - from Qt import QtCore from openpype.tools.libraryloader import LibraryLoaderWindow - libraryloader 
= LibraryLoaderWindow( - show_projects=True, - show_libraries=True - ) - # Remove always on top flag for tray - window_flags = libraryloader.windowFlags() - if window_flags | QtCore.Qt.WindowStaysOnTopHint: - window_flags ^= QtCore.Qt.WindowStaysOnTopHint - libraryloader.setWindowFlags(window_flags) - self.libraryloader = libraryloader - + self._library_loader_imported = True except Exception: self.log.warning( "Couldn't load Library loader tool for tray.", @@ -64,7 +55,7 @@ class AvalonModule(OpenPypeModule, ITrayModule): # Definition of Tray menu def tray_menu(self, tray_menu): - if self.libraryloader is None: + if not self._library_loader_imported: return from Qt import QtWidgets @@ -84,17 +75,31 @@ class AvalonModule(OpenPypeModule, ITrayModule): return def show_library_loader(self): - if self.libraryloader is None: - return + if self._library_loader_window is None: + from Qt import QtCore + from openpype.tools.libraryloader import LibraryLoaderWindow + from openpype.pipeline import install_openpype_plugins - self.libraryloader.show() + libraryloader = LibraryLoaderWindow( + show_projects=True, + show_libraries=True + ) + # Remove always on top flag for tray + window_flags = libraryloader.windowFlags() + if window_flags | QtCore.Qt.WindowStaysOnTopHint: + window_flags ^= QtCore.Qt.WindowStaysOnTopHint + libraryloader.setWindowFlags(window_flags) + self._library_loader_window = libraryloader + + install_openpype_plugins() + + self._library_loader_window.show() # Raise and activate the window # for MacOS - self.libraryloader.raise_() + self._library_loader_window.raise_() # for Windows - self.libraryloader.activateWindow() - self.libraryloader.refresh() + self._library_loader_window.activateWindow() # Webserver module implementation def webserver_initialization(self, server_manager): diff --git a/openpype/modules/avalon_apps/rest_api.py b/openpype/modules/avalon_apps/rest_api.py index 533050fc0c..b35f5bf357 100644 --- a/openpype/modules/avalon_apps/rest_api.py 
+++ b/openpype/modules/avalon_apps/rest_api.py @@ -1,4 +1,3 @@ -import os import json import datetime @@ -6,7 +5,7 @@ from bson.objectid import ObjectId from aiohttp.web_response import Response -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from openpype_modules.webserver.base_routes import RestApiEndpoint diff --git a/openpype/modules/base.py b/openpype/modules/base.py index 23c908299f..5b49649359 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -290,49 +290,16 @@ def _load_modules(): log = PypeLogger.get_logger("ModulesLoader") - current_dir = os.path.abspath(os.path.dirname(__file__)) - processed_paths = set() - processed_paths.add(current_dir) - # Import default modules imported from 'openpype.modules' - for filename in os.listdir(current_dir): - # Ignore filenames - if ( - filename in IGNORED_FILENAMES - or filename in IGNORED_DEFAULT_FILENAMES - ): - continue - - fullpath = os.path.join(current_dir, filename) - basename, ext = os.path.splitext(filename) - - if os.path.isdir(fullpath): - # Check existence of init fil - init_path = os.path.join(fullpath, "__init__.py") - if not os.path.exists(init_path): - log.debug(( - "Module directory does not contan __init__.py file {}" - ).format(fullpath)) - continue - - elif ext not in (".py", ): - continue - - try: - import_str = "openpype.modules.{}".format(basename) - new_import_str = "{}.{}".format(modules_key, basename) - default_module = __import__(import_str, fromlist=("", )) - sys.modules[new_import_str] = default_module - setattr(openpype_modules, basename, default_module) - - except Exception: - msg = ( - "Failed to import default module '{}'." 
- ).format(basename) - log.error(msg, exc_info=True) - # Look for OpenPype modules in paths defined with `get_module_dirs` # - dynamically imported OpenPype modules and addons - for dirpath in get_module_dirs(): + module_dirs = get_module_dirs() + # Add current directory at first place + # - has small differences in import logic + current_dir = os.path.abspath(os.path.dirname(__file__)) + module_dirs.insert(0, current_dir) + + processed_paths = set() + for dirpath in module_dirs: # Skip already processed paths if dirpath in processed_paths: continue @@ -344,20 +311,29 @@ def _load_modules(): ).format(dirpath)) continue + is_in_current_dir = dirpath == current_dir for filename in os.listdir(dirpath): # Ignore filenames if filename in IGNORED_FILENAMES: continue + if ( + is_in_current_dir + and filename in IGNORED_DEFAULT_FILENAMES + ): + continue + fullpath = os.path.join(dirpath, filename) basename, ext = os.path.splitext(filename) + # Validations if os.path.isdir(fullpath): - # Check existence of init fil + # Check existence of init file init_path = os.path.join(fullpath, "__init__.py") if not os.path.exists(init_path): log.debug(( - "Module directory does not contan __init__.py file {}" + "Module directory does not contain __init__.py" + " file {}" ).format(fullpath)) continue @@ -367,27 +343,29 @@ def _load_modules(): # TODO add more logic how to define if folder is module or not # - check manifest and content of manifest try: - if os.path.isdir(fullpath): - # Module without init file can't be used as OpenPype module - # because the module class could not be imported - init_file = os.path.join(fullpath, "__init__.py") - if not os.path.exists(init_file): - log.info(( - "Skipping module directory because of" - " missing \"__init__.py\" file. 
\"{}\"" - ).format(fullpath)) - continue + # Don't import dynamically current directory modules + if is_in_current_dir: + import_str = "openpype.modules.{}".format(basename) + new_import_str = "{}.{}".format(modules_key, basename) + default_module = __import__(import_str, fromlist=("", )) + sys.modules[new_import_str] = default_module + setattr(openpype_modules, basename, default_module) + + elif os.path.isdir(fullpath): import_module_from_dirpath(dirpath, filename, modules_key) - elif ext in (".py", ): + else: module = import_filepath(fullpath) setattr(openpype_modules, basename, module) except Exception: - log.error( - "Failed to import '{}'.".format(fullpath), - exc_info=True - ) + if is_in_current_dir: + msg = "Failed to import default module '{}'.".format( + basename + ) + else: + msg = "Failed to import module '{}'.".format(fullpath) + log.error(msg, exc_info=True) class _OpenPypeInterfaceMeta(ABCMeta): @@ -724,6 +702,32 @@ class ModulesManager: ).format(expected_keys, " | ".join(msg_items))) return output + def collect_creator_plugin_paths(self, host_name): + """Helper to collect creator plugin paths from modules. + + Args: + host_name (str): For which host are creators meants. + + Returns: + list: List of creator plugin paths. + """ + # Output structure + from openpype_interfaces import IPluginPaths + + output = [] + for module in self.get_enabled_modules(): + # Skip module that do not inherit from `IPluginPaths` + if not isinstance(module, IPluginPaths): + continue + + paths = module.get_creator_plugin_paths(host_name) + if paths: + # Convert to list if value is not list + if not isinstance(paths, (list, tuple, set)): + paths = [paths] + output.extend(paths) + return output + def collect_launch_hook_paths(self): """Helper to collect hooks from modules inherited ILaunchHookPaths. 
diff --git a/openpype/modules/clockify/launcher_actions/ClockifyStart.py b/openpype/modules/clockify/launcher_actions/ClockifyStart.py index db51964eb7..4669f98b01 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifyStart.py +++ b/openpype/modules/clockify/launcher_actions/ClockifyStart.py @@ -1,12 +1,15 @@ -from avalon import api, io from openpype.api import Logger +from openpype.pipeline import ( + legacy_io, + LauncherAction, +) from openpype_modules.clockify.clockify_api import ClockifyAPI -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) -class ClockifyStart(api.Action): +class ClockifyStart(LauncherAction): name = "clockify_start_timer" label = "Clockify - Start Timer" @@ -26,7 +29,7 @@ class ClockifyStart(api.Action): task_name = session['AVALON_TASK'] description = asset_name - asset = io.find_one({ + asset = legacy_io.find_one({ 'type': 'asset', 'name': asset_name }) diff --git a/openpype/modules/clockify/launcher_actions/ClockifySync.py b/openpype/modules/clockify/launcher_actions/ClockifySync.py index 02982d373a..356bbd0306 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifySync.py +++ b/openpype/modules/clockify/launcher_actions/ClockifySync.py @@ -1,10 +1,14 @@ -from avalon import api, io from openpype_modules.clockify.clockify_api import ClockifyAPI from openpype.api import Logger -log = Logger().get_logger(__name__) +from openpype.pipeline import ( + legacy_io, + LauncherAction, +) + +log = Logger.get_logger(__name__) -class ClockifySync(api.Action): +class ClockifySync(LauncherAction): name = "sync_to_clockify" label = "Sync to Clockify" @@ -22,10 +26,10 @@ class ClockifySync(api.Action): projects_to_sync = [] if project_name.strip() == '' or project_name is None: - for project in io.projects(): + for project in legacy_io.projects(): projects_to_sync.append(project) else: - project = io.find_one({'type': 'project'}) + project = legacy_io.find_one({'type': 'project'}) projects_to_sync.append(project) 
projects_info = {} diff --git a/openpype/modules/deadline/deadline_module.py b/openpype/modules/deadline/deadline_module.py index 1a179e9aaf..c30db75188 100644 --- a/openpype/modules/deadline/deadline_module.py +++ b/openpype/modules/deadline/deadline_module.py @@ -1,8 +1,19 @@ import os +import requests +import six +import sys + +from openpype.lib import requests_get, PypeLogger from openpype.modules import OpenPypeModule from openpype_interfaces import IPluginPaths +class DeadlineWebserviceError(Exception): + """ + Exception to throw when connection to Deadline server fails. + """ + + class DeadlineModule(OpenPypeModule, IPluginPaths): name = "deadline" @@ -32,3 +43,35 @@ class DeadlineModule(OpenPypeModule, IPluginPaths): return { "publish": [os.path.join(current_dir, "plugins", "publish")] } + + @staticmethod + def get_deadline_pools(webservice, log=None): + # type: (str) -> list + """Get pools from Deadline. + Args: + webservice (str): Server url. + log (Logger) + Returns: + list: Pools. + Throws: + RuntimeError: If deadline webservice is unreachable. 
+ + """ + if not log: + log = PypeLogger.get_logger(__name__) + + argument = "{}/api/pools?NamesOnly=true".format(webservice) + try: + response = requests_get(argument) + except requests.exceptions.ConnectionError as exc: + msg = 'Cannot connect to DL web service {}'.format(webservice) + log.error(msg) + six.reraise( + DeadlineWebserviceError, + DeadlineWebserviceError('{} - {}'.format(msg, exc)), + sys.exc_info()[2]) + if not response.ok: + log.warning("No pools retrieved") + return [] + + return response.json() diff --git a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py index 1bc4eaa067..a7035cd99f 100644 --- a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py +++ b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py @@ -11,7 +11,7 @@ import pyblish.api class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): """Collect Deadline Webservice URL from instance.""" - order = pyblish.api.CollectorOrder + 0.02 + order = pyblish.api.CollectorOrder + 0.415 label = "Deadline Webservice from the Instance" families = ["rendering"] diff --git a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py index fc056342a8..e6ad6a9aa1 100644 --- a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py +++ b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py @@ -6,7 +6,7 @@ import pyblish.api class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin): """Collect default Deadline Webservice URL.""" - order = pyblish.api.CollectorOrder + 0.01 + order = pyblish.api.CollectorOrder + 0.410 label = "Default Deadline Webservice" pass_mongo_url = False diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py 
b/openpype/modules/deadline/plugins/publish/collect_pools.py new file mode 100644 index 0000000000..48130848d5 --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/collect_pools.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +"""Collect Deadline pools. Choose default one from Settings + +""" +import pyblish.api + + +class CollectDeadlinePools(pyblish.api.InstancePlugin): + """Collect pools from instance if present, from Setting otherwise.""" + + order = pyblish.api.CollectorOrder + 0.420 + label = "Collect Deadline Pools" + families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + + primary_pool = None + secondary_pool = None + + def process(self, instance): + if not instance.data.get("primaryPool"): + instance.data["primaryPool"] = self.primary_pool or "none" + + if not instance.data.get("secondaryPool"): + instance.data["secondaryPool"] = self.secondary_pool or "none" diff --git a/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml b/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml new file mode 100644 index 0000000000..0e7d72910e --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml @@ -0,0 +1,31 @@ + + + + Scene setting + + ## Invalid Deadline pools found + + Configured pools don't match what is set in Deadline. + + {invalid_value_str} + + ### How to repair? + + If your instance had deadline pools set on creation, remove or + change them. + + In other cases inform admin to change them in Settings. + + Available deadline pools {pools_str}. + + + ### __Detailed Info__ + + This error is shown when deadline pool is not on Deadline anymore. It + could happen in case of republish old workfile which was created with + previous deadline pools, + or someone changed pools on Deadline side, but didn't modify Openpype + Settings. 
+ + + \ No newline at end of file diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index c499c14d40..b6584f239e 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -3,10 +3,9 @@ import attr import getpass import pyblish.api -from avalon import api - from openpype.lib import env_value_to_bool from openpype.lib.delivery import collect_frames +from openpype.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo @@ -37,8 +36,6 @@ class AfterEffectsSubmitDeadline( priority = 50 chunk_size = 1000000 - primary_pool = None - secondary_pool = None group = None department = None multiprocess = True @@ -62,8 +59,8 @@ class AfterEffectsSubmitDeadline( dln_job_info.Frames = frame_range dln_job_info.Priority = self.priority - dln_job_info.Pool = self.primary_pool - dln_job_info.SecondaryPool = self.secondary_pool + dln_job_info.Pool = self._instance.data.get("primaryPool") + dln_job_info.SecondaryPool = self._instance.data.get("secondaryPool") dln_job_info.Group = self.group dln_job_info.Department = self.department dln_job_info.ChunkSize = self.chunk_size @@ -89,7 +86,7 @@ class AfterEffectsSubmitDeadline( keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) for key in keys: val = environment.get(key) if val: diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py index 918efb6630..912f0f4026 100644 --- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py +++ 
b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py @@ -8,8 +8,8 @@ import re import attr import pyblish.api -from avalon import api +from openpype.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo @@ -241,8 +241,6 @@ class HarmonySubmitDeadline( optional = True use_published = False - primary_pool = "" - secondary_pool = "" priority = 50 chunk_size = 1000000 group = "none" @@ -259,8 +257,8 @@ class HarmonySubmitDeadline( # for now, get those from presets. Later on it should be # configurable in Harmony UI directly. job_info.Priority = self.priority - job_info.Pool = self.primary_pool - job_info.SecondaryPool = self.secondary_pool + job_info.Pool = self._instance.data.get("primaryPool") + job_info.SecondaryPool = self._instance.data.get("secondaryPool") job_info.ChunkSize = self.chunk_size job_info.BatchName = os.path.basename(self._instance.data["source"]) job_info.Department = self.department @@ -282,7 +280,7 @@ class HarmonySubmitDeadline( keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) for key in keys: val = environment.get(key) if val: diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py index c683eb68a8..f834ae7e92 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -4,10 +4,10 @@ import json import requests import hou -from avalon import api, io - import pyblish.api +from openpype.pipeline import legacy_io + class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): """Submit Houdini scene to perform a local publish in Deadline. 
@@ -35,7 +35,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): ), "Errors found, aborting integration.." # Deadline connection - AVALON_DEADLINE = api.Session.get( + AVALON_DEADLINE = legacy_io.Session.get( "AVALON_DEADLINE", "http://localhost:8082" ) assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" @@ -55,7 +55,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): scenename = os.path.basename(scene) # Get project code - project = io.find_one({"type": "project"}) + project = legacy_io.find_one({"type": "project"}) code = project["data"].get("code", project["name"]) job_name = "{scene} [PUBLISH]".format(scene=scenename) @@ -137,7 +137,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): environment = dict( {key: os.environ[key] for key in keys if key in os.environ}, - **api.Session + **legacy_io.Session ) environment["PYBLISH_ACTIVE_INSTANCES"] = ",".join(instances) diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index 59aeb68b79..aca88c7440 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -3,11 +3,11 @@ import json import getpass import requests -from avalon import api - import pyblish.api -import hou +# import hou ??? 
+ +from openpype.pipeline import legacy_io class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): @@ -71,7 +71,8 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): "UserName": deadline_user, "Plugin": "Houdini", - "Pool": "houdini_redshift", # todo: remove hardcoded pool + "Pool": instance.data.get("primaryPool"), + "secondaryPool": instance.data.get("secondaryPool"), "Frames": frames, "ChunkSize": instance.data.get("chunkSize", 10), @@ -106,7 +107,7 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) payload["JobInfo"].update({ "EnvironmentKeyValue%d" % index: "{key}={value}".format( @@ -140,7 +141,7 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): def submit(self, instance, payload): - AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE", + AVALON_DEADLINE = legacy_io.Session.get("AVALON_DEADLINE", "http://localhost:8082") assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index f59fd3af1c..d4a34fbd41 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -32,10 +32,11 @@ import requests from maya import cmds -from avalon import api import pyblish.api +from openpype.lib import requests_post from openpype.hosts.maya.api import lib +from openpype.pipeline import legacy_io # Documentation for keys available at: # https://docs.thinkboxsoftware.com @@ -187,6 +188,10 @@ def get_renderer_variables(renderlayer, root): filename_0 = re.sub('_', '_beauty', filename_0, flags=re.IGNORECASE) prefix_attr = "defaultRenderGlobals.imageFilePrefix" + + scene = cmds.file(query=True, sceneName=True) + scene, _ = 
os.path.splitext(os.path.basename(scene)) + if renderer == "vray": renderlayer = renderlayer.split("_")[-1] # Maya's renderSettings function does not return V-Ray file extension @@ -206,8 +211,7 @@ def get_renderer_variables(renderlayer, root): filename_prefix = cmds.getAttr(prefix_attr) # we need to determine path for vray as maya `renderSettings` query # does not work for vray. - scene = cmds.file(query=True, sceneName=True) - scene, _ = os.path.splitext(os.path.basename(scene)) + filename_0 = re.sub('', scene, filename_prefix, flags=re.IGNORECASE) # noqa: E501 filename_0 = re.sub('', renderlayer, filename_0, flags=re.IGNORECASE) # noqa: E501 filename_0 = "{}.{}.{}".format( @@ -215,6 +219,39 @@ def get_renderer_variables(renderlayer, root): filename_0 = os.path.normpath(os.path.join(root, filename_0)) elif renderer == "renderman": prefix_attr = "rmanGlobals.imageFileFormat" + # NOTE: This is guessing extensions from renderman display types. + # Some of them are just framebuffers, d_texture format can be + # set in display setting. We set those now to None, but it + # should be handled more gracefully. 
+ display_types = { + "d_deepexr": "exr", + "d_it": None, + "d_null": None, + "d_openexr": "exr", + "d_png": "png", + "d_pointcloud": "ptc", + "d_targa": "tga", + "d_texture": None, + "d_tiff": "tif" + } + + extension = display_types.get( + cmds.listConnections("rmanDefaultDisplay.displayType")[0], + "exr" + ) or "exr" + + filename_prefix = "{}/{}".format( + cmds.getAttr("rmanGlobals.imageOutputDir"), + cmds.getAttr("rmanGlobals.imageFileFormat") + ) + + renderlayer = renderlayer.split("_")[-1] + + filename_0 = re.sub('', scene, filename_prefix, flags=re.IGNORECASE) # noqa: E501 + filename_0 = re.sub('', renderlayer, filename_0, flags=re.IGNORECASE) # noqa: E501 + filename_0 = re.sub('', "#" * int(padding), filename_0, flags=re.IGNORECASE) # noqa: E501 + filename_0 = re.sub('', extension, filename_0, flags=re.IGNORECASE) # noqa: E501 + filename_0 = os.path.normpath(os.path.join(root, filename_0)) elif renderer == "redshift": # mapping redshift extension dropdown values to strings ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"] @@ -403,6 +440,11 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): output_filename_0 = filename_0 + # this is needed because renderman handles directory and file + # prefixes separately + if self._instance.data["renderer"] == "renderman": + dirname = os.path.dirname(output_filename_0) + # Create render folder ---------------------------------------------- try: # Ensure render folder exists @@ -489,7 +531,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) environment["OPENPYPE_LOG_NO_COLORS"] = "1" environment["OPENPYPE_MAYA_VERSION"] = cmds.about(v=True) # to recognize job from PYPE for turning Event On/Off @@ -701,7 +743,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): tiles_count = instance.data.get("tilesX") * 
instance.data.get("tilesY") # noqa: E501 for tile_job in frame_payloads: - response = self._requests_post(url, json=tile_job) + response = requests_post(url, json=tile_job) if not response.ok: raise Exception(response.text) @@ -764,7 +806,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): job_idx, len(assembly_payloads) )) self.log.debug(json.dumps(ass_job, indent=4, sort_keys=True)) - response = self._requests_post(url, json=ass_job) + response = requests_post(url, json=ass_job) if not response.ok: raise Exception(response.text) @@ -782,7 +824,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # E.g. http://192.168.0.1:8082/api/jobs url = "{}/api/jobs".format(self.deadline_url) - response = self._requests_post(url, json=payload) + response = requests_post(url, json=payload) if not response.ok: raise Exception(response.text) instance.data["deadlineSubmissionJob"] = response.json() @@ -799,6 +841,23 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "AssetDependency0": data["filepath"], } + renderer = self._instance.data["renderer"] + + # This hack is here because of how Deadline handles Renderman version. + # it considers everything with `renderman` set as version older than + # Renderman 22, and so if we are using renderman > 21 we need to set + # renderer string on the job to `renderman22`. We will have to change + # this when Deadline releases new version handling this. 
+ if self._instance.data["renderer"] == "renderman": + try: + from rfm2.config import cfg # noqa + except ImportError: + raise Exception("Cannot determine renderman version") + + rman_version = cfg().build_info.version() # type: str + if int(rman_version.split(".")[0]) > 22: + renderer = "renderman22" + plugin_info = { "SceneFile": data["filepath"], # Output directory and filename @@ -812,7 +871,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "RenderLayer": data["renderlayer"], # Determine which renderer to use from the file itself - "Renderer": self._instance.data["renderer"], + "Renderer": renderer, # Resolve relative references "ProjectPath": data["workspace"], @@ -990,7 +1049,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): self.log.info("Submitting ass export job.") url = "{}/api/jobs".format(self.deadline_url) - response = self._requests_post(url, json=payload) + response = requests_post(url, json=payload) if not response.ok: self.log.error("Submition failed!") self.log.error(response.status_code) @@ -1014,44 +1073,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): % (value, int(value)) ) - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if 'verify' not in kwargs: - kwargs['verify'] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - # add 10sec timeout before bailing out - kwargs['timeout'] = 10 - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """Wrap request get method. 
- - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if 'verify' not in kwargs: - kwargs['verify'] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - # add 10sec timeout before bailing out - kwargs['timeout'] = 10 - return requests.get(*args, **kwargs) - def format_vray_output_filename(self, filename, template, dir=False): """Format the expected output file of the Export job. diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index 9b5800c33f..94c703d66d 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -4,10 +4,10 @@ import json import getpass import requests - -from avalon import api import pyblish.api + import nuke +from openpype.pipeline import legacy_io class NukeSubmitDeadline(pyblish.api.InstancePlugin): @@ -28,8 +28,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): priority = 50 chunk_size = 1 concurrent_tasks = 1 - primary_pool = "" - secondary_pool = "" group = "" department = "" limit_groups = {} @@ -187,8 +185,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "Department": self.department, - "Pool": self.primary_pool, - "SecondaryPool": self.secondary_pool, + "Pool": instance.data.get("primaryPool"), + "SecondaryPool": instance.data.get("secondaryPool"), "Group": self.group, "Plugin": "Nuke", @@ -244,7 +242,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): keys = [ "PYTHONPATH", "PATH", - "AVALON_SCHEMA", "AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK", @@ -266,7 +263,7 @@ class 
NukeSubmitDeadline(pyblish.api.InstancePlugin): keys += self.env_allowed_keys environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) for _path in os.environ: if _path.lower().startswith('openpype_'): diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index 3c4e0d2913..78ab935e42 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -7,13 +7,15 @@ import re from copy import copy, deepcopy import requests import clique -import openpype.api - -from avalon import api, io import pyblish.api -from openpype.pipeline import get_representation_path +import openpype.api +from openpype.pipeline import ( + get_representation_path, + legacy_io, +) +from openpype.pipeline.farm.patterning import match_aov_pattern def get_resources(version, extension=None): @@ -22,7 +24,7 @@ def get_resources(version, extension=None): if extension: query["name"] = extension - representation = io.find_one(query) + representation = legacy_io.find_one(query) assert representation, "This is a bug" directory = get_representation_path(representation) @@ -107,7 +109,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): families = ["render.farm", "prerender.farm", "renderlayer", "imagesequence", "vrayscene"] - aov_filter = {"maya": [r".*(?:[\._-])*([Bb]eauty)(?:[\.|_])*.*"], + aov_filter = {"maya": [r".*([Bb]eauty).*"], "aftereffects": [r".*"], # for everything from AE "harmony": [r".*"], # for everything from AE "celaction": [r".*"]} @@ -129,7 +131,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "OPENPYPE_PUBLISH_JOB" ] - # custom deadline atributes + # custom deadline attributes deadline_department = "" deadline_pool = "" deadline_pool_secondary = "" @@ -221,9 +223,9 @@ class 
ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self._create_metadata_path(instance) environment = job["Props"].get("Env", {}) - environment["AVALON_PROJECT"] = io.Session["AVALON_PROJECT"] - environment["AVALON_ASSET"] = io.Session["AVALON_ASSET"] - environment["AVALON_TASK"] = io.Session["AVALON_TASK"] + environment["AVALON_PROJECT"] = legacy_io.Session["AVALON_PROJECT"] + environment["AVALON_ASSET"] = legacy_io.Session["AVALON_ASSET"] + environment["AVALON_TASK"] = legacy_io.Session["AVALON_TASK"] environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME") environment["OPENPYPE_LOG_NO_COLORS"] = "1" environment["OPENPYPE_USERNAME"] = instance.context.data["user"] @@ -259,8 +261,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Priority": priority, "Group": self.deadline_group, - "Pool": self.deadline_pool, - "SecondaryPool": self.deadline_pool_secondary, + "Pool": instance.data.get("primaryPool"), + "SecondaryPool": instance.data.get("secondaryPool"), "OutputDirectory0": output_dir }, @@ -283,6 +285,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): else: payload["JobInfo"]["JobDependency0"] = job["_id"] + if instance.data.get("suspend_publish"): + payload["JobInfo"]["InitialStatus"] = "Suspended" + index = 0 for key in environment: if key.upper() in self.enviro_filter: @@ -449,16 +454,19 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): app = os.environ.get("AVALON_APP", "") preview = False - if app in self.aov_filter.keys(): - for aov_pattern in self.aov_filter[app]: - if re.match(aov_pattern, aov): - preview = True - break + if isinstance(col, list): + render_file_name = os.path.basename(col[0]) + else: + render_file_name = os.path.basename(col) + aov_patterns = self.aov_filter + preview = match_aov_pattern(app, aov_patterns, render_file_name) + + # toggle preview on if multipart is on if instance_data.get("multipartExr"): preview = True - new_instance = copy(instance_data) + new_instance = 
deepcopy(instance_data) new_instance["subset"] = subset_name new_instance["subsetGroup"] = group_name if preview: @@ -520,6 +528,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ representations = [] + host_name = os.environ.get("AVALON_APP", "") collections, remainders = clique.assemble(exp_files) # create representation for every collected sequento ce @@ -532,22 +541,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # should be review made. # - "review" tag is never added when is set to 'False' if instance["useSequenceForReview"]: - # if filtered aov name is found in filename, toggle it for - # preview video rendering - for app in self.aov_filter.keys(): - if os.environ.get("AVALON_APP", "") == app: - # iteratre all aov filters - for aov in self.aov_filter[app]: - if re.match( - aov, - list(collection)[0] - ): - preview = True - break - # toggle preview on if multipart is on if instance.get("multipartExr", False): preview = True + else: + render_file_name = list(collection)[0] + # if filtered aov name is found in filename, toggle it for + # preview video rendering + preview = match_aov_pattern( + host_name, self.aov_filter, render_file_name + ) staging = os.path.dirname(list(collection)[0]) success, rootless_staging_dir = ( @@ -611,12 +614,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "files": os.path.basename(remainder), "stagingDir": os.path.dirname(remainder), } - if "render" in instance.get("families"): + + preview = match_aov_pattern( + host_name, self.aov_filter, remainder + ) + if preview: rep.update({ "fps": instance.get("fps"), "tags": ["review"] }) - self._solve_families(instance, True) + self._solve_families(instance, preview) already_there = False for repre in instance.get("representations", []): @@ -663,7 +670,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if hasattr(instance, "_log"): data['_log'] = instance._log - asset = data.get("asset") or api.Session["AVALON_ASSET"] + 
asset = data.get("asset") or legacy_io.Session["AVALON_ASSET"] subset = data.get("subset") start = instance.data.get("frameStart") @@ -876,8 +883,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): new_i = copy(i) new_i["version"] = at.get("version") new_i["subset"] = at.get("subset") + new_i["family"] = at.get("family") new_i["append"] = True - new_i["families"].append(at.get("family")) + # don't set subsetGroup if we are attaching + new_i.pop("subsetGroup") new_instances.append(new_i) self.log.info(" - {} / v{}".format( at.get("subset"), at.get("version"))) @@ -955,7 +964,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "intent": context.data.get("intent"), "comment": context.data.get("comment"), "job": render_job or None, - "session": api.Session.copy(), + "session": legacy_io.Session.copy(), "instances": instances } @@ -1063,7 +1072,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): else: # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py new file mode 100644 index 0000000000..78eed17c98 --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py @@ -0,0 +1,48 @@ +import pyblish.api + +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) +from openpype.modules.deadline.deadline_module import DeadlineModule + + +class ValidateDeadlinePools(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validate primaryPool and secondaryPool on instance. 
+ + Values are on instance based on value insertion when Creating instance or + by Settings in CollectDeadlinePools. + """ + + label = "Validate Deadline Pools" + order = pyblish.api.ValidatorOrder + families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + optional = True + + def process(self, instance): + # get default deadline webservice url from deadline module + deadline_url = instance.context.data["defaultDeadline"] + self.log.info("deadline_url::{}".format(deadline_url)) + pools = DeadlineModule.get_deadline_pools(deadline_url, log=self.log) + self.log.info("pools::{}".format(pools)) + + formatting_data = { + "pools_str": ",".join(pools) + } + + primary_pool = instance.data.get("primaryPool") + if primary_pool and primary_pool not in pools: + msg = "Configured primary '{}' not present on Deadline".format( + instance.data["primaryPool"]) + formatting_data["invalid_value_str"] = msg + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + secondary_pool = instance.data.get("secondaryPool") + if secondary_pool and secondary_pool not in pools: + msg = "Configured secondary '{}' not present on Deadline".format( + instance.data["secondaryPool"]) + formatting_data["invalid_value_str"] = msg + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py index 2e55be2743..975e49cb28 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py @@ -1,8 +1,8 @@ import json -from avalon.api import AvalonMongoDB from openpype.api import ProjectSettings from openpype.lib import create_project +from openpype.pipeline import AvalonMongoDB from openpype.settings import SaveWarningExc from openpype_modules.ftrack.lib import ( diff --git 
a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py index 868bbb8463..1209375f82 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py @@ -356,7 +356,7 @@ class PushHierValuesToNonHier(ServerAction): values_per_entity_id[entity_id][key] = None values = query_custom_attributes( - session, all_ids_with_parents, hier_attr_ids, True + session, hier_attr_ids, all_ids_with_parents, True ) for item in values: entity_id = item["entity_id"] diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_links.py b/openpype/modules/ftrack/event_handlers_server/event_sync_links.py index 9610e7f5de..ae70c6756f 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_links.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_links.py @@ -1,7 +1,7 @@ from pymongo import UpdateOne from bson.objectid import ObjectId -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import ( CUST_ATTR_ID_KEY, diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 46c333c4c4..b5f199b3e4 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -12,8 +12,7 @@ from pymongo import UpdateOne import arrow import ftrack_api -from avalon import schema -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB, schema from openpype_modules.ftrack.lib import ( get_openpype_attr, diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py index 
96243c8c36..593fc5e596 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py +++ b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py @@ -1,10 +1,9 @@ -import os import re import subprocess from openpype_modules.ftrack.lib import BaseEvent from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from bson.objectid import ObjectId diff --git a/openpype/modules/ftrack/event_handlers_user/action_applications.py b/openpype/modules/ftrack/event_handlers_user/action_applications.py index 48a0dea006..b25bc1b5cb 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_applications.py +++ b/openpype/modules/ftrack/event_handlers_user/action_applications.py @@ -1,5 +1,4 @@ import os -from uuid import uuid4 from openpype_modules.ftrack.lib import BaseAction from openpype.lib.applications import ( @@ -8,7 +7,7 @@ from openpype.lib.applications import ( ApplictionExecutableNotFound, CUSTOM_LAUNCH_APP_GROUPS ) -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB class AppplicationsAction(BaseAction): diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py index 0ed12bd03e..81f38e0c39 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py @@ -1,6 +1,8 @@ import os -from openpype_modules.ftrack.lib import BaseAction, statics_icon +import collections +import copy from openpype.api import Anatomy +from openpype_modules.ftrack.lib import BaseAction, statics_icon class CreateFolders(BaseAction): @@ -9,55 +11,59 @@ class CreateFolders(BaseAction): icon = statics_icon("ftrack", "action_icons", "CreateFolders.svg") def discover(self, session, entities, event): - if len(entities) != 1: - return False - - not_allowed 
= ["assetversion", "project"] - if entities[0].entity_type.lower() in not_allowed: - return False - - return True + for entity_item in event["data"]["selection"]: + if entity_item.get("entityType").lower() in ("task", "show"): + return True + return False def interface(self, session, entities, event): if event["data"].get("values", {}): return - entity = entities[0] - without_interface = True - for child in entity["children"]: - if child["object_type"]["name"].lower() != "task": - without_interface = False + + with_interface = False + for entity in entities: + if entity.entity_type.lower() != "task": + with_interface = True break - self.without_interface = without_interface - if without_interface: + + if "values" not in event["data"]: + event["data"]["values"] = {} + + event["data"]["values"]["with_interface"] = with_interface + if not with_interface: return + title = "Create folders" entity_name = entity["name"] msg = ( "

Do you want create folders also" - " for all children of \"{}\"?

" + " for all children of your selection?" ) if entity.entity_type.lower() == "project": entity_name = entity["full_name"] msg = msg.replace(" also", "") msg += "

(Project root won't be created if not checked)

" - items = [] - item_msg = { - "type": "label", - "value": msg.format(entity_name) - } - item_label = { - "type": "label", - "value": "With all chilren entities" - } - item = { - "name": "children_included", - "type": "boolean", - "value": False - } - items.append(item_msg) - items.append(item_label) - items.append(item) + items = [ + { + "type": "label", + "value": msg.format(entity_name) + }, + { + "type": "label", + "value": "With all chilren entities" + }, + { + "name": "children_included", + "type": "boolean", + "value": False + }, + { + "type": "hidden", + "name": "with_interface", + "value": with_interface + } + ] return { "items": items, @@ -66,30 +72,47 @@ class CreateFolders(BaseAction): def launch(self, session, entities, event): '''Callback method for custom action.''' + + if "values" not in event["data"]: + return + + with_interface = event["data"]["values"]["with_interface"] with_childrens = True - if self.without_interface is False: - if "values" not in event["data"]: - return + if with_interface: with_childrens = event["data"]["values"]["children_included"] - entity = entities[0] - if entity.entity_type.lower() == "project": - proj = entity - else: - proj = entity["project"] - project_name = proj["full_name"] - project_code = proj["name"] + filtered_entities = [] + for entity in entities: + low_context_type = entity["context_type"].lower() + if low_context_type in ("task", "show"): + if not with_childrens and low_context_type == "show": + continue + filtered_entities.append(entity) - if entity.entity_type.lower() == 'project' and with_childrens is False: + if not filtered_entities: return { - 'success': True, - 'message': 'Nothing was created' + "success": True, + "message": 'Nothing was created' } - all_entities = [] - all_entities.append(entity) - if with_childrens: - all_entities = self.get_notask_children(entity) + project_entity = self.get_project_from_entity(filtered_entities[0]) + + project_name = project_entity["full_name"] + project_code = 
project_entity["name"] + + task_entities = [] + other_entities = [] + self.get_all_entities( + session, entities, task_entities, other_entities + ) + hierarchy = self.get_entities_hierarchy( + session, task_entities, other_entities + ) + task_types = session.query("select id, name from Type").all() + task_type_names_by_id = { + task_type["id"]: task_type["name"] + for task_type in task_types + } anatomy = Anatomy(project_name) @@ -97,77 +120,67 @@ class CreateFolders(BaseAction): work_template = anatomy.templates for key in work_keys: work_template = work_template[key] - work_has_apps = "{app" in work_template publish_keys = ["publish", "folder"] publish_template = anatomy.templates for key in publish_keys: publish_template = publish_template[key] - publish_has_apps = "{app" in publish_template + + project_data = { + "project": { + "name": project_name, + "code": project_code + } + } collected_paths = [] - for entity in all_entities: - if entity.entity_type.lower() == "project": - continue - ent_data = { - "project": { - "name": project_name, - "code": project_code - } - } + for item in hierarchy: + parent_entity, task_entities = item - ent_data["asset"] = entity["name"] + parent_data = copy.deepcopy(project_data) - parents = entity["link"][1:-1] + parents = parent_entity["link"][1:-1] hierarchy_names = [p["name"] for p in parents] - hierarchy = "" + hierarchy = "/".join(hierarchy_names) + if hierarchy_names: - hierarchy = os.path.sep.join(hierarchy_names) - ent_data["hierarchy"] = hierarchy + parent_name = hierarchy_names[-1] + else: + parent_name = project_name - tasks_created = False - for child in entity["children"]: - if child["object_type"]["name"].lower() != "task": - continue - tasks_created = True - task_data = ent_data.copy() - task_data["task"] = child["name"] + parent_data.update({ + "asset": parent_entity["name"], + "hierarchy": hierarchy, + "parent": parent_name + }) - apps = [] - - # Template wok - if work_has_apps: - app_data = task_data.copy() - 
for app in apps: - app_data["app"] = app - collected_paths.append(self.compute_template( - anatomy, app_data, work_keys - )) - else: - collected_paths.append(self.compute_template( - anatomy, task_data, work_keys - )) - - # Template publish - if publish_has_apps: - app_data = task_data.copy() - for app in apps: - app_data["app"] = app - collected_paths.append(self.compute_template( - anatomy, app_data, publish_keys - )) - else: - collected_paths.append(self.compute_template( - anatomy, task_data, publish_keys - )) - - if not tasks_created: + if not task_entities: # create path for entity collected_paths.append(self.compute_template( - anatomy, ent_data, work_keys + anatomy, parent_data, work_keys )) collected_paths.append(self.compute_template( - anatomy, ent_data, publish_keys + anatomy, parent_data, publish_keys + )) + continue + + for task_entity in task_entities: + task_type_id = task_entity["type_id"] + task_type_name = task_type_names_by_id[task_type_id] + task_data = copy.deepcopy(parent_data) + task_data["task"] = { + "name": task_entity["name"], + "type": task_type_name + } + + # Template wok + collected_paths.append(self.compute_template( + anatomy, task_data, work_keys + )) + + # Template publish + collected_paths.append(self.compute_template( + anatomy, task_data, publish_keys )) if len(collected_paths) == 0: @@ -188,14 +201,65 @@ class CreateFolders(BaseAction): "message": "Successfully created project folders." 
} - def get_notask_children(self, entity): + def get_all_entities( + self, session, entities, task_entities, other_entities + ): + if not entities: + return + + no_task_entities = [] + for entity in entities: + if entity.entity_type.lower() == "task": + task_entities.append(entity) + else: + no_task_entities.append(entity) + + if not no_task_entities: + return task_entities + + other_entities.extend(no_task_entities) + + no_task_entity_ids = [entity["id"] for entity in no_task_entities] + next_entities = session.query(( + "select id, parent_id" + " from TypedContext where parent_id in ({})" + ).format(self.join_query_keys(no_task_entity_ids))).all() + + self.get_all_entities( + session, next_entities, task_entities, other_entities + ) + + def get_entities_hierarchy(self, session, task_entities, other_entities): + task_entity_ids = [entity["id"] for entity in task_entities] + full_task_entities = session.query(( + "select id, name, type_id, parent_id" + " from TypedContext where id in ({})" + ).format(self.join_query_keys(task_entity_ids))) + task_entities_by_parent_id = collections.defaultdict(list) + for entity in full_task_entities: + parent_id = entity["parent_id"] + task_entities_by_parent_id[parent_id].append(entity) + output = [] - if entity.entity_type.lower() == "task": + if not task_entities_by_parent_id: return output - output.append(entity) - for child in entity["children"]: - output.extend(self.get_notask_children(child)) + other_ids = set() + for entity in other_entities: + other_ids.add(entity["id"]) + other_ids |= set(task_entities_by_parent_id.keys()) + + parent_entities = session.query(( + "select id, name from TypedContext where id in ({})" + ).format(self.join_query_keys(other_ids))).all() + + for parent_entity in parent_entities: + parent_id = parent_entity["id"] + output.append(( + parent_entity, + task_entities_by_parent_id[parent_id] + )) + return output def compute_template(self, anatomy, data, anatomy_keys): diff --git 
a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py index 94f359c317..ebea8872f9 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py @@ -1,6 +1,4 @@ -import os import re -import json from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype.api import get_project_basic_paths, create_project_folders diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py index 94385a36c5..ee5c3d0d97 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py @@ -3,7 +3,8 @@ import uuid from datetime import datetime from bson.objectid import ObjectId -from avalon.api import AvalonMongoDB + +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py index 5871646b20..a0bf6622e9 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py @@ -5,10 +5,10 @@ import uuid import clique from pymongo import UpdateOne -from avalon.api import AvalonMongoDB from openpype.api import Anatomy from openpype.lib import StringTemplate, TemplateUnsolved +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import BaseAction, statics_icon @@ -569,7 +569,7 @@ class DeleteOldVersions(BaseAction): context["frame"] = self.sequence_splitter sequence_path = 
os.path.normpath( StringTemplate.format_strict_template( - context, template + template, context ) ) diff --git a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py index 1f28b18900..9ef2a1668e 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delivery.py @@ -15,7 +15,7 @@ from openpype.lib.delivery import ( process_single_file, process_sequence ) -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB class Delivery(BaseAction): diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py index 3888379e04..c7237a1150 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -7,7 +7,6 @@ import datetime import ftrack_api -from avalon.api import AvalonMongoDB from openpype.api import get_project_settings from openpype.lib import ( get_workfile_template_key, @@ -15,6 +14,7 @@ from openpype.lib import ( Anatomy, StringTemplate, ) +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py index 3759bc81ac..0b14e7aa2b 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py @@ -1,8 +1,8 @@ import json -from avalon.api import AvalonMongoDB from openpype.api import ProjectSettings from openpype.lib import create_project +from openpype.pipeline import AvalonMongoDB from openpype.settings import SaveWarningExc from 
openpype_modules.ftrack.lib import ( diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py index bdb0eaf250..040ca75582 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_rv.py +++ b/openpype/modules/ftrack/event_handlers_user/action_rv.py @@ -4,8 +4,11 @@ import traceback import json import ftrack_api -from avalon import io, api -from openpype.pipeline import get_representation_path + +from openpype.pipeline import ( + get_representation_path, + legacy_io, +) from openpype_modules.ftrack.lib import BaseAction, statics_icon @@ -253,8 +256,8 @@ class RVAction(BaseAction): )["version"]["asset"]["parent"]["link"][0] project = session.get(link["type"], link["id"]) os.environ["AVALON_PROJECT"] = project["name"] - api.Session["AVALON_PROJECT"] = project["name"] - io.install() + legacy_io.Session["AVALON_PROJECT"] = project["name"] + legacy_io.install() location = ftrack_api.Session().pick_location() @@ -278,22 +281,22 @@ class RVAction(BaseAction): if online_source: continue - asset = io.find_one({"type": "asset", "name": parent_name}) - subset = io.find_one( + asset = legacy_io.find_one({"type": "asset", "name": parent_name}) + subset = legacy_io.find_one( { "type": "subset", "name": component["version"]["asset"]["name"], "parent": asset["_id"] } ) - version = io.find_one( + version = legacy_io.find_one( { "type": "version", "name": component["version"]["version"], "parent": subset["_id"] } ) - representation = io.find_one( + representation = legacy_io.find_one( { "type": "representation", "parent": version["_id"], @@ -301,7 +304,7 @@ class RVAction(BaseAction): } ) if representation is None: - representation = io.find_one( + representation = legacy_io.find_one( { "type": "representation", "parent": version["_id"], diff --git a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py 
b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py index 4820925844..62fdfa2bdd 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py @@ -4,9 +4,10 @@ import json import requests from bson.objectid import ObjectId + from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype.api import Anatomy -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py index f8319b67d4..5c6d6352d2 100644 --- a/openpype/modules/ftrack/ftrack_server/lib.py +++ b/openpype/modules/ftrack/ftrack_server/lib.py @@ -31,10 +31,13 @@ TOPIC_STATUS_SERVER = "openpype.event.server.status" TOPIC_STATUS_SERVER_RESULT = "openpype.event.server.status.result" -def check_ftrack_url(url, log_errors=True): +def check_ftrack_url(url, log_errors=True, logger=None): """Checks if Ftrack server is responding""" + if logger is None: + logger = Logger.get_logger(__name__) + if not url: - print('ERROR: Ftrack URL is not set!') + logger.error("Ftrack URL is not set!") return None url = url.strip('/ ') @@ -48,15 +51,15 @@ def check_ftrack_url(url, log_errors=True): result = requests.get(url, allow_redirects=False) except requests.exceptions.RequestException: if log_errors: - print('ERROR: Entered Ftrack URL is not accesible!') + logger.error("Entered Ftrack URL is not accesible!") return False if (result.status_code != 200 or 'FTRACK_VERSION' not in result.headers): if log_errors: - print('ERROR: Entered Ftrack URL is not accesible!') + logger.error("Entered Ftrack URL is not accesible!") return False - print('DEBUG: Ftrack server {} is accessible.'.format(url)) + logger.debug("Ftrack server {} is accessible.".format(url)) return url @@ -133,7 
+136,7 @@ class ProcessEventHub(SocketBaseEventHub): hearbeat_msg = b"processor" is_collection_created = False - pypelog = Logger().get_logger("Session Processor") + pypelog = Logger.get_logger("Session Processor") def __init__(self, *args, **kwargs): self.mongo_url = None @@ -192,7 +195,7 @@ class ProcessEventHub(SocketBaseEventHub): except pymongo.errors.AutoReconnect: self.pypelog.error(( "Mongo server \"{}\" is not responding, exiting." - ).format(os.environ["AVALON_MONGO"])) + ).format(os.environ["OPENPYPE_MONGO"])) sys.exit(0) # Additional special processing of events. if event['topic'] == 'ftrack.meta.disconnected': diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py index c5b58ca94d..124787e467 100644 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/ftrack/lib/avalon_sync.py @@ -6,16 +6,12 @@ import numbers import six -from avalon.api import AvalonMongoDB - -import avalon - from openpype.api import ( Logger, - Anatomy, get_anatomy_settings ) from openpype.lib import ApplicationManager +from openpype.pipeline import AvalonMongoDB, schema from .constants import CUST_ATTR_ID_KEY, FPS_KEYS from .custom_attributes import get_openpype_attr, query_custom_attributes @@ -175,7 +171,7 @@ def check_regex(name, entity_type, in_schema=None, schema_patterns=None): if not name_pattern: default_pattern = "^[a-zA-Z0-9_.]*$" - schema_obj = avalon.schema._cache.get(schema_name + ".json") + schema_obj = schema._cache.get(schema_name + ".json") if not schema_obj: name_pattern = default_pattern else: diff --git a/openpype/modules/ftrack/lib/credentials.py b/openpype/modules/ftrack/lib/credentials.py index 4e29e66382..2eb64254d1 100644 --- a/openpype/modules/ftrack/lib/credentials.py +++ b/openpype/modules/ftrack/lib/credentials.py @@ -92,14 +92,18 @@ def check_credentials(username, api_key, ftrack_server=None): if not ftrack_server or not username or not api_key: return False + user_exists = False try: 
session = ftrack_api.Session( server_url=ftrack_server, api_key=api_key, api_user=username ) + # Validated that the username actually exists + user = session.query("User where username is \"{}\"".format(username)) + user_exists = user is not None session.close() except Exception: - return False - return True + pass + return user_exists diff --git a/openpype/modules/ftrack/lib/custom_attributes.py b/openpype/modules/ftrack/lib/custom_attributes.py index 29c6b5e7f8..2f53815368 100644 --- a/openpype/modules/ftrack/lib/custom_attributes.py +++ b/openpype/modules/ftrack/lib/custom_attributes.py @@ -135,7 +135,7 @@ def query_custom_attributes( output.extend( session.query( ( - "select value, entity_id from {}" + "select value, entity_id, configuration_id from {}" " where entity_id in ({}) and configuration_id in ({})" ).format( table_name, diff --git a/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py b/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py new file mode 100644 index 0000000000..43fa3bc3f8 --- /dev/null +++ b/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py @@ -0,0 +1,148 @@ +""" +Requires: + context > ftrackSession + context > ftrackEntity + instance > ftrackEntity + +Provides: + instance > customData > ftrack +""" +import copy + +import pyblish.api + + +class CollectFtrackCustomAttributeData(pyblish.api.ContextPlugin): + """Collect custom attribute values and store them to customData. + + Data are stored into each instance in context under + instance.data["customData"]["ftrack"]. + + Hierarchical attributes are not looked up properly for that functionality + custom attribute values lookup must be extended. 
+ """ + + order = pyblish.api.CollectorOrder + 0.4992 + label = "Collect Ftrack Custom Attribute Data" + + # Name of custom attributes for which will be look for + custom_attribute_keys = [] + + def process(self, context): + if not self.custom_attribute_keys: + self.log.info("Custom attribute keys are not set. Skipping") + return + + ftrack_entities_by_id = {} + default_entity_id = None + + context_entity = context.data.get("ftrackEntity") + if context_entity: + entity_id = context_entity["id"] + default_entity_id = entity_id + ftrack_entities_by_id[entity_id] = context_entity + + instances_by_entity_id = { + default_entity_id: [] + } + for instance in context: + entity = instance.data.get("ftrackEntity") + if not entity: + instances_by_entity_id[default_entity_id].append(instance) + continue + + entity_id = entity["id"] + ftrack_entities_by_id[entity_id] = entity + if entity_id not in instances_by_entity_id: + instances_by_entity_id[entity_id] = [] + instances_by_entity_id[entity_id].append(instance) + + if not ftrack_entities_by_id: + self.log.info("Ftrack entities are not set. 
Skipping") + return + + session = context.data["ftrackSession"] + custom_attr_key_by_id = self.query_attr_confs(session) + if not custom_attr_key_by_id: + self.log.info(( + "Didn't find any of defined custom attributes {}" + ).format(", ".join(self.custom_attribute_keys))) + return + + entity_ids = list(instances_by_entity_id.keys()) + values_by_entity_id = self.query_attr_values( + session, entity_ids, custom_attr_key_by_id + ) + + for entity_id, instances in instances_by_entity_id.items(): + if entity_id not in values_by_entity_id: + # Use defaut empty values + entity_id = None + + for instance in instances: + value = copy.deepcopy(values_by_entity_id[entity_id]) + if "customData" not in instance.data: + instance.data["customData"] = {} + instance.data["customData"]["ftrack"] = value + instance_label = ( + instance.data.get("label") or instance.data["name"] + ) + self.log.debug(( + "Added ftrack custom data to instance \"{}\": {}" + ).format(instance_label, value)) + + def query_attr_values(self, session, entity_ids, custom_attr_key_by_id): + # Prepare values for query + entity_ids_joined = ",".join([ + '"{}"'.format(entity_id) + for entity_id in entity_ids + ]) + conf_ids_joined = ",".join([ + '"{}"'.format(conf_id) + for conf_id in custom_attr_key_by_id.keys() + ]) + # Query custom attribute values + value_items = session.query( + ( + "select value, entity_id, configuration_id" + " from CustomAttributeValue" + " where entity_id in ({}) and configuration_id in ({})" + ).format( + entity_ids_joined, + conf_ids_joined + ) + ).all() + + # Prepare default value output per entity id + values_by_key = { + key: None for key in self.custom_attribute_keys + } + # Prepare all entity ids that were queried + values_by_entity_id = { + entity_id: copy.deepcopy(values_by_key) + for entity_id in entity_ids + } + # Add none entity id which is used as default value + values_by_entity_id[None] = copy.deepcopy(values_by_key) + # Go through queried data and store them + for item in 
value_items: + conf_id = item["configuration_id"] + conf_key = custom_attr_key_by_id[conf_id] + entity_id = item["entity_id"] + values_by_entity_id[entity_id][conf_key] = item["value"] + return values_by_entity_id + + def query_attr_confs(self, session): + custom_attributes = set(self.custom_attribute_keys) + cust_attrs_query = ( + "select id, key from CustomAttributeConfiguration" + " where key in ({})" + ).format(", ".join( + ["\"{}\"".format(attr_name) for attr_name in custom_attributes] + )) + + custom_attr_confs = session.query(cust_attrs_query).all() + return { + conf["id"]: conf["key"] + for conf in custom_attr_confs + } diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py index 07af217fb6..14da188150 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py @@ -1,12 +1,13 @@ import logging import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io class CollectFtrackApi(pyblish.api.ContextPlugin): """ Collects an ftrack session and the current task id. 
""" - order = pyblish.api.CollectorOrder + 0.4999 + order = pyblish.api.CollectorOrder + 0.4991 label = "Collect Ftrack Api" def process(self, context): @@ -23,9 +24,9 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): self.log.debug("Ftrack user: \"{0}\"".format(session.api_user)) # Collect task - project_name = avalon.api.Session["AVALON_PROJECT"] - asset_name = avalon.api.Session["AVALON_ASSET"] - task_name = avalon.api.Session["AVALON_TASK"] + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] # Find project entity project_query = 'Project where full_name is "{0}"'.format(project_name) diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py index 70030acad9..5758068f86 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py @@ -6,8 +6,8 @@ Provides: instance -> families ([]) """ import pyblish.api -import avalon.api +from openpype.pipeline import legacy_io from openpype.lib.plugin_tools import filter_profiles @@ -25,7 +25,7 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): based on 'families' (editorial drives it by presence of 'review') """ label = "Collect Ftrack Family" - order = pyblish.api.CollectorOrder + 0.4998 + order = pyblish.api.CollectorOrder + 0.4990 profiles = None @@ -34,9 +34,10 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): self.log.warning("No profiles present for adding Ftrack family") return + add_ftrack_family = False task_name = instance.data.get("task", - avalon.api.Session["AVALON_TASK"]) - host_name = avalon.api.Session["AVALON_APP"] + legacy_io.Session["AVALON_TASK"]) + host_name = legacy_io.Session["AVALON_APP"] family = instance.data["family"] filtering_criteria = { @@ -53,6 +54,8 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): 
additional_filters = profile.get("advanced_filtering") if additional_filters: + self.log.info("'{}' families used for additional filtering". + format(families)) add_ftrack_family = self._get_add_ftrack_f_from_addit_filters( additional_filters, families, @@ -69,6 +72,13 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): else: instance.data["families"] = ["ftrack"] + result_str = "Adding" + if not add_ftrack_family: + result_str = "Not adding" + self.log.info("{} 'ftrack' family for instance with '{}'".format( + result_str, family + )) + def _get_add_ftrack_f_from_addit_filters(self, additional_filters, families, diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py index 7ebf807f55..c4f7b1f05d 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py @@ -12,6 +12,7 @@ Provides: import os import sys +import collections import six import pyblish.api import clique @@ -24,48 +25,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): label = "Integrate Ftrack Api" families = ["ftrack"] - def query(self, entitytype, data): - """ Generate a query expression from data supplied. - - If a value is not a string, we'll add the id of the entity to the - query. - - Args: - entitytype (str): The type of entity to query. - data (dict): The data to identify the entity. - exclusions (list): All keys to exclude from the query. 
- - Returns: - str: String query to use with "session.query" - """ - queries = [] - if sys.version_info[0] < 3: - for key, value in data.iteritems(): - if not isinstance(value, (basestring, int)): - self.log.info("value: {}".format(value)) - if "id" in value.keys(): - queries.append( - "{0}.id is \"{1}\"".format(key, value["id"]) - ) - else: - queries.append("{0} is \"{1}\"".format(key, value)) - else: - for key, value in data.items(): - if not isinstance(value, (str, int)): - self.log.info("value: {}".format(value)) - if "id" in value.keys(): - queries.append( - "{0}.id is \"{1}\"".format(key, value["id"]) - ) - else: - queries.append("{0} is \"{1}\"".format(key, value)) - - query = ( - "select id from " + entitytype + " where " + " and ".join(queries) - ) - self.log.debug(query) - return query - def process(self, instance): session = instance.context.data["ftrackSession"] context = instance.context @@ -108,12 +67,25 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): default_asset_name = parent_entity["name"] # Change status on task - self._set_task_status(instance, task_entity, session) + asset_version_status_ids_by_name = {} + project_entity = instance.context.data.get("ftrackProject") + if project_entity: + project_schema = project_entity["project_schema"] + asset_version_statuses = ( + project_schema.get_statuses("AssetVersion") + ) + asset_version_status_ids_by_name = { + status["name"].lower(): status["id"] + for status in asset_version_statuses + } + + self._set_task_status(instance, project_entity, task_entity, session) # Prepare AssetTypes asset_types_by_short = self._ensure_asset_types_exists( session, component_list ) + self._fill_component_locations(session, component_list) asset_versions_data_by_id = {} used_asset_versions = [] @@ -139,7 +111,11 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): # Asset Version asset_version_data = data.get("assetversion_data") or {} asset_version_entity = self._ensure_asset_version_exists( - session, 
asset_version_data, asset_entity["id"], task_entity + session, + asset_version_data, + asset_entity["id"], + task_entity, + asset_version_status_ids_by_name ) # Component @@ -174,8 +150,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): if asset_version not in instance.data[asset_versions_key]: instance.data[asset_versions_key].append(asset_version) - def _set_task_status(self, instance, task_entity, session): - project_entity = instance.context.data.get("ftrackProject") + def _set_task_status(self, instance, project_entity, task_entity, session): if not project_entity: self.log.info("Task status won't be set, project is not known.") return @@ -220,6 +195,70 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): session._configure_locations() six.reraise(tp, value, tb) + def _fill_component_locations(self, session, component_list): + components_by_location_name = collections.defaultdict(list) + components_by_location_id = collections.defaultdict(list) + for component_item in component_list: + # Location entity can be prefilled + # - this is not recommended as connection to ftrack server may + # be lost and in that case the entity is not valid when gets + # to this plugin + location = component_item.get("component_location") + if location is not None: + continue + + # Collect location id + location_id = component_item.get("component_location_id") + if location_id: + components_by_location_id[location_id].append( + component_item + ) + continue + + location_name = component_item.get("component_location_name") + if location_name: + components_by_location_name[location_name].append( + component_item + ) + continue + + # Skip if there is nothing to do + if not components_by_location_name and not components_by_location_id: + return + + # Query locations + query_filters = [] + if components_by_location_id: + joined_location_ids = ",".join([ + '"{}"'.format(location_id) + for location_id in components_by_location_id + ]) + query_filters.append("id in 
({})".format(joined_location_ids)) + + if components_by_location_name: + joined_location_names = ",".join([ + '"{}"'.format(location_name) + for location_name in components_by_location_name + ]) + query_filters.append("name in ({})".format(joined_location_names)) + + locations = session.query( + "select id, name from Location where {}".format( + " or ".join(query_filters) + ) + ).all() + # Fill locations in components + for location in locations: + location_id = location["id"] + location_name = location["name"] + if location_id in components_by_location_id: + for component in components_by_location_id[location_id]: + component["component_location"] = location + + if location_name in components_by_location_name: + for component in components_by_location_name[location_name]: + component["component_location"] = location + def _ensure_asset_types_exists(self, session, component_list): """Make sure that all AssetType entities exists for integration. @@ -263,7 +302,9 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): self.log.info("Creating asset types with short names: {}".format( ", ".join(asset_type_names_by_missing_shorts.keys()) )) - for missing_short, type_name in asset_type_names_by_missing_shorts: + for missing_short, type_name in ( + asset_type_names_by_missing_shorts.items() + ): # Use short for name if name is not defined if not type_name: type_name = missing_short @@ -317,12 +358,19 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): ).first() def _ensure_asset_version_exists( - self, session, asset_version_data, asset_id, task_entity + self, + session, + asset_version_data, + asset_id, + task_entity, + status_ids_by_name ): task_id = None if task_entity: task_id = task_entity["id"] + status_name = asset_version_data.pop("status_name", None) + # Try query asset version by criteria (asset id and version) version = asset_version_data.get("version") or 0 asset_version_entity = self._query_asset_version( @@ -364,6 +412,18 @@ class 
IntegrateFtrackApi(pyblish.api.InstancePlugin): session, version, asset_id ) + if status_name: + status_id = status_ids_by_name.get(status_name.lower()) + if not status_id: + self.log.info(( + "Ftrack status with name \"{}\"" + " for AssetVersion was not found." + ).format(status_name)) + + elif asset_version_entity["status_id"] != status_id: + asset_version_entity["status_id"] = status_id + session.commit() + # Set custom attributes if there were any set custom_attrs = asset_version_data.get("custom_attributes") or {} for attr_key, attr_value in custom_attrs.items(): diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py index 5ea0469bce..c8d9e4117d 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py @@ -3,6 +3,9 @@ import json import copy import pyblish.api +from openpype.lib import get_ffprobe_streams +from openpype.lib.profiles_filtering import filter_profiles + class IntegrateFtrackInstance(pyblish.api.InstancePlugin): """Collect ftrack component data (not integrate yet). 
@@ -36,6 +39,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): "reference": "reference" } keep_first_subset_name_for_review = True + asset_versions_status_profiles = {} def process(self, instance): self.log.debug("instance {}".format(instance)) @@ -80,6 +84,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): if instance_fps is None: instance_fps = instance.context.data["fps"] + status_name = self._get_asset_version_status_name(instance) + # Base of component item data # - create a copy of this object when want to use it base_component_item = { @@ -91,7 +97,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): }, "assetversion_data": { "version": version_number, - "comment": instance.context.data.get("comment") or "" + "comment": instance.context.data.get("comment") or "", + "status_name": status_name }, "component_overwrite": False, # This can be change optionally @@ -99,11 +106,10 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # These must be changed for each component "component_data": None, "component_path": None, - "component_location": None + "component_location": None, + "component_location_name": None } - ft_session = instance.context.data["ftrackSession"] - # Filter types of representations review_representations = [] thumbnail_representations = [] @@ -121,12 +127,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): other_representations.append(repre) # Prepare ftrack locations - unmanaged_location = ft_session.query( - "Location where name is \"ftrack.unmanaged\"" - ).one() - ftrack_server_location = ft_session.query( - "Location where name is \"ftrack.server\"" - ).one() + unmanaged_location_name = "ftrack.unmanaged" + ftrack_server_location_name = "ftrack.server" # Components data component_list = [] @@ -136,6 +138,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Create thumbnail components # TODO what if there is multiple thumbnails? 
first_thumbnail_component = None + first_thumbnail_component_repre = None for repre in thumbnail_representations: published_path = repre.get("published_path") if not published_path: @@ -163,12 +166,49 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): src_components_to_add.append(copy.deepcopy(thumbnail_item)) # Create copy of first thumbnail if first_thumbnail_component is None: - first_thumbnail_component = copy.deepcopy(thumbnail_item) + first_thumbnail_component_repre = repre + first_thumbnail_component = thumbnail_item # Set location - thumbnail_item["component_location"] = ftrack_server_location + thumbnail_item["component_location_name"] = ( + ftrack_server_location_name + ) + # Add item to component list component_list.append(thumbnail_item) + if ( + not review_representations + and first_thumbnail_component is not None + ): + width = first_thumbnail_component_repre.get("width") + height = first_thumbnail_component_repre.get("height") + if not width or not height: + component_path = first_thumbnail_component["component_path"] + streams = [] + try: + streams = get_ffprobe_streams(component_path) + except Exception: + self.log.debug(( + "Failed to retrieve information about intput {}" + ).format(component_path)) + + for stream in streams: + if "width" in stream and "height" in stream: + width = stream["width"] + height = stream["height"] + break + + if width and height: + component_data = first_thumbnail_component["component_data"] + component_data["name"] = "ftrackreview-image" + component_data["metadata"] = { + "ftr_meta": json.dumps({ + "width": width, + "height": height, + "format": "image" + }) + } + # Create review components # Change asset name of each new component for review is_first_review_repre = True @@ -254,7 +294,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): src_components_to_add.append(copy.deepcopy(review_item)) # Set location - review_item["component_location"] = ftrack_server_location + 
review_item["component_location_name"] = ( + ftrack_server_location_name + ) # Add item to component list component_list.append(review_item) @@ -266,8 +308,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): first_thumbnail_component ) new_thumbnail_component["asset_data"]["name"] = asset_name - new_thumbnail_component["component_location"] = ( - ftrack_server_location + new_thumbnail_component["component_location_name"] = ( + ftrack_server_location_name ) component_list.append(new_thumbnail_component) @@ -276,7 +318,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Make sure thumbnail is disabled copy_src_item["thumbnail"] = False # Set location - copy_src_item["component_location"] = unmanaged_location + copy_src_item["component_location_name"] = unmanaged_location_name # Modify name of component to have suffix "_src" component_data = copy_src_item["component_data"] component_name = component_data["name"] @@ -301,7 +343,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): other_item["component_data"] = { "name": repre["name"] } - other_item["component_location"] = unmanaged_location + other_item["component_location_name"] = unmanaged_location_name other_item["component_path"] = published_path component_list.append(other_item) @@ -317,3 +359,24 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ) )) instance.data["ftrackComponentsList"] = component_list + + def _get_asset_version_status_name(self, instance): + if not self.asset_versions_status_profiles: + return None + + # Prepare filtering data for new asset version status + anatomy_data = instance.data["anatomyData"] + task_type = anatomy_data.get("task", {}).get("type") + filtering_criteria = { + "families": instance.data["family"], + "hosts": instance.context.data["hostName"], + "task_types": task_type + } + matching_profile = filter_profiles( + self.asset_versions_status_profiles, + filtering_criteria + ) + if not matching_profile: + return None + + return 
matching_profile["status"] or None diff --git a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py index 61892240d7..cf90c11b65 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py @@ -2,7 +2,8 @@ import sys import collections import six import pyblish.api -from avalon import io + +from openpype.pipeline import legacy_io # Copy of constant `openpype_modules.ftrack.lib.avalon_sync.CUST_ATTR_AUTO_SYNC` CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" @@ -80,8 +81,8 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): auto_sync_state = project[ "custom_attributes"][CUST_ATTR_AUTO_SYNC] - if not io.Session: - io.install() + if not legacy_io.Session: + legacy_io.install() self.ft_project = None @@ -271,7 +272,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): # Create new links. for input in entity_data.get("inputs", []): - input_id = io.find_one({"_id": input})["data"]["ftrackId"] + input_id = legacy_io.find_one({"_id": input})["data"]["ftrackId"] assetbuild = self.session.get("AssetBuild", input_id) self.log.debug( "Creating link from {0} to {1}".format( diff --git a/openpype/modules/ftrack/scripts/sub_event_storer.py b/openpype/modules/ftrack/scripts/sub_event_storer.py index 5543ed74e2..946ecbff79 100644 --- a/openpype/modules/ftrack/scripts/sub_event_storer.py +++ b/openpype/modules/ftrack/scripts/sub_event_storer.py @@ -67,7 +67,7 @@ def launch(event): except pymongo.errors.AutoReconnect: log.error("Mongo server \"{}\" is not responding, exiting.".format( - os.environ["AVALON_MONGO"] + os.environ["OPENPYPE_MONGO"] )) sys.exit(0) diff --git a/openpype/modules/interfaces.py b/openpype/modules/interfaces.py index 13cbea690b..334485cab2 100644 --- a/openpype/modules/interfaces.py +++ b/openpype/modules/interfaces.py @@ -14,11 +14,38 @@ class 
IPluginPaths(OpenPypeInterface): "publish": ["path/to/publish_plugins"] } """ - # TODO validation of an output + @abstractmethod def get_plugin_paths(self): pass + def get_creator_plugin_paths(self, host_name): + """Retreive creator plugin paths. + + Give addons ability to add creator plugin paths based on host name. + + NOTES: + - Default implementation uses 'get_plugin_paths' and always return + all creator plugins. + - Host name may help to organize plugins by host, but each creator + alsomay have host filtering. + + Args: + host_name (str): For which host are the plugins meant. + """ + + paths = self.get_plugin_paths() + if not paths or "create" not in paths: + return [] + + create_paths = paths["create"] + if not create_paths: + return [] + + if not isinstance(create_paths, (list, tuple, set)): + create_paths = [create_paths] + return create_paths + class ILaunchHookPaths(OpenPypeInterface): """Module has launch hook paths to return. diff --git a/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py b/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py index 4d216c1c0a..65af90e8a6 100644 --- a/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py +++ b/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py @@ -7,7 +7,8 @@ import json from pprint import pformat import pyblish.api -from avalon import api + +from openpype.pipeline import legacy_io def collect(root, @@ -127,7 +128,7 @@ class CollectSequencesFromJob(pyblish.api.ContextPlugin): session = metadata.get("session") if session: self.log.info("setting session using metadata") - api.Session.update(session) + legacy_io.Session.update(session) os.environ.update(session) else: @@ -187,7 +188,9 @@ class CollectSequencesFromJob(pyblish.api.ContextPlugin): "family": families[0], # backwards compatibility / pyblish "families": list(families), "subset": subset, - "asset": data.get("asset", api.Session["AVALON_ASSET"]), + "asset": 
data.get( + "asset", legacy_io.Session["AVALON_ASSET"] + ), "stagingDir": root, "frameStart": start, "frameEnd": end, diff --git a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py b/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py index 82a79daf3b..cdc37588cd 100644 --- a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py +++ b/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py @@ -119,7 +119,7 @@ class OpenPypeContextSelector: # app names and versions, but since app_name is not used # currently down the line (but it is required by OP publish command # right now). - self.context["app_name"] = "maya/2020" + # self.context["app_name"] = "maya/2022" return True @staticmethod @@ -139,7 +139,8 @@ class OpenPypeContextSelector: env = {"AVALON_PROJECT": str(self.context.get("project")), "AVALON_ASSET": str(self.context.get("asset")), "AVALON_TASK": str(self.context.get("task")), - "AVALON_APP_NAME": str(self.context.get("app_name"))} + # "AVALON_APP_NAME": str(self.context.get("app_name")) + } print(">>> setting environment:") for k, v in env.items(): @@ -184,7 +185,7 @@ selector = OpenPypeContextSelector() selector.context["project"] = os.getenv("AVALON_PROJECT") selector.context["asset"] = os.getenv("AVALON_ASSET") selector.context["task"] = os.getenv("AVALON_TASK") -selector.context["app_name"] = os.getenv("AVALON_APP_NAME") +# selector.context["app_name"] = os.getenv("AVALON_APP_NAME") # if anything inside is None, scratch the whole thing and # ask user for context. 
diff --git a/openpype/modules/slack/plugins/publish/collect_slack_family.py b/openpype/modules/slack/plugins/publish/collect_slack_family.py index 7475bdc89e..39b05937dc 100644 --- a/openpype/modules/slack/plugins/publish/collect_slack_family.py +++ b/openpype/modules/slack/plugins/publish/collect_slack_family.py @@ -1,7 +1,7 @@ -from avalon import io import pyblish.api from openpype.lib.profiles_filtering import filter_profiles +from openpype.pipeline import legacy_io class CollectSlackFamilies(pyblish.api.InstancePlugin): @@ -18,7 +18,7 @@ class CollectSlackFamilies(pyblish.api.InstancePlugin): profiles = None def process(self, instance): - task_name = io.Session.get("AVALON_TASK") + task_name = legacy_io.Session.get("AVALON_TASK") family = self.main_family_from_instance(instance) key_values = { "families": family, diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py index caf58503f1..5a1d8467ec 100644 --- a/openpype/modules/sync_server/sync_server_module.py +++ b/openpype/modules/sync_server/sync_server_module.py @@ -4,9 +4,8 @@ from datetime import datetime import threading import platform import copy -from collections import deque +from collections import deque, defaultdict -from avalon.api import AvalonMongoDB from openpype.modules import OpenPypeModule from openpype_interfaces import ITrayModule @@ -14,16 +13,19 @@ from openpype.api import ( Anatomy, get_project_settings, get_system_settings, - get_local_site_id) + get_local_site_id +) from openpype.lib import PypeLogger +from openpype.pipeline import AvalonMongoDB from openpype.settings.lib import ( get_default_anatomy_settings, - get_anatomy_settings) + get_anatomy_settings +) from .providers.local_drive import LocalDriveHandler from .providers import lib -from .utils import time_function, SyncStatus +from .utils import time_function, SyncStatus, SiteAlreadyPresentError log = PypeLogger().get_logger("SyncServer") @@ -131,21 +133,25 @@ class 
SyncServerModule(OpenPypeModule, ITrayModule): def add_site(self, collection, representation_id, site_name=None, force=False): """ - Adds new site to representation to be synced. + Adds new site to representation to be synced. - 'collection' must have synchronization enabled (globally or - project only) + 'collection' must have synchronization enabled (globally or + project only) - Used as a API endpoint from outside applications (Loader etc) + Used as a API endpoint from outside applications (Loader etc). - Args: - collection (string): project name (must match DB) - representation_id (string): MongoDB _id value - site_name (string): name of configured and active site - force (bool): reset site if exists + Use 'force' to reset existing site. - Returns: - throws ValueError if any issue + Args: + collection (string): project name (must match DB) + representation_id (string): MongoDB _id value + site_name (string): name of configured and active site + force (bool): reset site if exists + + Throws: + SiteAlreadyPresentError - if adding already existing site and + not 'force' + ValueError - other errors (repre not found, misconfiguration) """ if not self.get_sync_project_setting(collection): raise ValueError("Project not configured") @@ -155,9 +161,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.reset_site_on_representation(collection, representation_id, - site_name=site_name, force=force) + site_name=site_name, + force=force) - # public facing API def remove_site(self, collection, representation_id, site_name, remove_local_files=False): """ @@ -184,6 +190,151 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if remove_local_files: self._remove_local_file(collection, representation_id, site_name) + def compute_resource_sync_sites(self, project_name): + """Get available resource sync sites state for publish process. + + Returns dict with prepared state of sync sites for 'project_name'. + It checks if Site Sync is enabled, handles alternative sites. 
+ Publish process stores this dictionary as a part of representation + document in DB. + + Example: + [ + { + 'name': '42abbc09-d62a-44a4-815c-a12cd679d2d7', + 'created_dt': datetime.datetime(2022, 3, 30, 12, 16, 9, 778637) + }, + {'name': 'studio'}, + {'name': 'SFTP'} + ] -- representation is published locally, artist or Settings have set + remote site as 'studio'. 'SFTP' is alternate site to 'studio'. Eg. + whenever file is on 'studio', it is also on 'SFTP'. + """ + + def create_metadata(name, created=True): + """Create sync site metadata for site with `name`""" + metadata = {"name": name} + if created: + metadata["created_dt"] = datetime.now() + return metadata + + if ( + not self.sync_system_settings["enabled"] or + not self.sync_project_settings[project_name]["enabled"]): + return [create_metadata(self.DEFAULT_SITE)] + + local_site = self.get_active_site(project_name) + remote_site = self.get_remote_site(project_name) + + # Attached sites metadata by site name + # That is the local site, remote site, the always accesible sites + # and their alternate sites (alias of sites with different protocol) + attached_sites = dict() + attached_sites[local_site] = create_metadata(local_site) + + if remote_site and remote_site not in attached_sites: + attached_sites[remote_site] = create_metadata(remote_site, + created=False) + + attached_sites = self._add_alternative_sites(attached_sites) + # add skeleton for sites where it should be always synced to + # usually it would be a backup site which is handled by separate + # background process + for site in self._get_always_accessible_sites(project_name): + if site not in attached_sites: + attached_sites[site] = create_metadata(site, created=False) + + return list(attached_sites.values()) + + def _get_always_accessible_sites(self, project_name): + """Sites that synced to as a part of background process. + + Artist machine doesn't handle those, explicit Tray with that site name + as a local id must be running. 
+ Example is dropbox site serving as a backup solution + """ + always_accessible_sites = ( + self.get_sync_project_setting(project_name)["config"]. + get("always_accessible_on", []) + ) + return [site.strip() for site in always_accessible_sites] + + def _add_alternative_sites(self, attached_sites): + """Add skeleton document for alternative sites + + Each new configured site in System Setting could serve as a alternative + site, it's a kind of alias. It means that files on 'a site' are + physically accessible also on 'a alternative' site. + Example is sftp site serving studio files via sftp protocol, physically + file is only in studio, sftp server has this location mounted. + """ + additional_sites = self.sync_system_settings.get("sites", {}) + + alt_site_pairs = self._get_alt_site_pairs(additional_sites) + + for site_name in additional_sites.keys(): + # Get alternate sites (stripped names) for this site name + alt_sites = alt_site_pairs.get(site_name) + alt_sites = [site.strip() for site in alt_sites] + alt_sites = set(alt_sites) + + # If no alternative sites we don't need to add + if not alt_sites: + continue + + # Take a copy of data of the first alternate site that is already + # defined as an attached site to match the same state. + match_meta = next((attached_sites[site] for site in alt_sites + if site in attached_sites), None) + if not match_meta: + continue + + alt_site_meta = copy.deepcopy(match_meta) + alt_site_meta["name"] = site_name + + # Note: We change mutable `attached_site` dict in-place + attached_sites[site_name] = alt_site_meta + + return attached_sites + + def _get_alt_site_pairs(self, conf_sites): + """Returns dict of site and its alternative sites. 
+ + If `site` has alternative site, it means that alt_site has 'site' as + alternative site + Args: + conf_sites (dict) + Returns: + (dict): {'site': [alternative sites]...} + """ + alt_site_pairs = defaultdict(set) + for site_name, site_info in conf_sites.items(): + alt_sites = set(site_info.get("alternative_sites", [])) + alt_site_pairs[site_name].update(alt_sites) + + for alt_site in alt_sites: + alt_site_pairs[alt_site].add(site_name) + + for site_name, alt_sites in alt_site_pairs.items(): + sites_queue = deque(alt_sites) + while sites_queue: + alt_site = sites_queue.popleft() + + # safety against wrong config + # {"SFTP": {"alternative_site": "SFTP"} + if alt_site == site_name or alt_site not in alt_site_pairs: + continue + + for alt_alt_site in alt_site_pairs[alt_site]: + if ( + alt_alt_site != site_name + and alt_alt_site not in alt_sites + ): + alt_sites.add(alt_alt_site) + sites_queue.append(alt_alt_site) + + return alt_site_pairs + def clear_project(self, collection, site_name): """ Clear 'collection' of 'site_name' and its local files @@ -207,36 +358,38 @@ class SyncServerModule(OpenPypeModule, ITrayModule): def create_validate_project_task(self, collection, site_name): """Adds metadata about project files validation on a queue. - This process will loop through all representation and check if - their files actually exist on an active site. + This process will loop through all representation and check if + their files actually exist on an active site. - This might be useful for edge cases when artists is switching - between sites, remote site is actually physically mounted and - active site has same file urls etc. + It also checks if site is set in DB, but file is physically not + present - Task will run on a asyncio loop, shouldn't be blocking. + This might be useful for edge cases when artists is switching + between sites, remote site is actually physically mounted and + active site has same file urls etc. 
+ + Task will run on a asyncio loop, shouldn't be blocking. """ task = { "type": "validate", "project_name": collection, - "func": lambda: self.validate_project(collection, site_name) + "func": lambda: self.validate_project(collection, site_name, + reset_missing=True) } self.projects_processed.add(collection) self.long_running_tasks.append(task) - def validate_project(self, collection, site_name, remove_missing=False): - """ - Validate 'collection' of 'site_name' and its local files + def validate_project(self, collection, site_name, reset_missing=False): + """Validate 'collection' of 'site_name' and its local files - If file present and not marked with a 'site_name' in DB, DB is - updated with site name and file modified date. + If file present and not marked with a 'site_name' in DB, DB is + updated with site name and file modified date. - Args: - module (SyncServerModule) - collection (string): project name - site_name (string): active site name - remove_missing (bool): if True remove sites in DB if missing - physically + Args: + collection (string): project name + site_name (string): active site name + reset_missing (bool): if True reset site in DB if missing + physically """ self.log.debug("Validation of {} for {} started".format(collection, site_name)) @@ -251,29 +404,32 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return sites_added = 0 - sites_removed = 0 + sites_reset = 0 for repre in representations: repre_id = repre["_id"] for repre_file in repre.get("files", []): try: - has_site = site_name in [site["name"] - for site in repre_file["sites"]] - except TypeError: + is_on_site = site_name in [site["name"] + for site in repre_file["sites"] + if (site.get("created_dt") and + not site.get("error"))] + except (TypeError, AttributeError): self.log.debug("Structure error in {}".format(repre_id)) continue - if has_site and not remove_missing: - continue - file_path = repre_file.get("path", "") local_file_path = self.get_local_file_path(collection, 
site_name, file_path) - if local_file_path and os.path.exists(local_file_path): - self.log.debug("Adding site {} for {}".format(site_name, - repre_id)) - if not has_site: + file_exists = (local_file_path and + os.path.exists(local_file_path)) + if not is_on_site: + if file_exists: + self.log.debug( + "Adding site {} for {}".format(site_name, + repre_id)) + query = { "_id": repre_id } @@ -281,27 +437,27 @@ class SyncServerModule(OpenPypeModule, ITrayModule): os.path.getmtime(local_file_path)) elem = {"name": site_name, "created_dt": created_dt} - self._add_site(collection, query, [repre], elem, + self._add_site(collection, query, repre, elem, site_name=site_name, - file_id=repre_file["_id"]) + file_id=repre_file["_id"], + force=True) sites_added += 1 else: - if has_site and remove_missing: - self.log.debug("Removing site {} for {}". + if not file_exists and reset_missing: + self.log.debug("Resetting site {} for {}". format(site_name, repre_id)) - self.reset_provider_for_file(collection, - repre_id, - file_id=repre_file["_id"], - remove=True) - sites_removed += 1 + self.reset_site_on_representation( + collection, repre_id, site_name=site_name, + file_id=repre_file["_id"]) + sites_reset += 1 if sites_added % 100 == 0: self.log.debug("Sites added {}".format(sites_added)) self.log.debug("Validation of {} for {} ended".format(collection, site_name)) - self.log.info("Sites added {}, sites removed {}".format(sites_added, - sites_removed)) + self.log.info("Sites added {}, sites reset {}".format(sites_added, + reset_missing)) def pause_representation(self, collection, representation_id, site_name): """ @@ -765,12 +921,18 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if self.enabled: for project in self.connection.projects(projection={"name": 1}): project_name = project["name"] - project_settings = self.get_sync_project_setting(project_name) - if project_settings and project_settings.get("enabled"): + if self.is_project_enabled(project_name): 
enabled_projects.append(project_name) return enabled_projects + def is_project_enabled(self, project_name): + if self.enabled: + project_settings = self.get_sync_project_setting(project_name) + if project_settings and project_settings.get("enabled"): + return True + return False + def handle_alternate_site(self, collection, representation, processed_site, file_id, synced_file_id): """ @@ -819,7 +981,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.log.debug("Adding alternate {} to {}".format( alt_site, representation["_id"])) self._add_site(collection, query, - [representation], elem, + representation, elem, alt_site, file_id=file_id, force=True) """ End of Public API """ @@ -848,6 +1010,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if self.enabled and sync_settings.get('enabled'): sites.append(self.LOCAL_SITE) + active_site = sync_settings["config"]["active_site"] + # for Tray running background process + if active_site not in sites and active_site == get_local_site_id(): + sites.append(active_site) + return sites def tray_init(self): @@ -1418,14 +1585,16 @@ class SyncServerModule(OpenPypeModule, ITrayModule): pause (bool or None): if True - pause, False - unpause force (bool): hard reset - currently only for add_site - Returns: - throws ValueError + Raises: + SiteAlreadyPresentError - if adding already existing site and + not 'force' + ValueError - other errors (repre not found, misconfiguration) """ query = { "_id": ObjectId(representation_id) } - representation = list(self.connection.database[collection].find(query)) + representation = self.connection.database[collection].find_one(query) if not representation: raise ValueError("Representation {} not found in {}". 
format(representation_id, collection)) @@ -1456,7 +1625,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): representation, site_name, pause) else: # add new site to all files for representation self._add_site(collection, query, representation, elem, site_name, - force) + force=force) def _update_site(self, collection, query, update, arr_filter): """ @@ -1511,7 +1680,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Throws ValueError if 'site_name' not found on 'representation' """ found = False - for repre_file in representation.pop().get("files"): + for repre_file in representation.get("files"): for site in repre_file.get("sites"): if site.get("name") == site_name: found = True @@ -1537,7 +1706,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ found = False site = None - for repre_file in representation.pop().get("files"): + for repre_file in representation.get("files"): for site in repre_file.get("sites"): if site["name"] == site_name: found = True @@ -1569,29 +1738,34 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Adds 'site_name' to 'representation' on 'collection' Args: - representation (list of 1 dict) + representation (dict) file_id (ObjectId) Use 'force' to remove existing or raises ValueError """ - reseted_existing = False - for repre_file in representation.pop().get("files"): + reset_existing = False + files = representation.get("files", []) + if not files: + log.debug("No files for {}".format(representation["_id"])) + return + + for repre_file in files: if file_id and file_id != repre_file["_id"]: continue for site in repre_file.get("sites"): if site["name"] == site_name: - if force: + if force or site.get("error"): self._reset_site_for_file(collection, query, elem, repre_file["_id"], site_name) - reseted_existing = True + reset_existing = True else: msg = "Site {} already present".format(site_name) log.info(msg) - raise ValueError(msg) + raise SiteAlreadyPresentError(msg) - if reseted_existing: + if reset_existing: 
return if not file_id: @@ -1755,7 +1929,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): (int) - number of failed attempts """ _, rec = self._get_site_rec(file.get("sites", []), provider) - return rec.get("tries", 0) + return self._get_tries_count_from_rec(rec) def _get_progress_dict(self, progress): """ diff --git a/openpype/modules/sync_server/utils.py b/openpype/modules/sync_server/utils.py index 85e4e03f77..03f362202f 100644 --- a/openpype/modules/sync_server/utils.py +++ b/openpype/modules/sync_server/utils.py @@ -8,6 +8,11 @@ class ResumableError(Exception): pass +class SiteAlreadyPresentError(Exception): + """Representation has already site skeleton present.""" + pass + + class SyncStatus: DO_NOTHING = 0 DO_UPLOAD = 1 diff --git a/openpype/modules/timers_manager/timers_manager.py b/openpype/modules/timers_manager/timers_manager.py index 47d020104b..3f77a2b7dc 100644 --- a/openpype/modules/timers_manager/timers_manager.py +++ b/openpype/modules/timers_manager/timers_manager.py @@ -1,13 +1,14 @@ import os import platform -from avalon.api import AvalonMongoDB from openpype.modules import OpenPypeModule from openpype_interfaces import ( ITrayService, ILaunchHookPaths ) +from openpype.pipeline import AvalonMongoDB + from .exceptions import InvalidContextError diff --git a/openpype/pipeline/__init__.py b/openpype/pipeline/__init__.py index 883713b078..2e441fbf27 100644 --- a/openpype/pipeline/__init__.py +++ b/openpype/pipeline/__init__.py @@ -3,12 +3,15 @@ from .constants import ( HOST_WORKFILE_EXTENSIONS, ) +from .mongodb import ( + AvalonMongoDB, +) + from .create import ( BaseCreator, Creator, AutoCreator, CreatedInstance, - CreatorError, LegacyCreator, @@ -69,18 +72,36 @@ from .actions import ( deregister_inventory_action_path, ) +from .context_tools import ( + install_openpype_plugins, + install_host, + uninstall_host, + is_installed, + + register_root, + registered_root, + + register_host, + registered_host, + deregister_host, +) +install = 
install_host +uninstall = uninstall_host + __all__ = ( "AVALON_CONTAINER_ID", "HOST_WORKFILE_EXTENSIONS", - "attribute_definitions", + # --- MongoDB --- + "AvalonMongoDB", # --- Create --- "BaseCreator", "Creator", "AutoCreator", "CreatedInstance", + "CreatorError", "CreatorError", @@ -137,4 +158,21 @@ __all__ = ( "register_inventory_action_path", "deregister_inventory_action", "deregister_inventory_action_path", + + # --- Process context --- + "install_openpype_plugins", + "install_host", + "uninstall_host", + "is_installed", + + "register_root", + "registered_root", + + "register_host", + "registered_host", + "deregister_host", + + # Backwards compatible function names + "install", + "uninstall", ) diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py new file mode 100644 index 0000000000..e849f5b0d1 --- /dev/null +++ b/openpype/pipeline/context_tools.py @@ -0,0 +1,346 @@ +"""Core pipeline functionality""" + +import os +import sys +import json +import types +import logging +import inspect +import platform + +import pyblish.api +from pyblish.lib import MessageHandler + +import openpype +from openpype.modules import load_modules, ModulesManager +from openpype.settings import get_project_settings +from openpype.lib import ( + Anatomy, + register_event_callback, + filter_pyblish_plugins, + change_timer_to_current_context, +) + +from . 
import ( + legacy_io, + register_loader_plugin_path, + register_inventory_action, + register_creator_plugin_path, + deregister_loader_plugin_path, +) + + +_is_installed = False +_registered_root = {"_": ""} +_registered_host = {"_": None} + +log = logging.getLogger(__name__) + +PACKAGE_DIR = os.path.dirname(os.path.abspath(openpype.__file__)) +PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") + +# Global plugin paths +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") + + +def register_root(path): + """Register currently active root""" + log.info("Registering root: %s" % path) + _registered_root["_"] = path + + +def registered_root(): + """Return currently registered root""" + root = _registered_root["_"] + if root: + return root + + root = legacy_io.Session.get("AVALON_PROJECTS") + if root: + return os.path.normpath(root) + return "" + + +def install_host(host): + """Install `host` into the running Python session. + + Args: + host (module): A Python module containing the Avalon + avalon host-interface. + """ + global _is_installed + + _is_installed = True + + legacy_io.install() + + missing = list() + for key in ("AVALON_PROJECT", "AVALON_ASSET"): + if key not in legacy_io.Session: + missing.append(key) + + assert not missing, ( + "%s missing from environment, %s" % ( + ", ".join(missing), + json.dumps(legacy_io.Session, indent=4, sort_keys=True) + )) + + project_name = legacy_io.Session["AVALON_PROJECT"] + log.info("Activating %s.." 
% project_name) + + # Optional host install function + if hasattr(host, "install"): + host.install() + + register_host(host) + + register_event_callback("taskChanged", _on_task_change) + + def modified_emit(obj, record): + """Method replacing `emit` in Pyblish's MessageHandler.""" + record.msg = record.getMessage() + obj.records.append(record) + + MessageHandler.emit = modified_emit + + install_openpype_plugins() + + +def install_openpype_plugins(project_name=None, host_name=None): + # Make sure modules are loaded + load_modules() + + log.info("Registering global plug-ins..") + pyblish.api.register_plugin_path(PUBLISH_PATH) + pyblish.api.register_discovery_filter(filter_pyblish_plugins) + register_loader_plugin_path(LOAD_PATH) + + modules_manager = ModulesManager() + publish_plugin_dirs = modules_manager.collect_plugin_paths()["publish"] + for path in publish_plugin_dirs: + pyblish.api.register_plugin_path(path) + + if host_name is None: + host_name = os.environ.get("AVALON_APP") + + creator_paths = modules_manager.collect_creator_plugin_paths(host_name) + for creator_path in creator_paths: + register_creator_plugin_path(creator_path) + + if project_name is None: + project_name = os.environ.get("AVALON_PROJECT") + + # Register studio specific plugins + if project_name: + anatomy = Anatomy(project_name) + anatomy.set_root_environments() + register_root(anatomy.roots) + + project_settings = get_project_settings(project_name) + platform_name = platform.system().lower() + project_plugins = ( + project_settings + .get("global", {}) + .get("project_plugins", {}) + .get(platform_name) + ) or [] + for path in project_plugins: + try: + path = str(path.format(**os.environ)) + except KeyError: + pass + + if not path or not os.path.exists(path): + continue + + pyblish.api.register_plugin_path(path) + register_loader_plugin_path(path) + register_creator_plugin_path(path) + register_inventory_action(path) + + +def _on_task_change(): + change_timer_to_current_context() + + +def 
uninstall_host(): + """Undo all of what `install()` did""" + host = registered_host() + + try: + host.uninstall() + except AttributeError: + pass + + log.info("Deregistering global plug-ins..") + pyblish.api.deregister_plugin_path(PUBLISH_PATH) + pyblish.api.deregister_discovery_filter(filter_pyblish_plugins) + deregister_loader_plugin_path(LOAD_PATH) + log.info("Global plug-ins unregistred") + + deregister_host() + + legacy_io.uninstall() + + log.info("Successfully uninstalled Avalon!") + + +def is_installed(): + """Return state of installation + + Returns: + True if installed, False otherwise + + """ + + return _is_installed + + +def register_host(host): + """Register a new host for the current process + + Arguments: + host (ModuleType): A module implementing the + Host API interface. See the Host API + documentation for details on what is + required, or browse the source code. + + """ + signatures = { + "ls": [] + } + + _validate_signature(host, signatures) + _registered_host["_"] = host + + +def _validate_signature(module, signatures): + # Required signatures for each member + + missing = list() + invalid = list() + success = True + + for member in signatures: + if not hasattr(module, member): + missing.append(member) + success = False + + else: + attr = getattr(module, member) + if sys.version_info.major >= 3: + signature = inspect.getfullargspec(attr)[0] + else: + signature = inspect.getargspec(attr)[0] + required_signature = signatures[member] + + assert isinstance(signature, list) + assert isinstance(required_signature, list) + + if not all(member in signature + for member in required_signature): + invalid.append({ + "member": member, + "signature": ", ".join(signature), + "required": ", ".join(required_signature) + }) + success = False + + if not success: + report = list() + + if missing: + report.append( + "Incomplete interface for module: '%s'\n" + "Missing: %s" % (module, ", ".join( + "'%s'" % member for member in missing)) + ) + + if invalid: + 
report.append( + "'%s': One or more members were found, but didn't " + "have the right argument signature." % module.__name__ + ) + + for member in invalid: + report.append( + " Found: {member}({signature})".format(**member) + ) + report.append( + " Expected: {member}({required})".format(**member) + ) + + raise ValueError("\n".join(report)) + + +def registered_host(): + """Return currently registered host""" + return _registered_host["_"] + + +def deregister_host(): + _registered_host["_"] = default_host() + + +def default_host(): + """A default host, in place of anything better + + This may be considered as reference for the + interface a host must implement. It also ensures + that the system runs, even when nothing is there + to support it. + + """ + + host = types.ModuleType("defaultHost") + + def ls(): + return list() + + host.__dict__.update({ + "ls": ls + }) + + return host + + +def debug_host(): + """A debug host, useful to debugging features that depend on a host""" + + host = types.ModuleType("debugHost") + + def ls(): + containers = [ + { + "representation": "ee-ft-a-uuid1", + "schema": "openpype:container-1.0", + "name": "Bruce01", + "objectName": "Bruce01_node", + "namespace": "_bruce01_", + "version": 3, + }, + { + "representation": "aa-bc-s-uuid2", + "schema": "openpype:container-1.0", + "name": "Bruce02", + "objectName": "Bruce01_node", + "namespace": "_bruce02_", + "version": 2, + } + ] + + for container in containers: + yield container + + host.__dict__.update({ + "ls": ls, + "open_file": lambda fname: None, + "save_file": lambda fname: None, + "current_file": lambda: os.path.expanduser("~/temp.txt"), + "has_unsaved_changes": lambda: False, + "work_root": lambda: os.path.expanduser("~/temp"), + "file_extensions": lambda: ["txt"], + }) + + return host diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index 3efdb0e5c3..2f1922c103 100644 --- a/openpype/pipeline/create/context.py +++ 
b/openpype/pipeline/create/context.py @@ -6,8 +6,13 @@ import inspect from uuid import uuid4 from contextlib import contextmanager +from openpype.pipeline import legacy_io +from openpype.pipeline.mongodb import ( + AvalonMongoDB, + session_data_from_environment, +) + from .creator_plugins import ( - BaseCreator, Creator, AutoCreator, discover_creator_plugins, @@ -356,7 +361,7 @@ class CreatedInstance: already existing instance. creator(BaseCreator): Creator responsible for instance. host(ModuleType): Host implementation loaded with - `avalon.api.registered_host`. + `openpype.pipeline.registered_host`. new(bool): Is instance new. """ # Keys that can't be changed or removed from data after loading using @@ -659,10 +664,8 @@ class CreateContext: ): # Create conncetion if is not passed if dbcon is None: - import avalon.api - - session = avalon.api.session_data_from_environment(True) - dbcon = avalon.api.AvalonMongoDB(session) + session = session_data_from_environment(True) + dbcon = AvalonMongoDB(session) dbcon.install() self.dbcon = dbcon @@ -746,6 +749,10 @@ class CreateContext: """Is host valid for creation.""" return self._host_is_valid + @property + def host_name(self): + return os.environ["AVALON_APP"] + @property def log(self): """Dynamic access to logger.""" @@ -770,12 +777,11 @@ class CreateContext: """Give ability to reset avalon context. Reset is based on optional host implementation of `get_current_context` - function or using `avalon.api.Session`. + function or using `legacy_io.Session`. 
Some hosts have ability to change context file without using workfiles tool but that change is not propagated to """ - import avalon.api project_name = asset_name = task_name = None if hasattr(self.host, "get_current_context"): @@ -786,11 +792,11 @@ class CreateContext: task_name = host_context.get("task_name") if not project_name: - project_name = avalon.api.Session.get("AVALON_PROJECT") + project_name = legacy_io.Session.get("AVALON_PROJECT") if not asset_name: - asset_name = avalon.api.Session.get("AVALON_ASSET") + asset_name = legacy_io.Session.get("AVALON_ASSET") if not task_name: - task_name = avalon.api.Session.get("AVALON_TASK") + task_name = legacy_io.Session.get("AVALON_TASK") if project_name: self.dbcon.Session["AVALON_PROJECT"] = project_name @@ -805,7 +811,6 @@ class CreateContext: Reloads creators from preregistered paths and can load publish plugins if it's enabled on context. """ - import avalon.api import pyblish.logic from openpype.pipeline import OpenPypePyblishPluginMixin @@ -860,6 +865,17 @@ class CreateContext: "Using first and skipping following" )) continue + + # Filter by host name + if ( + creator_class.host_name + and creator_class.host_name != self.host_name + ): + self.log.info(( + "Creator's host name is not supported for current host {}" + ).format(creator_class.host_name, self.host_name)) + continue + creator = creator_class( self, system_settings, diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index 36bccd427e..8006d4f4f8 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -63,6 +63,12 @@ class BaseCreator: # `openpype.pipeline.attribute_definitions` instance_attr_defs = [] + # Filtering by host name - can be used to be filtered by host name + # - used on all hosts when set to 'None' for Backwards compatibility + # - was added afterwards + # QUESTION make this required? 
+ host_name = None + def __init__( self, create_context, system_settings, project_settings, headless=False ): @@ -89,7 +95,9 @@ class BaseCreator: @property def log(self): if self._log is None: - self._log = logging.getLogger(self.__class__.__name__) + from openpype.api import Logger + + self._log = Logger.get_logger(self.__class__.__name__) return self._log def _add_instance_to_context(self, instance): diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py index cf6629047e..46e0e3d663 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/openpype/pipeline/create/legacy_create.py @@ -142,7 +142,8 @@ def legacy_create(Creator, name, asset, options=None, data=None): Name of instance """ - from avalon.api import registered_host + from openpype.pipeline import registered_host + host = registered_host() plugin = Creator(name, asset, options, data) diff --git a/openpype/pipeline/farm/__init__.py b/openpype/pipeline/farm/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/pipeline/farm/patterning.py b/openpype/pipeline/farm/patterning.py new file mode 100644 index 0000000000..1e4b5bf37d --- /dev/null +++ b/openpype/pipeline/farm/patterning.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +import re + + +def match_aov_pattern(host_name, aov_patterns, render_file_name): + """Matching against a `AOV` pattern in the render files. + + In order to match the AOV name we must compare + against the render filename string that we are + grabbing the render filename string from the collection + that we have grabbed from `exp_files`. + + Args: + app (str): Host name. + aov_patterns (dict): AOV patterns from AOV filters. + render_file_name (str): Incoming file name to match against. + + Returns: + bool: Review state for rendered file (render_file_name). 
+ """ + aov_pattern = aov_patterns.get(host_name, []) + if not aov_pattern: + return False + return any(re.match(p, render_file_name) for p in aov_pattern) diff --git a/openpype/pipeline/legacy_io.py b/openpype/pipeline/legacy_io.py new file mode 100644 index 0000000000..c8e7e79600 --- /dev/null +++ b/openpype/pipeline/legacy_io.py @@ -0,0 +1,146 @@ +"""Wrapper around interactions with the database""" + +import sys +import logging +import functools + +from . import schema +from .mongodb import AvalonMongoDB, session_data_from_environment + +module = sys.modules[__name__] + +Session = {} +_is_installed = False +_connection_object = AvalonMongoDB(Session) +_mongo_client = None +_database = database = None + +log = logging.getLogger(__name__) + + +def install(): + """Establish a persistent connection to the database""" + if module._is_installed: + return + + session = session_data_from_environment(context_keys=True) + + session["schema"] = "openpype:session-3.0" + try: + schema.validate(session) + except schema.ValidationError as e: + # TODO(marcus): Make this mandatory + log.warning(e) + + _connection_object.Session.update(session) + _connection_object.install() + + module._mongo_client = _connection_object.mongo_client + module._database = module.database = _connection_object.database + + module._is_installed = True + + +def uninstall(): + """Close any connection to the database""" + module._mongo_client = None + module._database = module.database = None + module._is_installed = False + try: + module._connection_object.uninstall() + except AttributeError: + pass + + +def requires_install(func): + @functools.wraps(func) + def decorated(*args, **kwargs): + if not module._is_installed: + install() + return func(*args, **kwargs) + return decorated + + +@requires_install +def projects(*args, **kwargs): + return _connection_object.projects(*args, **kwargs) + + +@requires_install +def insert_one(doc, *args, **kwargs): + return _connection_object.insert_one(doc, *args, 
**kwargs) + + +@requires_install +def insert_many(docs, *args, **kwargs): + return _connection_object.insert_many(docs, *args, **kwargs) + + +@requires_install +def update_one(*args, **kwargs): + return _connection_object.update_one(*args, **kwargs) + + +@requires_install +def update_many(*args, **kwargs): + return _connection_object.update_many(*args, **kwargs) + + +@requires_install +def replace_one(*args, **kwargs): + return _connection_object.replace_one(*args, **kwargs) + + +@requires_install +def replace_many(*args, **kwargs): + return _connection_object.replace_many(*args, **kwargs) + + +@requires_install +def delete_one(*args, **kwargs): + return _connection_object.delete_one(*args, **kwargs) + + +@requires_install +def delete_many(*args, **kwargs): + return _connection_object.delete_many(*args, **kwargs) + + +@requires_install +def find(*args, **kwargs): + return _connection_object.find(*args, **kwargs) + + +@requires_install +def find_one(*args, **kwargs): + return _connection_object.find_one(*args, **kwargs) + + +@requires_install +def distinct(*args, **kwargs): + return _connection_object.distinct(*args, **kwargs) + + +@requires_install +def aggregate(*args, **kwargs): + return _connection_object.aggregate(*args, **kwargs) + + +@requires_install +def save(*args, **kwargs): + return _connection_object.save(*args, **kwargs) + + +@requires_install +def drop(*args, **kwargs): + return _connection_object.drop(*args, **kwargs) + + +@requires_install +def parenthood(*args, **kwargs): + return _connection_object.parenthood(*args, **kwargs) + + +@requires_install +def bulk_write(*args, **kwargs): + return _connection_object.bulk_write(*args, **kwargs) diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py index 53ac6b626d..99e5d11f82 100644 --- a/openpype/pipeline/load/utils.py +++ b/openpype/pipeline/load/utils.py @@ -9,10 +9,11 @@ import numbers import six from bson.objectid import ObjectId -from avalon import io, schema -from 
avalon.api import Session, registered_root - from openpype.lib import Anatomy +from openpype.pipeline import ( + schema, + legacy_io, +) log = logging.getLogger(__name__) @@ -59,7 +60,7 @@ def get_repres_contexts(representation_ids, dbcon=None): """ if not dbcon: - dbcon = io + dbcon = legacy_io contexts = {} if not representation_ids: @@ -166,7 +167,7 @@ def get_subset_contexts(subset_ids, dbcon=None): dict: The full representation context by representation id. """ if not dbcon: - dbcon = io + dbcon = legacy_io contexts = {} if not subset_ids: @@ -229,10 +230,10 @@ def get_representation_context(representation): assert representation is not None, "This is a bug" if isinstance(representation, (six.string_types, ObjectId)): - representation = io.find_one( + representation = legacy_io.find_one( {"_id": ObjectId(str(representation))}) - version, subset, asset, project = io.parenthood(representation) + version, subset, asset, project = legacy_io.parenthood(representation) assert all([representation, version, subset, asset, project]), ( "This is a bug" @@ -404,17 +405,17 @@ def update_container(container, version=-1): """Update a container""" # Compute the different version from 'representation' - current_representation = io.find_one({ + current_representation = legacy_io.find_one({ "_id": ObjectId(container["representation"]) }) assert current_representation is not None, "This is a bug" - current_version, subset, asset, project = io.parenthood( + current_version, subset, asset, project = legacy_io.parenthood( current_representation) if version == -1: - new_version = io.find_one({ + new_version = legacy_io.find_one({ "type": "version", "parent": subset["_id"] }, sort=[("name", -1)]) @@ -430,11 +431,11 @@ def update_container(container, version=-1): "type": "version", "name": version } - new_version = io.find_one(version_query) + new_version = legacy_io.find_one(version_query) assert new_version is not None, "This is a bug" - new_representation = io.find_one({ + 
new_representation = legacy_io.find_one({ "type": "representation", "parent": new_version["_id"], "name": current_representation["name"] @@ -481,7 +482,7 @@ def switch_container(container, representation, loader_plugin=None): )) # Get the new representation to switch to - new_representation = io.find_one({ + new_representation = legacy_io.find_one({ "type": "representation", "_id": representation["_id"], }) @@ -500,7 +501,7 @@ def get_representation_path_from_context(context): representation = context['representation'] project_doc = context.get("project") root = None - session_project = Session.get("AVALON_PROJECT") + session_project = legacy_io.Session.get("AVALON_PROJECT") if project_doc and project_doc["name"] != session_project: anatomy = Anatomy(project_doc["name"]) root = anatomy.roots @@ -529,9 +530,11 @@ def get_representation_path(representation, root=None, dbcon=None): from openpype.lib import StringTemplate, TemplateUnsolved if dbcon is None: - dbcon = io + dbcon = legacy_io if root is None: + from openpype.pipeline import registered_root + root = registered_root() def path_from_represenation(): diff --git a/openpype/pipeline/mongodb.py b/openpype/pipeline/mongodb.py new file mode 100644 index 0000000000..565e26b966 --- /dev/null +++ b/openpype/pipeline/mongodb.py @@ -0,0 +1,272 @@ +import os +import time +import functools +import logging +import pymongo +from uuid import uuid4 + +from . 
import schema + + +def requires_install(func): + func_obj = getattr(func, "__self__", None) + + @functools.wraps(func) + def decorated(*args, **kwargs): + if func_obj is not None: + _obj = func_obj + else: + _obj = args[0] + if not _obj.is_installed(): + if _obj.auto_install: + _obj.install() + else: + raise IOError( + "'{}.{}()' requires to run install() first".format( + _obj.__class__.__name__, func.__name__ + ) + ) + return func(*args, **kwargs) + return decorated + + +def auto_reconnect(func): + """Handling auto reconnect in 3 retry times""" + retry_times = 3 + reconnect_msg = "Reconnecting..." + func_obj = getattr(func, "__self__", None) + + @functools.wraps(func) + def decorated(*args, **kwargs): + if func_obj is not None: + _obj = func_obj + else: + _obj = args[0] + + for retry in range(1, retry_times + 1): + try: + return func(*args, **kwargs) + except pymongo.errors.AutoReconnect: + if hasattr(_obj, "log"): + _obj.log.warning(reconnect_msg) + else: + print(reconnect_msg) + + if retry >= retry_times: + raise + time.sleep(0.1) + return decorated + + +SESSION_CONTEXT_KEYS = ( + # Root directory of projects on disk + "AVALON_PROJECTS", + # Name of current Project + "AVALON_PROJECT", + # Name of current Asset + "AVALON_ASSET", + # Name of current task + "AVALON_TASK", + # Name of current app + "AVALON_APP", + # Path to working directory + "AVALON_WORKDIR", + # Optional path to scenes directory (see Work Files API) + "AVALON_SCENEDIR" +) + + +def session_data_from_environment(context_keys=False): + session_data = {} + if context_keys: + for key in SESSION_CONTEXT_KEYS: + value = os.environ.get(key) + session_data[key] = value or "" + else: + for key in SESSION_CONTEXT_KEYS: + session_data[key] = None + + for key, default_value in ( + # Name of Avalon in graphical user interfaces + # Use this to customise the visual appearance of Avalon + # to better integrate with your surrounding pipeline + ("AVALON_LABEL", "Avalon"), + + # Used during any connections to the 
outside world + ("AVALON_TIMEOUT", "1000"), + + # Name of database used in MongoDB + ("AVALON_DB", "avalon"), + ): + value = os.environ.get(key) or default_value + if value is not None: + session_data[key] = value + + return session_data + + +class AvalonMongoDB: + def __init__(self, session=None, auto_install=True): + self._id = uuid4() + self._database = None + self.auto_install = auto_install + self._installed = False + + if session is None: + session = session_data_from_environment(context_keys=False) + + self.Session = session + + self.log = logging.getLogger(self.__class__.__name__) + + def __getattr__(self, attr_name): + attr = None + if not self.is_installed() and self.auto_install: + self.install() + + if not self.is_installed(): + raise IOError( + "'{}.{}()' requires to run install() first".format( + self.__class__.__name__, attr_name + ) + ) + + project_name = self.active_project() + if project_name is None: + raise ValueError( + "Value of 'Session[\"AVALON_PROJECT\"]' is not set." 
+ ) + + collection = self._database[project_name] + not_set = object() + attr = getattr(collection, attr_name, not_set) + + if attr is not_set: + # Raise attribute error + raise AttributeError( + "{} has no attribute '{}'.".format( + collection.__class__.__name__, attr_name + ) + ) + + # Decorate function + if callable(attr): + attr = auto_reconnect(attr) + return attr + + @property + def mongo_client(self): + from openpype.lib import OpenPypeMongoConnection + + return OpenPypeMongoConnection.get_mongo_client() + + @property + def id(self): + return self._id + + @property + def database(self): + if not self.is_installed() and self.auto_install: + self.install() + + if self.is_installed(): + return self._database + + raise IOError( + "'{}.database' requires to run install() first".format( + self.__class__.__name__ + ) + ) + + def is_installed(self): + return self._installed + + def install(self): + """Establish a persistent connection to the database""" + if self.is_installed(): + return + + self._installed = True + self._database = self.mongo_client[str(os.environ["AVALON_DB"])] + + def uninstall(self): + """Close any connection to the database""" + self._installed = False + self._database = None + + @requires_install + def active_project(self): + """Return the name of the active project""" + return self.Session["AVALON_PROJECT"] + + @requires_install + @auto_reconnect + def projects(self, projection=None, only_active=True): + """Iter project documents + + Args: + projection (optional): MongoDB query projection operation + only_active (optional): Skip inactive projects, default True. 
+ + Returns: + Project documents iterator + + """ + query_filter = {"type": "project"} + if only_active: + query_filter.update({ + "$or": [ + {"data.active": {"$exists": 0}}, + {"data.active": True}, + ] + }) + + for project_name in self._database.collection_names(): + if project_name in ("system.indexes",): + continue + + # Each collection will have exactly one project document + + doc = self._database[project_name].find_one( + query_filter, projection=projection + ) + if doc is not None: + yield doc + + @auto_reconnect + def insert_one(self, item, *args, **kwargs): + assert isinstance(item, dict), "item must be of type " + schema.validate(item) + return self._database[self.active_project()].insert_one( + item, *args, **kwargs + ) + + @auto_reconnect + def insert_many(self, items, *args, **kwargs): + # check if all items are valid + assert isinstance(items, list), "`items` must be of type " + for item in items: + assert isinstance(item, dict), "`item` must be of type " + schema.validate(item) + + return self._database[self.active_project()].insert_many( + items, *args, **kwargs + ) + + def parenthood(self, document): + assert document is not None, "This is a bug" + + parents = list() + + while document.get("parent") is not None: + document = self.find_one({"_id": document["parent"]}) + if document is None: + break + + if document.get("type") == "hero_version": + _document = self.find_one({"_id": document["version_id"]}) + document["data"] = _document["data"] + + parents.append(document) + + return parents diff --git a/openpype/pipeline/plugin_discover.py b/openpype/pipeline/plugin_discover.py index fb860fe5f2..004e530b1c 100644 --- a/openpype/pipeline/plugin_discover.py +++ b/openpype/pipeline/plugin_discover.py @@ -59,7 +59,7 @@ class DiscoverResult: self.ignored_plugins ))) for cls in self.ignored_plugins: - lines.append("- {}".format(cls.__class__.__name__)) + lines.append("- {}".format(cls.__name__)) # Abstract classes if self.abstract_plugins or full_report: 
@@ -67,7 +67,7 @@ class DiscoverResult: self.abstract_plugins ))) for cls in self.abstract_plugins: - lines.append("- {}".format(cls.__class__.__name__)) + lines.append("- {}".format(cls.__name__)) # Abstract classes if self.duplicated_plugins or full_report: @@ -75,7 +75,7 @@ class DiscoverResult: self.duplicated_plugins ))) for cls in self.duplicated_plugins: - lines.append("- {}".format(cls.__class__.__name__)) + lines.append("- {}".format(cls.__name__)) if self.crashed_file_paths or full_report: lines.append("*** Failed to load {} files".format(len( diff --git a/openpype/pipeline/schema.py b/openpype/pipeline/schema.py new file mode 100644 index 0000000000..7e96bfe1b1 --- /dev/null +++ b/openpype/pipeline/schema.py @@ -0,0 +1,137 @@ +"""Wrapper around :mod:`jsonschema` + +Schemas are implicitly loaded from the /schema directory of this project. + +Attributes: + _cache: Cache of previously loaded schemas + +Resources: + http://json-schema.org/ + http://json-schema.org/latest/json-schema-core.html + http://spacetelescope.github.io/understanding-json-schema/index.html + +""" + +import os +import re +import json +import logging + +import jsonschema +import six + +log_ = logging.getLogger(__name__) + +ValidationError = jsonschema.ValidationError +SchemaError = jsonschema.SchemaError + +_CACHED = False + + +def get_schema_version(schema_name): + """Extract version form schema name. + + It is expected that schema name contain only major and minor version. + + Expected name should match to: + "{name}:{type}-{major version}.{minor version}" + - `name` - must not contain colon + - `type` - must not contain dash + - major and minor versions must be numbers separated by dot + + Args: + schema_name(str): Name of schema that should be parsed. + + Returns: + tuple: Contain two values major version as first and minor version as + second. When schema does not match parsing regex then `(0, 0)` is + returned. 
+ """ + schema_regex = re.compile(r"[^:]+:[^-]+-(\d.\d)") + groups = schema_regex.findall(schema_name) + if not groups: + return 0, 0 + + maj_version, min_version = groups[0].split(".") + return int(maj_version), int(min_version) + + +def validate(data, schema=None): + """Validate `data` with `schema` + + Arguments: + data (dict): JSON-compatible data + schema (str): DEPRECATED Name of schema. Now included in the data. + + Raises: + ValidationError on invalid schema + + """ + if not _CACHED: + _precache() + + root, schema = data["schema"].rsplit(":", 1) + # assert root in ( + # "mindbender-core", # Backwards compatiblity + # "avalon-core", + # "pype" + # ) + + if isinstance(schema, six.string_types): + schema = _cache[schema + ".json"] + + resolver = jsonschema.RefResolver( + "", + None, + store=_cache, + cache_remote=True + ) + + jsonschema.validate(data, + schema, + types={"array": (list, tuple)}, + resolver=resolver) + + +_cache = { + # A mock schema for docstring tests + "_doctest.json": { + "$schema": "http://json-schema.org/schema#", + + "title": "_doctest", + "description": "A test schema", + + "type": "object", + + "additionalProperties": False, + + "required": ["key"], + + "properties": { + "key": { + "description": "A test key", + "type": "string" + } + } + } +} + + +def _precache(): + """Store available schemas in-memory for reduced disk access""" + global _CACHED + + repos_root = os.environ["OPENPYPE_REPOS_ROOT"] + schema_dir = os.path.join(repos_root, "schema") + + for schema in os.listdir(schema_dir): + if schema.startswith(("_", ".")): + continue + if not schema.endswith(".json"): + continue + if not os.path.isfile(os.path.join(schema_dir, schema)): + continue + with open(os.path.join(schema_dir, schema)) as f: + log_.debug("Installing schema '%s'.." 
% schema) + _cache[schema] = json.load(f) + _CACHED = True diff --git a/openpype/pipeline/thumbnail.py b/openpype/pipeline/thumbnail.py index c09dab70eb..ec97b36954 100644 --- a/openpype/pipeline/thumbnail.py +++ b/openpype/pipeline/thumbnail.py @@ -2,6 +2,7 @@ import os import copy import logging +from . import legacy_io from .plugin_discover import ( discover, register_plugin, @@ -17,8 +18,7 @@ def get_thumbnail_binary(thumbnail_entity, thumbnail_type, dbcon=None): resolvers = discover_thumbnail_resolvers() resolvers = sorted(resolvers, key=lambda cls: cls.priority) if dbcon is None: - from avalon import io - dbcon = io + dbcon = legacy_io for Resolver in resolvers: available_types = Resolver.thumbnail_types diff --git a/openpype/plugin.py b/openpype/plugin.py index 3569936dac..bb9bc2ff85 100644 --- a/openpype/plugin.py +++ b/openpype/plugin.py @@ -1,7 +1,6 @@ import tempfile import os import pyblish.api -import avalon.api ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05 ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1 diff --git a/openpype/plugins/load/add_site.py b/openpype/plugins/load/add_site.py index 95001691e2..55fda55d17 100644 --- a/openpype/plugins/load/add_site.py +++ b/openpype/plugins/load/add_site.py @@ -1,9 +1,19 @@ from openpype.modules import ModulesManager from openpype.pipeline import load +from openpype.lib.avalon_context import get_linked_ids_for_representations +from openpype.modules.sync_server.utils import SiteAlreadyPresentError class AddSyncSite(load.LoaderPlugin): - """Add sync site to representation""" + """Add sync site to representation + + If family of synced representation is 'workfile', it looks for all + representations which are referenced (loaded) in workfile with content of + 'inputLinks'. + It doesn't do any checks for site, most common use case is when artist is + downloading workfile to his local site, but it might be helpful when + artist is re-uploading broken representation on remote site also. 
+ """ representations = ["*"] families = ["*"] @@ -12,21 +22,42 @@ class AddSyncSite(load.LoaderPlugin): icon = "download" color = "#999999" + _sync_server = None + is_add_site_loader = True + + @property + def sync_server(self): + if not self._sync_server: + manager = ModulesManager() + self._sync_server = manager.modules_by_name["sync_server"] + + return self._sync_server + def load(self, context, name=None, namespace=None, data=None): self.log.info("Adding {} to representation: {}".format( data["site_name"], data["_id"])) - self.add_site_to_representation(data["project_name"], - data["_id"], - data["site_name"]) - self.log.debug("Site added.") + family = context["representation"]["context"]["family"] + project_name = data["project_name"] + repre_id = data["_id"] + site_name = data["site_name"] - @staticmethod - def add_site_to_representation(project_name, representation_id, site_name): - """Adds new site to representation_id, resets if exists""" - manager = ModulesManager() - sync_server = manager.modules_by_name["sync_server"] - sync_server.add_site(project_name, representation_id, site_name, - force=True) + self.sync_server.add_site(project_name, repre_id, site_name, + force=True) + + if family == "workfile": + links = get_linked_ids_for_representations(project_name, + [repre_id], + link_type="reference") + for link_repre_id in links: + try: + self.sync_server.add_site(project_name, link_repre_id, + site_name, + force=False) + except SiteAlreadyPresentError: + # do not add/reset working site for references + self.log.debug("Site present", exc_info=True) + + self.log.debug("Site added.") def filepath_from_context(self, context): """No real file loading""" diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py index 2789f4ea23..c3e9e9fa0a 100644 --- a/openpype/plugins/load/delete_old_versions.py +++ b/openpype/plugins/load/delete_old_versions.py @@ -8,9 +8,8 @@ import ftrack_api import qargparse from Qt import 
QtWidgets, QtCore -from avalon.api import AvalonMongoDB from openpype import style -from openpype.pipeline import load +from openpype.pipeline import load, AvalonMongoDB from openpype.lib import StringTemplate from openpype.api import Anatomy diff --git a/openpype/plugins/load/delivery.py b/openpype/plugins/load/delivery.py index 04080053e3..7df07e3f64 100644 --- a/openpype/plugins/load/delivery.py +++ b/openpype/plugins/load/delivery.py @@ -3,9 +3,7 @@ from collections import defaultdict from Qt import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB - -from openpype.pipeline import load +from openpype.pipeline import load, AvalonMongoDB from openpype.api import Anatomy, config from openpype import resources, style diff --git a/openpype/plugins/load/remove_site.py b/openpype/plugins/load/remove_site.py index adffec9986..c5f442b2f5 100644 --- a/openpype/plugins/load/remove_site.py +++ b/openpype/plugins/load/remove_site.py @@ -12,22 +12,26 @@ class RemoveSyncSite(load.LoaderPlugin): icon = "download" color = "#999999" + _sync_server = None + is_remove_site_loader = True + + @property + def sync_server(self): + if not self._sync_server: + manager = ModulesManager() + self._sync_server = manager.modules_by_name["sync_server"] + + return self._sync_server + def load(self, context, name=None, namespace=None, data=None): self.log.info("Removing {} on representation: {}".format( data["site_name"], data["_id"])) - self.remove_site_on_representation(data["project_name"], - data["_id"], - data["site_name"]) + self.sync_server.remove_site(data["project_name"], + data["_id"], + data["site_name"], + True) self.log.debug("Site added.") - @staticmethod - def remove_site_on_representation(project_name, representation_id, - site_name): - manager = ModulesManager() - sync_server = manager.modules_by_name["sync_server"] - sync_server.remove_site(project_name, representation_id, - site_name, True) - def filepath_from_context(self, context): """No real file loading""" 
return "" diff --git a/openpype/plugins/publish/cleanup_farm.py b/openpype/plugins/publish/cleanup_farm.py index ab0c6e469e..2c6c1625bb 100644 --- a/openpype/plugins/publish/cleanup_farm.py +++ b/openpype/plugins/publish/cleanup_farm.py @@ -3,7 +3,8 @@ import os import shutil import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io class CleanUpFarm(pyblish.api.ContextPlugin): @@ -22,7 +23,7 @@ class CleanUpFarm(pyblish.api.ContextPlugin): def process(self, context): # Get source host from which farm publishing was started - src_host_name = avalon.api.Session.get("AVALON_APP") + src_host_name = legacy_io.Session.get("AVALON_APP") self.log.debug("Host name from session is {}".format(src_host_name)) # Skip process if is not in list of source hosts in which this # plugin should run diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py index bd8d9e50c4..0794adfb67 100644 --- a/openpype/plugins/publish/collect_anatomy_context_data.py +++ b/openpype/plugins/publish/collect_anatomy_context_data.py @@ -13,11 +13,12 @@ Provides: """ import json +import pyblish.api + from openpype.lib import ( get_system_general_anatomy_data ) -from avalon import api -import pyblish.api +from openpype.pipeline import legacy_io class CollectAnatomyContextData(pyblish.api.ContextPlugin): @@ -65,7 +66,7 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): asset_entity = context.data.get("assetEntity") if asset_entity: - task_name = api.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] asset_tasks = asset_entity["data"]["tasks"] task_type = asset_tasks.get(task_name, {}).get("type") diff --git a/openpype/plugins/publish/collect_anatomy_instance_data.py b/openpype/plugins/publish/collect_anatomy_instance_data.py index 42836e796b..6a6ea170b5 100644 --- a/openpype/plugins/publish/collect_anatomy_instance_data.py +++ 
b/openpype/plugins/publish/collect_anatomy_instance_data.py @@ -25,9 +25,10 @@ import copy import json import collections -from avalon import io import pyblish.api +from openpype.pipeline import legacy_io + class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): """Collect Instance specific Anatomy data. @@ -83,7 +84,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): self.log.debug("Querying asset documents with names: {}".format( ", ".join(["\"{}\"".format(name) for name in asset_names]) )) - asset_docs = io.find({ + asset_docs = legacy_io.find({ "type": "asset", "name": {"$in": asset_names} }) @@ -153,7 +154,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): subset_docs = [] if subset_filters: - subset_docs = list(io.find({ + subset_docs = list(legacy_io.find({ "type": "subset", "$or": subset_filters })) @@ -202,7 +203,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): ] last_version_by_subset_id = {} - for doc in io.aggregate(_pipeline): + for doc in legacy_io.aggregate(_pipeline): subset_id = doc["_id"] last_version_by_subset_id[subset_id] = doc["name"] diff --git a/openpype/plugins/publish/collect_avalon_entities.py b/openpype/plugins/publish/collect_avalon_entities.py index c099a2cf75..3e7843407f 100644 --- a/openpype/plugins/publish/collect_avalon_entities.py +++ b/openpype/plugins/publish/collect_avalon_entities.py @@ -8,9 +8,10 @@ Provides: context -> assetEntity - asset entity from database """ -from avalon import io, api import pyblish.api +from openpype.pipeline import legacy_io + class CollectAvalonEntities(pyblish.api.ContextPlugin): """Collect Anatomy into Context""" @@ -19,12 +20,12 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): label = "Collect Avalon Entities" def process(self, context): - io.install() - project_name = api.Session["AVALON_PROJECT"] - asset_name = api.Session["AVALON_ASSET"] - task_name = api.Session["AVALON_TASK"] + legacy_io.install() + project_name = 
legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] - project_entity = io.find_one({ + project_entity = legacy_io.find_one({ "type": "project", "name": project_name }) @@ -38,7 +39,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): if not asset_name: self.log.info("Context is not set. Can't collect global data.") return - asset_entity = io.find_one({ + asset_entity = legacy_io.find_one({ "type": "asset", "name": asset_name, "parent": project_entity["_id"] diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py index 16e3f669c3..f6ead98809 100644 --- a/openpype/plugins/publish/collect_from_create_context.py +++ b/openpype/plugins/publish/collect_from_create_context.py @@ -3,7 +3,8 @@ """ import os import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io class CollectFromCreateContext(pyblish.api.ContextPlugin): @@ -25,12 +26,12 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): # Update global data to context context.data.update(create_context.context_data_to_store()) - + context.data["newPublishing"] = True # Update context data for key in ("AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK"): value = create_context.dbcon.Session.get(key) if value is not None: - avalon.api.Session[key] = value + legacy_io.Session[key] = value os.environ[key] = value def create_instance(self, context, in_data): diff --git a/openpype/plugins/publish/collect_hierarchy.py b/openpype/plugins/publish/collect_hierarchy.py index efb40407d9..a96d444be6 100644 --- a/openpype/plugins/publish/collect_hierarchy.py +++ b/openpype/plugins/publish/collect_hierarchy.py @@ -1,5 +1,6 @@ import pyblish.api -import avalon.api as avalon + +from openpype.pipeline import legacy_io class CollectHierarchy(pyblish.api.ContextPlugin): @@ -19,7 +20,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): def process(self, 
context): temp_context = {} - project_name = avalon.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] final_context = {} final_context[project_name] = {} final_context[project_name]['entity_type'] = 'Project' @@ -29,14 +30,15 @@ class CollectHierarchy(pyblish.api.ContextPlugin): # shot data dict shot_data = {} - family = instance.data.get("family") + family = instance.data["family"] + families = instance.data["families"] # filter out all unepropriate instances if not instance.data["publish"]: continue # exclude other families then self.families with intersection - if not set(self.families).intersection([family]): + if not set(self.families).intersection(set(families + [family])): continue # exclude if not masterLayer True diff --git a/openpype/plugins/publish/collect_rendered_files.py b/openpype/plugins/publish/collect_rendered_files.py index 1005c38b9d..670e57ed10 100644 --- a/openpype/plugins/publish/collect_rendered_files.py +++ b/openpype/plugins/publish/collect_rendered_files.py @@ -11,7 +11,8 @@ import os import json import pyblish.api -from avalon import api + +from openpype.pipeline import legacy_io class CollectRenderedFiles(pyblish.api.ContextPlugin): @@ -150,7 +151,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): session_data["AVALON_WORKDIR"] = remapped self.log.info("Setting session using data from file") - api.Session.update(session_data) + legacy_io.Session.update(session_data) os.environ.update(session_data) session_is_set = True self._process_path(data, anatomy) diff --git a/openpype/plugins/publish/collect_resources_path.py b/openpype/plugins/publish/collect_resources_path.py index 1f509365c7..89df031fb0 100644 --- a/openpype/plugins/publish/collect_resources_path.py +++ b/openpype/plugins/publish/collect_resources_path.py @@ -12,7 +12,8 @@ import os import copy import pyblish.api -from avalon import api + +from openpype.pipeline import legacy_io class CollectResourcesPath(pyblish.api.InstancePlugin): @@ 
-84,7 +85,7 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): else: # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." diff --git a/openpype/plugins/publish/collect_scene_loaded_versions.py b/openpype/plugins/publish/collect_scene_loaded_versions.py index 6746757e5f..bb34e3ce31 100644 --- a/openpype/plugins/publish/collect_scene_loaded_versions.py +++ b/openpype/plugins/publish/collect_scene_loaded_versions.py @@ -1,7 +1,11 @@ from bson.objectid import ObjectId import pyblish.api -from avalon import api, io + +from openpype.pipeline import ( + registered_host, + legacy_io, +) class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): @@ -24,7 +28,7 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): ] def process(self, context): - host = api.registered_host() + host = registered_host() if host is None: self.log.warn("No registered host.") return @@ -37,18 +41,33 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): loaded_versions = [] _containers = list(host.ls()) _repr_ids = [ObjectId(c["representation"]) for c in _containers] + repre_docs = legacy_io.find( + {"_id": {"$in": _repr_ids}}, + projection={"_id": 1, "parent": 1} + ) version_by_repr = { - str(doc["_id"]): doc["parent"] for doc in - io.find({"_id": {"$in": _repr_ids}}, projection={"parent": 1}) + str(doc["_id"]): doc["parent"] + for doc in repre_docs } + # QUESTION should we add same representation id when loaded multiple + # times? for con in _containers: + repre_id = con["representation"] + version_id = version_by_repr.get(repre_id) + if version_id is None: + self.log.warning(( + "Skipping container," + " did not find representation document. 
{}" + ).format(str(con))) + continue + # NOTE: # may have more then one representation that are same version version = { "subsetName": con["name"], - "representation": ObjectId(con["representation"]), - "version": version_by_repr[con["representation"]], # _id + "representation": ObjectId(repre_id), + "version": version_id, } loaded_versions.append(version) diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py index b2ca8850b6..88093fb92f 100644 --- a/openpype/plugins/publish/extract_burnin.py +++ b/openpype/plugins/publish/extract_burnin.py @@ -16,7 +16,7 @@ from openpype.lib import ( run_openpype_process, get_transcode_temp_directory, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, should_convert_for_ffmpeg, CREATE_NO_WINDOW @@ -41,6 +41,7 @@ class ExtractBurnin(openpype.api.Extractor): "shell", "hiero", "premiere", + "traypublisher", "standalonepublisher", "harmony", "fusion", @@ -187,8 +188,13 @@ class ExtractBurnin(openpype.api.Extractor): repre_files = repre["files"] if isinstance(repre_files, (tuple, list)): filename = repre_files[0] + src_filepaths = [ + os.path.join(src_repre_staging_dir, filename) + for filename in repre_files + ] else: filename = repre_files + src_filepaths = [os.path.join(src_repre_staging_dir, filename)] first_input_path = os.path.join(src_repre_staging_dir, filename) # Determine if representation requires pre conversion for ffmpeg @@ -209,11 +215,9 @@ class ExtractBurnin(openpype.api.Extractor): new_staging_dir = get_transcode_temp_directory() repre["stagingDir"] = new_staging_dir - convert_for_ffmpeg( - first_input_path, + convert_input_paths_for_ffmpeg( + src_filepaths, new_staging_dir, - _temp_data["frameStart"], - _temp_data["frameEnd"], self.log ) @@ -221,11 +225,17 @@ class ExtractBurnin(openpype.api.Extractor): filled_anatomy = anatomy.format_all(burnin_data) burnin_data["anatomy"] = filled_anatomy.get_solved() - # Add context data burnin_data. 
- burnin_data["custom"] = ( + custom_data = copy.deepcopy( + instance.data.get("customData") or {} + ) + # Backwards compatibility (since 2022/04/07) + custom_data.update( instance.data.get("custom_burnin_data") or {} ) + # Add context data burnin_data. + burnin_data["custom"] = custom_data + # Add source camera name to burnin data camera_name = repre.get("camera_name") if camera_name: diff --git a/openpype/plugins/publish/extract_hierarchy_avalon.py b/openpype/plugins/publish/extract_hierarchy_avalon.py index b062a9c4b5..2f528d4469 100644 --- a/openpype/plugins/publish/extract_hierarchy_avalon.py +++ b/openpype/plugins/publish/extract_hierarchy_avalon.py @@ -1,7 +1,10 @@ -import pyblish.api -from avalon import io from copy import deepcopy +import pyblish.api + +from openpype.pipeline import legacy_io + + class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): """Create entities in Avalon based on collected data.""" @@ -16,8 +19,8 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): return hierarchy_context = deepcopy(context.data["hierarchyContext"]) - if not io.Session: - io.install() + if not legacy_io.Session: + legacy_io.install() active_assets = [] # filter only the active publishing insatnces @@ -78,7 +81,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): update_data = True # Process project if entity_type.lower() == "project": - entity = io.find_one({"type": "project"}) + entity = legacy_io.find_one({"type": "project"}) # TODO: should be in validator? 
assert (entity is not None), "Did not find project in DB" @@ -95,7 +98,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): ) # Else process assset else: - entity = io.find_one({"type": "asset", "name": name}) + entity = legacy_io.find_one({"type": "asset", "name": name}) if entity: # Do not override data, only update cur_entity_data = entity.get("data") or {} @@ -119,7 +122,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): # Skip updating data update_data = False - archived_entities = io.find({ + archived_entities = legacy_io.find({ "type": "archived_asset", "name": name }) @@ -143,7 +146,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if update_data: # Update entity data with input data - io.update_many( + legacy_io.update_many( {"_id": entity["_id"]}, {"$set": {"data": data}} ) @@ -161,7 +164,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): "type": "asset", "data": data } - io.replace_one( + legacy_io.replace_one( {"_id": entity["_id"]}, new_entity ) @@ -176,9 +179,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): "data": data } self.log.debug("Creating asset: {}".format(item)) - entity_id = io.insert_one(item).inserted_id + entity_id = legacy_io.insert_one(item).inserted_id - return io.find_one({"_id": entity_id}) + return legacy_io.find_one({"_id": entity_id}) def _get_assets(self, input_dict): """ Returns only asset dictionary. 
diff --git a/openpype/plugins/publish/extract_jpeg_exr.py b/openpype/plugins/publish/extract_jpeg_exr.py index 468ed96199..ae29f8b95b 100644 --- a/openpype/plugins/publish/extract_jpeg_exr.py +++ b/openpype/plugins/publish/extract_jpeg_exr.py @@ -8,7 +8,7 @@ from openpype.lib import ( path_to_subprocess_arg, get_transcode_temp_directory, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, should_convert_for_ffmpeg ) @@ -49,6 +49,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): return filtered_repres = self._get_filtered_repres(instance) + for repre in filtered_repres: repre_files = repre["files"] if not isinstance(repre_files, (list, tuple)): @@ -79,11 +80,9 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): if do_convert: convert_dir = get_transcode_temp_directory() filename = os.path.basename(full_input_path) - convert_for_ffmpeg( - full_input_path, + convert_input_paths_for_ffmpeg( + [full_input_path], convert_dir, - None, - None, self.log ) full_input_path = os.path.join(convert_dir, filename) @@ -153,6 +152,11 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): if convert_dir is not None and os.path.exists(convert_dir): shutil.rmtree(convert_dir) + # Create only one representation with name 'thumbnail' + # TODO maybe handle way how to decide from which representation + # will be thumbnail created + break + def _get_filtered_repres(self, instance): filtered_repres = [] src_repres = instance.data.get("representations") or [] diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index 3ecea1f8bd..879125dac3 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -18,7 +18,7 @@ from openpype.lib import ( path_to_subprocess_arg, should_convert_for_ffmpeg, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, get_transcode_temp_directory ) import speedcopy @@ -45,13 +45,15 @@ class ExtractReview(pyblish.api.InstancePlugin): "hiero", "premiere", 
"harmony", + "traypublisher", "standalonepublisher", "fusion", "tvpaint", "resolve", "webpublisher", "aftereffects", - "flame" + "flame", + "unreal" ] # Supported extensions @@ -188,23 +190,26 @@ class ExtractReview(pyblish.api.InstancePlugin): outputs_per_repres = self._get_outputs_per_representations( instance, profile_outputs ) - fill_data = copy.deepcopy(instance.data["anatomyData"]) - for repre, outputs in outputs_per_repres: + for repre, outpu_defs in outputs_per_repres: # Check if input should be preconverted before processing # Store original staging dir (it's value may change) src_repre_staging_dir = repre["stagingDir"] # Receive filepath to first file in representation first_input_path = None + input_filepaths = [] if not self.input_is_sequence(repre): first_input_path = os.path.join( src_repre_staging_dir, repre["files"] ) + input_filepaths.append(first_input_path) else: for filename in repre["files"]: - first_input_path = os.path.join( + filepath = os.path.join( src_repre_staging_dir, filename ) - break + input_filepaths.append(filepath) + if first_input_path is None: + first_input_path = filepath # Skip if file is not set if first_input_path is None: @@ -231,136 +236,149 @@ class ExtractReview(pyblish.api.InstancePlugin): new_staging_dir = get_transcode_temp_directory() repre["stagingDir"] = new_staging_dir - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - convert_for_ffmpeg( - first_input_path, + convert_input_paths_for_ffmpeg( + input_filepaths, new_staging_dir, - frame_start, - frame_end, self.log ) - for _output_def in outputs: - output_def = copy.deepcopy(_output_def) - # Make sure output definition has "tags" key - if "tags" not in output_def: - output_def["tags"] = [] - - if "burnins" not in output_def: - output_def["burnins"] = [] - - # Create copy of representation - new_repre = copy.deepcopy(repre) - # Make sure new representation has origin staging dir - # - this is because source representation may change 
- # it's staging dir because of ffmpeg conversion - new_repre["stagingDir"] = src_repre_staging_dir - - # Remove "delete" tag from new repre if there is - if "delete" in new_repre["tags"]: - new_repre["tags"].remove("delete") - - # Add additional tags from output definition to representation - for tag in output_def["tags"]: - if tag not in new_repre["tags"]: - new_repre["tags"].append(tag) - - # Add burnin link from output definition to representation - for burnin in output_def["burnins"]: - if burnin not in new_repre.get("burnins", []): - if not new_repre.get("burnins"): - new_repre["burnins"] = [] - new_repre["burnins"].append(str(burnin)) - - self.log.debug( - "Linked burnins: `{}`".format(new_repre.get("burnins")) + try: + self._render_output_definitions( + instance, repre, src_repre_staging_dir, outpu_defs ) - self.log.debug( - "New representation tags: `{}`".format( - new_repre.get("tags")) + finally: + # Make sure temporary staging is cleaned up and representation + # has set origin stagingDir + if do_convert: + # Set staging dir of source representation back to previous + # value + repre["stagingDir"] = src_repre_staging_dir + if os.path.exists(new_staging_dir): + shutil.rmtree(new_staging_dir) + + def _render_output_definitions( + self, instance, repre, src_repre_staging_dir, outpu_defs + ): + fill_data = copy.deepcopy(instance.data["anatomyData"]) + for _output_def in outpu_defs: + output_def = copy.deepcopy(_output_def) + # Make sure output definition has "tags" key + if "tags" not in output_def: + output_def["tags"] = [] + + if "burnins" not in output_def: + output_def["burnins"] = [] + + # Create copy of representation + new_repre = copy.deepcopy(repre) + # Make sure new representation has origin staging dir + # - this is because source representation may change + # it's staging dir because of ffmpeg conversion + new_repre["stagingDir"] = src_repre_staging_dir + + # Remove "delete" tag from new repre if there is + if "delete" in new_repre["tags"]: + 
new_repre["tags"].remove("delete") + + # Add additional tags from output definition to representation + for tag in output_def["tags"]: + if tag not in new_repre["tags"]: + new_repre["tags"].append(tag) + + # Add burnin link from output definition to representation + for burnin in output_def["burnins"]: + if burnin not in new_repre.get("burnins", []): + if not new_repre.get("burnins"): + new_repre["burnins"] = [] + new_repre["burnins"].append(str(burnin)) + + self.log.debug( + "Linked burnins: `{}`".format(new_repre.get("burnins")) + ) + + self.log.debug( + "New representation tags: `{}`".format( + new_repre.get("tags")) + ) + + temp_data = self.prepare_temp_data(instance, repre, output_def) + files_to_clean = [] + if temp_data["input_is_sequence"]: + self.log.info("Filling gaps in sequence.") + files_to_clean = self.fill_sequence_gaps( + temp_data["origin_repre"]["files"], + new_repre["stagingDir"], + temp_data["frame_start"], + temp_data["frame_end"]) + + # create or update outputName + output_name = new_repre.get("outputName", "") + output_ext = new_repre["ext"] + if output_name: + output_name += "_" + output_name += output_def["filename_suffix"] + if temp_data["without_handles"]: + output_name += "_noHandles" + + # add outputName to anatomy format fill_data + fill_data.update({ + "output": output_name, + "ext": output_ext + }) + + try: # temporary until oiiotool is supported cross platform + ffmpeg_args = self._ffmpeg_arguments( + output_def, instance, new_repre, temp_data, fill_data ) - - temp_data = self.prepare_temp_data( - instance, repre, output_def) - files_to_clean = [] - if temp_data["input_is_sequence"]: - self.log.info("Filling gaps in sequence.") - files_to_clean = self.fill_sequence_gaps( - temp_data["origin_repre"]["files"], - new_repre["stagingDir"], - temp_data["frame_start"], - temp_data["frame_end"]) - - # create or update outputName - output_name = new_repre.get("outputName", "") - output_ext = new_repre["ext"] - if output_name: - output_name 
+= "_" - output_name += output_def["filename_suffix"] - if temp_data["without_handles"]: - output_name += "_noHandles" - - # add outputName to anatomy format fill_data - fill_data.update({ - "output": output_name, - "ext": output_ext - }) - - try: # temporary until oiiotool is supported cross platform - ffmpeg_args = self._ffmpeg_arguments( - output_def, instance, new_repre, temp_data, fill_data + except ZeroDivisionError: + # TODO recalculate width and height using OIIO before + # conversion + if 'exr' in temp_data["origin_repre"]["ext"]: + self.log.warning( + ( + "Unsupported compression on input files." + " Skipping!!!" + ), + exc_info=True ) - except ZeroDivisionError: - if 'exr' in temp_data["origin_repre"]["ext"]: - self.log.debug("Unsupported compression on input " + - "files. Skipping!!!") - return - raise NotImplementedError + return + raise NotImplementedError - subprcs_cmd = " ".join(ffmpeg_args) + subprcs_cmd = " ".join(ffmpeg_args) - # run subprocess - self.log.debug("Executing: {}".format(subprcs_cmd)) + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) - openpype.api.run_subprocess( - subprcs_cmd, shell=True, logger=self.log - ) + openpype.api.run_subprocess( + subprcs_cmd, shell=True, logger=self.log + ) - # delete files added to fill gaps - if files_to_clean: - for f in files_to_clean: - os.unlink(f) + # delete files added to fill gaps + if files_to_clean: + for f in files_to_clean: + os.unlink(f) - new_repre.update({ - "name": "{}_{}".format(output_name, output_ext), - "outputName": output_name, - "outputDef": output_def, - "frameStartFtrack": temp_data["output_frame_start"], - "frameEndFtrack": temp_data["output_frame_end"], - "ffmpeg_cmd": subprcs_cmd - }) + new_repre.update({ + "name": "{}_{}".format(output_name, output_ext), + "outputName": output_name, + "outputDef": output_def, + "frameStartFtrack": temp_data["output_frame_start"], + "frameEndFtrack": temp_data["output_frame_end"], + "ffmpeg_cmd": subprcs_cmd + }) - # 
Force to pop these key if are in new repre - new_repre.pop("preview", None) - new_repre.pop("thumbnail", None) - if "clean_name" in new_repre.get("tags", []): - new_repre.pop("outputName") + # Force to pop these key if are in new repre + new_repre.pop("preview", None) + new_repre.pop("thumbnail", None) + if "clean_name" in new_repre.get("tags", []): + new_repre.pop("outputName") - # adding representation - self.log.debug( - "Adding new representation: {}".format(new_repre) - ) - instance.data["representations"].append(new_repre) - - # Cleanup temp staging dir after procesisng of output definitions - if do_convert: - temp_dir = repre["stagingDir"] - shutil.rmtree(temp_dir) - # Set staging dir of source representation back to previous - # value - repre["stagingDir"] = src_repre_staging_dir + # adding representation + self.log.debug( + "Adding new representation: {}".format(new_repre) + ) + instance.data["representations"].append(new_repre) def input_is_sequence(self, repre): """Deduce from representation data if input is sequence.""" diff --git a/openpype/plugins/publish/extract_review_slate.py b/openpype/plugins/publish/extract_review_slate.py index 505ae75169..49f0eac41d 100644 --- a/openpype/plugins/publish/extract_review_slate.py +++ b/openpype/plugins/publish/extract_review_slate.py @@ -158,13 +158,15 @@ class ExtractReviewSlate(openpype.api.Extractor): ]) if use_legacy_code: + format_args = [] codec_args = repre["_profile"].get('codec', []) output_args.extend(codec_args) # preset's output data output_args.extend(repre["_profile"].get('output', [])) else: # Codecs are copied from source for whole input - codec_args = self._get_codec_args(repre) + format_args, codec_args = self._get_format_codec_args(repre) + output_args.extend(format_args) output_args.extend(codec_args) # make sure colors are correct @@ -266,8 +268,14 @@ class ExtractReviewSlate(openpype.api.Extractor): "-safe", "0", "-i", conc_text_path, "-c", "copy", - output_path ] + # NOTE: Added because of 
OP Atom demuxers + # Add format arguments if there are any + # - keep format of output + if format_args: + concat_args.extend(format_args) + # Add final output path + concat_args.append(output_path) # ffmpeg concat subprocess self.log.debug( @@ -338,7 +346,7 @@ class ExtractReviewSlate(openpype.api.Extractor): return vf_back - def _get_codec_args(self, repre): + def _get_format_codec_args(self, repre): """Detect possible codec arguments from representation.""" codec_args = [] @@ -361,13 +369,9 @@ class ExtractReviewSlate(openpype.api.Extractor): return codec_args source_ffmpeg_cmd = repre.get("ffmpeg_cmd") - codec_args.extend( - get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd) - ) - codec_args.extend( - get_ffmpeg_codec_args( - ffprobe_data, source_ffmpeg_cmd, logger=self.log - ) + format_args = get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd) + codec_args = get_ffmpeg_codec_args( + ffprobe_data, source_ffmpeg_cmd, logger=self.log ) - return codec_args + return format_args, codec_args diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index ded149bdd0..a706b653c4 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -8,11 +8,14 @@ from bson.objectid import ObjectId from pymongo import InsertOne, ReplaceOne import pyblish.api -from avalon import api, io, schema from openpype.lib import ( create_hard_link, filter_profiles ) +from openpype.pipeline import ( + schema, + legacy_io, +) class IntegrateHeroVersion(pyblish.api.InstancePlugin): @@ -62,7 +65,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): template_key = self._get_template_key(instance) anatomy = instance.context.data["anatomy"] - project_name = api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] if template_key not in anatomy.templates: self.log.warning(( "!!! 
Anatomy of project \"{}\" does not have set" @@ -220,7 +223,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): if old_repres_by_name: old_repres_to_delete = old_repres_by_name - archived_repres = list(io.find({ + archived_repres = list(legacy_io.find({ # Check what is type of archived representation "type": "archived_repsentation", "parent": new_version_id @@ -441,7 +444,8 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): ) if bulk_writes: - io._database[io.Session["AVALON_PROJECT"]].bulk_write( + project_name = legacy_io.Session["AVALON_PROJECT"] + legacy_io.database[project_name].bulk_write( bulk_writes ) @@ -503,7 +507,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): anatomy_filled = anatomy.format(template_data) # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." 
@@ -584,12 +588,12 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): def version_from_representations(self, repres): for repre in repres: - version = io.find_one({"_id": repre["parent"]}) + version = legacy_io.find_one({"_id": repre["parent"]}) if version: return version def current_hero_ents(self, version): - hero_version = io.find_one({ + hero_version = legacy_io.find_one({ "parent": version["parent"], "type": "hero_version" }) @@ -597,7 +601,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): if not hero_version: return (None, []) - hero_repres = list(io.find({ + hero_repres = list(legacy_io.find({ "parent": hero_version["_id"], "type": "representation" })) diff --git a/openpype/plugins/publish/integrate_inputlinks.py b/openpype/plugins/publish/integrate_inputlinks.py index 11cffc4638..6964f2d938 100644 --- a/openpype/plugins/publish/integrate_inputlinks.py +++ b/openpype/plugins/publish/integrate_inputlinks.py @@ -3,7 +3,7 @@ from collections import OrderedDict from bson.objectid import ObjectId import pyblish.api -from avalon import io +from openpype.pipeline import legacy_io class IntegrateInputLinks(pyblish.api.ContextPlugin): @@ -129,5 +129,7 @@ class IntegrateInputLinks(pyblish.api.ContextPlugin): if input_links is None: continue - io.update_one({"_id": version_doc["_id"]}, - {"$set": {"data.inputLinks": input_links}}) + legacy_io.update_one( + {"_id": version_doc["_id"]}, + {"$set": {"data.inputLinks": input_links}} + ) diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py index 959fd3bbee..bf13a4050e 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_new.py @@ -8,14 +8,14 @@ import errno import six import re import shutil +from collections import deque, defaultdict +from datetime import datetime from bson.objectid import ObjectId from pymongo import DeleteOne, InsertOne import pyblish.api -from avalon import io + import openpype.api -from datetime 
import datetime -# from pype.modules import ModulesManager from openpype.lib.profiles_filtering import filter_profiles from openpype.lib import ( prepare_template_data, @@ -23,6 +23,7 @@ from openpype.lib import ( StringTemplate, TemplateUnsolved ) +from openpype.pipeline import legacy_io # this is needed until speedcopy for linux is fixed if sys.platform == "win32": @@ -112,7 +113,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "usdOverride", "simpleUnrealTexture" ] - exclude_families = ["clip"] + exclude_families = ["clip", "render.farm"] db_representation_context_keys = [ "project", "asset", "task", "subset", "version", "representation", "family", "hierarchy", "task", "username" @@ -130,11 +131,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset_grouping_profiles = None def process(self, instance): - self.integrated_file_sizes = {} - if [ef for ef in self.exclude_families - if instance.data["family"] in ef]: - return + for ef in self.exclude_families: + if ( + instance.data["family"] == ef or + ef in instance.data["families"]): + self.log.debug("Excluded family '{}' in '{}' or {}".format( + ef, instance.data["family"], instance.data["families"])) + return + self.integrated_file_sizes = {} try: self.register(instance) self.log.info("Integrated Asset in to the database ...") @@ -151,7 +156,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Required environment variables anatomy_data = instance.data["anatomyData"] - io.install() + legacy_io.install() context = instance.context @@ -165,7 +170,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): asset_name = instance.data["asset"] asset_entity = instance.data.get("assetEntity") if not asset_entity or asset_entity["name"] != context_asset_name: - asset_entity = io.find_one({ + asset_entity = legacy_io.find_one({ "type": "asset", "name": asset_name, "parent": project_entity["_id"] @@ -227,7 +232,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Ensure at least one file 
is set up for transfer in staging dir. repres = instance.data.get("representations") - assert repres, "Instance has no files to transfer" + repres = instance.data.get("representations") + msg = "Instance {} has no files to transfer".format( + instance.data["family"]) + assert repres, msg assert isinstance(repres, (list, tuple)), ( "Instance 'files' must be a list, got: {0} {1}".format( str(type(repres)), str(repres) @@ -258,14 +266,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): new_repre_names_low = [_repre["name"].lower() for _repre in repres] - existing_version = io.find_one({ + existing_version = legacy_io.find_one({ 'type': 'version', 'parent': subset["_id"], 'name': version_number }) if existing_version is None: - version_id = io.insert_one(version).inserted_id + version_id = legacy_io.insert_one(version).inserted_id else: # Check if instance have set `append` mode which cause that # only replicated representations are set to archive @@ -273,7 +281,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Update version data # TODO query by _id and - io.update_many({ + legacy_io.update_many({ 'type': 'version', 'parent': subset["_id"], 'name': version_number @@ -283,7 +291,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): version_id = existing_version['_id'] # Find representations of existing version and archive them - current_repres = list(io.find({ + current_repres = list(legacy_io.find({ "type": "representation", "parent": version_id })) @@ -306,14 +314,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # bulk updates if bulk_writes: - io._database[io.Session["AVALON_PROJECT"]].bulk_write( + project_name = legacy_io.Session["AVALON_PROJECT"] + legacy_io.database[project_name].bulk_write( bulk_writes ) - version = io.find_one({"_id": version_id}) + version = legacy_io.find_one({"_id": version_id}) instance.data["versionEntity"] = version - existing_repres = list(io.find({ + existing_repres = list(legacy_io.find({ "parent": 
version_id, "type": "archived_representation" })) @@ -653,12 +662,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): repre_ids_to_remove = [] for repre in existing_repres: repre_ids_to_remove.append(repre["_id"]) - io.delete_many({"_id": {"$in": repre_ids_to_remove}}) + legacy_io.delete_many({"_id": {"$in": repre_ids_to_remove}}) for rep in instance.data["representations"]: self.log.debug("__ rep: {}".format(rep)) - io.insert_many(representations) + legacy_io.insert_many(representations) instance.data["published_representations"] = ( published_representations ) @@ -760,7 +769,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): def get_subset(self, asset, instance): subset_name = instance.data["subset"] - subset = io.find_one({ + subset = legacy_io.find_one({ "type": "subset", "parent": asset["_id"], "name": subset_name @@ -781,7 +790,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if _family not in families: families.append(_family) - _id = io.insert_one({ + _id = legacy_io.insert_one({ "schema": "openpype:subset-3.0", "type": "subset", "name": subset_name, @@ -791,7 +800,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "parent": asset["_id"] }).inserted_id - subset = io.find_one({"_id": _id}) + subset = legacy_io.find_one({"_id": _id}) # QUESTION Why is changing of group and updating it's # families in 'get_subset'? @@ -800,7 +809,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Update families on subset. 
families = [instance.data["family"]] families.extend(instance.data.get("families", [])) - io.update_many( + legacy_io.update_many( {"type": "subset", "_id": ObjectId(subset["_id"])}, {"$set": {"data.families": families}} ) @@ -824,7 +833,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset_group = self._get_subset_group(instance) if subset_group: - io.update_many({ + legacy_io.update_many({ 'type': 'subset', '_id': ObjectId(subset_id) }, {'$set': {'data.subsetGroup': subset_group}}) @@ -1116,18 +1125,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): rec["sites"].append(meta) already_attached_sites[meta["name"]] = None + # add alternative sites + rec, already_attached_sites = self._add_alternative_sites( + system_sync_server_presets, already_attached_sites, rec) + # add skeleton for site where it should be always synced to - for always_on_site in always_accesible: + for always_on_site in set(always_accesible): if always_on_site not in already_attached_sites.keys(): meta = {"name": always_on_site.strip()} rec["sites"].append(meta) already_attached_sites[meta["name"]] = None - # add alternative sites - rec = self._add_alternative_sites(system_sync_server_presets, - already_attached_sites, - rec) - log.debug("final sites:: {}".format(rec["sites"])) return rec @@ -1158,22 +1166,60 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): """ conf_sites = system_sync_server_presets.get("sites", {}) + alt_site_pairs = self._get_alt_site_pairs(conf_sites) + + already_attached_keys = list(already_attached_sites.keys()) + for added_site in already_attached_keys: + real_created = already_attached_sites[added_site] + for alt_site in alt_site_pairs.get(added_site, []): + if alt_site in already_attached_sites.keys(): + continue + meta = {"name": alt_site} + # alt site inherits state of 'created_dt' + if real_created: + meta["created_dt"] = real_created + rec["sites"].append(meta) + already_attached_sites[meta["name"]] = real_created + + return rec, 
already_attached_sites + + def _get_alt_site_pairs(self, conf_sites): + """Returns dict of site and its alternative sites. + + If `site` has alternative site, it means that alt_site has 'site' as + alternative site + Args: + conf_sites (dict) + Returns: + (dict): {'site': [alternative sites]...} + """ + alt_site_pairs = defaultdict(list) for site_name, site_info in conf_sites.items(): alt_sites = set(site_info.get("alternative_sites", [])) - already_attached_keys = list(already_attached_sites.keys()) - for added_site in already_attached_keys: - if added_site in alt_sites: - if site_name in already_attached_keys: - continue - meta = {"name": site_name} - real_created = already_attached_sites[added_site] - # alt site inherits state of 'created_dt' - if real_created: - meta["created_dt"] = real_created - rec["sites"].append(meta) - already_attached_sites[meta["name"]] = real_created + alt_site_pairs[site_name].extend(alt_sites) - return rec + for alt_site in alt_sites: + alt_site_pairs[alt_site].append(site_name) + + for site_name, alt_sites in alt_site_pairs.items(): + sites_queue = deque(alt_sites) + while sites_queue: + alt_site = sites_queue.popleft() + + # safety against wrong config + # {"SFTP": {"alternative_site": "SFTP"} + if alt_site == site_name or alt_site not in alt_site_pairs: + continue + + for alt_alt_site in alt_site_pairs[alt_site]: + if ( + alt_alt_site != site_name + and alt_alt_site not in alt_sites + ): + alt_sites.append(alt_alt_site) + sites_queue.append(alt_alt_site) + + return alt_site_pairs def handle_destination_files(self, integrated_file_sizes, mode): """ Clean destination files diff --git a/openpype/plugins/publish/integrate_thumbnail.py b/openpype/plugins/publish/integrate_thumbnail.py index 28a93efb9a..5d6fc561ea 100644 --- a/openpype/plugins/publish/integrate_thumbnail.py +++ b/openpype/plugins/publish/integrate_thumbnail.py @@ -8,7 +8,7 @@ import six import pyblish.api from bson.objectid import ObjectId -from avalon import api, io 
+from openpype.pipeline import legacy_io class IntegrateThumbnails(pyblish.api.InstancePlugin): @@ -38,7 +38,7 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): ) return - project_name = api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] anatomy = instance.context.data["anatomy"] if "publish" not in anatomy.templates: @@ -66,11 +66,11 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): ) return - io.install() + legacy_io.install() thumbnail_template = anatomy.templates["publish"]["thumbnail"] - version = io.find_one({"_id": thumb_repre["parent"]}) + version = legacy_io.find_one({"_id": thumb_repre["parent"]}) if not version: raise AssertionError( "There does not exist version with id {}".format( @@ -137,12 +137,12 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): } } # Create thumbnail entity - io.insert_one(thumbnail_entity) + legacy_io.insert_one(thumbnail_entity) self.log.debug( "Creating entity in database {}".format(str(thumbnail_entity)) ) # Set thumbnail id for version - io.update_many( + legacy_io.update_many( {"_id": version["_id"]}, {"$set": {"data.thumbnail_id": thumbnail_id}} ) @@ -151,7 +151,7 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): )) asset_entity = instance.data["assetEntity"] - io.update_many( + legacy_io.update_many( {"_id": asset_entity["_id"]}, {"$set": {"data.thumbnail_id": thumbnail_id}} ) diff --git a/openpype/plugins/publish/validate_editorial_asset_name.py b/openpype/plugins/publish/validate_editorial_asset_name.py index 4a65f3c64a..f9cdaebf0c 100644 --- a/openpype/plugins/publish/validate_editorial_asset_name.py +++ b/openpype/plugins/publish/validate_editorial_asset_name.py @@ -1,7 +1,9 @@ -import pyblish.api -from avalon import io from pprint import pformat +import pyblish.api + +from openpype.pipeline import legacy_io + class ValidateEditorialAssetName(pyblish.api.ContextPlugin): """ Validating if editorial's asset names are not already created in db. 
@@ -24,10 +26,10 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin): asset_and_parents = self.get_parents(context) self.log.debug("__ asset_and_parents: {}".format(asset_and_parents)) - if not io.Session: - io.install() + if not legacy_io.Session: + legacy_io.install() - db_assets = list(io.find( + db_assets = list(legacy_io.find( {"type": "asset"}, {"name": 1, "data.parents": 1})) self.log.debug("__ db_assets: {}".format(db_assets)) diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index c05eece2be..d945a1f697 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -25,7 +25,7 @@ class PypeCommands: Most of its methods are called by :mod:`cli` module. """ @staticmethod - def launch_tray(debug=False): + def launch_tray(): PypeLogger.set_process_name("Tray") from openpype.tools import tray @@ -101,7 +101,8 @@ class PypeCommands: RuntimeError: When there is no path to process. """ from openpype.modules import ModulesManager - from openpype import install, uninstall + from openpype.pipeline import install_openpype_plugins + from openpype.api import Logger from openpype.tools.utils.host_tools import show_publish from openpype.tools.utils.lib import qt_app_context @@ -112,7 +113,7 @@ class PypeCommands: log = Logger.get_logger() - install() + install_openpype_plugins() manager = ModulesManager() @@ -124,13 +125,14 @@ class PypeCommands: if not any(paths): raise RuntimeError("No publish paths specified") - env = get_app_environments_for_context( - os.environ["AVALON_PROJECT"], - os.environ["AVALON_ASSET"], - os.environ["AVALON_TASK"], - os.environ["AVALON_APP_NAME"] - ) - os.environ.update(env) + if os.getenv("AVALON_APP_NAME"): + env = get_app_environments_for_context( + os.environ["AVALON_PROJECT"], + os.environ["AVALON_ASSET"], + os.environ["AVALON_TASK"], + os.environ["AVALON_APP_NAME"] + ) + os.environ.update(env) pyblish.api.register_host("shell") @@ -294,7 +296,8 @@ class PypeCommands: # Register target and host 
import pyblish.api import pyblish.util - import avalon.api + + from openpype.pipeline import install_host from openpype.hosts.webpublisher import api as webpublisher log = PypeLogger.get_logger() @@ -315,7 +318,7 @@ class PypeCommands: for target in targets: pyblish.api.register_target(target) - avalon.api.install(webpublisher) + install_host(webpublisher) log.info("Running publish ...") diff --git a/openpype/scripts/fusion_switch_shot.py b/openpype/scripts/fusion_switch_shot.py index 6db8ff36a8..245fc665f0 100644 --- a/openpype/scripts/fusion_switch_shot.py +++ b/openpype/scripts/fusion_switch_shot.py @@ -4,12 +4,16 @@ import sys import logging # Pipeline imports -from avalon import api, io -import avalon.fusion +from openpype.hosts.fusion import api +import openpype.hosts.fusion.api.lib as fusion_lib # Config imports -import openpype.lib as pype -import openpype.hosts.fusion.lib as fusion_lib +from openpype.lib import version_up +from openpype.pipeline import ( + install_host, + registered_host, + legacy_io, +) from openpype.lib.avalon_context import get_workdir_from_session @@ -79,7 +83,7 @@ def _format_filepath(session): # Create new unqiue filepath if os.path.exists(new_filepath): - new_filepath = pype.version_up(new_filepath) + new_filepath = version_up(new_filepath) return new_filepath @@ -102,7 +106,7 @@ def _update_savers(comp, session): comp.Print("New renders to: %s\n" % renders) - with avalon.fusion.comp_lock_and_undo_chunk(comp): + with api.comp_lock_and_undo_chunk(comp): savers = comp.GetToolList(False, "Saver").values() for saver in savers: filepath = saver.GetAttrs("TOOLST_Clip_Name")[1.0] @@ -127,7 +131,7 @@ def update_frame_range(comp, representations): """ version_ids = [r["parent"] for r in representations] - versions = io.find({"type": "version", "_id": {"$in": version_ids}}) + versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}}) versions = list(versions) start = min(v["data"]["frameStart"] for v in versions) @@ -158,25 
+162,25 @@ def switch(asset_name, filepath=None, new=True): # Assert asset name exists # It is better to do this here then to wait till switch_shot does it - asset = io.find_one({"type": "asset", "name": asset_name}) + asset = legacy_io.find_one({"type": "asset", "name": asset_name}) assert asset, "Could not find '%s' in the database" % asset_name # Get current project - self._project = io.find_one({ + self._project = legacy_io.find_one({ "type": "project", - "name": api.Session["AVALON_PROJECT"] + "name": legacy_io.Session["AVALON_PROJECT"] }) # Go to comp if not filepath: - current_comp = avalon.fusion.get_current_comp() + current_comp = api.get_current_comp() assert current_comp is not None, "Could not find current comp" else: fusion = _get_fusion_instance() current_comp = fusion.LoadComp(filepath, quiet=True) assert current_comp is not None, "Fusion could not load '%s'" % filepath - host = api.registered_host() + host = registered_host() containers = list(host.ls()) assert containers, "Nothing to update" @@ -194,7 +198,7 @@ def switch(asset_name, filepath=None, new=True): current_comp.Print(message) # Build the session to switch to - switch_to_session = api.Session.copy() + switch_to_session = legacy_io.Session.copy() switch_to_session["AVALON_ASSET"] = asset['name'] if new: @@ -203,7 +207,7 @@ def switch(asset_name, filepath=None, new=True): # Update savers output based on new session _update_savers(current_comp, switch_to_session) else: - comp_path = pype.version_up(filepath) + comp_path = version_up(filepath) current_comp.Print(comp_path) @@ -234,7 +238,7 @@ if __name__ == '__main__': args, unknown = parser.parse_args() - api.install(avalon.fusion) + install_host(api) switch(args.asset_name, args.file_path) sys.exit(0) diff --git a/openpype/scripts/non_python_host_launch.py b/openpype/scripts/non_python_host_launch.py index 43921f0483..f795af7bb3 100644 --- a/openpype/scripts/non_python_host_launch.py +++ b/openpype/scripts/non_python_host_launch.py @@ -15,7 
+15,7 @@ CURRENT_FILE = os.path.abspath(__file__) def show_error_messagebox(title, message, detail_message=None): """Function will show message and process ends after closing it.""" from Qt import QtWidgets, QtCore - from avalon import style + from openpype import style app = QtWidgets.QApplication([]) app.setStyleSheet(style.load_stylesheet()) diff --git a/openpype/settings/__init__.py b/openpype/settings/__init__.py index 14e4678050..ca7157812d 100644 --- a/openpype/settings/__init__.py +++ b/openpype/settings/__init__.py @@ -22,7 +22,6 @@ from .lib import ( get_project_settings, get_current_project_settings, get_anatomy_settings, - get_environments, get_local_settings ) from .entities import ( @@ -54,7 +53,6 @@ __all__ = ( "get_project_settings", "get_current_project_settings", "get_anatomy_settings", - "get_environments", "get_local_settings", "SystemSettings", diff --git a/openpype/settings/constants.py b/openpype/settings/constants.py index 19ff953eb4..cd84d4db1c 100644 --- a/openpype/settings/constants.py +++ b/openpype/settings/constants.py @@ -3,14 +3,11 @@ import re # Metadata keys for work with studio and project overrides M_OVERRIDDEN_KEY = "__overriden_keys__" -# Metadata key for storing information about environments -M_ENVIRONMENT_KEY = "__environment_keys__" # Metadata key for storing dynamic created labels M_DYNAMIC_KEY_LABEL = "__dynamic_keys_labels__" METADATA_KEYS = frozenset([ M_OVERRIDDEN_KEY, - M_ENVIRONMENT_KEY, M_DYNAMIC_KEY_LABEL ]) @@ -35,7 +32,6 @@ KEY_REGEX = re.compile(r"^[{}]+$".format(KEY_ALLOWED_SYMBOLS)) __all__ = ( "M_OVERRIDDEN_KEY", - "M_ENVIRONMENT_KEY", "M_DYNAMIC_KEY_LABEL", "METADATA_KEYS", diff --git a/openpype/settings/defaults/project_anatomy/imageio.json b/openpype/settings/defaults/project_anatomy/imageio.json index 1c86509155..f0be8f95f4 100644 --- a/openpype/settings/defaults/project_anatomy/imageio.json +++ b/openpype/settings/defaults/project_anatomy/imageio.json @@ -55,36 +55,49 @@ "nukeNodeClass": "Write", 
"knobs": [ { + "type": "text", "name": "file_type", "value": "exr" }, { + "type": "text", "name": "datatype", "value": "16 bit half" }, { + "type": "text", "name": "compression", "value": "Zip (1 scanline)" }, { + "type": "bool", "name": "autocrop", - "value": "True" + "value": true }, { + "type": "color_gui", "name": "tile_color", - "value": "0xff0000ff" + "value": [ + 186, + 35, + 35, + 255 + ] }, { + "type": "text", "name": "channels", "value": "rgb" }, { + "type": "text", "name": "colorspace", "value": "linear" }, { + "type": "bool", "name": "create_directories", - "value": "True" + "value": true } ] }, @@ -95,36 +108,49 @@ "nukeNodeClass": "Write", "knobs": [ { + "type": "text", "name": "file_type", "value": "exr" }, { + "type": "text", "name": "datatype", "value": "16 bit half" }, { + "type": "text", "name": "compression", "value": "Zip (1 scanline)" }, { + "type": "bool", "name": "autocrop", - "value": "False" + "value": true }, { + "type": "color_gui", "name": "tile_color", - "value": "0xadab1dff" + "value": [ + 171, + 171, + 10, + 255 + ] }, { + "type": "text", "name": "channels", "value": "rgb" }, { + "type": "text", "name": "colorspace", "value": "linear" }, { + "type": "bool", "name": "create_directories", - "value": "True" + "value": true } ] }, @@ -135,42 +161,54 @@ "nukeNodeClass": "Write", "knobs": [ { + "type": "text", "name": "file_type", "value": "tiff" }, { + "type": "text", "name": "datatype", "value": "16 bit" }, { + "type": "text", "name": "compression", "value": "Deflate" }, { + "type": "color_gui", "name": "tile_color", - "value": "0x23ff00ff" + "value": [ + 56, + 162, + 7, + 255 + ] }, { + "type": "text", "name": "channels", "value": "rgb" }, { + "type": "text", "name": "colorspace", "value": "sRGB" }, { + "type": "bool", "name": "create_directories", - "value": "True" + "value": true } ] } ], - "customNodes": [] + "overrideNodes": [] }, "regexInputs": { "inputs": [ { - "regex": "[^-a-zA-Z0-9]beauty[^-a-zA-Z0-9]", + "regex": 
"(beauty).*(?=.exr)", "colorspace": "linear" } ] @@ -185,8 +223,8 @@ "linux": [] }, "renderSpace": "ACEScg", - "viewName": "ACES 1.0 SDR-video", - "displayName": "sRGB" + "displayName": "sRGB", + "viewName": "ACES 1.0 SDR-video" }, "colorManagementPreference": { "configFilePath": { diff --git a/openpype/settings/defaults/project_settings/aftereffects.json b/openpype/settings/defaults/project_settings/aftereffects.json index 6a9a399069..8083aa0972 100644 --- a/openpype/settings/defaults/project_settings/aftereffects.json +++ b/openpype/settings/defaults/project_settings/aftereffects.json @@ -1,4 +1,11 @@ { + "create": { + "RenderCreator": { + "defaults": [ + "Main" + ] + } + }, "publish": { "ValidateSceneSettings": { "enabled": true, diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index 1ef169e387..5c5a14bf21 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -4,6 +4,10 @@ "CollectDefaultDeadlineServer": { "pass_mongo_url": false }, + "CollectDeadlinePools": { + "primary_pool": "", + "secondary_pool": "" + }, "ValidateExpectedFiles": { "enabled": true, "active": true, @@ -38,8 +42,6 @@ "priority": 50, "chunk_size": 10, "concurrent_tasks": 1, - "primary_pool": "", - "secondary_pool": "", "group": "", "department": "", "use_gpu": true, @@ -54,8 +56,6 @@ "use_published": true, "priority": 50, "chunk_size": 10000, - "primary_pool": "", - "secondary_pool": "", "group": "", "department": "" }, @@ -66,8 +66,6 @@ "use_published": true, "priority": 50, "chunk_size": 10000, - "primary_pool": "", - "secondary_pool": "", "group": "", "department": "", "multiprocess": true @@ -83,7 +81,7 @@ "skip_integration_repre_list": [], "aov_filter": { "maya": [ - ".+(?:\\.|_)([Bb]eauty)(?:\\.|_).*" + ".*([Bb]eauty).*" ], "nuke": [ ".*" diff --git a/openpype/settings/defaults/project_settings/flame.json 
b/openpype/settings/defaults/project_settings/flame.json index c7188b10b5..dd8c05d460 100644 --- a/openpype/settings/defaults/project_settings/flame.json +++ b/openpype/settings/defaults/project_settings/flame.json @@ -20,20 +20,58 @@ } }, "publish": { + "CollectTimelineInstances": { + "xml_preset_attrs_from_comments": [ + { + "name": "width", + "type": "number" + }, + { + "name": "height", + "type": "number" + }, + { + "name": "pixelRatio", + "type": "float" + }, + { + "name": "resizeType", + "type": "string" + }, + { + "name": "resizeFilter", + "type": "string" + } + ], + "add_tasks": [ + { + "name": "compositing", + "type": "Compositing", + "create_batch_group": true + } + ] + }, "ExtractSubsetResources": { "keep_original_representation": false, "export_presets_mapping": { "exr16fpdwaa": { + "active": true, + "export_type": "File Sequence", "ext": "exr", "xml_preset_file": "OpenEXR (16-bit fp DWAA).xml", - "xml_preset_dir": "", - "export_type": "File Sequence", - "ignore_comment_attrs": false, "colorspace_out": "ACES - ACEScg", + "xml_preset_dir": "", + "parsed_comment_attrs": true, "representation_add_range": true, - "representation_tags": [] + "representation_tags": [], + "load_to_batch_group": true, + "batch_group_loader_name": "LoadClipBatch", + "filter_path_regex": ".*" } } + }, + "IntegrateBatchGroup": { + "enabled": false } }, "load": { @@ -54,11 +92,35 @@ "png", "h264", "mov", - "mp4" + "mp4", + "exr16fpdwaa" ], "reel_group_name": "OpenPype_Reels", "reel_name": "Loaded", - "clip_name_template": "{asset}_{subset}_{representation}" + "clip_name_template": "{asset}_{subset}_{output}" + }, + "LoadClipBatch": { + "enabled": true, + "families": [ + "render2d", + "source", + "plate", + "render", + "review" + ], + "representations": [ + "exr", + "dpx", + "jpg", + "jpeg", + "png", + "h264", + "mov", + "mp4", + "exr16fpdwaa" + ], + "reel_name": "OP_LoadedReel", + "clip_name_template": "{asset}_{subset}_{output}" } } } \ No newline at end of file diff --git 
a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index 31d6a70ac7..f9d16d6476 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -349,9 +349,25 @@ "tasks": [], "add_ftrack_family": true, "advanced_filtering": [] + }, + { + "hosts": [ + "photoshop" + ], + "families": [ + "review" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": true, + "advanced_filtering": [] } ] }, + "CollectFtrackCustomAttributeData": { + "enabled": false, + "custom_attribute_keys": [] + }, "IntegrateFtrackNote": { "enabled": true, "note_template": "{intent}: {comment}", @@ -402,7 +418,8 @@ "redshiftproxy": "cache", "usd": "usd" }, - "keep_first_subset_name_for_review": true + "keep_first_subset_name_for_review": true, + "asset_versions_status_profiles": [] } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index 58659d5d41..7b223798f1 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -307,7 +307,7 @@ ], "task_types": [], "tasks": [], - "template": "{family}{Task}_{Render_layer}_{Render_pass}" + "template": "{family}{Task}_{Renderlayer}_{Renderpass}" }, { "families": [ @@ -315,6 +315,7 @@ "workfile" ], "hosts": [ + "aftereffects", "tvpaint" ], "task_types": [], diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 44d7f2d9d0..33ddc2f251 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -21,10 +21,29 @@ "defaults": [ "Main", "Mask" - ] + ], + "knobs": [], + "prenodes": { + "Reformat01": { + "nodeclass": "Reformat", + "dependent": "", + "knobs": [ + { + "type": "text", + "name": "resize", + 
"value": "none" + }, + { + "type": "bool", + "name": "black_outside", + "value": true + } + ] + } + } }, "CreateWritePrerender": { - "fpath_template": "{work}/prerenders/nuke/{subset}/{subset}.{frame}.{ext}", + "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}", "use_range_limit": true, "defaults": [ "Key01", @@ -33,7 +52,32 @@ "Branch01", "Part01" ], - "reviewable": false + "reviewable": false, + "knobs": [], + "prenodes": {} + }, + "CreateWriteStill": { + "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{ext}", + "defaults": [ + "ImageFrame", + "MPFrame", + "LayoutFrame" + ], + "knobs": [], + "prenodes": { + "FrameHold01": { + "nodeclass": "FrameHold", + "dependent": "", + "knobs": [ + { + "type": "formatable", + "name": "first_frame", + "template": "{frame}", + "to_type": "number" + } + ] + } + } } }, "publish": { @@ -43,7 +87,8 @@ "camera", "gizmo", "source", - "render" + "render", + "write" ] }, "ValidateInstanceInContext": { @@ -120,7 +165,7 @@ "filter": { "task_types": [], "families": [], - "sebsets": [] + "subsets": [] }, "read_raw": false, "viewer_process_override": "", @@ -129,17 +174,17 @@ "reformat_node_add": false, "reformat_node_config": [ { - "type": "string", + "type": "text", "name": "type", "value": "to format" }, { - "type": "string", + "type": "text", "name": "format", "value": "HD_1080" }, { - "type": "string", + "type": "text", "name": "filter", "value": "Lanczos6" }, @@ -160,7 +205,21 @@ } }, "ExtractSlateFrame": { - "viewer_lut_raw": false + "viewer_lut_raw": false, + "key_value_mapping": { + "f_submission_note": [ + true, + "{comment}" + ], + "f_submitting_for": [ + true, + "{intent[value]}" + ], + "f_vfx_scope_of_work": [ + false, + "" + ] + } }, "IncrementScriptVersion": { "enabled": true, @@ -206,11 +265,12 @@ "repre_names": [ "exr", "dpx", - "mov" + "mov", + "mp4", + "h264" ], "loaders": [ - "LoadSequence", - "LoadMov" + "LoadClip" ] } ], diff --git 
a/openpype/settings/defaults/project_settings/standalonepublisher.json b/openpype/settings/defaults/project_settings/standalonepublisher.json index bc91a5ea8a..e36232d3f7 100644 --- a/openpype/settings/defaults/project_settings/standalonepublisher.json +++ b/openpype/settings/defaults/project_settings/standalonepublisher.json @@ -141,6 +141,14 @@ "defaults": [], "help": "Texture files with Unreal naming convention" }, + "create_vdb": { + "name": "vdb", + "label": "VDB Volumetric Data", + "family": "vdbcache", + "icon": "cloud", + "defaults": [], + "help": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids" + }, "__dynamic_keys_labels__": { "create_workfile": "Workfile", "create_model": "Model", @@ -154,7 +162,8 @@ "create_render": "Render", "create_mov_batch": "Batch Mov", "create_texture_batch": "Batch Texture", - "create_simple_unreal_texture": "Simple Unreal Texture" + "create_simple_unreal_texture": "Simple Unreal Texture", + "create_vdb": "VDB Cache" } }, "publish": { diff --git a/openpype/settings/defaults/project_settings/traypublisher.json b/openpype/settings/defaults/project_settings/traypublisher.json new file mode 100644 index 0000000000..0b54cfd39e --- /dev/null +++ b/openpype/settings/defaults/project_settings/traypublisher.json @@ -0,0 +1,35 @@ +{ + "simple_creators": [ + { + "family": "workfile", + "identifier": "", + "label": "Workfile", + "icon": "fa.file", + "default_variants": [ + "Main" + ], + "description": "Publish workfile backup", + "detailed_description": "", + "allow_sequences": true, + "extensions": [ + ".ma", + ".mb", + ".nk", + ".hrox", + ".hip", + ".hiplc", + ".hipnc", + ".blend", + ".scn", + ".tvpp", + ".comp", + ".zip", + ".prproj", + ".drp", + ".psd", + ".psb", + ".aep" + ] + } + ] +} \ No newline at end of file diff --git a/openpype/settings/defaults/system_settings/applications.json b/openpype/settings/defaults/system_settings/applications.json 
index 0fb99a2608..2b0de44fa9 100644 --- a/openpype/settings/defaults/system_settings/applications.json +++ b/openpype/settings/defaults/system_settings/applications.json @@ -12,6 +12,26 @@ "LC_ALL": "C" }, "variants": { + "2023": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Autodesk\\Maya2023\\bin\\maya.exe" + ], + "darwin": [], + "linux": [ + "/usr/autodesk/maya2023/bin/maya" + ] + }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, + "environment": { + "MAYA_VERSION": "2023" + } + }, "2022": { "use_python_2": false, "executables": { @@ -91,9 +111,6 @@ "environment": { "MAYA_VERSION": "2018" } - }, - "__dynamic_keys_labels__": { - "2022": "2022" } } }, diff --git a/openpype/settings/defaults/system_settings/general.json b/openpype/settings/defaults/system_settings/general.json index 5a3e39e5b6..a06947ba77 100644 --- a/openpype/settings/defaults/system_settings/general.json +++ b/openpype/settings/defaults/system_settings/general.json @@ -7,11 +7,13 @@ "global": [] } }, + "log_to_server": true, "disk_mapping": { "windows": [], "linux": [], "darwin": [] }, + "local_env_white_list": [], "openpype_path": { "windows": [], "darwin": [], diff --git a/openpype/settings/defaults/system_settings/tools.json b/openpype/settings/defaults/system_settings/tools.json index 9e08465195..243cde40cc 100644 --- a/openpype/settings/defaults/system_settings/tools.json +++ b/openpype/settings/defaults/system_settings/tools.json @@ -52,10 +52,39 @@ "environment": {}, "variants": {} }, + "renderman": { + "environment": {}, + "variants": { + "24-3-maya": { + "host_names": [ + "maya" + ], + "app_variants": [ + "maya/2022" + ], + "environment": { + "RFMTREE": { + "windows": "C:\\Program Files\\Pixar\\RenderManForMaya-24.3", + "darwin": "/Applications/Pixar/RenderManForMaya-24.3", + "linux": "/opt/pixar/RenderManForMaya-24.3" + }, + "RMANTREE": { + "windows": "C:\\Program Files\\Pixar\\RenderManProServer-24.3", + "darwin": 
"/Applications/Pixar/RenderManProServer-24.3", + "linux": "/opt/pixar/RenderManProServer-24.3" + } + } + }, + "__dynamic_keys_labels__": { + "24-3-maya": "24.3 RFM" + } + } + }, "__dynamic_keys_labels__": { "mtoa": "Autodesk Arnold", "vray": "Chaos Group Vray", - "yeti": "Pergrine Labs Yeti" + "yeti": "Peregrine Labs Yeti", + "renderman": "Pixar Renderman" } } } \ No newline at end of file diff --git a/openpype/settings/entities/base_entity.py b/openpype/settings/entities/base_entity.py index 21ee44ae77..741f13c49b 100644 --- a/openpype/settings/entities/base_entity.py +++ b/openpype/settings/entities/base_entity.py @@ -127,12 +127,6 @@ class BaseItemEntity(BaseEntity): # Entity is in hierarchy of dynamically created entity self.is_in_dynamic_item = False - # Entity will save metadata about environments - # - this is current possible only for RawJsonEnity - self.is_env_group = False - # Key of environment group key must be unique across system settings - self.env_group_key = None - # Roles of an entity self.roles = None @@ -286,16 +280,6 @@ class BaseItemEntity(BaseEntity): ).format(self.group_item.path) raise EntitySchemaError(self, reason) - # Validate that env group entities will be stored into file. - # - env group entities must store metadata which is not possible if - # metadata would be outside of file - if self.file_item is None and self.is_env_group: - reason = ( - "Environment item is not inside file" - " item so can't store metadata for defaults." - ) - raise EntitySchemaError(self, reason) - # Dynamic items must not have defined labels. 
(UI specific) if self.label and self.is_dynamic_item: raise EntitySchemaError( @@ -862,11 +846,6 @@ class ItemEntity(BaseItemEntity): if self.is_dynamic_item: self.require_key = False - # If value should be stored to environments and uder which group key - # - the key may be dynamically changed by it's parent on save - self.env_group_key = self.schema_data.get("env_group_key") - self.is_env_group = bool(self.env_group_key is not None) - # Root item reference self.root_item = self.parent.root_item diff --git a/openpype/settings/entities/dict_mutable_keys_entity.py b/openpype/settings/entities/dict_mutable_keys_entity.py index a0c93b97a7..e6d332b9ad 100644 --- a/openpype/settings/entities/dict_mutable_keys_entity.py +++ b/openpype/settings/entities/dict_mutable_keys_entity.py @@ -15,7 +15,6 @@ from .exceptions import ( from openpype.settings.constants import ( METADATA_KEYS, M_DYNAMIC_KEY_LABEL, - M_ENVIRONMENT_KEY, KEY_REGEX, KEY_ALLOWED_SYMBOLS ) @@ -148,11 +147,7 @@ class DictMutableKeysEntity(EndpointEntity): ): raise InvalidKeySymbols(self.path, key) - if self.value_is_env_group: - item_schema = copy.deepcopy(self.item_schema) - item_schema["env_group_key"] = key - else: - item_schema = self.item_schema + item_schema = self.item_schema new_child = self.create_schema_object(item_schema, self, True) self.children_by_key[key] = new_child @@ -216,9 +211,7 @@ class DictMutableKeysEntity(EndpointEntity): self.children_label_by_id = {} self.store_as_list = self.schema_data.get("store_as_list") or False - self.value_is_env_group = ( - self.schema_data.get("value_is_env_group") or False - ) + self.required_keys = self.schema_data.get("required_keys") or [] self.collapsible_key = self.schema_data.get("collapsible_key") or False # GUI attributes @@ -241,9 +234,6 @@ class DictMutableKeysEntity(EndpointEntity): object_type.update(input_modifiers) self.item_schema = object_type - if self.value_is_env_group: - self.item_schema["env_group_key"] = "" - if self.group_item is 
None: self.is_group = True @@ -259,10 +249,6 @@ class DictMutableKeysEntity(EndpointEntity): if used_temp_label: self.label = None - if self.value_is_env_group and self.store_as_list: - reason = "Item can't store environments metadata to list output." - raise EntitySchemaError(self, reason) - if not self.schema_data.get("object_type"): reason = ( "Modifiable dictionary must have specified `object_type`." @@ -579,18 +565,10 @@ class DictMutableKeysEntity(EndpointEntity): output.append([key, child_value]) return output - output = {} - for key, child_entity in self.children_by_key.items(): - child_value = child_entity.settings_value() - # TODO child should have setter of env group key se child can - # know what env group represents. - if self.value_is_env_group: - if key not in child_value[M_ENVIRONMENT_KEY]: - _metadata = child_value[M_ENVIRONMENT_KEY] - _m_keykey = tuple(_metadata.keys())[0] - env_keys = child_value[M_ENVIRONMENT_KEY].pop(_m_keykey) - child_value[M_ENVIRONMENT_KEY][key] = env_keys - output[key] = child_value + output = { + key: child_entity.settings_value() + for key, child_entity in self.children_by_key.items() + } output.update(self.metadata) return output diff --git a/openpype/settings/entities/input_entities.py b/openpype/settings/entities/input_entities.py index 3dcd238672..89f12afd9b 100644 --- a/openpype/settings/entities/input_entities.py +++ b/openpype/settings/entities/input_entities.py @@ -15,10 +15,7 @@ from .exceptions import ( EntitySchemaError ) -from openpype.settings.constants import ( - METADATA_KEYS, - M_ENVIRONMENT_KEY -) +from openpype.settings.constants import METADATA_KEYS class EndpointEntity(ItemEntity): @@ -534,13 +531,7 @@ class RawJsonEntity(InputEntity): @property def metadata(self): - output = {} - if isinstance(self._current_value, dict) and self.is_env_group: - output[M_ENVIRONMENT_KEY] = { - self.env_group_key: list(self._current_value.keys()) - } - - return output + return {} @property def has_unsaved_changes(self): 
@@ -549,15 +540,6 @@ class RawJsonEntity(InputEntity): result = self.metadata != self._metadata_for_current_state() return result - def schema_validations(self): - if self.store_as_string and self.is_env_group: - reason = ( - "RawJson entity can't store environment group metadata" - " as string." - ) - raise EntitySchemaError(self, reason) - super(RawJsonEntity, self).schema_validations() - def _convert_to_valid_type(self, value): if isinstance(value, STRING_TYPE): try: @@ -583,9 +565,6 @@ class RawJsonEntity(InputEntity): def _settings_value(self): value = super(RawJsonEntity, self)._settings_value() - if self.is_env_group and isinstance(value, dict): - value.update(self.metadata) - if self.store_as_string: return json.dumps(value) return value diff --git a/openpype/settings/entities/root_entities.py b/openpype/settings/entities/root_entities.py index edb4407679..ff76fa5180 100644 --- a/openpype/settings/entities/root_entities.py +++ b/openpype/settings/entities/root_entities.py @@ -52,7 +52,6 @@ from openpype.settings.lib import ( get_available_studio_project_settings_overrides_versions, get_available_studio_project_anatomy_overrides_versions, - find_environments, apply_overrides ) @@ -422,11 +421,6 @@ class RootEntity(BaseItemEntity): """ pass - @abstractmethod - def _validate_defaults_to_save(self, value): - """Validate default values before save.""" - pass - def _save_default_values(self): """Save default values. @@ -435,7 +429,6 @@ class RootEntity(BaseItemEntity): DEFAULTS. 
""" settings_value = self.settings_value() - self._validate_defaults_to_save(settings_value) defaults_dir = self.defaults_dir() for file_path, value in settings_value.items(): @@ -604,8 +597,6 @@ class SystemSettings(RootEntity): def _save_studio_values(self): settings_value = self.settings_value() - self._validate_duplicated_env_group(settings_value) - self.log.debug("Saving system settings: {}".format( json.dumps(settings_value, indent=4) )) @@ -613,29 +604,6 @@ class SystemSettings(RootEntity): # Reset source version after restart self._source_version = None - def _validate_defaults_to_save(self, value): - """Valiations of default values before save.""" - self._validate_duplicated_env_group(value) - - def _validate_duplicated_env_group(self, value, override_state=None): - """ Validate duplicated environment groups. - - Raises: - DuplicatedEnvGroups: When value contain duplicated env groups. - """ - value = copy.deepcopy(value) - if override_state is None: - override_state = self._override_state - - if override_state is OverrideState.STUDIO: - default_values = get_default_settings()[SYSTEM_SETTINGS_KEY] - final_value = apply_overrides(default_values, value) - else: - final_value = value - - # Check if final_value contain duplicated environment groups - find_environments(final_value) - def _save_project_values(self): """System settings can't have project overrides. 
@@ -911,10 +879,6 @@ class ProjectSettings(RootEntity): if warnings: raise SaveWarningExc(warnings) - def _validate_defaults_to_save(self, value): - """Valiations of default values before save.""" - pass - def _validate_values_to_save(self, value): pass diff --git a/openpype/settings/entities/schemas/README.md b/openpype/settings/entities/schemas/README.md index fbfd699937..b4c878fe0f 100644 --- a/openpype/settings/entities/schemas/README.md +++ b/openpype/settings/entities/schemas/README.md @@ -46,8 +46,7 @@ }, { "type": "raw-json", "label": "{host_label} Environments", - "key": "{host_name}_environments", - "env_group_key": "{host_name}" + "key": "{host_name}_environments" }, { "type": "path", "key": "{host_name}_executables", @@ -745,6 +744,7 @@ How output of the schema could look like on save: ### label - add label with note or explanations - it is possible to use html tags inside the label +- set `work_wrap` to `true`/`false` if you want to enable word wrapping in UI (default: `false`) ``` { diff --git a/openpype/settings/entities/schemas/projects_schema/schema_main.json b/openpype/settings/entities/schemas/projects_schema/schema_main.json index 521c066964..cb363d17fe 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_main.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_main.json @@ -130,6 +130,10 @@ "type": "schema", "name": "schema_project_standalonepublisher" }, + { + "type": "schema", + "name": "schema_project_traypublisher" + }, { "type": "schema", "name": "schema_project_webpublisher" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json b/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json index 4c4cd225ab..1a3eaef540 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json @@ -5,6 +5,29 @@ "label": 
"AfterEffects", "is_file": true, "children": [ + { + "type": "dict", + "collapsible": true, + "key": "create", + "label": "Creator plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "RenderCreator", + "label": "Create render", + "children": [ + { + "type": "list", + "key": "defaults", + "label": "Default Variants", + "object_type": "text", + "docstring": "Fill default variant(s) (like 'Main' or 'Default') used in subset name creation." + } + ] + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index 5bf0a81a4d..cd1741ba8b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -30,6 +30,24 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CollectDeadlinePools", + "label": "Default Deadline Pools", + "children": [ + { + "type": "text", + "key": "primary_pool", + "label": "Primary Pool" + }, + { + "type": "text", + "key": "secondary_pool", + "label": "Secondary Pool" + } + ] + }, { "type": "dict", "collapsible": true, @@ -223,16 +241,6 @@ { "type": "splitter" }, - { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" - }, - { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" - }, { "type": "text", "key": "group", @@ -313,16 +321,6 @@ "key": "chunk_size", "label": "Chunk Size" }, - { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" - }, - { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" - }, { "type": "text", "key": "group", @@ -372,16 +370,6 @@ "key": "chunk_size", "label": "Chunk Size" }, - { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" - }, - { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" - 
}, { "type": "text", "key": "group", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json index e352f8b132..ace404b47a 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json @@ -136,6 +136,87 @@ "key": "publish", "label": "Publish plugins", "children": [ + { + "type": "dict", + "collapsible": true, + "key": "CollectTimelineInstances", + "label": "Collect Timeline Instances", + "is_group": true, + "children": [ + { + "type": "collapsible-wrap", + "label": "XML presets attributes parsable from segment comments", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "xml_preset_attrs_from_comments", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "name", + "label": "Attribute name" + }, + { + "key": "type", + "label": "Attribute type", + "type": "enum", + "default": "number", + "enum_items": [ + { + "number": "number" + }, + { + "float": "float" + }, + { + "string": "string" + } + ] + } + ] + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Add tasks", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "add_tasks", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "name", + "label": "Task name" + }, + { + "key": "type", + "label": "Task type", + "multiselection": false, + "type": "task-types-enum" + }, + { + "type": "boolean", + "key": "create_batch_group", + "label": "Create batch group" + } + ] + } + } + ] + } + ] + }, { "type": "dict", "collapsible": true, @@ -157,25 +238,19 @@ "type": "dict", "children": [ { - "key": "ext", - "label": "Output extension", - "type": "text" + "type": "boolean", + "key": "active", + "label": "Is active", + "default": true }, { - "key": 
"xml_preset_file", - "label": "XML preset file (with ext)", - "type": "text" - }, - { - "key": "xml_preset_dir", - "label": "XML preset folder (optional)", - "type": "text" + "type": "separator" }, { "key": "export_type", "label": "Eport clip type", "type": "enum", - "default": "File Sequence", + "default": "Sequence Publish", "enum_items": [ { "Movie": "Movie" @@ -187,45 +262,125 @@ "Sequence Publish": "Sequence Publish" } ] - }, { - "type": "separator" + "key": "ext", + "label": "Output extension", + "type": "text", + "default": "exr" }, { - "type": "boolean", - "key": "ignore_comment_attrs", - "label": "Ignore attributes parsed from a segment comments" - }, - { - "type": "separator" + "key": "xml_preset_file", + "label": "XML preset file (with ext)", + "type": "text" }, { "key": "colorspace_out", "label": "Output color (imageio)", - "type": "text" + "type": "text", + "default": "linear" }, { - "type": "separator" + "type": "collapsible-wrap", + "label": "Other parameters", + "collapsible": true, + "collapsed": true, + "children": [ + { + "key": "xml_preset_dir", + "label": "XML preset folder (optional)", + "type": "text" + }, + { + "type": "separator" + }, + { + "type": "boolean", + "key": "parsed_comment_attrs", + "label": "Include parsed attributes from comments", + "default": false + + }, + { + "type": "separator" + }, + { + "type": "collapsible-wrap", + "label": "Representation", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "boolean", + "key": "representation_add_range", + "label": "Add frame range to representation" + }, + { + "type": "list", + "key": "representation_tags", + "label": "Add representation tags", + "object_type": { + "type": "text", + "multiline": false + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Loading during publish", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "boolean", + "key": "load_to_batch_group", + "label": "Load to batch group reel", + "default": false + 
}, + { + "type": "text", + "key": "batch_group_loader_name", + "label": "Use loader name" + } + ] + } + + ] }, { - "type": "boolean", - "key": "representation_add_range", - "label": "Add frame range to representation" - }, - { - "type": "list", - "key": "representation_tags", - "label": "Add representation tags", - "object_type": { - "type": "text", - "multiline": false - } + "type": "collapsible-wrap", + "label": "Filtering", + "collapsible": true, + "collapsed": true, + "children": [ + { + "key": "filter_path_regex", + "label": "Regex in clip path", + "type": "text", + "default": ".*" + } + ] } ] } } ] + }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateBatchGroup", + "label": "IntegrateBatchGroup", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] } ] }, @@ -281,6 +436,48 @@ "label": "Clip name template" } ] + }, + { + "type": "dict", + "collapsible": true, + "key": "LoadClipBatch", + "label": "Load as clip to current batch", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "families", + "label": "Families", + "object_type": "text" + }, + { + "type": "list", + "key": "representations", + "label": "Representations", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "reel_name", + "label": "Reel name" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "clip_name_template", + "label": "Clip name template" + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index 5ce9b24b4b..7db490b114 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -725,6 +725,31 @@ } 
] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "CollectFtrackCustomAttributeData", + "label": "Collect Custom Attribute Data", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Collect custom attributes from ftrack for ftrack entities that can be used in some templates during publishing." + }, + { + "type": "list", + "key": "custom_attribute_keys", + "label": "Custom attribute keys", + "object_type": "text" + } + ] + }, { "type": "dict", "collapsible": true, @@ -833,6 +858,43 @@ "key": "keep_first_subset_name_for_review", "label": "Make subset name as first asset name", "default": true + }, + { + "type": "list", + "collapsible": true, + "key": "asset_versions_status_profiles", + "label": "AssetVersion status on publish", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "hosts", + "label": "Host names", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "family", + "label": "Family", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "key": "status", + "label": "Status name", + "type": "text" + } + ] + } } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json index 9ab5fc65fb..bc572cbdc8 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json @@ -87,7 +87,7 @@ "children": [ { "type": "dict", - "collapsible": false, + "collapsible": true, "key": "CreateWriteRender", "label": "CreateWriteRender", "is_group": true, @@ -104,12 +104,53 @@ "object_type": { "type": "text" } + }, + { + "type": "schema_template", + "name": 
"template_nuke_knob_inputs", + "template_data": [ + { + "label": "Node knobs", + "key": "knobs" + } + ] + }, + { + "key": "prenodes", + "label": "Pre write nodes", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "nodeclass", + "label": "Node class", + "type": "text" + }, + { + "key": "dependent", + "label": "Outside node dependency", + "type": "text" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Node knobs", + "key": "knobs" + } + ] + } + ] + } } ] }, { "type": "dict", - "collapsible": false, + "collapsible": true, "key": "CreateWritePrerender", "label": "CreateWritePrerender", "is_group": true, @@ -136,6 +177,110 @@ "type": "boolean", "key": "reviewable", "label": "Add reviewable toggle" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Node knobs", + "key": "knobs" + } + ] + }, + { + "key": "prenodes", + "label": "Pre write nodes", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "nodeclass", + "label": "Node class", + "type": "text" + }, + { + "key": "dependent", + "label": "Outside node dependency", + "type": "text" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Node knobs", + "key": "knobs" + } + ] + } + ] + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateWriteStill", + "label": "CreateWriteStill", + "is_group": true, + "children": [ + { + "type": "text", + "key": "fpath_template", + "label": "Path template" + }, + { + "type": "list", + "key": "defaults", + "label": "Subset name defaults", + "object_type": { + "type": "text" + } + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Node knobs", + "key": "knobs" + } + ] + }, + { + 
"key": "prenodes", + "label": "Pre write nodes", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "nodeclass", + "label": "Node class", + "type": "text" + }, + { + "key": "dependent", + "label": "Outside node dependency", + "type": "text" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Node knobs", + "key": "knobs" + } + ] + } + ] + } } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json new file mode 100644 index 0000000000..55c1b7b7d7 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json @@ -0,0 +1,83 @@ +{ + "type": "dict", + "collapsible": true, + "key": "traypublisher", + "label": "Tray Publisher", + "is_file": true, + "children": [ + { + "type": "list", + "collapsible": true, + "key": "simple_creators", + "label": "Creator plugins", + "use_label_wrap": true, + "collapsible_key": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "family", + "label": "Family" + }, + { + "type": "text", + "key": "identifier", + "label": "Identifier", + "placeholder": "< Use 'Family' >", + "tooltip": "All creators must have unique identifier.\nBy default is used 'family' but if you need to have more creators with same families\nyou have to set identifier too." 
+ }, + { + "type": "text", + "key": "label", + "label": "Label" + }, + { + "type": "text", + "key": "icon", + "label": "Icon" + }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "description", + "label": "Description" + }, + { + "type": "text", + "key": "detailed_description", + "label": "Detailed Description", + "multiline": true + }, + { + "type": "separator" + }, + { + "key": "allow_sequences", + "label": "Allow sequences", + "type": "boolean" + }, + { + "type": "list", + "key": "extensions", + "label": "Extensions", + "use_label_wrap": true, + "collapsible_key": true, + "collapsed": false, + "object_type": "text" + } + ] + } + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json index 9f142bad09..ef8c907dda 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json @@ -253,7 +253,7 @@ { "key": "requiredNodes", "type": "list", - "label": "Required Nodes", + "label": "Plugin required", "object_type": { "type": "dict", "children": [ @@ -272,35 +272,26 @@ "label": "Nuke Node Class" }, { - "type": "splitter" - }, - { - "key": "knobs", - "label": "Knobs", - "type": "list", - "object_type": { - "type": "dict", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "text", - "key": "value", - "label": "Value" - } - ] - } + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Knobs", + "key": "knobs" + } + ] } + ] } }, + { + "type": "splitter" + }, { "type": "list", - "key": "customNodes", - "label": "Custom Nodes", + "key": "overrideNodes", + "label": "Plugin's node 
overrides", "object_type": { "type": "dict", "children": [ @@ -319,27 +310,20 @@ "label": "Nuke Node Class" }, { - "type": "splitter" + "key": "subsets", + "label": "Subsets", + "type": "list", + "object_type": "text" }, { - "key": "knobs", - "label": "Knobs", - "type": "list", - "object_type": { - "type": "dict", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "text", - "key": "value", - "label": "Value" - } - ] - } + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Knobs overrides", + "key": "knobs" + } + ] } ] } @@ -446,7 +430,7 @@ { "key": "flame", "type": "dict", - "label": "Flame/Flair", + "label": "Flame & Flare", "children": [ { "key": "project", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json index 27e8957786..04df957d67 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json @@ -41,6 +41,9 @@ }, { "render": "render" + }, + { + "write": "write" } ] } @@ -212,7 +215,7 @@ "object_type": "text" }, { - "key": "sebsets", + "key": "subsets", "label": "Subsets", "type": "list", "object_type": "text" @@ -253,108 +256,12 @@ "default": false }, { - "type": "collapsible-wrap", - "label": "Reformat Node Knobs", - "collapsible": true, - "collapsed": true, - "children": [ + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ { - "type": "list", - "key": "reformat_node_config", - "object_type": { - "type": "dict-conditional", - "enum_key": "type", - "enum_label": "Type", - "enum_children": [ - { - "key": "string", - "label": "String", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "text", - "key": "value", - "label": "Value" - } - ] - 
}, - { - "key": "bool", - "label": "Boolean", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "boolean", - "key": "value", - "label": "Value" - } - ] - }, - { - "key": "number", - "label": "Number", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "list-strict", - "key": "value", - "label": "Value", - "object_types": [ - { - "type": "number", - "key": "number", - "default": 1, - "decimal": 4 - } - ] - } - - ] - }, - { - "key": "list_numbers", - "label": "2 Numbers", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "list-strict", - "key": "value", - "label": "Value", - "object_types": [ - { - "type": "number", - "key": "x", - "default": 1, - "decimal": 4 - }, - { - "type": "number", - "key": "y", - "default": 1, - "decimal": 4 - } - ] - } - ] - } - ] - } + "label": "Reformat Node Knobs", + "key": "reformat_node_config" } ] }, @@ -389,6 +296,59 @@ "type": "boolean", "key": "viewer_lut_raw", "label": "Viewer LUT raw" + }, + { + "type": "separator" + }, + { + "type": "label", + "label": "Fill specific slate node values with templates. 
Uncheck the checkbox to not change the value.", + "word_wrap": true + }, + { + "type": "dict", + "key": "key_value_mapping", + "children": [ + { + "type": "list-strict", + "key": "f_submission_note", + "label": "Submission Note:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + }, + { + "type": "list-strict", + "key": "f_submitting_for", + "label": "Submission For:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + }, + { + "type": "list-strict", + "key": "f_vfx_scope_of_work", + "label": "VFX Scope Of Work:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + } + ] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_nuke_knob_inputs.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_nuke_knob_inputs.json new file mode 100644 index 0000000000..52a14e0636 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/template_nuke_knob_inputs.json @@ -0,0 +1,275 @@ +[ + { + "type": "collapsible-wrap", + "label": "{label}", + "collapsible": true, + "collapsed": true, + "children": [{ + "type": "list", + "key": "{key}", + "object_type": { + "type": "dict-conditional", + "enum_key": "type", + "enum_label": "Type", + "enum_children": [ + { + "key": "text", + "label": "Text", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "text", + "key": "value", + "label": "Value" + } + ] + }, + { + "key": "formatable", + "label": "Formate from template", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "text", + "key": "template", + "label": "Template", + "placeholder": "{{key}} or {{key}};{{key}}" + }, + { + "type": "enum", + "key": "to_type", + "label": "Knob type", + "enum_items": [ + { + "text": "Text" + }, + { + "number": "Number" + }, + { + "decimal_number": "Decimal number" + }, + { + "2d_vector": "2D vector" + } + ] + } + ] + 
}, + { + "key": "color_gui", + "label": "Color GUI", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "color", + "key": "value", + "label": "Value", + "use_alpha": false + } + ] + }, + { + "key": "bool", + "label": "Boolean", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "boolean", + "key": "value", + "label": "Value" + } + ] + }, + { + "key": "number", + "label": "Number", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "number", + "key": "value", + "default": 1, + "decimal": 0, + "maximum": 99999999 + } + + ] + }, + { + "key": "decimal_number", + "label": "Decimal number", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "number", + "key": "value", + "default": 1, + "decimal": 4, + "maximum": 99999999 + } + + ] + }, + { + "key": "2d_vector", + "label": "2D vector", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "list-strict", + "key": "value", + "label": "Value", + "object_types": [ + { + "type": "number", + "key": "x", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4, + "maximum": 99999999 + } + ] + } + ] + }, + { + "key": "3d_vector", + "label": "3D vector", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "list-strict", + "key": "value", + "label": "Value", + "object_types": [ + { + "type": "number", + "key": "x", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4, + "maximum": 99999999 + } + ] + } + ] + }, + { + "key": "color", + "label": "Color", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "list-strict", 
+ "key": "value", + "label": "Value", + "object_types": [ + { + "type": "number", + "key": "x", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "x", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4, + "maximum": 99999999 + } + ] + } + ] + }, + { + "key": "__legacy__", + "label": "_ Legacy type _", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "text", + "key": "value", + "label": "Value" + } + ] + } + ] + } + }] + } +] diff --git a/openpype/settings/entities/schemas/system_schema/example_schema.json b/openpype/settings/entities/schemas/system_schema/example_schema.json index 6a86dae259..b9747b5f4f 100644 --- a/openpype/settings/entities/schemas/system_schema/example_schema.json +++ b/openpype/settings/entities/schemas/system_schema/example_schema.json @@ -117,19 +117,6 @@ } ] }, - { - "key": "env_group_test", - "label": "EnvGroup Test", - "type": "dict", - "children": [ - { - "key": "key_to_store_in_system_settings", - "label": "Testing environment group", - "type": "raw-json", - "env_group_key": "test_group" - } - ] - }, { "key": "dict_wrapper", "type": "dict", diff --git a/openpype/settings/entities/schemas/system_schema/example_template.json b/openpype/settings/entities/schemas/system_schema/example_template.json index ff78c78e8f..9955cf5651 100644 --- a/openpype/settings/entities/schemas/system_schema/example_template.json +++ b/openpype/settings/entities/schemas/system_schema/example_template.json @@ -7,8 +7,7 @@ { "type": "raw-json", "label": "{host_label} Environments", - "key": "{host_name}_environments", - "env_group_key": "{host_name}" + "key": "{host_name}_environments" }, { "type": "path", diff --git a/openpype/settings/entities/schemas/system_schema/schema_general.json 
b/openpype/settings/entities/schemas/system_schema/schema_general.json index 6306317df8..5b6d8d5d62 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_general.json +++ b/openpype/settings/entities/schemas/system_schema/schema_general.json @@ -34,12 +34,16 @@ "key": "environment", "label": "Environment", "type": "raw-json", - "env_group_key": "global", "require_restart": true }, { "type": "splitter" }, + { + "type": "boolean", + "key": "log_to_server", + "label": "Log to mongo" + }, { "type": "dict", "key": "disk_mapping", @@ -110,6 +114,17 @@ { "type": "splitter" }, + { + "type": "list", + "key": "local_env_white_list", + "label": "Local overrides of environment variable keys", + "tooltip": "Environment variable keys that can be changed per machine using Local settings UI.\nKey changes are applied only on applications and tools environments.", + "use_label_wrap": true, + "object_type": "text" + }, + { + "type": "splitter" + }, { "type": "collapsible-wrap", "label": "OpenPype deployment control", diff --git a/openpype/settings/handlers.py b/openpype/settings/handlers.py index 2109b53b09..c99fc6080b 100644 --- a/openpype/settings/handlers.py +++ b/openpype/settings/handlers.py @@ -324,6 +324,7 @@ class MongoSettingsHandler(SettingsHandler): global_general_keys = ( "openpype_path", "admin_password", + "log_to_server", "disk_mapping", "production_version", "staging_version" @@ -337,7 +338,7 @@ class MongoSettingsHandler(SettingsHandler): def __init__(self): # Get mongo connection from openpype.lib import OpenPypeMongoConnection - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB settings_collection = OpenPypeMongoConnection.get_mongo_client() diff --git a/openpype/settings/lib.py b/openpype/settings/lib.py index 54502292dc..6df41112c8 100644 --- a/openpype/settings/lib.py +++ b/openpype/settings/lib.py @@ -9,7 +9,6 @@ from .exceptions import ( ) from .constants import ( M_OVERRIDDEN_KEY, - M_ENVIRONMENT_KEY, 
METADATA_KEYS, @@ -292,6 +291,22 @@ def _system_settings_backwards_compatible_conversion(studio_overrides): } +def _project_anatomy_backwards_compatible_conversion(project_anatomy): + # Backwards compatibility of node settings in Nuke 3.9.x - 3.10.0 + # - source PR - https://github.com/pypeclub/OpenPype/pull/3143 + value = project_anatomy + for key in ("imageio", "nuke", "nodes", "requiredNodes"): + if key not in value: + return + value = value[key] + + for item in value: + for node in item.get("knobs") or []: + if "type" in node: + break + node["type"] = "__legacy__" + + @require_handler def get_studio_system_settings_overrides(return_version=False): output = _SETTINGS_HANDLER.get_studio_system_settings_overrides( @@ -327,7 +342,9 @@ def get_project_settings_overrides(project_name, return_version=False): @require_handler def get_project_anatomy_overrides(project_name): - return _SETTINGS_HANDLER.get_project_anatomy_overrides(project_name) + output = _SETTINGS_HANDLER.get_project_anatomy_overrides(project_name) + _project_anatomy_backwards_compatible_conversion(output) + return output @require_handler @@ -457,24 +474,6 @@ def get_local_settings(): return _LOCAL_SETTINGS_HANDLER.get_local_settings() -class DuplicatedEnvGroups(Exception): - def __init__(self, duplicated): - self.origin_duplicated = duplicated - self.duplicated = {} - for key, items in duplicated.items(): - self.duplicated[key] = [] - for item in items: - self.duplicated[key].append("/".join(item["parents"])) - - msg = "Duplicated environment group keys. 
{}".format( - ", ".join([ - "\"{}\"".format(env_key) for env_key in self.duplicated.keys() - ]) - ) - - super(DuplicatedEnvGroups, self).__init__(msg) - - def load_openpype_default_settings(): """Load openpype default settings.""" return load_jsons_from_dir(DEFAULTS_DIR) @@ -624,69 +623,6 @@ def load_jsons_from_dir(path, *args, **kwargs): return output -def find_environments(data, with_items=False, parents=None): - """ Find environemnt values from system settings by it's metadata. - - Args: - data(dict): System settings data or dictionary which may contain - environments metadata. - - Returns: - dict: Key as Environment key and value for `acre` module. - """ - if not data or not isinstance(data, dict): - return {} - - output = {} - if parents is None: - parents = [] - - if M_ENVIRONMENT_KEY in data: - metadata = data.get(M_ENVIRONMENT_KEY) - for env_group_key, env_keys in metadata.items(): - if env_group_key not in output: - output[env_group_key] = [] - - _env_values = {} - for key in env_keys: - _env_values[key] = data[key] - - item = { - "env": _env_values, - "parents": parents[:-1] - } - output[env_group_key].append(item) - - for key, value in data.items(): - _parents = copy.deepcopy(parents) - _parents.append(key) - result = find_environments(value, True, _parents) - if not result: - continue - - for env_group_key, env_values in result.items(): - if env_group_key not in output: - output[env_group_key] = [] - - for env_values_item in env_values: - output[env_group_key].append(env_values_item) - - if with_items: - return output - - duplicated_env_groups = {} - final_output = {} - for key, value_in_list in output.items(): - if len(value_in_list) > 1: - duplicated_env_groups[key] = value_in_list - else: - final_output[key] = value_in_list[0]["env"] - - if duplicated_env_groups: - raise DuplicatedEnvGroups(duplicated_env_groups) - return final_output - - def subkey_merge(_dict, value, keys): key = keys.pop(0) if not keys: @@ -1082,19 +1018,6 @@ def 
get_current_project_settings(): return get_project_settings(project_name) -def get_environments(): - """Calculated environment based on defaults and system settings. - - Any default environment also found in the system settings will be fully - overridden by the one from the system settings. - - Returns: - dict: Output should be ready for `acre` module. - """ - - return find_environments(get_system_settings(False)) - - def get_general_environments(): """Get general environments. @@ -1113,6 +1036,14 @@ def get_general_environments(): clear_metadata_from_settings(environments) + whitelist_envs = result["general"].get("local_env_white_list") + if whitelist_envs: + local_settings = get_local_settings() + local_envs = local_settings.get("environments") or {} + for key, value in local_envs.items(): + if key in whitelist_envs and key in environments: + environments[key] = value + return environments diff --git a/openpype/style/data.json b/openpype/style/data.json index a76a77015b..15d9472e3e 100644 --- a/openpype/style/data.json +++ b/openpype/style/data.json @@ -61,7 +61,11 @@ "icon-entity-default": "#bfccd6", "icon-entity-disabled": "#808080", "font-entity-deprecated": "#666666", - + "overlay-messages": { + "close-btn": "#D3D8DE", + "bg-success": "#458056", + "bg-success-hover": "#55a066" + }, "tab-widget": { "bg": "#21252B", "bg-selected": "#434a56", diff --git a/openpype/style/style.css b/openpype/style/style.css index b5f6962eee..d76d833be1 100644 --- a/openpype/style/style.css +++ b/openpype/style/style.css @@ -687,6 +687,26 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { background: none; } +/* Messages overlay */ +#OverlayMessageWidget { + border-radius: 0.2em; + background: {color:bg-buttons}; +} + +#OverlayMessageWidget:hover { + background: {color:bg-button-hover}; +} +#OverlayMessageWidget { + background: {color:overlay-messages:bg-success}; +} +#OverlayMessageWidget:hover { + background: {color:overlay-messages:bg-success-hover}; +} + 
+#OverlayMessageWidget QWidget { + background: transparent; +} + /* Password dialog*/ #PasswordBtn { border: none; @@ -836,23 +856,45 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { } /* New Create/Publish UI */ +#CreatorDetailedDescription { + padding-left: 5px; + padding-right: 5px; + padding-top: 5px; + background: transparent; + border: 1px solid {color:border}; +} + #CreateDialogHelpButton { background: rgba(255, 255, 255, 31); + border-top-left-radius: 0.2em; + border-bottom-left-radius: 0.2em; border-top-right-radius: 0; border-bottom-right-radius: 0; font-size: 10pt; font-weight: bold; - padding: 3px 3px 3px 3px; + padding: 0px; } #CreateDialogHelpButton:hover { background: rgba(255, 255, 255, 63); } +#CreateDialogHelpButton QWidget { + background: transparent; +} #PublishLogConsole { font-family: "Noto Sans Mono"; } - +VariantInputsWidget QLineEdit { + border-bottom-right-radius: 0px; + border-top-right-radius: 0px; +} +VariantInputsWidget QToolButton { + border-bottom-left-radius: 0px; + border-top-left-radius: 0px; + padding-top: 0.5em; + padding-bottom: 0.5em; +} #VariantInput[state="new"], #VariantInput[state="new"]:focus, #VariantInput[state="new"]:hover { border-color: {color:publisher:success}; } @@ -985,7 +1027,44 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { border-left: 1px solid {color:border}; } -#TasksCombobox[state="invalid"], #AssetNameInput[state="invalid"] { +#AssetNameInputWidget { + background: {color:bg-inputs}; + border: 1px solid {color:border}; + border-radius: 0.3em; +} + +#AssetNameInputWidget QWidget { + background: transparent; +} + +#AssetNameInputButton { + border-bottom-left-radius: 0px; + border-top-left-radius: 0px; + padding: 0px; + qproperty-iconSize: 11px 11px; + border-left: 1px solid {color:border}; + border-right: none; + border-top: none; + border-bottom: none; +} + +#AssetNameInput { + border-bottom-right-radius: 0px; + border-top-right-radius: 0px; + border: none; +} + 
+#AssetNameInputWidget:hover { + border-color: {color:border-hover}; +} +#AssetNameInputWidget:focus{ + border-color: {color:border-focus}; +} +#AssetNameInputWidget:disabled { + background: {color:bg-inputs-disabled}; +} + +#TasksCombobox[state="invalid"], #AssetNameInputWidget[state="invalid"], #AssetNameInputButton[state="invalid"] { border-color: {color:publisher:error}; } @@ -1331,3 +1410,11 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { #LikeDisabledInput:focus { border-color: {color:border}; } + +/* Attribute Definition widgets */ +InViewButton, InViewButton:disabled { + background: transparent; +} +InViewButton:hover { + background: rgba(255, 255, 255, 37); +} diff --git a/openpype/tests/test_avalon_plugin_presets.py b/openpype/tests/test_avalon_plugin_presets.py index c491be1c05..464c216d6f 100644 --- a/openpype/tests/test_avalon_plugin_presets.py +++ b/openpype/tests/test_avalon_plugin_presets.py @@ -1,6 +1,5 @@ -import avalon.api as api -import openpype from openpype.pipeline import ( + install_host, LegacyCreator, register_creator_plugin, discover_creator_plugins, @@ -23,15 +22,14 @@ class Test: __name__ = "test" ls = len - def __call__(self): - pass + @staticmethod + def install(): + register_creator_plugin(MyTestCreator) def test_avalon_plugin_presets(monkeypatch, printer): + install_host(Test) - openpype.install() - api.register_host(Test()) - register_creator_plugin(MyTestCreator) plugins = discover_creator_plugins() printer("Test if we got our test plugin") assert MyTestCreator in plugins diff --git a/openpype/tools/adobe_webserver/app.py b/openpype/tools/adobe_webserver/app.py index b79d6c6c60..3911baf7ac 100644 --- a/openpype/tools/adobe_webserver/app.py +++ b/openpype/tools/adobe_webserver/app.py @@ -16,7 +16,7 @@ from wsrpc_aiohttp import ( WSRPCClient ) -from avalon import api +from openpype.pipeline import legacy_io log = logging.getLogger(__name__) @@ -80,9 +80,9 @@ class WebServerTool: loop=asyncio.get_event_loop()) await 
client.connect() - project = api.Session["AVALON_PROJECT"] - asset = api.Session["AVALON_ASSET"] - task = api.Session["AVALON_TASK"] + project = legacy_io.Session["AVALON_PROJECT"] + asset = legacy_io.Session["AVALON_ASSET"] + task = legacy_io.Session["AVALON_TASK"] log.info("Sending context change to {}-{}-{}".format(project, asset, task)) diff --git a/openpype/tools/context_dialog/window.py b/openpype/tools/context_dialog/window.py index 9e030853bf..3b544bd375 100644 --- a/openpype/tools/context_dialog/window.py +++ b/openpype/tools/context_dialog/window.py @@ -2,9 +2,9 @@ import os import json from Qt import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB from openpype import style +from openpype.pipeline import AvalonMongoDB from openpype.tools.utils.lib import center_window from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from openpype.tools.utils.constants import ( diff --git a/openpype/tools/creator/window.py b/openpype/tools/creator/window.py index 51cc66e715..e0c329fb78 100644 --- a/openpype/tools/creator/window.py +++ b/openpype/tools/creator/window.py @@ -4,16 +4,14 @@ import re from Qt import QtWidgets, QtCore -from avalon import api, io - from openpype import style from openpype.api import get_current_project_settings from openpype.tools.utils.lib import qt_app_context +from openpype.pipeline import legacy_io from openpype.pipeline.create import ( SUBSET_NAME_ALLOWED_SYMBOLS, legacy_create, CreatorError, - LegacyCreator, ) from .model import CreatorsModel @@ -220,7 +218,7 @@ class CreatorWindow(QtWidgets.QDialog): asset_doc = None if creator_plugin: # Get the asset from the database which match with the name - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"name": asset_name, "type": "asset"}, projection={"_id": 1} ) @@ -237,9 +235,9 @@ class CreatorWindow(QtWidgets.QDialog): self._set_valid_state(False) return - project_name = io.Session["AVALON_PROJECT"] + project_name = 
legacy_io.Session["AVALON_PROJECT"] asset_id = asset_doc["_id"] - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] # Calculate subset name with Creator plugin subset_name = creator_plugin.get_subset_name( @@ -271,7 +269,7 @@ class CreatorWindow(QtWidgets.QDialog): self._subset_name_input.setText(subset_name) # Get all subsets of the current asset - subset_docs = io.find( + subset_docs = legacy_io.find( { "type": "subset", "parent": asset_id @@ -372,7 +370,7 @@ class CreatorWindow(QtWidgets.QDialog): self.setStyleSheet(style.load_stylesheet()) def refresh(self): - self._asset_name_input.setText(io.Session["AVALON_ASSET"]) + self._asset_name_input.setText(legacy_io.Session["AVALON_ASSET"]) self._creators_model.reset() @@ -385,7 +383,7 @@ class CreatorWindow(QtWidgets.QDialog): ) current_index = None family = None - task_name = io.Session.get("AVALON_TASK", None) + task_name = legacy_io.Session.get("AVALON_TASK", None) lowered_task_name = task_name.lower() if task_name: for _family, _task_names in pype_project_setting.items(): @@ -471,7 +469,7 @@ class CreatorWindow(QtWidgets.QDialog): self._msg_timer.start() -def show(debug=False, parent=None): +def show(parent=None): """Display asset creator GUI Arguments: @@ -488,24 +486,6 @@ def show(debug=False, parent=None): except (AttributeError, RuntimeError): pass - if debug: - from avalon import mock - for creator in mock.creators: - api.register_plugin(LegacyCreator, creator) - - import traceback - sys.excepthook = lambda typ, val, tb: traceback.print_last() - - io.install() - - any_project = next( - project for project in io.projects() - if project.get("active", True) is not False - ) - - api.Session["AVALON_PROJECT"] = any_project["name"] - module.project = any_project["name"] - with qt_app_context(): window = CreatorWindow(parent) window.refresh() diff --git a/openpype/tools/launcher/window.py b/openpype/tools/launcher/window.py index d80b3eabf0..dab6949613 100644 --- 
a/openpype/tools/launcher/window.py +++ b/openpype/tools/launcher/window.py @@ -3,10 +3,9 @@ import logging from Qt import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB - from openpype import style from openpype.api import resources +from openpype.pipeline import AvalonMongoDB import qtawesome from .models import ( diff --git a/openpype/tools/libraryloader/app.py b/openpype/tools/libraryloader/app.py index b73b415128..7fda6bd6f9 100644 --- a/openpype/tools/libraryloader/app.py +++ b/openpype/tools/libraryloader/app.py @@ -2,8 +2,8 @@ import sys from Qt import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB from openpype import style +from openpype.pipeline import AvalonMongoDB from openpype.tools.utils import lib as tools_lib from openpype.tools.loader.widgets import ( ThumbnailWidget, @@ -16,8 +16,6 @@ from openpype.tools.utils.assets_widget import MultiSelectAssetsWidget from openpype.modules import ModulesManager -from . import lib - module = sys.modules[__name__] module.window = None @@ -260,14 +258,6 @@ class LibraryLoaderWindow(QtWidgets.QDialog): self.dbcon.Session["AVALON_PROJECT"] = project_name - _config = lib.find_config() - if hasattr(_config, "install"): - _config.install() - else: - print( - "Config `%s` has no function `install`" % _config.__name__ - ) - self._subsets_widget.on_project_change(project_name) if self._repres_widget: self._repres_widget.on_project_change(project_name) diff --git a/openpype/tools/libraryloader/lib.py b/openpype/tools/libraryloader/lib.py deleted file mode 100644 index 182b48893a..0000000000 --- a/openpype/tools/libraryloader/lib.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -import importlib -import logging - -log = logging.getLogger(__name__) - - -# `find_config` from `pipeline` -def find_config(): - log.info("Finding configuration for project..") - - config = os.environ["AVALON_CONFIG"] - - if not config: - raise EnvironmentError( - "No configuration found in " - "the project nor 
environment" - ) - - log.info("Found %s, loading.." % config) - return importlib.import_module(config) diff --git a/openpype/tools/loader/__main__.py b/openpype/tools/loader/__main__.py index 146ba7fd10..acf357aa97 100644 --- a/openpype/tools/loader/__main__.py +++ b/openpype/tools/loader/__main__.py @@ -19,12 +19,10 @@ def my_exception_hook(exctype, value, traceback): if __name__ == '__main__': - os.environ["AVALON_MONGO"] = "mongodb://localhost:27017" os.environ["OPENPYPE_MONGO"] = "mongodb://localhost:27017" os.environ["AVALON_DB"] = "avalon" os.environ["AVALON_TIMEOUT"] = "1000" os.environ["OPENPYPE_DEBUG"] = "1" - os.environ["AVALON_CONFIG"] = "pype" os.environ["AVALON_ASSET"] = "Jungle" # Set the exception hook to our wrapping function diff --git a/openpype/tools/loader/app.py b/openpype/tools/loader/app.py index 923a1fabdb..bb589c199d 100644 --- a/openpype/tools/loader/app.py +++ b/openpype/tools/loader/app.py @@ -1,10 +1,14 @@ import sys +import traceback from Qt import QtWidgets, QtCore -from avalon import api, io from openpype import style from openpype.lib import register_event_callback +from openpype.pipeline import ( + install_openpype_plugins, + legacy_io, +) from openpype.tools.utils import ( lib, PlaceholderLineEdit @@ -35,14 +39,14 @@ class LoaderWindow(QtWidgets.QDialog): def __init__(self, parent=None): super(LoaderWindow, self).__init__(parent) title = "Asset Loader 2.1" - project_name = api.Session.get("AVALON_PROJECT") + project_name = legacy_io.Session.get("AVALON_PROJECT") if project_name: title += " - {}".format(project_name) self.setWindowTitle(title) # Groups config - self.groups_config = lib.GroupsConfig(io) - self.family_config_cache = lib.FamilyConfigCache(io) + self.groups_config = lib.GroupsConfig(legacy_io) + self.family_config_cache = lib.FamilyConfigCache(legacy_io) # Enable minimize and maximize for app window_flags = QtCore.Qt.Window @@ -59,13 +63,13 @@ class LoaderWindow(QtWidgets.QDialog): # Assets widget assets_widget = 
MultiSelectAssetsWidget( - io, parent=left_side_splitter + legacy_io, parent=left_side_splitter ) assets_widget.set_current_asset_btn_visibility(True) # Families widget families_filter_view = FamilyListView( - io, self.family_config_cache, left_side_splitter + legacy_io, self.family_config_cache, left_side_splitter ) left_side_splitter.addWidget(assets_widget) left_side_splitter.addWidget(families_filter_view) @@ -75,7 +79,7 @@ class LoaderWindow(QtWidgets.QDialog): # --- Middle part --- # Subsets widget subsets_widget = SubsetWidget( - io, + legacy_io, self.groups_config, self.family_config_cache, tool_name=self.tool_name, @@ -86,8 +90,12 @@ class LoaderWindow(QtWidgets.QDialog): thumb_ver_splitter = QtWidgets.QSplitter(main_splitter) thumb_ver_splitter.setOrientation(QtCore.Qt.Vertical) - thumbnail_widget = ThumbnailWidget(io, parent=thumb_ver_splitter) - version_info_widget = VersionWidget(io, parent=thumb_ver_splitter) + thumbnail_widget = ThumbnailWidget( + legacy_io, parent=thumb_ver_splitter + ) + version_info_widget = VersionWidget( + legacy_io, parent=thumb_ver_splitter + ) thumb_ver_splitter.addWidget(thumbnail_widget) thumb_ver_splitter.addWidget(version_info_widget) @@ -104,7 +112,7 @@ class LoaderWindow(QtWidgets.QDialog): repres_widget = None if sync_server_enabled: repres_widget = RepresentationWidget( - io, self.tool_name, parent=thumb_ver_splitter + legacy_io, self.tool_name, parent=thumb_ver_splitter ) thumb_ver_splitter.addWidget(repres_widget) @@ -258,13 +266,15 @@ class LoaderWindow(QtWidgets.QDialog): # Refresh families config self._families_filter_view.refresh() # Change to context asset on context change - self._assets_widget.select_asset_by_name(io.Session["AVALON_ASSET"]) + self._assets_widget.select_asset_by_name( + legacy_io.Session["AVALON_ASSET"] + ) def _refresh(self): """Load assets from database""" # Ensure a project is loaded - project = io.find_one({"type": "project"}, {"type": 1}) + project = legacy_io.find_one({"type": 
"project"}, {"type": 1}) assert project, "Project was not found! This is a bug" self._assets_widget.refresh() @@ -561,17 +571,16 @@ def show(debug=False, parent=None, use_context=False): module.window = None if debug: - import traceback sys.excepthook = lambda typ, val, tb: traceback.print_last() - io.install() + legacy_io.install() any_project = next( - project for project in io.projects() + project for project in legacy_io.projects() if project.get("active", True) is not False ) - api.Session["AVALON_PROJECT"] = any_project["name"] + legacy_io.Session["AVALON_PROJECT"] = any_project["name"] module.project = any_project["name"] with lib.qt_app_context(): @@ -579,7 +588,7 @@ def show(debug=False, parent=None, use_context=False): window.show() if use_context: - context = {"asset": api.Session["AVALON_ASSET"]} + context = {"asset": legacy_io.Session["AVALON_ASSET"]} window.set_context(context, refresh=True) else: window.refresh() @@ -603,19 +612,11 @@ def cli(args): print("Entering Project: %s" % project) - io.install() + legacy_io.install() # Store settings - api.Session["AVALON_PROJECT"] = project + legacy_io.Session["AVALON_PROJECT"] = project - from avalon import pipeline - - # Find the set config - _config = pipeline.find_config() - if hasattr(_config, "install"): - _config.install() - else: - print("Config `%s` has no function `install`" % - _config.__name__) + install_openpype_plugins(project) show() diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py index 6cc6fae1fb..6f39c428be 100644 --- a/openpype/tools/loader/model.py +++ b/openpype/tools/loader/model.py @@ -1,13 +1,16 @@ import copy import re import math +import time from uuid import uuid4 from Qt import QtCore, QtGui import qtawesome -from avalon import schema -from openpype.pipeline import HeroVersionType +from openpype.pipeline import ( + HeroVersionType, + schema, +) from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, 
Item @@ -36,6 +39,14 @@ def is_filtering_recursible(): class BaseRepresentationModel(object): """Methods for SyncServer useful in multiple models""" + # Cheap & hackish way how to avoid refreshing of whole sync server module + # on each selection change + _last_project = None + _modules_manager = None + _last_project_cache = 0 + _last_manager_cache = 0 + _max_project_cache_time = 30 + _max_manager_cache_time = 60 def reset_sync_server(self, project_name=None): """Sets/Resets sync server vars after every change (refresh.)""" @@ -45,28 +56,53 @@ class BaseRepresentationModel(object): remote_site = remote_provider = None if not project_name: - project_name = self.dbcon.Session["AVALON_PROJECT"] + project_name = self.dbcon.Session.get("AVALON_PROJECT") else: self.dbcon.Session["AVALON_PROJECT"] = project_name - if project_name: - manager = ModulesManager() - sync_server = manager.modules_by_name["sync_server"] + if not project_name: + self.repre_icons = repre_icons + self.sync_server = sync_server + self.active_site = active_site + self.active_provider = active_provider + self.remote_site = remote_site + self.remote_provider = remote_provider + return - if project_name in sync_server.get_enabled_projects(): - active_site = sync_server.get_active_site(project_name) - active_provider = sync_server.get_provider_for_site( - project_name, active_site) - if active_site == 'studio': # for studio use explicit icon - active_provider = 'studio' + now_time = time.time() + project_cache_diff = now_time - self._last_project_cache + if project_cache_diff > self._max_project_cache_time: + self._last_project = None - remote_site = sync_server.get_remote_site(project_name) - remote_provider = sync_server.get_provider_for_site( - project_name, remote_site) - if remote_site == 'studio': # for studio use explicit icon - remote_provider = 'studio' + if project_name == self._last_project: + return - repre_icons = lib.get_repre_icons() + self._last_project = project_name + 
self._last_project_cache = now_time + + manager_cache_diff = now_time - self._last_manager_cache + if manager_cache_diff > self._max_manager_cache_time: + self._modules_manager = None + + if self._modules_manager is None: + self._modules_manager = ModulesManager() + self._last_manager_cache = now_time + + sync_server = self._modules_manager.modules_by_name["sync_server"] + if sync_server.is_project_enabled(project_name): + active_site = sync_server.get_active_site(project_name) + active_provider = sync_server.get_provider_for_site( + project_name, active_site) + if active_site == 'studio': # for studio use explicit icon + active_provider = 'studio' + + remote_site = sync_server.get_remote_site(project_name) + remote_provider = sync_server.get_provider_for_site( + project_name, remote_site) + if remote_site == 'studio': # for studio use explicit icon + remote_provider = 'studio' + + repre_icons = lib.get_repre_icons() self.repre_icons = repre_icons self.sync_server = sync_server diff --git a/openpype/tools/mayalookassigner/app.py b/openpype/tools/mayalookassigner/app.py index 0e633a21e3..1b6cad77a8 100644 --- a/openpype/tools/mayalookassigner/app.py +++ b/openpype/tools/mayalookassigner/app.py @@ -4,8 +4,8 @@ import logging from Qt import QtWidgets, QtCore -from avalon import io from openpype import style +from openpype.pipeline import legacy_io from openpype.tools.utils.lib import qt_app_context from openpype.hosts.maya.api.lib import assign_look_by_version @@ -227,9 +227,13 @@ class MayaLookAssignerWindow(QtWidgets.QWidget): continue # Get the latest version of this asset's look subset - version = io.find_one({"type": "version", - "parent": assign_look["_id"]}, - sort=[("name", -1)]) + version = legacy_io.find_one( + { + "type": "version", + "parent": assign_look["_id"] + }, + sort=[("name", -1)] + ) subset_name = assign_look["name"] self.echo("{} Assigning {} to {}\t".format(prefix, diff --git a/openpype/tools/mayalookassigner/commands.py 
b/openpype/tools/mayalookassigner/commands.py index 78fd51c7a3..d41d8ca5a2 100644 --- a/openpype/tools/mayalookassigner/commands.py +++ b/openpype/tools/mayalookassigner/commands.py @@ -5,9 +5,11 @@ import os from bson.objectid import ObjectId import maya.cmds as cmds -from avalon import io, api - -from openpype.pipeline import remove_container +from openpype.pipeline import ( + legacy_io, + remove_container, + registered_host, +) from openpype.hosts.maya.api import lib from .vray_proxies import get_alembic_ids_cache @@ -79,7 +81,7 @@ def get_all_asset_nodes(): list: list of dictionaries """ - host = api.registered_host() + host = registered_host() nodes = [] for container in host.ls(): @@ -158,8 +160,10 @@ def create_items_from_nodes(nodes): return asset_view_items for _id, id_nodes in id_hashes.items(): - asset = io.find_one({"_id": ObjectId(_id)}, - projection={"name": True}) + asset = legacy_io.find_one( + {"_id": ObjectId(_id)}, + projection={"name": True} + ) # Skip if asset id is not found if not asset: @@ -192,7 +196,7 @@ def remove_unused_looks(): """ - host = api.registered_host() + host = registered_host() unused = [] for container in host.ls(): diff --git a/openpype/tools/mayalookassigner/vray_proxies.py b/openpype/tools/mayalookassigner/vray_proxies.py index 25621fc652..3523b24bf3 100644 --- a/openpype/tools/mayalookassigner/vray_proxies.py +++ b/openpype/tools/mayalookassigner/vray_proxies.py @@ -11,13 +11,13 @@ from bson.objectid import ObjectId import alembic.Abc from maya import cmds -from avalon import io, api - from openpype.pipeline import ( + legacy_io, load_container, loaders_from_representation, discover_loader_plugins, get_representation_path, + registered_host, ) from openpype.hosts.maya.api import lib @@ -157,9 +157,11 @@ def get_look_relationships(version_id): dict: Dictionary of relations. 
""" - json_representation = io.find_one({"type": "representation", - "parent": version_id, - "name": "json"}) + json_representation = legacy_io.find_one({ + "type": "representation", + "parent": version_id, + "name": "json" + }) # Load relationships shader_relation = get_representation_path(json_representation) @@ -183,12 +185,14 @@ def load_look(version_id): """ # Get representations of shader file and relationships - look_representation = io.find_one({"type": "representation", - "parent": version_id, - "name": "ma"}) + look_representation = legacy_io.find_one({ + "type": "representation", + "parent": version_id, + "name": "ma" + }) # See if representation is already loaded, if so reuse it. - host = api.registered_host() + host = registered_host() representation_id = str(look_representation['_id']) for container in host.ls(): if (container['loader'] == "LookLoader" and @@ -231,15 +235,21 @@ def get_latest_version(asset_id, subset): RuntimeError: When subset or version doesn't exist. """ - subset = io.find_one({"name": subset, - "parent": ObjectId(asset_id), - "type": "subset"}) + subset = legacy_io.find_one({ + "name": subset, + "parent": ObjectId(asset_id), + "type": "subset" + }) if not subset: raise RuntimeError("Subset does not exist: %s" % subset) - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = legacy_io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) if not version: raise RuntimeError("Version does not exist.") diff --git a/openpype/tools/project_manager/project_manager/model.py b/openpype/tools/project_manager/project_manager/model.py index 1c3ec089f6..223cfa629d 100644 --- a/openpype/tools/project_manager/project_manager/model.py +++ b/openpype/tools/project_manager/project_manager/model.py @@ -7,6 +7,11 @@ from pymongo import UpdateOne, DeleteOne from Qt import QtCore, QtGui +from openpype.lib import ( + CURRENT_DOC_SCHEMAS, + PypeLogger, +) + from 
.constants import ( IDENTIFIER_ROLE, ITEM_TYPE_ROLE, @@ -18,8 +23,6 @@ from .constants import ( ) from .style import ResourceCache -from openpype.lib import CURRENT_DOC_SCHEMAS - class ProjectModel(QtGui.QStandardItemModel): """Load possible projects to modify from MongoDB. @@ -185,6 +188,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): for key in self.multiselection_columns } + self._log = None # TODO Reset them on project change self._current_project = None self._root_item = None @@ -194,6 +198,12 @@ class HierarchyModel(QtCore.QAbstractItemModel): self._reset_root_item() + @property + def log(self): + if self._log is None: + self._log = PypeLogger.get_logger("ProjectManagerModel") + return self._log + @property def items_by_id(self): return self._items_by_id @@ -1367,6 +1377,9 @@ class HierarchyModel(QtCore.QAbstractItemModel): to_process = collections.deque() to_process.append(project_item) + created_count = 0 + updated_count = 0 + removed_count = 0 bulk_writes = [] while to_process: parent = to_process.popleft() @@ -1381,6 +1394,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): insert_list.append(item) elif item.data(REMOVED_ROLE): + removed_count += 1 if item.data(HIERARCHY_CHANGE_ABLE_ROLE): bulk_writes.append(DeleteOne( {"_id": item.asset_id} @@ -1394,6 +1408,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): else: update_data = item.update_data() if update_data: + updated_count += 1 bulk_writes.append(UpdateOne( {"_id": item.asset_id}, update_data @@ -1406,11 +1421,21 @@ class HierarchyModel(QtCore.QAbstractItemModel): result = project_col.insert_many(new_docs) for idx, mongo_id in enumerate(result.inserted_ids): + created_count += 1 insert_list[idx].mongo_id = mongo_id + if sum([created_count, updated_count, removed_count]) == 0: + self.log.info("Nothing has changed") + return + if bulk_writes: project_col.bulk_write(bulk_writes) + self.log.info(( + "Save finished." 
+ " Created {} | Updated {} | Removed {} asset documents" + ).format(created_count, updated_count, removed_count)) + self.refresh_project() def copy_mime_data(self, indexes): @@ -1447,12 +1472,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): mimedata.setData("application/copy_task", encoded_data) return mimedata - def paste_mime_data(self, index, mime_data): - if not index.isValid(): - return - - item_id = index.data(IDENTIFIER_ROLE) - item = self._items_by_id[item_id] + def _paste_mime_data(self, item, mime_data): if not isinstance(item, (AssetItem, TaskItem)): return @@ -1486,6 +1506,25 @@ class HierarchyModel(QtCore.QAbstractItemModel): task_item = TaskItem(task_data, True) self.add_item(task_item, parent) + def paste(self, indexes, mime_data): + + # Get the selected Assets uniquely + items = set() + for index in indexes: + if not index.isValid(): + return + item_id = index.data(IDENTIFIER_ROLE) + item = self._items_by_id[item_id] + + # Do not copy into the Task Item so get parent Asset instead + if isinstance(item, TaskItem): + item = item.parent() + + items.add(item) + + for item in items: + self._paste_mime_data(item, mime_data) + class BaseItem: """Base item for HierarchyModel. 
@@ -1819,12 +1858,16 @@ class AssetItem(BaseItem): } query_projection = { "_id": 1, - "data.tasks": 1, - "data.visualParent": 1, - "schema": 1, - "name": 1, + "schema": 1, "type": 1, + "parent": 1, + + "data.visualParent": 1, + "data.parents": 1, + + "data.tasks": 1, + "data.frameStart": 1, "data.frameEnd": 1, "data.fps": 1, @@ -1835,7 +1878,7 @@ class AssetItem(BaseItem): "data.clipIn": 1, "data.clipOut": 1, "data.pixelAspect": 1, - "data.tools_env": 1 + "data.tools_env": 1, } def __init__(self, asset_doc): diff --git a/openpype/tools/project_manager/project_manager/view.py b/openpype/tools/project_manager/project_manager/view.py index 74f5a06b71..7d9f1a7323 100644 --- a/openpype/tools/project_manager/project_manager/view.py +++ b/openpype/tools/project_manager/project_manager/view.py @@ -195,13 +195,13 @@ class HierarchyView(QtWidgets.QTreeView): for idx, width in widths_by_idx.items(): self.setColumnWidth(idx, width) - def set_project(self, project_name): + def set_project(self, project_name, force=False): # Trigger helpers first self._project_doc_cache.set_project(project_name) self._tools_cache.refresh() # Trigger update of model after all data for delegates are filled - self._source_model.set_project(project_name) + self._source_model.set_project(project_name, force) def _on_project_reset(self): self.header_init() @@ -365,20 +365,24 @@ class HierarchyView(QtWidgets.QTreeView): event.accept() def _copy_items(self, indexes=None): + clipboard = QtWidgets.QApplication.clipboard() try: if indexes is None: indexes = self.selectedIndexes() mime_data = self._source_model.copy_mime_data(indexes) - QtWidgets.QApplication.clipboard().setMimeData(mime_data) + clipboard.setMimeData(mime_data) self._show_message("Tasks copied") except ValueError as exc: + # Change clipboard to contain empty data + empty_mime_data = QtCore.QMimeData() + clipboard.setMimeData(empty_mime_data) self._show_message(str(exc)) def _paste_items(self): - index = self.currentIndex() mime_data = 
QtWidgets.QApplication.clipboard().mimeData() - self._source_model.paste_mime_data(index, mime_data) + rows = self.selectionModel().selectedRows() + self._source_model.paste(rows, mime_data) def _delete_items(self, indexes=None): if indexes is None: diff --git a/openpype/tools/project_manager/project_manager/widgets.py b/openpype/tools/project_manager/project_manager/widgets.py index 39ea833961..dc75b30bd7 100644 --- a/openpype/tools/project_manager/project_manager/widgets.py +++ b/openpype/tools/project_manager/project_manager/widgets.py @@ -10,11 +10,11 @@ from openpype.lib import ( PROJECT_NAME_REGEX ) from openpype.style import load_stylesheet +from openpype.pipeline import AvalonMongoDB from openpype.tools.utils import ( PlaceholderLineEdit, get_warning_pixmap ) -from avalon.api import AvalonMongoDB from Qt import QtWidgets, QtCore, QtGui diff --git a/openpype/tools/project_manager/project_manager/window.py b/openpype/tools/project_manager/project_manager/window.py index bdf32c7415..6a2bc29fd1 100644 --- a/openpype/tools/project_manager/project_manager/window.py +++ b/openpype/tools/project_manager/project_manager/window.py @@ -16,6 +16,7 @@ from .style import ResourceCache from openpype.style import load_stylesheet from openpype.lib import is_admin_password_required from openpype.widgets import PasswordDialog +from openpype.pipeline import AvalonMongoDB from openpype import resources from openpype.api import ( @@ -23,7 +24,6 @@ from openpype.api import ( create_project_folders, Logger ) -from avalon.api import AvalonMongoDB class ProjectManagerWindow(QtWidgets.QWidget): @@ -184,14 +184,14 @@ class ProjectManagerWindow(QtWidgets.QWidget): self.resize(1200, 600) self.setStyleSheet(load_stylesheet()) - def _set_project(self, project_name=None): + def _set_project(self, project_name=None, force=False): self._create_folders_btn.setEnabled(project_name is not None) self._remove_projects_btn.setEnabled(project_name is not None) 
self._add_asset_btn.setEnabled(project_name is not None) self._add_task_btn.setEnabled(project_name is not None) self._save_btn.setEnabled(project_name is not None) self._project_proxy_model.set_filter_default(project_name is not None) - self.hierarchy_view.set_project(project_name) + self.hierarchy_view.set_project(project_name, force) def _current_project(self): row = self._project_combobox.currentIndex() @@ -229,11 +229,11 @@ class ProjectManagerWindow(QtWidgets.QWidget): self._project_combobox.setCurrentIndex(row) selected_project = self._current_project() - self._set_project(selected_project) + self._set_project(selected_project, True) def _on_project_change(self): selected_project = self._current_project() - self._set_project(selected_project) + self._set_project(selected_project, False) def _on_project_refresh(self): self.refresh_projects() diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 6707feac9c..2973d6a5bb 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -11,10 +11,12 @@ try: except Exception: from openpype.lib.python_2_comp import WeakMethod -import avalon.api import pyblish.api -from openpype.pipeline import PublishValidationError +from openpype.pipeline import ( + PublishValidationError, + registered_host, +) from openpype.pipeline.create import CreateContext from Qt import QtCore @@ -353,7 +355,7 @@ class PublisherController: """ def __init__(self, dbcon=None, headless=False): self.log = logging.getLogger("PublisherController") - self.host = avalon.api.registered_host() + self.host = registered_host() self.headless = headless self.create_context = CreateContext( diff --git a/openpype/tools/publisher/widgets/assets_widget.py b/openpype/tools/publisher/widgets/assets_widget.py index 984da59c77..46fdcc6526 100644 --- a/openpype/tools/publisher/widgets/assets_widget.py +++ b/openpype/tools/publisher/widgets/assets_widget.py @@ -15,6 +15,7 @@ from 
openpype.tools.utils.assets_widget import ( class CreateDialogAssetsWidget(SingleSelectAssetsWidget): current_context_required = QtCore.Signal() + header_height_changed = QtCore.Signal(int) def __init__(self, controller, parent): self._controller = controller @@ -27,6 +28,27 @@ class CreateDialogAssetsWidget(SingleSelectAssetsWidget): self._last_selection = None self._enabled = None + self._last_filter_height = None + + def _check_header_height(self): + """Catch header height changes. + + Label on top of creaters should have same height so Creators view has + same offset. + """ + height = self.header_widget.height() + if height != self._last_filter_height: + self._last_filter_height = height + self.header_height_changed.emit(height) + + def resizeEvent(self, event): + super(CreateDialogAssetsWidget, self).resizeEvent(event) + self._check_header_height() + + def showEvent(self, event): + super(CreateDialogAssetsWidget, self).showEvent(event) + self._check_header_height() + def _on_current_asset_click(self): self.current_context_required.emit() @@ -71,6 +93,7 @@ class AssetsHierarchyModel(QtGui.QStandardItemModel): Uses controller to load asset hierarchy. All asset documents are stored by their parents. 
""" + def __init__(self, controller): super(AssetsHierarchyModel, self).__init__() self._controller = controller @@ -143,6 +166,7 @@ class AssetsHierarchyModel(QtGui.QStandardItemModel): class AssetsDialog(QtWidgets.QDialog): """Dialog to select asset for a context of instance.""" + def __init__(self, controller, parent): super(AssetsDialog, self).__init__(parent) self.setWindowTitle("Select asset") @@ -196,9 +220,26 @@ class AssetsDialog(QtWidgets.QDialog): # - adds ability to call reset on multiple places without repeating self._soft_reset_enabled = True + self._first_show = True + self._default_height = 500 + + def _on_first_show(self): + center = self.rect().center() + size = self.size() + size.setHeight(self._default_height) + + self.resize(size) + new_pos = self.mapToGlobal(center) + new_pos.setX(new_pos.x() - int(self.width() / 2)) + new_pos.setY(new_pos.y() - int(self.height() / 2)) + self.move(new_pos) + def showEvent(self, event): """Refresh asset model on show.""" super(AssetsDialog, self).showEvent(event) + if self._first_show: + self._first_show = False + self._on_first_show() # Refresh on show self.reset(False) diff --git a/openpype/tools/publisher/widgets/create_dialog.py b/openpype/tools/publisher/widgets/create_dialog.py index 7d98609c2c..9e357f3a56 100644 --- a/openpype/tools/publisher/widgets/create_dialog.py +++ b/openpype/tools/publisher/widgets/create_dialog.py @@ -3,6 +3,7 @@ import re import traceback import copy +import qtawesome try: import commonmark except Exception: @@ -13,8 +14,11 @@ from openpype.pipeline.create import ( CreatorError, SUBSET_NAME_ALLOWED_SYMBOLS ) - -from openpype.tools.utils import ErrorMessageBox +from openpype.tools.utils import ( + ErrorMessageBox, + MessageOverlayObject, + ClickableFrame, +) from .widgets import IconValuePixmapLabel from .assets_widget import CreateDialogAssetsWidget @@ -29,6 +33,14 @@ from ..constants import ( SEPARATORS = ("---separator---", "---") +class VariantInputsWidget(QtWidgets.QWidget): 
+ resized = QtCore.Signal() + + def resizeEvent(self, event): + super(VariantInputsWidget, self).resizeEvent(event) + self.resized.emit() + + class CreateErrorMessageBox(ErrorMessageBox): def __init__( self, @@ -104,6 +116,8 @@ class CreateErrorMessageBox(ErrorMessageBox): # TODO add creator identifier/label to details class CreatorShortDescWidget(QtWidgets.QWidget): + height_changed = QtCore.Signal(int) + def __init__(self, parent=None): super(CreatorShortDescWidget, self).__init__(parent=parent) @@ -142,6 +156,22 @@ class CreatorShortDescWidget(QtWidgets.QWidget): self._family_label = family_label self._description_label = description_label + self._last_height = None + + def _check_height_change(self): + height = self.height() + if height != self._last_height: + self._last_height = height + self.height_changed.emit(height) + + def showEvent(self, event): + super(CreatorShortDescWidget, self).showEvent(event) + self._check_height_change() + + def resizeEvent(self, event): + super(CreatorShortDescWidget, self).resizeEvent(event) + self._check_height_change() + def set_plugin(self, plugin=None): if not plugin: self._icon_widget.set_icon_def(None) @@ -158,13 +188,43 @@ class CreatorShortDescWidget(QtWidgets.QWidget): self._description_label.setText(description) -class HelpButton(QtWidgets.QPushButton): - resized = QtCore.Signal() +class HelpButton(ClickableFrame): + resized = QtCore.Signal(int) + question_mark_icon_name = "fa.question" + help_icon_name = "fa.question-circle" + hide_icon_name = "fa.angle-left" def __init__(self, *args, **kwargs): super(HelpButton, self).__init__(*args, **kwargs) self.setObjectName("CreateDialogHelpButton") + question_mark_label = QtWidgets.QLabel(self) + help_widget = QtWidgets.QWidget(self) + + help_question = QtWidgets.QLabel(help_widget) + help_label = QtWidgets.QLabel("Help", help_widget) + hide_icon = QtWidgets.QLabel(help_widget) + + help_layout = QtWidgets.QHBoxLayout(help_widget) + help_layout.setContentsMargins(0, 0, 5, 0) + 
help_layout.addWidget(help_question, 0) + help_layout.addWidget(help_label, 0) + help_layout.addStretch(1) + help_layout.addWidget(hide_icon, 0) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(0) + layout.addWidget(question_mark_label, 0) + layout.addWidget(help_widget, 1) + + help_widget.setVisible(False) + + self._question_mark_label = question_mark_label + self._help_widget = help_widget + self._help_question = help_question + self._hide_icon = hide_icon + self._expanded = None self.set_expanded() @@ -174,31 +234,56 @@ class HelpButton(QtWidgets.QPushButton): return expanded = False self._expanded = expanded - if expanded: - text = "<" + self._help_widget.setVisible(expanded) + self._update_content() + + def _update_content(self): + width = self.get_icon_width() + if self._expanded: + question_mark_pix = QtGui.QPixmap(width, width) + question_mark_pix.fill(QtCore.Qt.transparent) + else: - text = "?" - self.setText(text) + question_mark_icon = qtawesome.icon( + self.question_mark_icon_name, color=QtCore.Qt.white + ) + question_mark_pix = question_mark_icon.pixmap(width, width) - self._update_size() + hide_icon = qtawesome.icon( + self.hide_icon_name, color=QtCore.Qt.white + ) + help_question_icon = qtawesome.icon( + self.help_icon_name, color=QtCore.Qt.white + ) + self._question_mark_label.setPixmap(question_mark_pix) + self._question_mark_label.setMaximumWidth(width) + self._hide_icon.setPixmap(hide_icon.pixmap(width, width)) + self._help_question.setPixmap(help_question_icon.pixmap(width, width)) - def _update_size(self): - new_size = self.minimumSizeHint() - if self.size() != new_size: - self.resize(new_size) - self.resized.emit() + def get_icon_width(self): + metrics = self.fontMetrics() + return metrics.height() + + def set_pos_and_size(self, pos_x, pos_y, width, height): + update_icon = self.height() != height + self.move(pos_x, pos_y) + self.resize(width, height) + + if update_icon: + 
self._update_content() + self.updateGeometry() def showEvent(self, event): super(HelpButton, self).showEvent(event) - self._update_size() + self.resized.emit(self.height()) def resizeEvent(self, event): super(HelpButton, self).resizeEvent(event) - self._update_size() + self.resized.emit(self.height()) class CreateDialog(QtWidgets.QDialog): - default_size = (900, 500) + default_size = (1000, 560) def __init__( self, controller, asset_name=None, task_name=None, parent=None @@ -231,6 +316,8 @@ class CreateDialog(QtWidgets.QDialog): self._name_pattern = name_pattern self._compiled_name_pattern = re.compile(name_pattern) + overlay_object = MessageOverlayObject(self) + context_widget = QtWidgets.QWidget(self) assets_widget = CreateDialogAssetsWidget(controller, context_widget) @@ -243,44 +330,50 @@ class CreateDialog(QtWidgets.QDialog): context_layout.addWidget(tasks_widget, 1) # --- Creators view --- + creators_header_widget = QtWidgets.QWidget(self) + header_label_widget = QtWidgets.QLabel( + "Choose family:", creators_header_widget + ) + creators_header_layout = QtWidgets.QHBoxLayout(creators_header_widget) + creators_header_layout.setContentsMargins(0, 0, 0, 0) + creators_header_layout.addWidget(header_label_widget, 1) + creators_view = QtWidgets.QListView(self) creators_model = QtGui.QStandardItemModel() creators_view.setModel(creators_model) - variant_input = QtWidgets.QLineEdit(self) + variant_widget = VariantInputsWidget(self) + + variant_input = QtWidgets.QLineEdit(variant_widget) variant_input.setObjectName("VariantInput") variant_input.setToolTip(VARIANT_TOOLTIP) - variant_hints_btn = QtWidgets.QPushButton(self) - variant_hints_btn.setFixedWidth(18) + variant_hints_btn = QtWidgets.QToolButton(variant_widget) + variant_hints_btn.setArrowType(QtCore.Qt.DownArrow) + variant_hints_btn.setIconSize(QtCore.QSize(12, 12)) - variant_hints_menu = QtWidgets.QMenu(variant_hints_btn) + variant_hints_menu = QtWidgets.QMenu(variant_widget) variant_hints_group = 
QtWidgets.QActionGroup(variant_hints_menu) - variant_hints_btn.setMenu(variant_hints_menu) - variant_layout = QtWidgets.QHBoxLayout() + variant_layout = QtWidgets.QHBoxLayout(variant_widget) variant_layout.setContentsMargins(0, 0, 0, 0) variant_layout.setSpacing(0) variant_layout.addWidget(variant_input, 1) - variant_layout.addWidget(variant_hints_btn, 0) + variant_layout.addWidget(variant_hints_btn, 0, QtCore.Qt.AlignVCenter) subset_name_input = QtWidgets.QLineEdit(self) subset_name_input.setEnabled(False) - create_btn = QtWidgets.QPushButton("Create", self) - create_btn.setEnabled(False) - form_layout = QtWidgets.QFormLayout() - form_layout.addRow("Variant:", variant_layout) + form_layout.addRow("Variant:", variant_widget) form_layout.addRow("Subset:", subset_name_input) mid_widget = QtWidgets.QWidget(self) mid_layout = QtWidgets.QVBoxLayout(mid_widget) mid_layout.setContentsMargins(0, 0, 0, 0) - mid_layout.addWidget(QtWidgets.QLabel("Choose family:", self)) + mid_layout.addWidget(creators_header_widget, 0) mid_layout.addWidget(creators_view, 1) mid_layout.addLayout(form_layout, 0) - mid_layout.addWidget(create_btn, 0) # ------------ # --- Creator short info and attr defs --- @@ -290,31 +383,62 @@ class CreateDialog(QtWidgets.QDialog): creator_attrs_widget ) - separator_widget = QtWidgets.QWidget(self) - separator_widget.setObjectName("Separator") - separator_widget.setMinimumHeight(2) - separator_widget.setMaximumHeight(2) + attr_separator_widget = QtWidgets.QWidget(self) + attr_separator_widget.setObjectName("Separator") + attr_separator_widget.setMinimumHeight(1) + attr_separator_widget.setMaximumHeight(1) # Precreate attributes widget pre_create_widget = PreCreateWidget(creator_attrs_widget) + # Create button + create_btn_wrapper = QtWidgets.QWidget(creator_attrs_widget) + create_btn = QtWidgets.QPushButton("Create", create_btn_wrapper) + create_btn.setEnabled(False) + + create_btn_wrap_layout = QtWidgets.QHBoxLayout(create_btn_wrapper) + 
create_btn_wrap_layout.setContentsMargins(0, 0, 0, 0) + create_btn_wrap_layout.addStretch(1) + create_btn_wrap_layout.addWidget(create_btn, 0) + creator_attrs_layout = QtWidgets.QVBoxLayout(creator_attrs_widget) creator_attrs_layout.setContentsMargins(0, 0, 0, 0) creator_attrs_layout.addWidget(creator_short_desc_widget, 0) - creator_attrs_layout.addWidget(separator_widget, 0) + creator_attrs_layout.addWidget(attr_separator_widget, 0) creator_attrs_layout.addWidget(pre_create_widget, 1) + creator_attrs_layout.addWidget(create_btn_wrapper, 0) # ------------------------------------- # --- Detailed information about creator --- # Detailed description of creator - detail_description_widget = QtWidgets.QTextEdit(self) - detail_description_widget.setObjectName("InfoText") - detail_description_widget.setTextInteractionFlags( + detail_description_widget = QtWidgets.QWidget(self) + + detail_placoholder_widget = QtWidgets.QWidget( + detail_description_widget + ) + detail_placoholder_widget.setAttribute( + QtCore.Qt.WA_TranslucentBackground + ) + + detail_description_input = QtWidgets.QTextEdit( + detail_description_widget + ) + detail_description_input.setObjectName("CreatorDetailedDescription") + detail_description_input.setTextInteractionFlags( QtCore.Qt.TextBrowserInteraction ) - detail_description_widget.setVisible(False) - # ------------------------------------------- + detail_description_layout = QtWidgets.QVBoxLayout( + detail_description_widget + ) + detail_description_layout.setContentsMargins(0, 0, 0, 0) + detail_description_layout.setSpacing(0) + detail_description_layout.addWidget(detail_placoholder_widget, 0) + detail_description_layout.addWidget(detail_description_input, 1) + + detail_description_widget.setVisible(False) + + # ------------------------------------------- splitter_widget = QtWidgets.QSplitter(self) splitter_widget.addWidget(context_widget) splitter_widget.addWidget(mid_widget) @@ -329,32 +453,50 @@ class CreateDialog(QtWidgets.QDialog): 
layout.addWidget(splitter_widget, 1) # Floating help button + # - Create this button as last to be fully visible help_btn = HelpButton(self) prereq_timer = QtCore.QTimer() prereq_timer.setInterval(50) prereq_timer.setSingleShot(True) + desc_width_anim_timer = QtCore.QTimer() + desc_width_anim_timer.setInterval(10) + prereq_timer.timeout.connect(self._on_prereq_timer) + desc_width_anim_timer.timeout.connect(self._on_desc_animation) + help_btn.clicked.connect(self._on_help_btn) help_btn.resized.connect(self._on_help_btn_resize) + assets_widget.header_height_changed.connect( + self._on_asset_filter_height_change + ) + create_btn.clicked.connect(self._on_create) + variant_widget.resized.connect(self._on_variant_widget_resize) variant_input.returnPressed.connect(self._on_create) variant_input.textChanged.connect(self._on_variant_change) creators_view.selectionModel().currentChanged.connect( self._on_creator_item_change ) + variant_hints_btn.clicked.connect(self._on_variant_btn_click) variant_hints_menu.triggered.connect(self._on_variant_action) assets_widget.selection_changed.connect(self._on_asset_change) assets_widget.current_context_required.connect( self._on_current_session_context_request ) tasks_widget.task_changed.connect(self._on_task_change) + creator_short_desc_widget.height_changed.connect( + self._on_description_height_change + ) + splitter_widget.splitterMoved.connect(self._on_splitter_move) controller.add_plugins_refresh_callback(self._on_plugins_refresh) + self._overlay_object = overlay_object + self._splitter_widget = splitter_widget self._context_widget = context_widget @@ -368,18 +510,36 @@ class CreateDialog(QtWidgets.QDialog): self.variant_hints_menu = variant_hints_menu self.variant_hints_group = variant_hints_group + self._creators_header_widget = creators_header_widget self.creators_model = creators_model self.creators_view = creators_view self.create_btn = create_btn self._creator_short_desc_widget = creator_short_desc_widget 
self._pre_create_widget = pre_create_widget + self._attr_separator_widget = attr_separator_widget + + self._detail_placoholder_widget = detail_placoholder_widget self._detail_description_widget = detail_description_widget + self._detail_description_input = detail_description_input self._help_btn = help_btn self._prereq_timer = prereq_timer self._first_show = True + # Description animation + self._description_size_policy = detail_description_widget.sizePolicy() + self._desc_width_anim_timer = desc_width_anim_timer + self._desc_widget_step = 0 + self._last_description_width = None + self._last_full_width = 0 + self._expected_description_width = 0 + self._last_desc_max_width = None + self._other_widgets_widths = [] + + def _emit_message(self, message): + self._overlay_object.add_message(message) + def _context_change_is_enabled(self): return self._context_widget.isEnabled() @@ -443,14 +603,21 @@ class CreateDialog(QtWidgets.QDialog): def _invalidate_prereq(self): self._prereq_timer.start() + def _on_asset_filter_height_change(self, height): + self._creators_header_widget.setMinimumHeight(height) + self._creators_header_widget.setMaximumHeight(height) + def _on_prereq_timer(self): prereq_available = True + creator_btn_tooltips = [] if self.creators_model.rowCount() < 1: prereq_available = False + creator_btn_tooltips.append("Creator is not selected") if self._asset_doc is None: # QUESTION how to handle invalid asset? 
prereq_available = False + creator_btn_tooltips.append("Context is not selected") if prereq_available != self._prereq_available: self._prereq_available = prereq_available @@ -459,6 +626,12 @@ class CreateDialog(QtWidgets.QDialog): self.creators_view.setEnabled(prereq_available) self.variant_input.setEnabled(prereq_available) self.variant_hints_btn.setEnabled(prereq_available) + + tooltip = "" + if creator_btn_tooltips: + tooltip = "\n".join(creator_btn_tooltips) + self.create_btn.setToolTip(tooltip) + self._on_variant_change() def _refresh_asset(self): @@ -540,7 +713,7 @@ class CreateDialog(QtWidgets.QDialog): identifier = index.data(CREATOR_IDENTIFIER_ROLE) - self._set_creator(identifier) + self._set_creator_by_identifier(identifier) def _on_plugins_refresh(self): # Trigger refresh only if is visible @@ -564,65 +737,211 @@ class CreateDialog(QtWidgets.QDialog): if self._task_name: self._tasks_widget.select_task_name(self._task_name) + def _on_description_height_change(self): + # Use separator's 'y' position as height + height = self._attr_separator_widget.y() + self._detail_placoholder_widget.setMinimumHeight(height) + self._detail_placoholder_widget.setMaximumHeight(height) + def _on_creator_item_change(self, new_index, _old_index): identifier = None if new_index.isValid(): identifier = new_index.data(CREATOR_IDENTIFIER_ROLE) - self._set_creator(identifier) + self._set_creator_by_identifier(identifier) def _update_help_btn(self): - pos_x = self.width() - self._help_btn.width() - point = self._creator_short_desc_widget.rect().topRight() - mapped_point = self._creator_short_desc_widget.mapTo(self, point) - pos_y = mapped_point.y() - self._help_btn.move(max(0, pos_x), max(0, pos_y)) + short_desc_rect = self._creator_short_desc_widget.rect() - def _on_help_btn_resize(self): + # point = short_desc_rect.topRight() + point = short_desc_rect.center() + mapped_point = self._creator_short_desc_widget.mapTo(self, point) + # pos_y = mapped_point.y() + center_pos_y = 
mapped_point.y() + icon_width = self._help_btn.get_icon_width() + + _height = int(icon_width * 2.5) + height = min(_height, short_desc_rect.height()) + pos_y = center_pos_y - int(height / 2) + + pos_x = self.width() - icon_width + if self._detail_placoholder_widget.isVisible(): + pos_x -= ( + self._detail_placoholder_widget.width() + + self._splitter_widget.handle(3).width() + ) + + width = self.width() - pos_x + + self._help_btn.set_pos_and_size( + max(0, pos_x), max(0, pos_y), + width, height + ) + + def _on_help_btn_resize(self, height): + if self._creator_short_desc_widget.height() != height: + self._update_help_btn() + + def _on_splitter_move(self, *args): self._update_help_btn() def _on_help_btn(self): + if self._desc_width_anim_timer.isActive(): + return + final_size = self.size() cur_sizes = self._splitter_widget.sizes() - spacing = self._splitter_widget.handleWidth() + + if self._desc_widget_step == 0: + now_visible = self._detail_description_widget.isVisible() + else: + now_visible = self._desc_widget_step > 0 sizes = [] for idx, value in enumerate(cur_sizes): if idx < 3: sizes.append(value) - now_visible = self._detail_description_widget.isVisible() + self._last_full_width = final_size.width() + self._other_widgets_widths = list(sizes) + if now_visible: - width = final_size.width() - ( - spacing + self._detail_description_widget.width() - ) + cur_desc_width = self._detail_description_widget.width() + if cur_desc_width < 1: + cur_desc_width = 2 + step_size = int(cur_desc_width / 5) + if step_size < 1: + step_size = 1 + + step_size *= -1 + expected_width = 0 + desc_width = cur_desc_width - 1 + width = final_size.width() - 1 + min_max = desc_width + self._last_description_width = cur_desc_width else: - last_size = self._detail_description_widget.sizeHint().width() - width = final_size.width() + spacing + last_size - sizes.append(last_size) + self._detail_description_widget.setVisible(True) + handle = self._splitter_widget.handle(3) + desc_width = 
handle.sizeHint().width() + if self._last_description_width: + expected_width = self._last_description_width + else: + hint = self._detail_description_widget.sizeHint() + expected_width = hint.width() + + width = final_size.width() + desc_width + step_size = int(expected_width / 5) + if step_size < 1: + step_size = 1 + min_max = 0 + + if self._last_desc_max_width is None: + self._last_desc_max_width = ( + self._detail_description_widget.maximumWidth() + ) + self._detail_description_widget.setMinimumWidth(min_max) + self._detail_description_widget.setMaximumWidth(min_max) + self._expected_description_width = expected_width + self._desc_widget_step = step_size + + self._desc_width_anim_timer.start() + + sizes.append(desc_width) final_size.setWidth(width) - self._detail_description_widget.setVisible(not now_visible) self._splitter_widget.setSizes(sizes) self.resize(final_size) self._help_btn.set_expanded(not now_visible) + def _on_desc_animation(self): + current_width = self._detail_description_widget.width() + + desc_width = None + last_step = False + growing = self._desc_widget_step > 0 + + # Growing + if growing: + if current_width < self._expected_description_width: + desc_width = current_width + self._desc_widget_step + if desc_width >= self._expected_description_width: + desc_width = self._expected_description_width + last_step = True + + # Decreasing + elif self._desc_widget_step < 0: + if current_width > self._expected_description_width: + desc_width = current_width + self._desc_widget_step + if desc_width <= self._expected_description_width: + desc_width = self._expected_description_width + last_step = True + + if desc_width is None: + self._desc_widget_step = 0 + self._desc_width_anim_timer.stop() + return + + if last_step and not growing: + self._detail_description_widget.setVisible(False) + QtWidgets.QApplication.processEvents() + + width = self._last_full_width + handle_width = self._splitter_widget.handle(3).width() + if growing: + width += (handle_width 
+ desc_width) + else: + width -= self._last_description_width + if last_step: + width -= handle_width + else: + width += desc_width + + if not last_step or growing: + self._detail_description_widget.setMaximumWidth(desc_width) + self._detail_description_widget.setMinimumWidth(desc_width) + + window_size = self.size() + window_size.setWidth(width) + self.resize(window_size) + if not last_step: + return + + self._desc_widget_step = 0 + self._desc_width_anim_timer.stop() + + if not growing: + return + + self._detail_description_widget.setMinimumWidth(0) + self._detail_description_widget.setMaximumWidth( + self._last_desc_max_width + ) + self._detail_description_widget.setSizePolicy( + self._description_size_policy + ) + + sizes = list(self._other_widgets_widths) + sizes.append(desc_width) + self._splitter_widget.setSizes(sizes) + def _set_creator_detailed_text(self, creator): if not creator: - self._detail_description_widget.setPlainText("") + self._detail_description_input.setPlainText("") return detailed_description = creator.get_detail_description() or "" if commonmark: html = commonmark.commonmark(detailed_description) - self._detail_description_widget.setHtml(html) + self._detail_description_input.setHtml(html) else: - self._detail_description_widget.setMarkdown(detailed_description) + self._detail_description_input.setMarkdown(detailed_description) - def _set_creator(self, identifier): + def _set_creator_by_identifier(self, identifier): creator = self.controller.manual_creators.get(identifier) + self._set_creator(creator) + def _set_creator(self, creator): self._creator_short_desc_widget.set_plugin(creator) self._set_creator_detailed_text(creator) self._pre_create_widget.set_plugin(creator) @@ -660,6 +979,14 @@ class CreateDialog(QtWidgets.QDialog): self.variant_input.setText(default_variant or "Main") + def _on_variant_widget_resize(self): + self.variant_hints_btn.setFixedHeight(self.variant_input.height()) + + def _on_variant_btn_click(self): + pos = 
self.variant_hints_btn.rect().bottomLeft() + point = self.variant_hints_btn.mapToGlobal(pos) + self.variant_hints_menu.popup(point) + def _on_variant_action(self, action): value = action.text() if self.variant_input.text() != value: @@ -765,6 +1092,21 @@ class CreateDialog(QtWidgets.QDialog): self.variant_input.setProperty("state", state) self.variant_input.style().polish(self.variant_input) + def _on_first_show(self): + center = self.rect().center() + + width, height = self.default_size + self.resize(width, height) + part = int(width / 7) + self._splitter_widget.setSizes( + [part * 2, part * 2, width - (part * 4)] + ) + + new_pos = self.mapToGlobal(center) + new_pos.setX(new_pos.x() - int(self.width() / 2)) + new_pos.setY(new_pos.y() - int(self.height() / 2)) + self.move(new_pos) + def moveEvent(self, event): super(CreateDialog, self).moveEvent(event) self._last_pos = self.pos() @@ -773,13 +1115,7 @@ class CreateDialog(QtWidgets.QDialog): super(CreateDialog, self).showEvent(event) if self._first_show: self._first_show = False - width, height = self.default_size - self.resize(width, height) - - third_size = int(width / 3) - self._splitter_widget.setSizes( - [third_size, third_size, width - (2 * third_size)] - ) + self._on_first_show() if self._last_pos is not None: self.move(self._last_pos) @@ -840,7 +1176,10 @@ class CreateDialog(QtWidgets.QDialog): )) error_msg = str(exc_value) - if error_msg is not None: + if error_msg is None: + self._set_creator(self._selected_creator) + self._emit_message("Creation finished...") + else: box = CreateErrorMessageBox( creator_label, subset_name, diff --git a/openpype/tools/publisher/widgets/validations_widget.py b/openpype/tools/publisher/widgets/validations_widget.py index 798c1f9d92..e7ab4ecf5a 100644 --- a/openpype/tools/publisher/widgets/validations_widget.py +++ b/openpype/tools/publisher/widgets/validations_widget.py @@ -142,7 +142,7 @@ class ValidationErrorTitleWidget(QtWidgets.QWidget): self._help_text_by_instance_id = 
help_text_by_instance_id def sizeHint(self): - result = super().sizeHint() + result = super(ValidationErrorTitleWidget, self).sizeHint() expected_width = 0 for idx in range(self._view_layout.count()): expected_width += self._view_layout.itemAt(idx).sizeHint().width() diff --git a/openpype/tools/publisher/widgets/widgets.py b/openpype/tools/publisher/widgets/widgets.py index 5ced469b59..7096b9fb50 100644 --- a/openpype/tools/publisher/widgets/widgets.py +++ b/openpype/tools/publisher/widgets/widgets.py @@ -14,7 +14,8 @@ from openpype.tools.utils import ( PlaceholderLineEdit, IconButton, PixmapLabel, - BaseClickableFrame + BaseClickableFrame, + set_style_property, ) from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS from .assets_widget import AssetsDialog @@ -344,21 +345,42 @@ class AssetsField(BaseClickableFrame): def __init__(self, controller, parent): super(AssetsField, self).__init__(parent) + self.setObjectName("AssetNameInputWidget") - dialog = AssetsDialog(controller, self) + # Don't use 'self' for parent! 
+ # - this widget has specific styles + dialog = AssetsDialog(controller, parent) name_input = ClickableLineEdit(self) name_input.setObjectName("AssetNameInput") + icon_name = "fa.window-maximize" + icon = qtawesome.icon(icon_name, color="white") + icon_btn = QtWidgets.QPushButton(self) + icon_btn.setIcon(icon) + icon_btn.setObjectName("AssetNameInputButton") + layout = QtWidgets.QHBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(0) layout.addWidget(name_input, 1) + layout.addWidget(icon_btn, 0) + # Make sure all widgets are vertically extended to highest widget + for widget in ( + name_input, + icon_btn + ): + size_policy = widget.sizePolicy() + size_policy.setVerticalPolicy(size_policy.MinimumExpanding) + widget.setSizePolicy(size_policy) name_input.clicked.connect(self._mouse_release_callback) + icon_btn.clicked.connect(self._mouse_release_callback) dialog.finished.connect(self._on_dialog_finish) self._dialog = dialog self._name_input = name_input + self._icon_btn = icon_btn self._origin_value = [] self._origin_selection = [] @@ -406,10 +428,9 @@ class AssetsField(BaseClickableFrame): self._set_state_property(state) def _set_state_property(self, state): - current_value = self._name_input.property("state") - if current_value != state: - self._name_input.setProperty("state", state) - self._name_input.style().polish(self._name_input) + set_style_property(self, "state", state) + set_style_property(self._name_input, "state", state) + set_style_property(self._icon_btn, "state", state) def is_valid(self): """Is asset valid.""" @@ -842,6 +863,8 @@ class VariantInputWidget(PlaceholderLineEdit): self._ignore_value_change = True + self._has_value_changed = False + self._origin_value = list(variants) self._current_value = list(variants) @@ -892,11 +915,23 @@ class MultipleItemWidget(QtWidgets.QWidget): layout.setContentsMargins(0, 0, 0, 0) layout.addWidget(view) + model.rowsInserted.connect(self._on_insert) + self._view = view self._model = model 
self._value = [] + def _on_insert(self): + self._update_size() + + def _update_size(self): + model = self._view.model() + if model.rowCount() == 0: + return + height = self._view.sizeHintForRow(0) + self.setMaximumHeight(height + (2 * self._view.spacing())) + def showEvent(self, event): super(MultipleItemWidget, self).showEvent(event) tmp_item = None @@ -904,13 +939,15 @@ class MultipleItemWidget(QtWidgets.QWidget): # Add temp item to be able calculate maximum height of widget tmp_item = QtGui.QStandardItem("tmp") self._model.appendRow(tmp_item) - - height = self._view.sizeHintForRow(0) - self.setMaximumHeight(height + (2 * self._view.spacing())) + self._update_size() if tmp_item is not None: self._model.clear() + def resizeEvent(self, event): + super(MultipleItemWidget, self).resizeEvent(event) + self._update_size() + def set_value(self, value=None): """Set value/s of currently selected instance.""" if value is None: @@ -1235,7 +1272,11 @@ class CreatorAttrsWidget(QtWidgets.QWidget): ) content_widget = QtWidgets.QWidget(self._scroll_area) - content_layout = QtWidgets.QFormLayout(content_widget) + content_layout = QtWidgets.QGridLayout(content_widget) + content_layout.setColumnStretch(0, 0) + content_layout.setColumnStretch(1, 1) + + row = 0 for attr_def, attr_instances, values in result: widget = create_widget_for_attr_def(attr_def, content_widget) if attr_def.is_value_def: @@ -1246,10 +1287,28 @@ class CreatorAttrsWidget(QtWidgets.QWidget): else: widget.set_value(values, True) - label = attr_def.label or attr_def.key - content_layout.addRow(label, widget) - widget.value_changed.connect(self._input_value_changed) + expand_cols = 2 + if attr_def.is_value_def and attr_def.is_label_horizontal: + expand_cols = 1 + col_num = 2 - expand_cols + + label = attr_def.label or attr_def.key + if label: + label_widget = QtWidgets.QLabel(label, self) + content_layout.addWidget( + label_widget, row, 0, 1, expand_cols + ) + if not attr_def.is_label_horizontal: + row += 1 + + 
content_layout.addWidget( + widget, row, col_num, 1, expand_cols + ) + + row += 1 + + widget.value_changed.connect(self._input_value_changed) self._attr_def_id_to_instances[attr_def.id] = attr_instances self._attr_def_id_to_attr_def[attr_def.id] = attr_def diff --git a/openpype/tools/publisher/window.py b/openpype/tools/publisher/window.py index b74e95b227..90a36b4f01 100644 --- a/openpype/tools/publisher/window.py +++ b/openpype/tools/publisher/window.py @@ -83,8 +83,10 @@ class PublisherWindow(QtWidgets.QDialog): line_widget.setMinimumHeight(2) # Content + content_stacked_widget = QtWidgets.QWidget(self) + # Subset widget - subset_frame = QtWidgets.QFrame(self) + subset_frame = QtWidgets.QFrame(content_stacked_widget) subset_views_widget = BorderedLabelWidget( "Subsets to publish", subset_frame @@ -171,9 +173,12 @@ class PublisherWindow(QtWidgets.QDialog): subset_layout.addLayout(footer_layout, 0) # Create publish frame - publish_frame = PublishFrame(controller, self) + publish_frame = PublishFrame(controller, content_stacked_widget) - content_stacked_layout = QtWidgets.QStackedLayout() + content_stacked_layout = QtWidgets.QStackedLayout( + content_stacked_widget + ) + content_stacked_layout.setContentsMargins(0, 0, 0, 0) content_stacked_layout.setStackingMode( QtWidgets.QStackedLayout.StackAll ) @@ -186,7 +191,7 @@ class PublisherWindow(QtWidgets.QDialog): main_layout.setSpacing(0) main_layout.addWidget(header_widget, 0) main_layout.addWidget(line_widget, 0) - main_layout.addLayout(content_stacked_layout, 1) + main_layout.addWidget(content_stacked_widget, 1) creator_window = CreateDialog(controller, parent=self) @@ -228,6 +233,7 @@ class PublisherWindow(QtWidgets.QDialog): # Store header for TrayPublisher self._header_layout = header_layout + self._content_stacked_widget = content_stacked_widget self.content_stacked_layout = content_stacked_layout self.publish_frame = publish_frame self.subset_frame = subset_frame @@ -340,9 +346,23 @@ class 
PublisherWindow(QtWidgets.QDialog): def _set_publish_visibility(self, visible): if visible: widget = self.publish_frame + publish_frame_visible = True else: widget = self.subset_frame + publish_frame_visible = False self.content_stacked_layout.setCurrentWidget(widget) + self._set_publish_frame_visible(publish_frame_visible) + + def _set_publish_frame_visible(self, publish_frame_visible): + """Publish frame visibility has changed. + + Also used in TrayPublisher to be able handle start/end of publish + widget overlay. + """ + + # Hide creator dialog if visible + if publish_frame_visible and self.creator_window.isVisible(): + self.creator_window.close() def _on_reset_clicked(self): self.controller.reset() diff --git a/openpype/tools/resources/images/menu.png b/openpype/tools/resources/images/menu.png new file mode 100644 index 0000000000..14a991f092 Binary files /dev/null and b/openpype/tools/resources/images/menu.png differ diff --git a/openpype/tools/sceneinventory/model.py b/openpype/tools/sceneinventory/model.py index 091d6ca925..8d72020c98 100644 --- a/openpype/tools/sceneinventory/model.py +++ b/openpype/tools/sceneinventory/model.py @@ -7,8 +7,12 @@ from Qt import QtCore, QtGui import qtawesome from bson.objectid import ObjectId -from avalon import api, io, schema -from openpype.pipeline import HeroVersionType +from openpype.pipeline import ( + legacy_io, + schema, + HeroVersionType, + registered_host, +) from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, Item from openpype.modules import ModulesManager @@ -51,7 +55,7 @@ class InventoryModel(TreeModel): if not self.sync_enabled: return - project_name = io.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] active_site = sync_server.get_active_site(project_name) remote_site = sync_server.get_remote_site(project_name) @@ -181,7 +185,7 @@ class InventoryModel(TreeModel): def refresh(self, selected=None, items=None): """Refresh the 
model""" - host = api.registered_host() + host = registered_host() if not items: # for debugging or testing, injecting items from outside items = host.ls() @@ -300,32 +304,32 @@ class InventoryModel(TreeModel): for repre_id, group_dict in sorted(grouped.items()): group_items = group_dict["items"] # Get parenthood per group - representation = io.find_one({"_id": ObjectId(repre_id)}) + representation = legacy_io.find_one({"_id": ObjectId(repre_id)}) if not representation: not_found["representation"].append(group_items) not_found_ids.append(repre_id) continue - version = io.find_one({"_id": representation["parent"]}) + version = legacy_io.find_one({"_id": representation["parent"]}) if not version: not_found["version"].append(group_items) not_found_ids.append(repre_id) continue elif version["type"] == "hero_version": - _version = io.find_one({ + _version = legacy_io.find_one({ "_id": version["version_id"] }) version["name"] = HeroVersionType(_version["name"]) version["data"] = _version["data"] - subset = io.find_one({"_id": version["parent"]}) + subset = legacy_io.find_one({"_id": version["parent"]}) if not subset: not_found["subset"].append(group_items) not_found_ids.append(repre_id) continue - asset = io.find_one({"_id": subset["parent"]}) + asset = legacy_io.find_one({"_id": subset["parent"]}) if not asset: not_found["asset"].append(group_items) not_found_ids.append(repre_id) @@ -386,7 +390,7 @@ class InventoryModel(TreeModel): # Store the highest available version so the model can know # whether current version is currently up-to-date. 
- highest_version = io.find_one({ + highest_version = legacy_io.find_one({ "type": "version", "parent": version["parent"] }, sort=[("name", -1)]) diff --git a/openpype/tools/sceneinventory/switch_dialog.py b/openpype/tools/sceneinventory/switch_dialog.py index bb3e2615ac..b2d770330f 100644 --- a/openpype/tools/sceneinventory/switch_dialog.py +++ b/openpype/tools/sceneinventory/switch_dialog.py @@ -4,7 +4,7 @@ from Qt import QtWidgets, QtCore import qtawesome from bson.objectid import ObjectId -from avalon import io +from openpype.pipeline import legacy_io from openpype.pipeline.load import ( discover_loader_plugins, switch_container, @@ -151,7 +151,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): repre_ids.add(ObjectId(item["representation"])) content_loaders.add(item["loader"]) - repres = list(io.find({ + repres = list(legacy_io.find({ "type": {"$in": ["representation", "archived_representation"]}, "_id": {"$in": list(repre_ids)} })) @@ -179,7 +179,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): content_repres[repre_id] = repres_by_id[repre_id] version_ids.append(repre["parent"]) - versions = io.find({ + versions = legacy_io.find({ "type": {"$in": ["version", "hero_version"]}, "_id": {"$in": list(set(version_ids))} }) @@ -198,7 +198,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): else: subset_ids.append(content_versions[version_id]["parent"]) - subsets = io.find({ + subsets = legacy_io.find({ "type": {"$in": ["subset", "archived_subset"]}, "_id": {"$in": subset_ids} }) @@ -220,7 +220,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): asset_ids.append(subset["parent"]) content_subsets[subset_id] = subset - assets = io.find({ + assets = legacy_io.find({ "type": {"$in": ["asset", "archived_asset"]}, "_id": {"$in": list(asset_ids)} }) @@ -472,7 +472,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): # Prepare asset document if asset is selected asset_doc = None if selected_asset: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": 
selected_asset}, {"_id": True} ) @@ -523,7 +523,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): def _get_current_output_repre_ids_xxx( self, asset_doc, selected_subset, selected_repre ): - subset_doc = io.find_one( + subset_doc = legacy_io.find_one( { "type": "subset", "name": selected_subset, @@ -537,7 +537,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): if not version_doc: return [] - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": version_doc["_id"], @@ -548,7 +548,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): return [repre_doc["_id"] for repre_doc in repre_docs] def _get_current_output_repre_ids_xxo(self, asset_doc, selected_subset): - subset_doc = io.find_one( + subset_doc = legacy_io.find_one( { "type": "subset", "parent": asset_doc["_id"], @@ -563,7 +563,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): for repre_doc in self.content_repres.values(): repre_names.add(repre_doc["name"]) - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": subset_doc["_id"], @@ -578,7 +578,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): for subset_doc in self.content_subsets.values(): susbet_names.add(subset_doc["name"]) - subset_docs = io.find( + subset_docs = legacy_io.find( { "type": "subset", "name": {"$in": list(susbet_names)}, @@ -587,7 +587,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): {"_id": True} ) subset_ids = [subset_doc["_id"] for subset_doc in subset_docs] - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": {"$in": subset_ids}, @@ -606,7 +606,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): subset_name = subset_doc["name"] repres_by_subset_name[subset_name].add(repre_name) - subset_docs = list(io.find( + subset_docs = list(legacy_io.find( { "type": "subset", "parent": asset_doc["_id"], @@ -637,7 +637,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): "parent": version_id, "name": {"$in": list(repre_names)} }) - repre_docs = io.find( + 
repre_docs = legacy_io.find( {"$or": repre_or_query}, {"_id": True} ) @@ -646,7 +646,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): def _get_current_output_repre_ids_oxx( self, selected_subset, selected_repre ): - subset_docs = list(io.find({ + subset_docs = list(legacy_io.find({ "type": "subset", "parent": {"$in": list(self.content_assets.keys())}, "name": selected_subset @@ -657,7 +657,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): last_version["_id"] for last_version in last_versions_by_subset_id.values() ] - repre_docs = io.find({ + repre_docs = legacy_io.find({ "type": "representation", "parent": {"$in": last_version_ids}, "name": selected_repre @@ -666,7 +666,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): return [repre_doc["_id"] for repre_doc in repre_docs] def _get_current_output_repre_ids_oxo(self, selected_subset): - subset_docs = list(io.find( + subset_docs = list(legacy_io.find( { "type": "subset", "parent": {"$in": list(self.content_assets.keys())}, @@ -713,7 +713,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): "parent": last_version_id, "name": {"$in": list(repre_names)} }) - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "$or": repre_or_query @@ -724,7 +724,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): return [repre_doc["_id"] for repre_doc in repre_docs] def _get_current_output_repre_ids_oox(self, selected_repre): - repre_docs = io.find( + repre_docs = legacy_io.find( { "name": selected_repre, "parent": {"$in": list(self.content_versions.keys())} @@ -734,7 +734,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): return [repre_doc["_id"] for repre_doc in repre_docs] def _get_asset_box_values(self): - asset_docs = io.find( + asset_docs = legacy_io.find( {"type": "asset"}, {"_id": 1, "name": 1} ) @@ -742,7 +742,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): asset_doc["_id"]: asset_doc["name"] for asset_doc in asset_docs } - subsets = io.find( + subsets = legacy_io.find( { "type": "subset", "parent": {"$in": 
list(asset_names_by_id.keys())} @@ -762,12 +762,15 @@ class SwitchAssetDialog(QtWidgets.QDialog): def _get_subset_box_values(self): selected_asset = self._assets_box.get_valid_value() if selected_asset: - asset_doc = io.find_one({"type": "asset", "name": selected_asset}) + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": selected_asset + }) asset_ids = [asset_doc["_id"]] else: asset_ids = list(self.content_assets.keys()) - subsets = io.find( + subsets = legacy_io.find( { "type": "subset", "parent": {"$in": asset_ids} @@ -804,7 +807,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [ ] [ ] [?] if not selected_asset and not selected_subset: # Find all representations of selection's subsets - possible_repres = list(io.find( + possible_repres = list(legacy_io.find( { "type": "representation", "parent": {"$in": list(self.content_versions.keys())} @@ -833,11 +836,11 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [x] [x] [?] if selected_asset and selected_subset: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": 1} ) - subset_doc = io.find_one( + subset_doc = legacy_io.find_one( { "type": "subset", "name": selected_subset, @@ -848,7 +851,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): subset_id = subset_doc["_id"] last_versions_by_subset_id = self.find_last_versions([subset_id]) version_doc = last_versions_by_subset_id.get(subset_id) - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": version_doc["_id"] @@ -865,7 +868,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [x] [ ] [?] 
# If asset only is selected if selected_asset: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": 1} ) @@ -876,7 +879,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): subset_names = set() for subset_doc in self.content_subsets.values(): subset_names.add(subset_doc["name"]) - subset_docs = io.find( + subset_docs = legacy_io.find( { "type": "subset", "parent": asset_doc["_id"], @@ -900,7 +903,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): if not subset_id_by_version_id: return list() - repre_docs = list(io.find( + repre_docs = list(legacy_io.find( { "type": "representation", "parent": {"$in": list(subset_id_by_version_id.keys())} @@ -930,7 +933,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): return list(available_repres) # [ ] [x] [?] - subset_docs = list(io.find( + subset_docs = list(legacy_io.find( { "type": "subset", "parent": {"$in": list(self.content_assets.keys())}, @@ -957,7 +960,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): if not subset_id_by_version_id: return list() - repre_docs = list(io.find( + repre_docs = list(legacy_io.find( { "type": "representation", "parent": {"$in": list(subset_id_by_version_id.keys())} @@ -1013,11 +1016,11 @@ class SwitchAssetDialog(QtWidgets.QDialog): return # [x] [ ] [?] 
- asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": 1} ) - subset_docs = io.find( + subset_docs = legacy_io.find( {"type": "subset", "parent": asset_doc["_id"]}, {"name": 1} ) @@ -1048,7 +1051,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): }} ] last_versions_by_subset_id = dict() - for doc in io.aggregate(_pipeline): + for doc in legacy_io.aggregate(_pipeline): doc["parent"] = doc["_id"] doc["_id"] = doc.pop("_version_id") last_versions_by_subset_id[doc["parent"]] = doc @@ -1076,11 +1079,11 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [x] [x] [ ] if selected_asset is not None and selected_subset is not None: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": 1} ) - subset_doc = io.find_one( + subset_doc = legacy_io.find_one( { "type": "subset", "parent": asset_doc["_id"], @@ -1096,7 +1099,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): validation_state.repre_ok = False return - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": last_version["_id"] @@ -1116,11 +1119,11 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [x] [ ] [ ] if selected_asset is not None: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": 1} ) - subset_docs = list(io.find( + subset_docs = list(legacy_io.find( { "type": "subset", "parent": asset_doc["_id"] @@ -1142,7 +1145,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): version_id = last_version["_id"] subset_id_by_version_id[version_id] = subset_id - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": {"$in": list(subset_id_by_version_id.keys())} @@ -1173,7 +1176,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [ ] [x] [ ] # Subset documents - subset_docs = io.find( + subset_docs = legacy_io.find( { "type": "subset", "parent": {"$in": list(self.content_assets.keys())}, @@ -1194,7 
+1197,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): version_id = last_version["_id"] subset_id_by_version_id[version_id] = subset_id - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": {"$in": list(subset_id_by_version_id.keys())} @@ -1225,7 +1228,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): def _on_current_asset(self): # Set initial asset as current. - asset_name = io.Session["AVALON_ASSET"] + asset_name = legacy_io.Session["AVALON_ASSET"] index = self._assets_box.findText( asset_name, QtCore.Qt.MatchFixedString ) @@ -1243,7 +1246,10 @@ class SwitchAssetDialog(QtWidgets.QDialog): selected_representation = self._representations_box.get_valid_value() if selected_asset: - asset_doc = io.find_one({"type": "asset", "name": selected_asset}) + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": selected_asset + }) asset_docs_by_id = {asset_doc["_id"]: asset_doc} else: asset_docs_by_id = self.content_assets @@ -1262,7 +1268,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): if selected_subset: subset_query["name"] = selected_subset - subset_docs = list(io.find(subset_query)) + subset_docs = list(legacy_io.find(subset_query)) subset_ids = [] subset_docs_by_parent_and_name = collections.defaultdict(dict) for subset in subset_docs: @@ -1272,12 +1278,12 @@ class SwitchAssetDialog(QtWidgets.QDialog): subset_docs_by_parent_and_name[parent_id][name] = subset # versions - version_docs = list(io.find({ + version_docs = list(legacy_io.find({ "type": "version", "parent": {"$in": subset_ids} }, sort=[("name", -1)])) - hero_version_docs = list(io.find({ + hero_version_docs = list(legacy_io.find({ "type": "hero_version", "parent": {"$in": subset_ids} })) @@ -1297,7 +1303,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): parent_id = hero_version_doc["parent"] hero_version_docs_by_parent_id[parent_id] = hero_version_doc - repre_docs = io.find({ + repre_docs = legacy_io.find({ "type": "representation", "parent": {"$in": version_ids} 
}) diff --git a/openpype/tools/sceneinventory/view.py b/openpype/tools/sceneinventory/view.py index 2df6d00406..448e3f4e6f 100644 --- a/openpype/tools/sceneinventory/view.py +++ b/openpype/tools/sceneinventory/view.py @@ -6,10 +6,9 @@ from Qt import QtWidgets, QtCore import qtawesome from bson.objectid import ObjectId -from avalon import io - from openpype import style from openpype.pipeline import ( + legacy_io, HeroVersionType, update_container, remove_container, @@ -84,7 +83,7 @@ class SceneInventoryView(QtWidgets.QTreeView): if item_id not in repre_ids: repre_ids.append(item_id) - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "_id": {"$in": repre_ids} @@ -98,7 +97,7 @@ class SceneInventoryView(QtWidgets.QTreeView): if version_id not in version_ids: version_ids.append(version_id) - loaded_versions = io.find({ + loaded_versions = legacy_io.find({ "_id": {"$in": version_ids}, "type": {"$in": ["version", "hero_version"]} }) @@ -115,7 +114,7 @@ class SceneInventoryView(QtWidgets.QTreeView): if parent_id not in version_parents: version_parents.append(parent_id) - all_versions = io.find({ + all_versions = legacy_io.find({ "type": {"$in": ["hero_version", "version"]}, "parent": {"$in": version_parents} }) @@ -151,7 +150,7 @@ class SceneInventoryView(QtWidgets.QTreeView): if item_id not in repre_ids: repre_ids.append(item_id) - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "_id": {"$in": repre_ids} @@ -166,7 +165,7 @@ class SceneInventoryView(QtWidgets.QTreeView): version_id_by_repre_id[repre_doc["_id"]] = version_id if version_id not in version_ids: version_ids.append(version_id) - hero_versions = io.find( + hero_versions = legacy_io.find( { "_id": {"$in": version_ids}, "type": "hero_version" @@ -184,7 +183,7 @@ class SceneInventoryView(QtWidgets.QTreeView): if current_version_id == hero_version_id: version_id_by_repre_id[_repre_id] = version_id - version_docs = io.find( + version_docs = 
legacy_io.find( { "_id": {"$in": list(version_ids)}, "type": "version" @@ -367,11 +366,11 @@ class SceneInventoryView(QtWidgets.QTreeView): repre_ids (list) side (str): 'active_site'|'remote_site' """ - project_name = io.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] active_site = self.sync_server.get_active_site(project_name) remote_site = self.sync_server.get_remote_site(project_name) - repre_docs = io.find({ + repre_docs = legacy_io.find({ "type": "representation", "_id": {"$in": repre_ids} }) @@ -661,12 +660,12 @@ class SceneInventoryView(QtWidgets.QTreeView): # Get available versions for active representation representation_id = ObjectId(active["representation"]) - representation = io.find_one({"_id": representation_id}) - version = io.find_one({ + representation = legacy_io.find_one({"_id": representation_id}) + version = legacy_io.find_one({ "_id": representation["parent"] }) - versions = list(io.find( + versions = list(legacy_io.find( { "parent": version["parent"], "type": "version" @@ -674,7 +673,7 @@ class SceneInventoryView(QtWidgets.QTreeView): sort=[("name", 1)] )) - hero_version = io.find_one({ + hero_version = legacy_io.find_one({ "parent": version["parent"], "type": "hero_version" }) diff --git a/openpype/tools/sceneinventory/window.py b/openpype/tools/sceneinventory/window.py index b40fbb69e4..054c2a2daa 100644 --- a/openpype/tools/sceneinventory/window.py +++ b/openpype/tools/sceneinventory/window.py @@ -3,8 +3,8 @@ import sys from Qt import QtWidgets, QtCore import qtawesome -from avalon import io, api +from openpype.pipeline import legacy_io from openpype import style from openpype.tools.utils.delegates import VersionDelegate from openpype.tools.utils.lib import ( @@ -72,7 +72,7 @@ class SceneInventoryWindow(QtWidgets.QDialog): control_layout.addWidget(refresh_button) # endregion control - family_config_cache = FamilyConfigCache(io) + family_config_cache = FamilyConfigCache(legacy_io) model = 
InventoryModel(family_config_cache) proxy = FilterProxyModel() @@ -91,7 +91,7 @@ class SceneInventoryWindow(QtWidgets.QDialog): view.setColumnWidth(4, 100) # namespace # apply delegates - version_delegate = VersionDelegate(io, self) + version_delegate = VersionDelegate(legacy_io, self) column = model.Columns.index("version") view.setItemDelegateForColumn(column, version_delegate) @@ -191,17 +191,18 @@ def show(root=None, debug=False, parent=None, items=None): pass if debug is True: - io.install() + legacy_io.install() if not os.environ.get("AVALON_PROJECT"): any_project = next( - project for project in io.projects() + project for project in legacy_io.projects() if project.get("active", True) is not False ) - api.Session["AVALON_PROJECT"] = any_project["name"] + project_name = any_project["name"] else: - api.Session["AVALON_PROJECT"] = os.environ.get("AVALON_PROJECT") + project_name = os.environ.get("AVALON_PROJECT") + legacy_io.Session["AVALON_PROJECT"] = project_name with qt_app_context(): window = SceneInventoryWindow(parent) diff --git a/openpype/tools/settings/local_settings/constants.py b/openpype/tools/settings/local_settings/constants.py index 1836c579af..16f87b6f05 100644 --- a/openpype/tools/settings/local_settings/constants.py +++ b/openpype/tools/settings/local_settings/constants.py @@ -9,6 +9,7 @@ LABEL_DISCARD_CHANGES = "Discard changes" # TODO move to settings constants LOCAL_GENERAL_KEY = "general" LOCAL_PROJECTS_KEY = "projects" +LOCAL_ENV_KEY = "environments" LOCAL_APPS_KEY = "applications" # Roots key constant diff --git a/openpype/tools/settings/local_settings/environments_widget.py b/openpype/tools/settings/local_settings/environments_widget.py new file mode 100644 index 0000000000..14ca517851 --- /dev/null +++ b/openpype/tools/settings/local_settings/environments_widget.py @@ -0,0 +1,93 @@ +from Qt import QtWidgets + +from openpype.tools.utils import PlaceholderLineEdit + + +class LocalEnvironmentsWidgets(QtWidgets.QWidget): + def 
__init__(self, system_settings_entity, parent): + super(LocalEnvironmentsWidgets, self).__init__(parent) + + self._widgets_by_env_key = {} + self.system_settings_entity = system_settings_entity + + content_widget = QtWidgets.QWidget(self) + content_layout = QtWidgets.QGridLayout(content_widget) + content_layout.setContentsMargins(0, 0, 0, 0) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + + self._layout = layout + self._content_layout = content_layout + self._content_widget = content_widget + + def _clear_layout(self, layout): + while layout.count() > 0: + item = layout.itemAt(0) + widget = item.widget() + layout.removeItem(item) + if widget is not None: + widget.setVisible(False) + widget.deleteLater() + + def _reset_env_widgets(self): + self._clear_layout(self._content_layout) + self._clear_layout(self._layout) + + content_widget = QtWidgets.QWidget(self) + content_layout = QtWidgets.QGridLayout(content_widget) + content_layout.setContentsMargins(0, 0, 0, 0) + white_list_entity = ( + self.system_settings_entity["general"]["local_env_white_list"] + ) + row = -1 + for row, item in enumerate(white_list_entity): + key = item.value + label_widget = QtWidgets.QLabel(key, self) + input_widget = PlaceholderLineEdit(self) + input_widget.setPlaceholderText("< Keep studio value >") + + content_layout.addWidget(label_widget, row, 0) + content_layout.addWidget(input_widget, row, 1) + + self._widgets_by_env_key[key] = input_widget + + if row < 0: + label_widget = QtWidgets.QLabel( + ( + "Your studio does not allow to change" + " Environment variables locally." 
+ ), + self + ) + content_layout.addWidget(label_widget, 0, 0) + content_layout.setColumnStretch(0, 1) + + else: + content_layout.setColumnStretch(0, 0) + content_layout.setColumnStretch(1, 1) + + self._layout.addWidget(content_widget, 1) + + self._content_layout = content_layout + self._content_widget = content_widget + + def update_local_settings(self, value): + if not value: + value = {} + + self._reset_env_widgets() + + for env_key, widget in self._widgets_by_env_key.items(): + env_value = value.get(env_key) or "" + widget.setText(env_value) + + def settings_value(self): + output = {} + for env_key, widget in self._widgets_by_env_key.items(): + value = widget.text() + if value: + output[env_key] = value + if not output: + return None + return output diff --git a/openpype/tools/settings/local_settings/window.py b/openpype/tools/settings/local_settings/window.py index fb47e69a17..6a2db3fff5 100644 --- a/openpype/tools/settings/local_settings/window.py +++ b/openpype/tools/settings/local_settings/window.py @@ -8,6 +8,7 @@ from openpype.settings.lib import ( save_local_settings ) from openpype.tools.settings import CHILD_OFFSET +from openpype.tools.utils import MessageOverlayObject from openpype.api import ( Logger, SystemSettings, @@ -25,11 +26,13 @@ from .experimental_widget import ( LOCAL_EXPERIMENTAL_KEY ) from .apps_widget import LocalApplicationsWidgets +from .environments_widget import LocalEnvironmentsWidgets from .projects_widget import ProjectSettingsWidget from .constants import ( LOCAL_GENERAL_KEY, LOCAL_PROJECTS_KEY, + LOCAL_ENV_KEY, LOCAL_APPS_KEY ) @@ -49,18 +52,20 @@ class LocalSettingsWidget(QtWidgets.QWidget): self.pype_mongo_widget = None self.general_widget = None self.experimental_widget = None + self.envs_widget = None self.apps_widget = None self.projects_widget = None - self._create_pype_mongo_ui() + self._create_mongo_url_ui() self._create_general_ui() self._create_experimental_ui() + self._create_environments_ui() self._create_app_ui() 
self._create_project_ui() self.main_layout.addStretch(1) - def _create_pype_mongo_ui(self): + def _create_mongo_url_ui(self): pype_mongo_expand_widget = ExpandingWidget("OpenPype Mongo URL", self) pype_mongo_content = QtWidgets.QWidget(self) pype_mongo_layout = QtWidgets.QVBoxLayout(pype_mongo_content) @@ -110,6 +115,22 @@ class LocalSettingsWidget(QtWidgets.QWidget): self.experimental_widget = experimental_widget + def _create_environments_ui(self): + envs_expand_widget = ExpandingWidget("Environments", self) + envs_content = QtWidgets.QWidget(self) + envs_layout = QtWidgets.QVBoxLayout(envs_content) + envs_layout.setContentsMargins(CHILD_OFFSET, 5, 0, 0) + envs_expand_widget.set_content_widget(envs_content) + + envs_widget = LocalEnvironmentsWidgets( + self.system_settings, envs_content + ) + envs_layout.addWidget(envs_widget) + + self.main_layout.addWidget(envs_expand_widget) + + self.envs_widget = envs_widget + def _create_app_ui(self): # Applications app_expand_widget = ExpandingWidget("Applications", self) @@ -154,6 +175,9 @@ class LocalSettingsWidget(QtWidgets.QWidget): self.general_widget.update_local_settings( value.get(LOCAL_GENERAL_KEY) ) + self.envs_widget.update_local_settings( + value.get(LOCAL_ENV_KEY) + ) self.app_widget.update_local_settings( value.get(LOCAL_APPS_KEY) ) @@ -170,6 +194,10 @@ class LocalSettingsWidget(QtWidgets.QWidget): if general_value: output[LOCAL_GENERAL_KEY] = general_value + envs_value = self.envs_widget.settings_value() + if envs_value: + output[LOCAL_ENV_KEY] = envs_value + app_value = self.app_widget.settings_value() if app_value: output[LOCAL_APPS_KEY] = app_value @@ -194,6 +222,8 @@ class LocalSettingsWindow(QtWidgets.QWidget): self.setWindowTitle("OpenPype Local settings") + overlay_object = MessageOverlayObject(self) + stylesheet = style.load_stylesheet() self.setStyleSheet(stylesheet) self.setWindowIcon(QtGui.QIcon(style.app_icon_path())) @@ -220,6 +250,7 @@ class LocalSettingsWindow(QtWidgets.QWidget): 
save_btn.clicked.connect(self._on_save_clicked) reset_btn.clicked.connect(self._on_reset_clicked) + self._overlay_object = overlay_object # Do not create local settings widget in init phase as it's using # settings objects that must be OK to be able create this widget # - we want to show dialog if anything goes wrong @@ -285,8 +316,10 @@ class LocalSettingsWindow(QtWidgets.QWidget): def _on_reset_clicked(self): self.reset() + self._overlay_object.add_message("Refreshed...") def _on_save_clicked(self): value = self._settings_widget.settings_value() save_local_settings(value) + self._overlay_object.add_message("Saved...", message_type="success") self.reset() diff --git a/openpype/tools/settings/settings/README.md b/openpype/tools/settings/settings/README.md index 1c916ddff2..c29664a907 100644 --- a/openpype/tools/settings/settings/README.md +++ b/openpype/tools/settings/settings/README.md @@ -44,8 +44,7 @@ }, { "type": "raw-json", "label": "{host_label} Environments", - "key": "{host_name}_environments", - "env_group_key": "{host_name}" + "key": "{host_name}_environments" }, { "type": "path-widget", "key": "{host_name}_executables", diff --git a/openpype/tools/settings/settings/base.py b/openpype/tools/settings/settings/base.py index bd48b3a966..44ec09b2ca 100644 --- a/openpype/tools/settings/settings/base.py +++ b/openpype/tools/settings/settings/base.py @@ -567,7 +567,9 @@ class GUIWidget(BaseWidget): def _create_label_ui(self): label = self.entity["label"] + word_wrap = self.entity.schema_data.get("word_wrap", False) label_widget = QtWidgets.QLabel(label, self) + label_widget.setWordWrap(word_wrap) label_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) label_widget.setObjectName("SettingsLabel") label_widget.linkActivated.connect(self._on_link_activate) diff --git a/openpype/tools/settings/settings/dict_mutable_widget.py b/openpype/tools/settings/settings/dict_mutable_widget.py index 6489266131..1c704b3cd5 100644 --- 
a/openpype/tools/settings/settings/dict_mutable_widget.py +++ b/openpype/tools/settings/settings/dict_mutable_widget.py @@ -465,10 +465,6 @@ class ModifiableDictItem(QtWidgets.QWidget): self.entity_widget.change_key(key, self) self.update_style() - @property - def value_is_env_group(self): - return self.entity_widget.value_is_env_group - def update_key_label(self): if not self.collapsible_key: return diff --git a/openpype/tools/settings/settings/widgets.py b/openpype/tools/settings/settings/widgets.py index 6db001f2f6..45c21d5685 100644 --- a/openpype/tools/settings/settings/widgets.py +++ b/openpype/tools/settings/settings/widgets.py @@ -1,13 +1,9 @@ -import os import copy import uuid from Qt import QtWidgets, QtCore, QtGui import qtawesome -from avalon.mongodb import ( - AvalonMongoConnection, - AvalonMongoDB -) +from openpype.pipeline import AvalonMongoDB from openpype.style import get_objected_colors from openpype.tools.utils.widgets import ImageButton from openpype.tools.utils.lib import paint_image_with_color @@ -1209,15 +1205,6 @@ class ProjectListWidget(QtWidgets.QWidget): selected_project = index.data(PROJECT_NAME_ROLE) break - mongo_url = os.environ["OPENPYPE_MONGO"] - - # Force uninstall of whole avalon connection if url does not match - # to current environment and set it as environment - if mongo_url != os.environ["AVALON_MONGO"]: - AvalonMongoConnection.uninstall(self.dbcon, force=True) - os.environ["AVALON_MONGO"] = mongo_url - self.dbcon = None - if not self.dbcon: try: self.dbcon = AvalonMongoDB() diff --git a/openpype/tools/standalonepublish/app.py b/openpype/tools/standalonepublish/app.py index 3630d92c83..1ad5cd119e 100644 --- a/openpype/tools/standalonepublish/app.py +++ b/openpype/tools/standalonepublish/app.py @@ -12,7 +12,7 @@ from .widgets import ( from .widgets.constants import HOST_NAME from openpype import style from openpype.api import resources -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from 
openpype.modules import ModulesManager diff --git a/openpype/tools/standalonepublish/publish.py b/openpype/tools/standalonepublish/publish.py index 582e7eccf8..e1e9edebb9 100644 --- a/openpype/tools/standalonepublish/publish.py +++ b/openpype/tools/standalonepublish/publish.py @@ -1,14 +1,14 @@ import os import sys -import openpype import pyblish.api +from openpype.pipeline import install_openpype_plugins from openpype.tools.utils.host_tools import show_publish def main(env): # Registers pype's Global pyblish plugins - openpype.install() + install_openpype_plugins() # Register additional paths addition_paths_str = env.get("PUBLISH_PATHS") or "" diff --git a/openpype/tools/standalonepublish/widgets/widget_components.py b/openpype/tools/standalonepublish/widgets/widget_components.py index 4d7f94f825..b3280089c3 100644 --- a/openpype/tools/standalonepublish/widgets/widget_components.py +++ b/openpype/tools/standalonepublish/widgets/widget_components.py @@ -5,16 +5,18 @@ import random import string from Qt import QtWidgets, QtCore -from . import DropDataFrame -from .constants import HOST_NAME -from avalon import io + from openpype.api import execute, Logger +from openpype.pipeline import legacy_io from openpype.lib import ( get_openpype_execute_args, apply_project_environments_value ) -log = Logger().get_logger("standalonepublisher") +from . 
import DropDataFrame +from .constants import HOST_NAME + +log = Logger.get_logger("standalonepublisher") class ComponentsWidget(QtWidgets.QWidget): @@ -152,18 +154,18 @@ def set_context(project, asset, task): :type asset: str ''' os.environ["AVALON_PROJECT"] = project - io.Session["AVALON_PROJECT"] = project + legacy_io.Session["AVALON_PROJECT"] = project os.environ["AVALON_ASSET"] = asset - io.Session["AVALON_ASSET"] = asset + legacy_io.Session["AVALON_ASSET"] = asset if not task: task = '' os.environ["AVALON_TASK"] = task - io.Session["AVALON_TASK"] = task + legacy_io.Session["AVALON_TASK"] = task - io.Session["current_dir"] = os.path.normpath(os.getcwd()) + legacy_io.Session["current_dir"] = os.path.normpath(os.getcwd()) os.environ["AVALON_APP"] = HOST_NAME - io.Session["AVALON_APP"] = HOST_NAME + legacy_io.Session["AVALON_APP"] = HOST_NAME def cli_publish(data, publish_paths, gui=True): @@ -171,7 +173,7 @@ def cli_publish(data, publish_paths, gui=True): os.path.dirname(os.path.dirname(__file__)), "publish.py" ) - io.install() + legacy_io.install() # Create hash name folder in temp chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) @@ -200,9 +202,10 @@ def cli_publish(data, publish_paths, gui=True): if os.path.exists(json_data_path): with open(json_data_path, "r") as f: result = json.load(f) + os.remove(json_data_path) log.info(f"Publish result: {result}") - io.uninstall() + legacy_io.uninstall() return False diff --git a/openpype/tools/standalonepublish/widgets/widget_drop_frame.py b/openpype/tools/standalonepublish/widgets/widget_drop_frame.py index c1c59d65b6..f8a8273b26 100644 --- a/openpype/tools/standalonepublish/widgets/widget_drop_frame.py +++ b/openpype/tools/standalonepublish/widgets/widget_drop_frame.py @@ -37,6 +37,10 @@ class DropDataFrame(QtWidgets.QFrame): "video_file": video_extensions } + sequence_types = [ + ".bgeo", ".vdb", ".bgeosc", ".bgeogz" + ] + def __init__(self, parent): super().__init__() self.parent_widget = 
parent @@ -176,7 +180,7 @@ class DropDataFrame(QtWidgets.QFrame): non_collectionable_paths = [] for path in in_paths: ext = os.path.splitext(path)[1] - if ext in self.image_extensions: + if ext in self.image_extensions or ext in self.sequence_types: collectionable_paths.append(path) else: non_collectionable_paths.append(path) @@ -289,7 +293,7 @@ class DropDataFrame(QtWidgets.QFrame): def get_file_data(self, data): filepath = data['files'][0] ext = data['ext'].lower() - output = {} + output = {"fps": None} file_info = None if 'file_info' in data: diff --git a/openpype/tools/standalonepublish/widgets/widget_family_desc.py b/openpype/tools/standalonepublish/widgets/widget_family_desc.py index 79681615b9..2095b332bd 100644 --- a/openpype/tools/standalonepublish/widgets/widget_family_desc.py +++ b/openpype/tools/standalonepublish/widgets/widget_family_desc.py @@ -52,6 +52,7 @@ class FamilyDescriptionWidget(QtWidgets.QWidget): family.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft) help = QtWidgets.QLabel("help") + help.setWordWrap(True) help.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft) label_layout.addWidget(family) diff --git a/openpype/tools/stdout_broker/window.py b/openpype/tools/stdout_broker/window.py index a2190e0491..f5720ca05b 100644 --- a/openpype/tools/stdout_broker/window.py +++ b/openpype/tools/stdout_broker/window.py @@ -1,7 +1,9 @@ -from avalon import style -from Qt import QtWidgets, QtCore -import collections import re +import collections + +from Qt import QtWidgets + +from openpype import style class ConsoleDialog(QtWidgets.QDialog): diff --git a/openpype/tools/subsetmanager/model.py b/openpype/tools/subsetmanager/model.py index b76c3c2343..760a167b42 100644 --- a/openpype/tools/subsetmanager/model.py +++ b/openpype/tools/subsetmanager/model.py @@ -2,7 +2,7 @@ import uuid from Qt import QtCore, QtGui -from avalon import api +from openpype.pipeline import registered_host ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 @@ -21,7 +21,7 @@ class 
InstanceModel(QtGui.QStandardItemModel): self._instances_by_item_id = {} instances = None - host = api.registered_host() + host = registered_host() list_instances = getattr(host, "list_instances", None) if list_instances: instances = list_instances() diff --git a/openpype/tools/subsetmanager/window.py b/openpype/tools/subsetmanager/window.py index a53af52174..6314e67015 100644 --- a/openpype/tools/subsetmanager/window.py +++ b/openpype/tools/subsetmanager/window.py @@ -4,9 +4,8 @@ import sys from Qt import QtWidgets, QtCore import qtawesome -from avalon import api - from openpype import style +from openpype.pipeline import registered_host from openpype.tools.utils import PlaceholderLineEdit from openpype.tools.utils.lib import ( iter_model_rows, @@ -106,7 +105,7 @@ class SubsetManagerWindow(QtWidgets.QDialog): self._details_widget.set_details(container, item_id) def _on_save(self): - host = api.registered_host() + host = registered_host() if not hasattr(host, "save_instances"): print("BUG: Host does not have \"save_instances\" method") return @@ -141,7 +140,7 @@ class SubsetManagerWindow(QtWidgets.QDialog): # Prepare menu menu = QtWidgets.QMenu(self) actions = [] - host = api.registered_host() + host = registered_host() if hasattr(host, "remove_instance"): action = QtWidgets.QAction("Remove instance", menu) action.setData(host.remove_instance) @@ -176,7 +175,7 @@ class SubsetManagerWindow(QtWidgets.QDialog): self._details_widget.set_details(None, None) self._model.refresh() - host = api.registered_host() + host = registered_host() dev_mode = os.environ.get("AVALON_DEVELOP_MODE") or "" editable = False if dev_mode.lower() in ("1", "yes", "true", "on"): diff --git a/openpype/tools/texture_copy/app.py b/openpype/tools/texture_copy/app.py index 0c3c260e51..fd8d6dc02e 100644 --- a/openpype/tools/texture_copy/app.py +++ b/openpype/tools/texture_copy/app.py @@ -1,14 +1,12 @@ import os import re import click -from avalon import io, api -from pprint import pprint + +import 
speedcopy from openpype.lib import Terminal from openpype.api import Anatomy - -import shutil -import speedcopy +from openpype.pipeline import legacy_io t = Terminal() @@ -20,8 +18,8 @@ texture_extensions = ['.tif', '.tiff', '.jpg', '.jpeg', '.tx', '.png', '.tga', class TextureCopy: def __init__(self): - if not io.Session: - io.install() + if not legacy_io.Session: + legacy_io.install() def _get_textures(self, path): textures = [] @@ -32,14 +30,14 @@ class TextureCopy: return textures def _get_project(self, project_name): - project = io.find_one({ + project = legacy_io.find_one({ 'type': 'project', 'name': project_name }) return project def _get_asset(self, asset_name): - asset = io.find_one({ + asset = legacy_io.find_one({ 'type': 'asset', 'name': asset_name }) diff --git a/openpype/tools/traypublisher/window.py b/openpype/tools/traypublisher/window.py index d0453c4f23..5934c4aa8a 100644 --- a/openpype/tools/traypublisher/window.py +++ b/openpype/tools/traypublisher/window.py @@ -8,8 +8,10 @@ publishing plugins. 
from Qt import QtWidgets, QtCore -import avalon.api -from avalon.api import AvalonMongoDB +from openpype.pipeline import ( + install_host, + AvalonMongoDB, +) from openpype.hosts.traypublisher import ( api as traypublisher ) @@ -52,8 +54,11 @@ class StandaloneOverlayWidget(QtWidgets.QFrame): ) confirm_btn = QtWidgets.QPushButton("Confirm", content_widget) + cancel_btn = QtWidgets.QPushButton("Cancel", content_widget) + cancel_btn.setVisible(False) btns_layout = QtWidgets.QHBoxLayout() btns_layout.addStretch(1) + btns_layout.addWidget(cancel_btn, 0) btns_layout.addWidget(confirm_btn, 0) content_layout = QtWidgets.QVBoxLayout(content_widget) @@ -75,15 +80,19 @@ class StandaloneOverlayWidget(QtWidgets.QFrame): projects_view.doubleClicked.connect(self._on_double_click) confirm_btn.clicked.connect(self._on_confirm_click) + cancel_btn.clicked.connect(self._on_cancel_click) self._projects_view = projects_view self._projects_model = projects_model + self._cancel_btn = cancel_btn self._confirm_btn = confirm_btn self._publisher_window = publisher_window + self._project_name = None def showEvent(self, event): self._projects_model.refresh() + self._cancel_btn.setVisible(self._project_name is not None) super(StandaloneOverlayWidget, self).showEvent(event) def _on_double_click(self): @@ -92,13 +101,18 @@ class StandaloneOverlayWidget(QtWidgets.QFrame): def _on_confirm_click(self): self.set_selected_project() + def _on_cancel_click(self): + self._set_project(self._project_name) + def set_selected_project(self): index = self._projects_view.currentIndex() project_name = index.data(PROJECT_NAME_ROLE) - if not project_name: - return + if project_name: + self._set_project(project_name) + def _set_project(self, project_name): + self._project_name = project_name traypublisher.set_project_name(project_name) self.setVisible(False) self.project_selected.emit(project_name) @@ -108,6 +122,13 @@ class TrayPublishWindow(PublisherWindow): def __init__(self, *args, **kwargs): 
super(TrayPublishWindow, self).__init__(reset_on_show=False) + flags = self.windowFlags() + # Disable always on top hint + if flags & QtCore.Qt.WindowStaysOnTopHint: + flags ^= QtCore.Qt.WindowStaysOnTopHint + + self.setWindowFlags(flags) + overlay_widget = StandaloneOverlayWidget(self) btns_widget = QtWidgets.QWidget(self) @@ -134,6 +155,12 @@ class TrayPublishWindow(PublisherWindow): self._back_to_overlay_btn = back_to_overlay_btn self._overlay_widget = overlay_widget + def _set_publish_frame_visible(self, publish_frame_visible): + super(TrayPublishWindow, self)._set_publish_frame_visible( + publish_frame_visible + ) + self._back_to_overlay_btn.setVisible(not publish_frame_visible) + def _on_back_to_overlay(self): self._overlay_widget.setVisible(True) self._resize_overlay() @@ -163,7 +190,7 @@ class TrayPublishWindow(PublisherWindow): def main(): - avalon.api.install(traypublisher) + install_host(traypublisher) app = QtWidgets.QApplication([]) window = TrayPublishWindow() window.show() diff --git a/openpype/tools/utils/__init__.py b/openpype/tools/utils/__init__.py index ea1133c442..0f367510bd 100644 --- a/openpype/tools/utils/__init__.py +++ b/openpype/tools/utils/__init__.py @@ -22,6 +22,10 @@ from .lib import ( from .models import ( RecursiveSortFilterProxyModel, ) +from .overlay_messages import ( + MessageOverlayObject, +) + __all__ = ( "PlaceholderLineEdit", @@ -45,4 +49,6 @@ __all__ = ( "get_asset_icon", "RecursiveSortFilterProxyModel", + + "MessageOverlayObject", ) diff --git a/openpype/tools/utils/assets_widget.py b/openpype/tools/utils/assets_widget.py index 3d4efcdd4d..82bdcd63a2 100644 --- a/openpype/tools/utils/assets_widget.py +++ b/openpype/tools/utils/assets_widget.py @@ -494,8 +494,6 @@ class AssetModel(QtGui.QStandardItemModel): # Remove cache of removed items for asset_id in removed_asset_ids: self._items_by_asset_id.pop(asset_id) - if asset_id in self._items_with_color_by_id: - self._items_with_color_by_id.pop(asset_id) # Refresh data # - all 
items refresh all data except id @@ -589,10 +587,12 @@ class AssetsWidget(QtWidgets.QWidget): view = AssetsView(self) view.setModel(proxy) + header_widget = QtWidgets.QWidget(self) + current_asset_icon = qtawesome.icon( "fa.arrow-down", color=get_default_tools_icon_color() ) - current_asset_btn = QtWidgets.QPushButton(self) + current_asset_btn = QtWidgets.QPushButton(header_widget) current_asset_btn.setIcon(current_asset_icon) current_asset_btn.setToolTip("Go to Asset from current Session") # Hide by default @@ -601,25 +601,35 @@ class AssetsWidget(QtWidgets.QWidget): refresh_icon = qtawesome.icon( "fa.refresh", color=get_default_tools_icon_color() ) - refresh_btn = QtWidgets.QPushButton(self) + refresh_btn = QtWidgets.QPushButton(header_widget) refresh_btn.setIcon(refresh_icon) refresh_btn.setToolTip("Refresh items") - filter_input = PlaceholderLineEdit(self) + filter_input = PlaceholderLineEdit(header_widget) filter_input.setPlaceholderText("Filter assets..") # Header - header_layout = QtWidgets.QHBoxLayout() + header_layout = QtWidgets.QHBoxLayout(header_widget) + header_layout.setContentsMargins(0, 0, 0, 0) header_layout.addWidget(filter_input) header_layout.addWidget(current_asset_btn) header_layout.addWidget(refresh_btn) + # Make header widgets expand vertically if there is a place + for widget in ( + current_asset_btn, + refresh_btn, + filter_input, + ): + size_policy = widget.sizePolicy() + size_policy.setVerticalPolicy(size_policy.MinimumExpanding) + widget.setSizePolicy(size_policy) + # Layout layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) - layout.setSpacing(4) - layout.addLayout(header_layout) - layout.addWidget(view) + layout.addWidget(header_widget, 0) + layout.addWidget(view, 1) # Signals/Slots filter_input.textChanged.connect(self._on_filter_text_change) @@ -630,6 +640,8 @@ class AssetsWidget(QtWidgets.QWidget): current_asset_btn.clicked.connect(self._on_current_asset_click) view.doubleClicked.connect(self.double_clicked) 
+ self._header_widget = header_widget + self._filter_input = filter_input self._refresh_btn = refresh_btn self._current_asset_btn = current_asset_btn self._model = model @@ -637,8 +649,14 @@ class AssetsWidget(QtWidgets.QWidget): self._view = view self._last_project_name = None + self._last_btns_height = None + self.model_selection = {} + @property + def header_widget(self): + return self._header_widget + def _create_source_model(self): model = AssetModel(dbcon=self.dbcon, parent=self) model.refreshed.connect(self._on_model_refresh) @@ -669,6 +687,7 @@ class AssetsWidget(QtWidgets.QWidget): This separation gives ability to override this method and use it in differnt way. """ + self.set_current_session_asset() def set_current_session_asset(self): @@ -681,6 +700,7 @@ class AssetsWidget(QtWidgets.QWidget): Some tools may have their global refresh button or do not support refresh at all. """ + if visible is None: visible = not self._refresh_btn.isVisible() self._refresh_btn.setVisible(visible) @@ -690,6 +710,7 @@ class AssetsWidget(QtWidgets.QWidget): Not all tools support using of current context asset. """ + if visible is None: visible = not self._current_asset_btn.isVisible() self._current_asset_btn.setVisible(visible) @@ -723,6 +744,7 @@ class AssetsWidget(QtWidgets.QWidget): so if you're modifying model keep in mind that this method should be called when refresh is done. """ + self._proxy.sort(0) self._set_loading_state(loading=False, empty=not has_item) self.refreshed.emit() @@ -767,6 +789,7 @@ class SingleSelectAssetsWidget(AssetsWidget): Contain single selection specific api methods. 
""" + def get_selected_asset_id(self): """Currently selected asset id.""" selection_model = self._view.selectionModel() diff --git a/openpype/tools/utils/host_tools.py b/openpype/tools/utils/host_tools.py index 2d9733ec94..d8f4570120 100644 --- a/openpype/tools/utils/host_tools.py +++ b/openpype/tools/utils/host_tools.py @@ -4,8 +4,14 @@ It is possible to create `HostToolsHelper` in host implementation or use singleton approach with global functions (using helper anyway). """ import os -import avalon.api + import pyblish.api + +from openpype.pipeline import ( + registered_host, + legacy_io, +) + from .lib import qt_app_context @@ -47,7 +53,7 @@ class HostToolsHelper: Window, validate_host_requirements ) # Host validation - host = avalon.api.registered_host() + host = registered_host() validate_host_requirements(host) workfiles_window = Window(parent=parent) @@ -72,8 +78,8 @@ class HostToolsHelper: if use_context: context = { - "asset": avalon.api.Session["AVALON_ASSET"], - "task": avalon.api.Session["AVALON_TASK"] + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] } workfiles_tool.set_context(context) @@ -104,7 +110,7 @@ class HostToolsHelper: use_context = False if use_context: - context = {"asset": avalon.api.Session["AVALON_ASSET"]} + context = {"asset": legacy_io.Session["AVALON_ASSET"]} loader_tool.set_context(context, refresh=True) else: loader_tool.refresh() diff --git a/openpype/tools/utils/lib.py b/openpype/tools/utils/lib.py index 422d0f5389..20fea6600b 100644 --- a/openpype/tools/utils/lib.py +++ b/openpype/tools/utils/lib.py @@ -6,16 +6,17 @@ import collections from Qt import QtWidgets, QtCore, QtGui import qtawesome -import avalon.api - -from openpype.style import get_default_entity_icon_color +from openpype.style import ( + get_default_entity_icon_color, + get_objected_colors, +) +from openpype.resources import get_image_path +from openpype.lib import filter_profiles from openpype.api import ( 
get_project_settings, Logger ) -from openpype.lib import filter_profiles -from openpype.style import get_objected_colors -from openpype.resources import get_image_path +from openpype.pipeline import registered_host log = Logger.get_logger(__name__) @@ -402,13 +403,14 @@ class FamilyConfigCache: self.family_configs.clear() # Skip if we're not in host context - if not avalon.api.registered_host(): + if not registered_host(): return # Update the icons from the project configuration project_name = os.environ.get("AVALON_PROJECT") asset_name = os.environ.get("AVALON_ASSET") task_name = os.environ.get("AVALON_TASK") + host_name = os.environ.get("AVALON_APP") if not all((project_name, asset_name, task_name)): return @@ -422,15 +424,21 @@ class FamilyConfigCache: ["family_filter_profiles"] ) if profiles: - asset_doc = self.dbcon.find_one( + # Make sure connection is installed + # - accessing attribute which does not have auto-install + self.dbcon.install() + database = getattr(self.dbcon, "database", None) + if database is None: + database = self.dbcon._database + asset_doc = database[project_name].find_one( {"type": "asset", "name": asset_name}, {"data.tasks": True} - ) + ) or {} tasks_info = asset_doc.get("data", {}).get("tasks") or {} task_type = tasks_info.get(task_name, {}).get("type") profiles_filter = { "task_types": task_type, - "hosts": os.environ["AVALON_APP"] + "hosts": host_name } matching_item = filter_profiles(profiles, profiles_filter) @@ -719,11 +727,11 @@ def is_sync_loader(loader): def is_remove_site_loader(loader): - return hasattr(loader, "remove_site_on_representation") + return hasattr(loader, "is_remove_site_loader") def is_add_site_loader(loader): - return hasattr(loader, "add_site_to_representation") + return hasattr(loader, "is_add_site_loader") class WrappedCallbackItem: diff --git a/openpype/tools/utils/overlay_messages.py b/openpype/tools/utils/overlay_messages.py new file mode 100644 index 0000000000..62de2cf272 --- /dev/null +++ 
b/openpype/tools/utils/overlay_messages.py @@ -0,0 +1,324 @@ +import uuid + +from Qt import QtWidgets, QtCore, QtGui + +from openpype.style import get_objected_colors + +from .lib import set_style_property + + +class CloseButton(QtWidgets.QFrame): + """Close button drawed manually.""" + + clicked = QtCore.Signal() + + def __init__(self, parent): + super(CloseButton, self).__init__(parent) + colors = get_objected_colors() + close_btn_color = colors["overlay-messages"]["close-btn"] + self._color = close_btn_color.get_qcolor() + self._mouse_pressed = False + policy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Fixed, + QtWidgets.QSizePolicy.Fixed + ) + self.setSizePolicy(policy) + + def sizeHint(self): + size = self.fontMetrics().height() + return QtCore.QSize(size, size) + + def mousePressEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self._mouse_pressed = True + super(CloseButton, self).mousePressEvent(event) + + def mouseReleaseEvent(self, event): + if self._mouse_pressed: + self._mouse_pressed = False + if self.rect().contains(event.pos()): + self.clicked.emit() + + super(CloseButton, self).mouseReleaseEvent(event) + + def paintEvent(self, event): + rect = self.rect() + painter = QtGui.QPainter(self) + painter.setClipRect(event.rect()) + pen = QtGui.QPen() + pen.setWidth(2) + pen.setColor(self._color) + pen.setStyle(QtCore.Qt.SolidLine) + pen.setCapStyle(QtCore.Qt.RoundCap) + painter.setPen(pen) + offset = int(rect.height() / 4) + top = rect.top() + offset + left = rect.left() + offset + right = rect.right() - offset + bottom = rect.bottom() - offset + painter.drawLine( + left, top, + right, bottom + ) + painter.drawLine( + left, bottom, + right, top + ) + + +class OverlayMessageWidget(QtWidgets.QFrame): + """Message widget showed as overlay. + + Message is hidden after timeout but can be overriden by mouse hover. + Mouse hover can add additional 2 seconds of widget's visibility. 
+ + Args: + message_id (str): Unique identifier of message widget for + 'MessageOverlayObject'. + message (str): Text shown in message. + parent (QWidget): Parent widget where message is visible. + timeout (int): Timeout of message's visibility (default 5000). + message_type (str): Property which can be used in styles for specific + kid of message. + """ + + close_requested = QtCore.Signal(str) + _default_timeout = 5000 + + def __init__( + self, message_id, message, parent, message_type=None, timeout=None + ): + super(OverlayMessageWidget, self).__init__(parent) + self.setObjectName("OverlayMessageWidget") + + if message_type: + set_style_property(self, "type", message_type) + + if not timeout: + timeout = self._default_timeout + timeout_timer = QtCore.QTimer() + timeout_timer.setInterval(timeout) + timeout_timer.setSingleShot(True) + + hover_timer = QtCore.QTimer() + hover_timer.setInterval(2000) + hover_timer.setSingleShot(True) + + label_widget = QtWidgets.QLabel(message, self) + label_widget.setAlignment(QtCore.Qt.AlignCenter) + label_widget.setWordWrap(True) + close_btn = CloseButton(self) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(5, 5, 0, 5) + layout.addWidget(label_widget, 1) + layout.addWidget(close_btn, 0) + + close_btn.clicked.connect(self._on_close_clicked) + timeout_timer.timeout.connect(self._on_timer_timeout) + hover_timer.timeout.connect(self._on_hover_timeout) + + self._label_widget = label_widget + self._message_id = message_id + self._timeout_timer = timeout_timer + self._hover_timer = hover_timer + + def size_hint_without_word_wrap(self): + """Size hint in cases that word wrap of label is disabled.""" + self._label_widget.setWordWrap(False) + size_hint = self.sizeHint() + self._label_widget.setWordWrap(True) + return size_hint + + def showEvent(self, event): + """Start timeout on show.""" + super(OverlayMessageWidget, self).showEvent(event) + self._timeout_timer.start() + + def _on_timer_timeout(self): + """On message 
timeout.""" + # Skip closing if hover timer is active + if not self._hover_timer.isActive(): + self._close_message() + + def _on_hover_timeout(self): + """Hover timer timed out.""" + # Check if is still under widget + if self.underMouse(): + self._hover_timer.start() + else: + self._close_message() + + def _on_close_clicked(self): + self._close_message() + + def _close_message(self): + """Emmit close request to 'MessageOverlayObject'.""" + self.close_requested.emit(self._message_id) + + def enterEvent(self, event): + """Start hover timer on hover.""" + super(OverlayMessageWidget, self).enterEvent(event) + self._hover_timer.start() + + def leaveEvent(self, event): + """Start hover timer on hover leave.""" + super(OverlayMessageWidget, self).leaveEvent(event) + self._hover_timer.start() + + +class MessageOverlayObject(QtCore.QObject): + """Object that can be used to add overlay messages. + + Args: + widget (QWidget): + """ + + def __init__(self, widget, default_timeout=None): + super(MessageOverlayObject, self).__init__() + + widget.installEventFilter(self) + + # Timer which triggers recalculation of message positions + recalculate_timer = QtCore.QTimer() + recalculate_timer.setInterval(10) + + recalculate_timer.timeout.connect(self._recalculate_positions) + + self._widget = widget + self._recalculate_timer = recalculate_timer + + self._messages_order = [] + self._closing_messages = set() + self._messages = {} + self._spacing = 5 + self._move_size = 4 + self._move_size_remove = 8 + self._default_timeout = default_timeout + + def add_message(self, message, message_type=None, timeout=None): + """Add single message into overlay. + + Args: + message (str): Message that will be shown. + timeout (int): Message timeout. + message_type (str): Message type can be used as property in + stylesheets. 
+ """ + # Skip empty messages + if not message: + return + + if timeout is None: + timeout = self._default_timeout + + # Create unique id of message + label_id = str(uuid.uuid4()) + # Create message widget + widget = OverlayMessageWidget( + label_id, message, self._widget, message_type, timeout + ) + widget.close_requested.connect(self._on_message_close_request) + widget.show() + + # Move widget outside of window + pos = widget.pos() + pos.setY(pos.y() - widget.height()) + widget.move(pos) + # Store message + self._messages[label_id] = widget + self._messages_order.append(label_id) + # Trigger recalculation timer + self._recalculate_timer.start() + + def _on_message_close_request(self, label_id): + """Message widget requested removement.""" + + widget = self._messages.get(label_id) + if widget is not None: + # Add message to closing messages and start recalculation + self._closing_messages.add(label_id) + self._recalculate_timer.start() + + def _recalculate_positions(self): + """Recalculate positions of widgets.""" + + # Skip if there are no messages to process + if not self._messages_order: + self._recalculate_timer.stop() + return + + # All message widgets are in expected positions + all_at_place = True + # Starting y position + pos_y = self._spacing + # Current widget width + widget_width = self._widget.width() + max_width = widget_width - (2 * self._spacing) + widget_half_width = widget_width / 2 + + # Store message ids that should be removed + message_ids_to_remove = set() + for message_id in reversed(self._messages_order): + widget = self._messages[message_id] + pos = widget.pos() + # Messages to remove are moved upwards + if message_id in self._closing_messages: + bottom = pos.y() + widget.height() + # Add message to remove if is not visible + if bottom < 0 or self._move_size_remove < 1: + message_ids_to_remove.add(message_id) + continue + + # Calculate new y position of message + dst_pos_y = pos.y() - self._move_size_remove + + else: + # Calculate y 
position of message + # - use y position of previous message widget and add + # move size if is not in final destination yet + if widget.underMouse(): + dst_pos_y = pos.y() + elif pos.y() == pos_y or self._move_size < 1: + dst_pos_y = pos_y + elif pos.y() < pos_y: + dst_pos_y = min(pos_y, pos.y() + self._move_size) + else: + dst_pos_y = max(pos_y, pos.y() - self._move_size) + + # Store if widget is in place where should be + if all_at_place and dst_pos_y != pos_y: + all_at_place = False + + # Calculate ideal width and height of message widget + height = widget.heightForWidth(max_width) + w_size_hint = widget.size_hint_without_word_wrap() + widget.resize(min(max_width, w_size_hint.width()), height) + + # Center message widget + size = widget.size() + pos_x = widget_half_width - (size.width() / 2) + # Move widget to destination position + widget.move(pos_x, dst_pos_y) + + # Add message widget height and spacing for next message widget + pos_y += size.height() + self._spacing + + # Remove widgets to remove + for message_id in message_ids_to_remove: + self._messages_order.remove(message_id) + self._closing_messages.remove(message_id) + widget = self._messages.pop(message_id) + widget.hide() + widget.deleteLater() + + # Stop recalculation timer if all widgets are where should be + if all_at_place: + self._recalculate_timer.stop() + + def eventFilter(self, source, event): + # Trigger recalculation of timer on resize of widget + if source is self._widget and event.type() == QtCore.QEvent.Resize: + self._recalculate_timer.start() + + return super(MessageOverlayObject, self).eventFilter(source, event) diff --git a/openpype/tools/workfiles/app.py b/openpype/tools/workfiles/app.py index f0e7900cf5..352847ede8 100644 --- a/openpype/tools/workfiles/app.py +++ b/openpype/tools/workfiles/app.py @@ -1,8 +1,10 @@ import sys import logging -from avalon import api - +from openpype.pipeline import ( + registered_host, + legacy_io, +) from openpype.tools.utils import qt_app_context 
from .window import Window @@ -47,12 +49,12 @@ def show(root=None, debug=False, parent=None, use_context=True, save=True): except (AttributeError, RuntimeError): pass - host = api.registered_host() + host = registered_host() validate_host_requirements(host) if debug: - api.Session["AVALON_ASSET"] = "Mock" - api.Session["AVALON_TASK"] = "Testing" + legacy_io.Session["AVALON_ASSET"] = "Mock" + legacy_io.Session["AVALON_TASK"] = "Testing" with qt_app_context(): window = Window(parent=parent) @@ -60,8 +62,8 @@ def show(root=None, debug=False, parent=None, use_context=True, save=True): if use_context: context = { - "asset": api.Session["AVALON_ASSET"], - "task": api.Session["AVALON_TASK"] + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] } window.set_context(context) diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py index 56af7752da..977111b71b 100644 --- a/openpype/tools/workfiles/files_widget.py +++ b/openpype/tools/workfiles/files_widget.py @@ -4,7 +4,6 @@ import shutil import Qt from Qt import QtWidgets, QtCore -from avalon import io, api from openpype.tools.utils import PlaceholderLineEdit from openpype.tools.utils.delegates import PrettyTimeDelegate @@ -18,6 +17,10 @@ from openpype.lib.avalon_context import ( update_current_task, compute_session_changes ) +from openpype.pipeline import ( + registered_host, + legacy_io, +) from .model import ( WorkAreaFilesModel, PublishFilesModel, @@ -86,14 +89,14 @@ class FilesWidget(QtWidgets.QWidget): self._task_type = None # Pype's anatomy object for current project - self.anatomy = Anatomy(io.Session["AVALON_PROJECT"]) + self.anatomy = Anatomy(legacy_io.Session["AVALON_PROJECT"]) # Template key used to get work template from anatomy templates self.template_key = "work" # This is not root but workfile directory self._workfiles_root = None self._workdir_path = None - self.host = api.registered_host() + self.host = registered_host() # Whether 
to automatically select the latest modified # file on a refresh of the files model. @@ -146,7 +149,9 @@ class FilesWidget(QtWidgets.QWidget): workarea_files_view.setColumnWidth(0, 330) # --- Publish files view --- - publish_files_model = PublishFilesModel(extensions, io, self.anatomy) + publish_files_model = PublishFilesModel( + extensions, legacy_io, self.anatomy + ) publish_proxy_model = QtCore.QSortFilterProxyModel() publish_proxy_model.setSourceModel(publish_files_model) @@ -379,13 +384,13 @@ class FilesWidget(QtWidgets.QWidget): return None if self._asset_doc is None: - self._asset_doc = io.find_one({"_id": self._asset_id}) + self._asset_doc = legacy_io.find_one({"_id": self._asset_id}) return self._asset_doc def _get_session(self): """Return a modified session for the current asset and task""" - session = api.Session.copy() + session = legacy_io.Session.copy() self.template_key = get_workfile_template_key( self._task_type, session["AVALON_APP"], @@ -404,7 +409,7 @@ class FilesWidget(QtWidgets.QWidget): def _enter_session(self): """Enter the asset and task session currently selected""" - session = api.Session.copy() + session = legacy_io.Session.copy() changes = compute_session_changes( session, asset=self._get_asset_doc(), @@ -594,10 +599,10 @@ class FilesWidget(QtWidgets.QWidget): # Create extra folders create_workdir_extra_folders( self._workdir_path, - api.Session["AVALON_APP"], + legacy_io.Session["AVALON_APP"], self._task_type, self._task_name, - api.Session["AVALON_PROJECT"] + legacy_io.Session["AVALON_PROJECT"] ) # Trigger after save events emit_event( diff --git a/openpype/tools/workfiles/save_as_dialog.py b/openpype/tools/workfiles/save_as_dialog.py index f5ae393d0f..3e97d6c938 100644 --- a/openpype/tools/workfiles/save_as_dialog.py +++ b/openpype/tools/workfiles/save_as_dialog.py @@ -5,12 +5,14 @@ import logging from Qt import QtWidgets, QtCore -from avalon import api, io - from openpype.lib import ( get_last_workfile_with_version, get_workdir_data, 
) +from openpype.pipeline import ( + registered_host, + legacy_io, +) from openpype.tools.utils import PlaceholderLineEdit log = logging.getLogger(__name__) @@ -23,7 +25,7 @@ def build_workfile_data(session): asset_name = session["AVALON_ASSET"] task_name = session["AVALON_TASK"] host_name = session["AVALON_APP"] - project_doc = io.find_one( + project_doc = legacy_io.find_one( {"type": "project"}, { "name": True, @@ -32,7 +34,7 @@ def build_workfile_data(session): } ) - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( { "type": "asset", "name": asset_name @@ -65,7 +67,7 @@ class CommentMatcher(object): return # Create a regex group for extensions - extensions = api.registered_host().file_extensions() + extensions = registered_host().file_extensions() any_extension = "(?:{})".format( "|".join(re.escape(ext[1:]) for ext in extensions) ) @@ -200,14 +202,14 @@ class SaveAsDialog(QtWidgets.QDialog): self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) self.result = None - self.host = api.registered_host() + self.host = registered_host() self.root = root self.work_file = None self._extensions = extensions if not session: # Fallback to active session - session = api.Session + session = legacy_io.Session self.data = build_workfile_data(session) @@ -282,7 +284,7 @@ class SaveAsDialog(QtWidgets.QDialog): if current_filepath: # We match the current filename against the current session # instead of the session where the user is saving to. 
- current_data = build_workfile_data(api.Session) + current_data = build_workfile_data(legacy_io.Session) matcher = CommentMatcher(anatomy, template_key, current_data) comment = matcher.parse_comment(current_filepath) if comment: diff --git a/openpype/tools/workfiles/window.py b/openpype/tools/workfiles/window.py index 73e63d30b5..02a22af26c 100644 --- a/openpype/tools/workfiles/window.py +++ b/openpype/tools/workfiles/window.py @@ -2,14 +2,13 @@ import os import datetime from Qt import QtCore, QtWidgets -from avalon import io - from openpype import style from openpype.lib import ( get_workfile_doc, create_workfile_doc, save_workfile_data_to_doc, ) +from openpype.pipeline import legacy_io from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from openpype.tools.utils.tasks_widget import TasksWidget @@ -158,10 +157,12 @@ class Window(QtWidgets.QMainWindow): home_page_widget = QtWidgets.QWidget(pages_widget) home_body_widget = QtWidgets.QWidget(home_page_widget) - assets_widget = SingleSelectAssetsWidget(io, parent=home_body_widget) + assets_widget = SingleSelectAssetsWidget( + legacy_io, parent=home_body_widget + ) assets_widget.set_current_asset_btn_visibility(True) - tasks_widget = TasksWidget(io, home_body_widget) + tasks_widget = TasksWidget(legacy_io, home_body_widget) files_widget = FilesWidget(home_body_widget) side_panel = SidePanelWidget(home_body_widget) @@ -250,7 +251,7 @@ class Window(QtWidgets.QMainWindow): if asset_id and task_name and filepath: filename = os.path.split(filepath)[1] workfile_doc = get_workfile_doc( - asset_id, task_name, filename, io + asset_id, task_name, filename, legacy_io ) self.side_panel.set_context( asset_id, task_name, filepath, workfile_doc @@ -272,7 +273,7 @@ class Window(QtWidgets.QMainWindow): self._create_workfile_doc(filepath, force=True) workfile_doc = self._get_current_workfile_doc() - save_workfile_data_to_doc(workfile_doc, data, io) + save_workfile_data_to_doc(workfile_doc, data, legacy_io) def 
_get_current_workfile_doc(self, filepath=None): if filepath is None: @@ -284,7 +285,7 @@ class Window(QtWidgets.QMainWindow): filename = os.path.split(filepath)[1] return get_workfile_doc( - asset_id, task_name, filename, io + asset_id, task_name, filename, legacy_io ) def _create_workfile_doc(self, filepath, force=False): @@ -295,9 +296,11 @@ class Window(QtWidgets.QMainWindow): if not workfile_doc: workdir, filename = os.path.split(filepath) asset_id = self.assets_widget.get_selected_asset_id() - asset_doc = io.find_one({"_id": asset_id}) + asset_doc = legacy_io.find_one({"_id": asset_id}) task_name = self.tasks_widget.get_selected_task_name() - create_workfile_doc(asset_doc, task_name, filename, workdir, io) + create_workfile_doc( + asset_doc, task_name, filename, workdir, legacy_io + ) def refresh(self): # Refresh asset widget @@ -319,7 +322,7 @@ class Window(QtWidgets.QMainWindow): self._context_to_set, context = None, self._context_to_set if "asset" in context: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( { "name": context["asset"], "type": "asset" diff --git a/openpype/version.py b/openpype/version.py index 97aa585ca7..1cc854cfd1 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.9.3" +__version__ = "3.10.0-nightly.4" diff --git a/openpype/widgets/attribute_defs/__init__.py b/openpype/widgets/attribute_defs/__init__.py index 147efeb3d6..ce6b80109e 100644 --- a/openpype/widgets/attribute_defs/__init__.py +++ b/openpype/widgets/attribute_defs/__init__.py @@ -1,6 +1,10 @@ -from .widgets import create_widget_for_attr_def +from .widgets import ( + create_widget_for_attr_def, + AttributeDefinitionsWidget, +) __all__ = ( "create_widget_for_attr_def", + "AttributeDefinitionsWidget", ) diff --git a/openpype/widgets/attribute_defs/files_widget.py b/openpype/widgets/attribute_defs/files_widget.py index 34f7d159ad..23cf8342b1 100644 --- 
a/openpype/widgets/attribute_defs/files_widget.py +++ b/openpype/widgets/attribute_defs/files_widget.py @@ -1,15 +1,16 @@ import os import collections import uuid -import clique + from Qt import QtWidgets, QtCore, QtGui -from openpype.tools.utils import paint_image_with_color -# TODO change imports -from openpype.tools.resources import ( - get_pixmap, - get_image, +from openpype.lib import FileDefItem +from openpype.tools.utils import ( + paint_image_with_color, + ClickableLabel, ) +# TODO change imports +from openpype.tools.resources import get_image from openpype.tools.utils import ( IconButton, PixmapLabel @@ -21,7 +22,8 @@ ITEM_ICON_ROLE = QtCore.Qt.UserRole + 3 FILENAMES_ROLE = QtCore.Qt.UserRole + 4 DIRPATH_ROLE = QtCore.Qt.UserRole + 5 IS_DIR_ROLE = QtCore.Qt.UserRole + 6 -EXT_ROLE = QtCore.Qt.UserRole + 7 +IS_SEQUENCE_ROLE = QtCore.Qt.UserRole + 7 +EXT_ROLE = QtCore.Qt.UserRole + 8 class DropEmpty(QtWidgets.QWidget): @@ -73,175 +75,91 @@ class DropEmpty(QtWidgets.QWidget): class FilesModel(QtGui.QStandardItemModel): - sequence_exts = [ - ".ani", ".anim", ".apng", ".art", ".bmp", ".bpg", ".bsave", ".cal", - ".cin", ".cpc", ".cpt", ".dds", ".dpx", ".ecw", ".exr", ".fits", - ".flic", ".flif", ".fpx", ".gif", ".hdri", ".hevc", ".icer", - ".icns", ".ico", ".cur", ".ics", ".ilbm", ".jbig", ".jbig2", - ".jng", ".jpeg", ".jpeg-ls", ".2000", ".jpg", ".xr", - ".jpeg-hdr", ".kra", ".mng", ".miff", ".nrrd", - ".ora", ".pam", ".pbm", ".pgm", ".ppm", ".pnm", ".pcx", ".pgf", - ".pictor", ".png", ".psb", ".psp", ".qtvr", ".ras", - ".rgbe", ".logluv", ".tiff", ".sgi", ".tga", ".tiff", ".tiff/ep", - ".tiff/it", ".ufo", ".ufp", ".wbmp", ".webp", ".xbm", ".xcf", - ".xpm", ".xwd" - ] - - def __init__(self): + def __init__(self, single_item, allow_sequences): super(FilesModel, self).__init__() + + self._single_item = single_item + self._multivalue = False + self._allow_sequences = allow_sequences + + self._items_by_id = {} + self._file_items_by_id = {} self._filenames_by_dirpath 
= collections.defaultdict(set) self._items_by_dirpath = collections.defaultdict(list) - def add_filepaths(self, filepaths): - if not filepaths: + def set_multivalue(self, multivalue): + """Disable filtering.""" + + if self._multivalue == multivalue: + return + self._multivalue = multivalue + + def add_filepaths(self, items): + if not items: return - new_dirpaths = set() - for filepath in filepaths: - filename = os.path.basename(filepath) - dirpath = os.path.dirname(filepath) - filenames = self._filenames_by_dirpath[dirpath] - if filename not in filenames: - new_dirpaths.add(dirpath) - filenames.add(filename) - self._refresh_items(new_dirpaths) + file_items = FileDefItem.from_value(items, self._allow_sequences) + if not file_items: + return + + if not self._multivalue and self._single_item: + file_items = [file_items[0]] + current_ids = list(self._file_items_by_id.keys()) + if current_ids: + self.remove_item_by_ids(current_ids) + + new_model_items = [] + for file_item in file_items: + item_id, model_item = self._create_item(file_item) + new_model_items.append(model_item) + self._file_items_by_id[item_id] = file_item + self._items_by_id[item_id] = model_item + + if new_model_items: + roow_item = self.invisibleRootItem() + roow_item.appendRows(new_model_items) def remove_item_by_ids(self, item_ids): if not item_ids: return - remaining_ids = set(item_ids) - result = collections.defaultdict(list) - for dirpath, items in self._items_by_dirpath.items(): - if not remaining_ids: - break + items = [] + for item_id in set(item_ids): + if item_id not in self._items_by_id: + continue + item = self._items_by_id.pop(item_id) + self._file_items_by_id.pop(item_id) + items.append(item) + + if items: for item in items: - if not remaining_ids: - break - item_id = item.data(ITEM_ID_ROLE) - if item_id in remaining_ids: - remaining_ids.remove(item_id) - result[dirpath].append(item) - - if not result: - return - - dirpaths = set(result.keys()) - for dirpath, items in result.items(): - 
filenames_cache = self._filenames_by_dirpath[dirpath] - for item in items: - filenames = item.data(FILENAMES_ROLE) - - self._items_by_dirpath[dirpath].remove(item) self.removeRows(item.row(), 1) - for filename in filenames: - if filename in filenames_cache: - filenames_cache.remove(filename) - self._refresh_items(dirpaths) + def get_file_item_by_id(self, item_id): + return self._file_items_by_id.get(item_id) - def _refresh_items(self, dirpaths=None): - if dirpaths is None: - dirpaths = set(self._items_by_dirpath.keys()) - - new_items = [] - for dirpath in dirpaths: - items_to_remove = list(self._items_by_dirpath[dirpath]) - cols, remainders = clique.assemble( - self._filenames_by_dirpath[dirpath] + def _create_item(self, file_item): + if file_item.is_dir: + icon_pixmap = paint_image_with_color( + get_image(filename="folder.png"), QtCore.Qt.white + ) + else: + icon_pixmap = paint_image_with_color( + get_image(filename="file.png"), QtCore.Qt.white ) - filtered_cols = [] - for collection in cols: - filenames = set(collection) - valid_col = True - for filename in filenames: - ext = os.path.splitext(filename)[-1] - valid_col = ext in self.sequence_exts - break - - if valid_col: - filtered_cols.append(collection) - else: - for filename in filenames: - remainders.append(filename) - - for filename in remainders: - found = False - for item in items_to_remove: - item_filenames = item.data(FILENAMES_ROLE) - if filename in item_filenames and len(item_filenames) == 1: - found = True - items_to_remove.remove(item) - break - - if found: - continue - - fullpath = os.path.join(dirpath, filename) - if os.path.isdir(fullpath): - icon_pixmap = get_pixmap(filename="folder.png") - else: - icon_pixmap = get_pixmap(filename="file.png") - label = filename - filenames = [filename] - item = self._create_item( - label, filenames, dirpath, icon_pixmap - ) - new_items.append(item) - self._items_by_dirpath[dirpath].append(item) - - for collection in filtered_cols: - filenames = set(collection) - 
found = False - for item in items_to_remove: - item_filenames = item.data(FILENAMES_ROLE) - if item_filenames == filenames: - found = True - items_to_remove.remove(item) - break - - if found: - continue - - col_range = collection.format("{ranges}") - label = "{}<{}>{}".format( - collection.head, col_range, collection.tail - ) - icon_pixmap = get_pixmap(filename="files.png") - item = self._create_item( - label, filenames, dirpath, icon_pixmap - ) - new_items.append(item) - self._items_by_dirpath[dirpath].append(item) - - for item in items_to_remove: - self._items_by_dirpath[dirpath].remove(item) - self.removeRows(item.row(), 1) - - if new_items: - self.invisibleRootItem().appendRows(new_items) - - def _create_item(self, label, filenames, dirpath, icon_pixmap=None): - first_filename = None - for filename in filenames: - first_filename = filename - break - ext = os.path.splitext(first_filename)[-1] - is_dir = False - if len(filenames) == 1: - filepath = os.path.join(dirpath, first_filename) - is_dir = os.path.isdir(filepath) item = QtGui.QStandardItem() - item.setData(str(uuid.uuid4()), ITEM_ID_ROLE) - item.setData(label, ITEM_LABEL_ROLE) - item.setData(filenames, FILENAMES_ROLE) - item.setData(dirpath, DIRPATH_ROLE) + item_id = str(uuid.uuid4()) + item.setData(item_id, ITEM_ID_ROLE) + item.setData(file_item.label or "< empty >", ITEM_LABEL_ROLE) + item.setData(file_item.filenames, FILENAMES_ROLE) + item.setData(file_item.directory, DIRPATH_ROLE) item.setData(icon_pixmap, ITEM_ICON_ROLE) - item.setData(ext, EXT_ROLE) - item.setData(is_dir, IS_DIR_ROLE) + item.setData(file_item.ext, EXT_ROLE) + item.setData(file_item.is_dir, IS_DIR_ROLE) + item.setData(file_item.is_sequence, IS_SEQUENCE_ROLE) - return item + return item_id, item class FilesProxyModel(QtCore.QSortFilterProxyModel): @@ -249,6 +167,15 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel): super(FilesProxyModel, self).__init__(*args, **kwargs) self._allow_folders = False self._allowed_extensions = None + 
self._multivalue = False + + def set_multivalue(self, multivalue): + """Disable filtering.""" + + if self._multivalue == multivalue: + return + self._multivalue = multivalue + self.invalidateFilter() def set_allow_folders(self, allow=None): if allow is None: @@ -267,7 +194,34 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel): self._allowed_extensions = extensions self.invalidateFilter() + def are_valid_files(self, filepaths): + for filepath in filepaths: + if os.path.isfile(filepath): + _, ext = os.path.splitext(filepath) + if ext in self._allowed_extensions: + return True + + elif self._allow_folders: + return True + return False + + def filter_valid_files(self, filepaths): + filtered_paths = [] + for filepath in filepaths: + if os.path.isfile(filepath): + _, ext = os.path.splitext(filepath) + if ext in self._allowed_extensions: + filtered_paths.append(filepath) + + elif self._allow_folders: + filtered_paths.append(filepath) + return filtered_paths + def filterAcceptsRow(self, row, parent_index): + # Skip filtering if multivalue is set + if self._multivalue: + return True + model = self.sourceModel() index = model.index(row, self.filterKeyColumn(), parent_index) # First check if item is folder and if folders are enabled @@ -297,9 +251,11 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel): class ItemWidget(QtWidgets.QWidget): - remove_requested = QtCore.Signal(str) + context_menu_requested = QtCore.Signal(QtCore.QPoint) - def __init__(self, item_id, label, pixmap_icon, parent=None): + def __init__( + self, item_id, label, pixmap_icon, is_sequence, multivalue, parent=None + ): self._item_id = item_id super(ItemWidget, self).__init__(parent) @@ -308,30 +264,73 @@ class ItemWidget(QtWidgets.QWidget): icon_widget = PixmapLabel(pixmap_icon, self) label_widget = QtWidgets.QLabel(label, self) - pixmap = paint_image_with_color( - get_image(filename="delete.png"), QtCore.Qt.white + + label_size_hint = label_widget.sizeHint() + height = label_size_hint.height() + 
actions_menu_pix = paint_image_with_color( + get_image(filename="menu.png"), QtCore.Qt.white ) - remove_btn = IconButton(self) - remove_btn.setIcon(QtGui.QIcon(pixmap)) + + split_btn = ClickableLabel(self) + split_btn.setFixedSize(height, height) + split_btn.setPixmap(actions_menu_pix) + if multivalue: + split_btn.setVisible(False) + else: + split_btn.setVisible(is_sequence) layout = QtWidgets.QHBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) + layout.setContentsMargins(5, 5, 5, 5) layout.addWidget(icon_widget, 0) layout.addWidget(label_widget, 1) - layout.addWidget(remove_btn, 0) + layout.addWidget(split_btn, 0) - remove_btn.clicked.connect(self._on_remove_clicked) + split_btn.clicked.connect(self._on_actions_clicked) self._icon_widget = icon_widget self._label_widget = label_widget - self._remove_btn = remove_btn + self._split_btn = split_btn + self._actions_menu_pix = actions_menu_pix + self._last_scaled_pix_height = None - def _on_remove_clicked(self): - self.remove_requested.emit(self._item_id) + def _update_btn_size(self): + label_size_hint = self._label_widget.sizeHint() + height = label_size_hint.height() + if height == self._last_scaled_pix_height: + return + self._last_scaled_pix_height = height + self._split_btn.setFixedSize(height, height) + pix = self._actions_menu_pix.scaled( + height, height, + QtCore.Qt.KeepAspectRatio, + QtCore.Qt.SmoothTransformation + ) + self._split_btn.setPixmap(pix) + + def showEvent(self, event): + super(ItemWidget, self).showEvent(event) + self._update_btn_size() + + def resizeEvent(self, event): + super(ItemWidget, self).resizeEvent(event) + self._update_btn_size() + + def _on_actions_clicked(self): + pos = self._split_btn.rect().bottomLeft() + point = self._split_btn.mapToGlobal(pos) + self.context_menu_requested.emit(point) + + +class InViewButton(IconButton): + pass class FilesView(QtWidgets.QListView): """View showing instances and their groups.""" + + remove_requested = QtCore.Signal() + context_menu_requested 
= QtCore.Signal(QtCore.QPoint) + def __init__(self, *args, **kwargs): super(FilesView, self).__init__(*args, **kwargs) @@ -339,9 +338,51 @@ class FilesView(QtWidgets.QListView): self.setSelectionMode( QtWidgets.QAbstractItemView.ExtendedSelection ) + self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + + remove_btn = InViewButton(self) + pix_enabled = paint_image_with_color( + get_image(filename="delete.png"), QtCore.Qt.white + ) + pix_disabled = paint_image_with_color( + get_image(filename="delete.png"), QtCore.Qt.gray + ) + icon = QtGui.QIcon(pix_enabled) + icon.addPixmap(pix_disabled, icon.Disabled, icon.Off) + remove_btn.setIcon(icon) + remove_btn.setEnabled(False) + + remove_btn.clicked.connect(self._on_remove_clicked) + self.customContextMenuRequested.connect(self._on_context_menu_request) + + self._remove_btn = remove_btn + + def setSelectionModel(self, *args, **kwargs): + """Catch selection model set to register signal callback. + + Selection model is not available during initialization. 
+ """ + + super(FilesView, self).setSelectionModel(*args, **kwargs) + selection_model = self.selectionModel() + selection_model.selectionChanged.connect(self._on_selection_change) + + def set_multivalue(self, multivalue): + """Disable remove button on multivalue.""" + + self._remove_btn.setVisible(not multivalue) + + def has_selected_item_ids(self): + """Is any index selected.""" + for index in self.selectionModel().selectedIndexes(): + instance_id = index.data(ITEM_ID_ROLE) + if instance_id is not None: + return True + return False def get_selected_item_ids(self): """Ids of selected instances.""" + selected_item_ids = set() for index in self.selectionModel().selectedIndexes(): instance_id = index.data(ITEM_ID_ROLE) @@ -349,35 +390,63 @@ class FilesView(QtWidgets.QListView): selected_item_ids.add(instance_id) return selected_item_ids + def has_selected_sequence(self): + for index in self.selectionModel().selectedIndexes(): + if index.data(IS_SEQUENCE_ROLE): + return True + return False + def event(self, event): - if not event.type() == QtCore.QEvent.KeyPress: - pass - - elif event.key() == QtCore.Qt.Key_Space: - self.toggle_requested.emit(-1) - return True - - elif event.key() == QtCore.Qt.Key_Backspace: - self.toggle_requested.emit(0) - return True - - elif event.key() == QtCore.Qt.Key_Return: - self.toggle_requested.emit(1) - return True + if event.type() == QtCore.QEvent.KeyPress: + if ( + event.key() == QtCore.Qt.Key_Delete + and self.has_selected_item_ids() + ): + self.remove_requested.emit() + return True return super(FilesView, self).event(event) + def _on_context_menu_request(self, pos): + index = self.indexAt(pos) + if index.isValid(): + point = self.viewport().mapToGlobal(pos) + self.context_menu_requested.emit(point) -class MultiFilesWidget(QtWidgets.QFrame): + def _on_selection_change(self): + self._remove_btn.setEnabled(self.has_selected_item_ids()) + + def _on_remove_clicked(self): + self.remove_requested.emit() + + def _update_remove_btn(self): + 
"""Position remove button to bottom right.""" + + viewport = self.viewport() + height = viewport.height() + pos_x = viewport.width() - self._remove_btn.width() - 5 + pos_y = height - self._remove_btn.height() - 5 + self._remove_btn.move(max(0, pos_x), max(0, pos_y)) + + def resizeEvent(self, event): + super(FilesView, self).resizeEvent(event) + self._update_remove_btn() + + def showEvent(self, event): + super(FilesView, self).showEvent(event) + self._update_remove_btn() + + +class FilesWidget(QtWidgets.QFrame): value_changed = QtCore.Signal() - def __init__(self, parent): - super(MultiFilesWidget, self).__init__(parent) + def __init__(self, single_item, allow_sequences, parent): + super(FilesWidget, self).__init__(parent) self.setAcceptDrops(True) empty_widget = DropEmpty(self) - files_model = FilesModel() + files_model = FilesModel(single_item, allow_sequences) files_proxy_model = FilesProxyModel() files_proxy_model.setSourceModel(files_model) files_view = FilesView(self) @@ -391,8 +460,13 @@ class MultiFilesWidget(QtWidgets.QFrame): files_proxy_model.rowsInserted.connect(self._on_rows_inserted) files_proxy_model.rowsRemoved.connect(self._on_rows_removed) - + files_view.remove_requested.connect(self._on_remove_requested) + files_view.context_menu_requested.connect( + self._on_context_menu_requested + ) self._in_set_value = False + self._single_item = single_item + self._multivalue = False self._empty_widget = empty_widget self._files_model = files_model @@ -401,39 +475,46 @@ class MultiFilesWidget(QtWidgets.QFrame): self._widgets_by_id = {} + def _set_multivalue(self, multivalue): + if self._multivalue == multivalue: + return + self._multivalue = multivalue + self._files_view.set_multivalue(multivalue) + self._files_model.set_multivalue(multivalue) + self._files_proxy_model.set_multivalue(multivalue) + def set_value(self, value, multivalue): self._in_set_value = True + widget_ids = set(self._widgets_by_id.keys()) self._remove_item_by_ids(widget_ids) - # TODO how 
to display multivalue? - all_same = True - if multivalue: - new_value = set() - item_row = None - for _value in value: - _value_set = set(_value) - new_value |= _value_set - if item_row is None: - item_row = _value_set - elif item_row != _value_set: - all_same = False - value = new_value + self._set_multivalue(multivalue) + + self._add_filepaths(value) - if value: - self._add_filepaths(value) self._in_set_value = False def current_value(self): model = self._files_proxy_model - filepaths = set() + item_ids = set() for row in range(model.rowCount()): index = model.index(row, 0) - dirpath = index.data(DIRPATH_ROLE) - filenames = index.data(FILENAMES_ROLE) - for filename in filenames: - filepaths.add(os.path.join(dirpath, filename)) - return list(filepaths) + item_ids.add(index.data(ITEM_ID_ROLE)) + + file_items = [] + for item_id in item_ids: + file_item = self._files_model.get_file_item_by_id(item_id) + if file_item is not None: + file_items.append(file_item.to_dict()) + + if not self._single_item: + return file_items + if file_items: + return file_items[0] + + empty_item = FileDefItem.create_empty_item() + return empty_item.to_dict() def set_filters(self, folders_allowed, exts_filter): self._files_proxy_model.set_allow_folders(folders_allowed) @@ -447,13 +528,22 @@ class MultiFilesWidget(QtWidgets.QFrame): continue label = index.data(ITEM_LABEL_ROLE) pixmap_icon = index.data(ITEM_ICON_ROLE) + is_sequence = index.data(IS_SEQUENCE_ROLE) - widget = ItemWidget(item_id, label, pixmap_icon) + widget = ItemWidget( + item_id, + label, + pixmap_icon, + is_sequence, + self._multivalue + ) + widget.context_menu_requested.connect( + self._on_context_menu_requested + ) self._files_view.setIndexWidget(index, widget) self._files_proxy_model.setData( index, widget.sizeHint(), QtCore.Qt.SizeHintRole ) - widget.remove_requested.connect(self._on_remove_request) self._widgets_by_id[item_id] = widget self._files_proxy_model.sort(0) @@ -481,27 +571,51 @@ class 
MultiFilesWidget(QtWidgets.QFrame): if not self._in_set_value: self.value_changed.emit() - def _on_remove_request(self, item_id): - found_index = None - for row in range(self._files_model.rowCount()): - index = self._files_model.index(row, 0) - _item_id = index.data(ITEM_ID_ROLE) - if item_id == _item_id: - found_index = index - break + def _on_split_request(self): + if self._multivalue: + return - if found_index is None: + item_ids = self._files_view.get_selected_item_ids() + if not item_ids: + return + + for item_id in item_ids: + file_item = self._files_model.get_file_item_by_id(item_id) + if not file_item: + return + + new_items = file_item.split_sequence() + self._add_filepaths(new_items) + self._remove_item_by_ids(item_ids) + + def _on_remove_requested(self): + if self._multivalue: return items_to_delete = self._files_view.get_selected_item_ids() - if item_id not in items_to_delete: - items_to_delete = [item_id] + if items_to_delete: + self._remove_item_by_ids(items_to_delete) - self._remove_item_by_ids(items_to_delete) + def _on_context_menu_requested(self, pos): + if self._multivalue: + return + + menu = QtWidgets.QMenu(self._files_view) + + if self._files_view.has_selected_sequence(): + split_action = QtWidgets.QAction("Split sequence", menu) + split_action.triggered.connect(self._on_split_request) + menu.addAction(split_action) + + remove_action = QtWidgets.QAction("Remove", menu) + remove_action.triggered.connect(self._on_remove_requested) + menu.addAction(remove_action) + + menu.popup(pos) def sizeHint(self): # Get size hints of widget and visible widgets - result = super(MultiFilesWidget, self).sizeHint() + result = super(FilesWidget, self).sizeHint() if not self._files_view.isVisible(): not_visible_hint = self._files_view.sizeHint() else: @@ -523,15 +637,9 @@ class MultiFilesWidget(QtWidgets.QFrame): return result def dragEnterEvent(self, event): - mime_data = event.mimeData() - if mime_data.hasUrls(): - event.setDropAction(QtCore.Qt.CopyAction) - 
event.accept() + if self._multivalue: + return - def dragLeaveEvent(self, event): - event.accept() - - def dropEvent(self, event): mime_data = event.mimeData() if mime_data.hasUrls(): filepaths = [] @@ -539,6 +647,25 @@ class MultiFilesWidget(QtWidgets.QFrame): filepath = url.toLocalFile() if os.path.exists(filepath): filepaths.append(filepath) + + if self._files_proxy_model.are_valid_files(filepaths): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + def dragLeaveEvent(self, event): + event.accept() + + def dropEvent(self, event): + mime_data = event.mimeData() + if not self._multivalue and mime_data.hasUrls(): + filepaths = [] + for url in mime_data.urls(): + filepath = url.toLocalFile() + if os.path.exists(filepath): + filepaths.append(filepath) + + # Filter filepaths before passing it to model + filepaths = self._files_proxy_model.filter_valid_files(filepaths) if filepaths: self._add_filepaths(filepaths) event.accept() @@ -555,92 +682,3 @@ class MultiFilesWidget(QtWidgets.QFrame): files_exists = self._files_proxy_model.rowCount() > 0 self._files_view.setVisible(files_exists) self._empty_widget.setVisible(not files_exists) - - -class SingleFileWidget(QtWidgets.QWidget): - value_changed = QtCore.Signal() - - def __init__(self, parent): - super(SingleFileWidget, self).__init__(parent) - - self.setAcceptDrops(True) - - filepath_input = QtWidgets.QLineEdit(self) - - browse_btn = QtWidgets.QPushButton("Browse", self) - browse_btn.setVisible(False) - - layout = QtWidgets.QHBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(filepath_input, 1) - layout.addWidget(browse_btn, 0) - - browse_btn.clicked.connect(self._on_browse_clicked) - filepath_input.textChanged.connect(self._on_text_change) - - self._in_set_value = False - - self._filepath_input = filepath_input - self._folders_allowed = False - self._exts_filter = [] - - def set_value(self, value, multivalue): - self._in_set_value = True - - if multivalue: - set_value = set(value) 
- if len(set_value) == 1: - value = tuple(set_value)[0] - else: - value = "< Multiselection >" - self._filepath_input.setText(value) - - self._in_set_value = False - - def current_value(self): - return self._filepath_input.text() - - def set_filters(self, folders_allowed, exts_filter): - self._folders_allowed = folders_allowed - self._exts_filter = exts_filter - - def _on_text_change(self, text): - if not self._in_set_value: - self.value_changed.emit() - - def _on_browse_clicked(self): - # TODO implement file dialog logic in '_on_browse_clicked' - print("_on_browse_clicked") - - def dragEnterEvent(self, event): - mime_data = event.mimeData() - if not mime_data.hasUrls(): - return - - filepaths = [] - for url in mime_data.urls(): - filepath = url.toLocalFile() - if os.path.exists(filepath): - filepaths.append(filepath) - - # TODO add folder, extensions check - if len(filepaths) == 1: - event.setDropAction(QtCore.Qt.CopyAction) - event.accept() - - def dragLeaveEvent(self, event): - event.accept() - - def dropEvent(self, event): - mime_data = event.mimeData() - if mime_data.hasUrls(): - filepaths = [] - for url in mime_data.urls(): - filepath = url.toLocalFile() - if os.path.exists(filepath): - filepaths.append(filepath) - # TODO filter check - if len(filepaths) == 1: - self._filepath_input.setText(filepaths[0]) - - event.accept() diff --git a/openpype/widgets/attribute_defs/widgets.py b/openpype/widgets/attribute_defs/widgets.py index 23f025967d..b6493b80a8 100644 --- a/openpype/widgets/attribute_defs/widgets.py +++ b/openpype/widgets/attribute_defs/widgets.py @@ -1,4 +1,5 @@ import uuid +import copy from Qt import QtWidgets, QtCore @@ -10,11 +11,14 @@ from openpype.lib.attribute_definitions import ( EnumDef, BoolDef, FileDef, + UIDef, UISeparatorDef, UILabelDef ) from openpype.widgets.nice_checkbox import NiceCheckbox +from .files_widget import FilesWidget + def create_widget_for_attr_def(attr_def, parent=None): if not isinstance(attr_def, AbtractAttrDef): @@ -51,6 
+55,108 @@ def create_widget_for_attr_def(attr_def, parent=None): )) +class AttributeDefinitionsWidget(QtWidgets.QWidget): + """Create widgets for attribute definitions in grid layout. + + Widget creates input widgets for passed attribute definitions. + + Widget can't handle multiselection values. + """ + + def __init__(self, attr_defs=None, parent=None): + super(AttributeDefinitionsWidget, self).__init__(parent) + + self._widgets = [] + self._current_keys = set() + + self.set_attr_defs(attr_defs) + + def clear_attr_defs(self): + """Remove all existing widgets and reset layout if needed.""" + self._widgets = [] + self._current_keys = set() + + layout = self.layout() + if layout is not None: + if layout.count() == 0: + return + + while layout.count(): + item = layout.takeAt(0) + widget = item.widget() + if widget: + widget.setVisible(False) + widget.deleteLater() + + layout.deleteLater() + + new_layout = QtWidgets.QGridLayout() + new_layout.setColumnStretch(0, 0) + new_layout.setColumnStretch(1, 1) + self.setLayout(new_layout) + + def set_attr_defs(self, attr_defs): + """Replace current attribute definitions with passed.""" + self.clear_attr_defs() + if attr_defs: + self.add_attr_defs(attr_defs) + + def add_attr_defs(self, attr_defs): + """Add attribute definitions to current.""" + layout = self.layout() + + row = 0 + for attr_def in attr_defs: + if attr_def.key in self._current_keys: + raise KeyError("Duplicated key \"{}\"".format(attr_def.key)) + + self._current_keys.add(attr_def.key) + widget = create_widget_for_attr_def(attr_def, self) + + expand_cols = 2 + if attr_def.is_value_def and attr_def.is_label_horizontal: + expand_cols = 1 + + col_num = 2 - expand_cols + + if attr_def.label: + label_widget = QtWidgets.QLabel(attr_def.label, self) + layout.addWidget( + label_widget, row, 0, 1, expand_cols + ) + if not attr_def.is_label_horizontal: + row += 1 + + layout.addWidget( + widget, row, col_num, 1, expand_cols + ) + self._widgets.append(widget) + row += 1 + + 
def set_value(self, value): + new_value = copy.deepcopy(value) + unused_keys = set(new_value.keys()) + for widget in self._widgets: + attr_def = widget.attr_def + if attr_def.key not in new_value: + continue + unused_keys.remove(attr_def.key) + + widget_value = new_value[attr_def.key] + if widget_value is None: + widget_value = copy.deepcopy(attr_def.default) + widget.set_value(widget_value) + + def current_value(self): + output = {} + for widget in self._widgets: + attr_def = widget.attr_def + if not isinstance(attr_def, UIDef): + output[attr_def.key] = widget.current_value() + + return output + + class _BaseAttrDefWidget(QtWidgets.QWidget): # Type 'object' may not work with older PySide versions value_changed = QtCore.Signal(object, uuid.UUID) @@ -336,16 +442,9 @@ class UnknownAttrWidget(_BaseAttrDefWidget): class FileAttrWidget(_BaseAttrDefWidget): def _ui_init(self): - self.multipath = self.attr_def.multipath - if self.multipath: - from .files_widget import MultiFilesWidget - - input_widget = MultiFilesWidget(self) - - else: - from .files_widget import SingleFileWidget - - input_widget = SingleFileWidget(self) + input_widget = FilesWidget( + self.attr_def.single_item, self.attr_def.allow_sequences, self + ) if self.attr_def.tooltip: input_widget.setToolTip(self.attr_def.tooltip) diff --git a/openpype/widgets/popup.py b/openpype/widgets/popup.py index e661d3d293..9fc33ccbb8 100644 --- a/openpype/widgets/popup.py +++ b/openpype/widgets/popup.py @@ -1,16 +1,19 @@ import sys -import logging import contextlib from Qt import QtCore, QtWidgets -log = logging.getLogger(__name__) - class Popup(QtWidgets.QDialog): + """A Popup that moves itself to bottom right of screen on show event. - on_show = QtCore.Signal() + The UI contains a message label and a red highlighted button to "show" + or perform another custom action from this pop-up. 
+ + """ + + on_clicked = QtCore.Signal() def __init__(self, parent=None, *args, **kwargs): super(Popup, self).__init__(parent=parent, *args, **kwargs) @@ -19,32 +22,34 @@ class Popup(QtWidgets.QDialog): # Layout layout = QtWidgets.QHBoxLayout(self) layout.setContentsMargins(10, 5, 10, 10) + + # Increase spacing slightly for readability + layout.setSpacing(10) + message = QtWidgets.QLabel("") message.setStyleSheet(""" QLabel { font-size: 12px; } """) - show = QtWidgets.QPushButton("Show") - show.setSizePolicy(QtWidgets.QSizePolicy.Maximum, + button = QtWidgets.QPushButton("Show") + button.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum) - show.setStyleSheet("""QPushButton { background-color: #BB0000 }""") + button.setStyleSheet("""QPushButton { background-color: #BB0000 }""") layout.addWidget(message) - layout.addWidget(show) + layout.addWidget(button) - # Size + # Default size self.resize(400, 40) - geometry = self.calculate_window_geometry() - self.setGeometry(geometry) self.widgets = { "message": message, - "show": show, + "button": button, } # Signals - show.clicked.connect(self._on_show_clicked) + button.clicked.connect(self._on_clicked) # Set default title self.setWindowTitle("Popup") @@ -52,7 +57,10 @@ class Popup(QtWidgets.QDialog): def setMessage(self, message): self.widgets['message'].setText(message) - def _on_show_clicked(self): + def setButtonText(self, text): + self.widgets["button"].setText(text) + + def _on_clicked(self): """Callback for when the 'show' button is clicked. 
Raises the parent (if any) @@ -63,11 +71,19 @@ class Popup(QtWidgets.QDialog): self.close() # Trigger the signal - self.on_show.emit() + self.on_clicked.emit() if parent: parent.raise_() + def showEvent(self, event): + + # Position popup based on contents on show event + geo = self.calculate_window_geometry() + self.setGeometry(geo) + + return super(Popup, self).showEvent(event) + def calculate_window_geometry(self): """Respond to status changes @@ -104,45 +120,29 @@ class Popup(QtWidgets.QDialog): return QtCore.QRect(x, y, width, height) -class Popup2(Popup): +class PopupUpdateKeys(Popup): + """Popup with Update Keys checkbox (intended for Maya)""" - on_show = QtCore.Signal() + on_clicked_state = QtCore.Signal(bool) def __init__(self, parent=None, *args, **kwargs): Popup.__init__(self, parent=parent, *args, **kwargs) layout = self.layout() - # Add toggle + # Insert toggle for Update keys toggle = QtWidgets.QCheckBox("Update Keys") layout.insertWidget(1, toggle) self.widgets["toggle"] = toggle + self.on_clicked.connect(self.emit_click_with_state) + layout.insertStretch(1, 1) - # Update button text - fix = self.widgets["show"] - fix.setText("Fix") - - def calculate_window_geometry(self): - """Respond to status changes - - On creation, align window with screen bottom right. 
- - """ - parent_widget = self.parent() - - desktop = QtWidgets.QApplication.desktop() - if parent_widget: - screen = desktop.screenNumber(parent_widget) - else: - screen = desktop.screenNumber(desktop.cursor().pos()) - center_point = desktop.screenGeometry(screen).center() - - frame_geo = self.frameGeometry() - frame_geo.moveCenter(center_point) - - return frame_geo + def emit_click_with_state(self): + """Emit the on_clicked_state signal with the toggled state""" + checked = self.widgets["toggle"].isChecked() + self.on_clicked_state.emit(checked) @contextlib.contextmanager diff --git a/openpype/widgets/project_settings.py b/openpype/widgets/project_settings.py deleted file mode 100644 index 43ff9f2789..0000000000 --- a/openpype/widgets/project_settings.py +++ /dev/null @@ -1,494 +0,0 @@ -import os -import getpass -import platform - -from Qt import QtCore, QtGui, QtWidgets - -from avalon import style -import ftrack_api - - -class Project_name_getUI(QtWidgets.QWidget): - ''' - Project setting ui: here all the neceserry ui widgets are created - they are going to be used i later proces for dynamic linking of project - in list to project's attributes - ''' - - def __init__(self, parent=None): - super(Project_name_getUI, self).__init__(parent) - - self.platform = platform.system() - self.new_index = 0 - # get projects from ftrack - self.session = ftrack_api.Session() - self.projects_from_ft = self.session.query( - 'Project where status is active') - self.disks_from_ft = self.session.query('Disk') - self.schemas_from_ft = self.session.query('ProjectSchema') - self.projects = self._get_projects_ftrack() - - # define window geometry - self.setWindowTitle('Set project attributes') - self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - self.resize(550, 340) - self.setStyleSheet(style.load_stylesheet()) - - # define disk combobox widget - self.disks = self._get_all_disks() - self.disk_combobox_label = QtWidgets.QLabel('Destination storage:') - self.disk_combobox = 
QtWidgets.QComboBox() - - # define schema combobox widget - self.schemas = self._get_all_schemas() - self.schema_combobox_label = QtWidgets.QLabel('Project schema:') - self.schema_combobox = QtWidgets.QComboBox() - - # define fps widget - self.fps_label = QtWidgets.QLabel('Fps:') - self.fps_label.setAlignment( - QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) - self.fps = QtWidgets.QLineEdit() - - # define project dir widget - self.project_dir_label = QtWidgets.QLabel('Project dir:') - self.project_dir_label.setAlignment( - QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) - self.project_dir = QtWidgets.QLineEdit() - - self.project_path_label = QtWidgets.QLabel( - 'Project_path (if not then created):') - self.project_path_label.setAlignment( - QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter) - project_path_font = QtGui.QFont( - "Helvetica [Cronyx]", 12, QtGui.QFont.Bold) - self.project_path = QtWidgets.QLabel() - self.project_path.setObjectName('nom_plan_label') - self.project_path.setStyleSheet( - 'QtWidgets.QLabel#nom_plan_label {color: red}') - self.project_path.setAlignment( - QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter) - self.project_path.setFont(project_path_font) - - # define handles widget - self.handles_label = QtWidgets.QLabel('Handles:') - self.handles_label.setAlignment( - QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) - self.handles = QtWidgets.QLineEdit() - - # define resolution widget - self.resolution_w_label = QtWidgets.QLabel('W:') - self.resolution_w = QtWidgets.QLineEdit() - self.resolution_h_label = QtWidgets.QLabel('H:') - self.resolution_h = QtWidgets.QLineEdit() - - devider = QtWidgets.QFrame() - # devider.Shape(QFrame.HLine) - devider.setFrameShape(QtWidgets.QFrame.HLine) - devider.setFrameShadow(QtWidgets.QFrame.Sunken) - - self.generate_lines() - - # define push buttons - self.set_pushbutton = QtWidgets.QPushButton('Set project') - self.cancel_pushbutton = QtWidgets.QPushButton('Cancel') - - # definition of layouts - 
############################################ - action_layout = QtWidgets.QHBoxLayout() - action_layout.addWidget(self.set_pushbutton) - action_layout.addWidget(self.cancel_pushbutton) - - # schema property - schema_layout = QtWidgets.QGridLayout() - schema_layout.addWidget(self.schema_combobox, 0, 1) - schema_layout.addWidget(self.schema_combobox_label, 0, 0) - - # storage property - storage_layout = QtWidgets.QGridLayout() - storage_layout.addWidget(self.disk_combobox, 0, 1) - storage_layout.addWidget(self.disk_combobox_label, 0, 0) - - # fps property - fps_layout = QtWidgets.QGridLayout() - fps_layout.addWidget(self.fps, 1, 1) - fps_layout.addWidget(self.fps_label, 1, 0) - - # project dir property - project_dir_layout = QtWidgets.QGridLayout() - project_dir_layout.addWidget(self.project_dir, 1, 1) - project_dir_layout.addWidget(self.project_dir_label, 1, 0) - - # project path property - project_path_layout = QtWidgets.QGridLayout() - spacer_1_item = QtWidgets.QSpacerItem(10, 10) - project_path_layout.addItem(spacer_1_item, 0, 1) - project_path_layout.addWidget(self.project_path_label, 1, 1) - project_path_layout.addWidget(self.project_path, 2, 1) - spacer_2_item = QtWidgets.QSpacerItem(20, 20) - project_path_layout.addItem(spacer_2_item, 3, 1) - - # handles property - handles_layout = QtWidgets.QGridLayout() - handles_layout.addWidget(self.handles, 1, 1) - handles_layout.addWidget(self.handles_label, 1, 0) - - # resolution property - resolution_layout = QtWidgets.QGridLayout() - resolution_layout.addWidget(self.resolution_w_label, 1, 1) - resolution_layout.addWidget(self.resolution_w, 2, 1) - resolution_layout.addWidget(self.resolution_h_label, 1, 2) - resolution_layout.addWidget(self.resolution_h, 2, 2) - - # form project property layout - p_layout = QtWidgets.QGridLayout() - p_layout.addLayout(storage_layout, 1, 0) - p_layout.addLayout(schema_layout, 2, 0) - p_layout.addLayout(project_dir_layout, 3, 0) - p_layout.addLayout(fps_layout, 4, 0) - 
p_layout.addLayout(handles_layout, 5, 0) - p_layout.addLayout(resolution_layout, 6, 0) - p_layout.addWidget(devider, 7, 0) - spacer_item = QtWidgets.QSpacerItem( - 150, - 40, - QtWidgets.QSizePolicy.Minimum, - QtWidgets.QSizePolicy.Expanding - ) - p_layout.addItem(spacer_item, 8, 0) - - # form with list to one layout with project property - list_layout = QtWidgets.QGridLayout() - list_layout.addLayout(p_layout, 1, 0) - list_layout.addWidget(self.listWidget, 1, 1) - - root_layout = QtWidgets.QVBoxLayout() - root_layout.addLayout(project_path_layout) - root_layout.addWidget(devider) - root_layout.addLayout(list_layout) - root_layout.addLayout(action_layout) - - self.setLayout(root_layout) - - def generate_lines(self): - ''' - Will generate lines of project list - ''' - - self.listWidget = QtWidgets.QListWidget() - for self.index, p in enumerate(self.projects): - item = QtWidgets.QListWidgetItem("{full_name}".format(**p)) - # item.setSelected(False) - self.listWidget.addItem(item) - print(self.listWidget.indexFromItem(item)) - # self.listWidget.setCurrentItem(self.listWidget.itemFromIndex(1)) - - # add options to schemas widget - self.schema_combobox.addItems(self.schemas) - - # add options to disk widget - self.disk_combobox.addItems(self.disks) - - # populate content of project info widgets - self.projects[1] = self._fill_project_attributes_widgets(p, None) - - def _fill_project_attributes_widgets(self, p=None, index=None): - ''' - will generate actual informations wich are saved on ftrack - ''' - - if index is None: - self.new_index = 1 - - if not p: - pass - # change schema selection - for i, schema in enumerate(self.schemas): - if p['project_schema']['name'] in schema: - break - self.schema_combobox.setCurrentIndex(i) - - disk_name, disk_path = self._build_disk_path() - for i, disk in enumerate(self.disks): - if disk_name in disk: - break - # change disk selection - self.disk_combobox.setCurrentIndex(i) - - # change project_dir selection - if 
"{root}".format(**p): - self.project_dir.setPlaceholderText("{root}".format(**p)) - else: - print("not root so it was replaced with name") - self.project_dir.setPlaceholderText("{name}".format(**p)) - p['root'] = p['name'] - - # set project path to show where it will be created - self.project_path.setText( - os.path.join(self.disks[i].split(' ')[-1], - self.project_dir.text())) - - # change fps selection - self.fps.setPlaceholderText("{custom_attributes[fps]}".format(**p)) - - # change handles selection - self.handles.setPlaceholderText( - "{custom_attributes[handles]}".format(**p)) - - # change resolution selection - self.resolution_w.setPlaceholderText( - "{custom_attributes[resolution_width]}".format(**p)) - self.resolution_h.setPlaceholderText( - "{custom_attributes[resolution_height]}".format(**p)) - - self.update_disk() - - return p - - def fix_project_path_literals(self, dir): - return dir.replace(' ', '_').lower() - - def update_disk(self): - disk = self.disk_combobox.currentText().split(' ')[-1] - - dir = self.project_dir.text() - if not dir: - dir = "{root}".format(**self.projects[self.new_index]) - self.projects[self.new_index]['project_path'] = os.path.normpath( - self.fix_project_path_literals(os.path.join(disk, dir))) - else: - self.projects[self.new_index]['project_path'] = os.path.normpath( - self.fix_project_path_literals(os.path.join(disk, dir))) - - self.projects[self.new_index]['disk'] = self.disks_from_ft[ - self.disk_combobox.currentIndex()] - self.projects[self.new_index]['disk_id'] = self.projects[ - self.new_index]['disk']['id'] - - # set project path to show where it will be created - self.project_path.setText( - self.projects[self.new_index]['project_path']) - - def update_resolution(self): - # update all values in resolution - if self.resolution_w.text(): - self.projects[self.new_index]['custom_attributes'][ - "resolutionWidth"] = int(self.resolution_w.text()) - if self.resolution_h.text(): - 
self.projects[self.new_index]['custom_attributes'][ - "resolutionHeight"] = int(self.resolution_h.text()) - - def _update_attributes_by_list_selection(self): - # generate actual selection index - self.new_index = self.listWidget.currentRow() - self.project_dir.setText('') - self.fps.setText('') - self.handles.setText('') - self.resolution_w.setText('') - self.resolution_h.setText('') - - # update project properities widgets and write changes - # into project dictionaries - self.projects[self.new_index] = self._fill_project_attributes_widgets( - self.projects[self.new_index], self.new_index) - - self.update_disk() - - def _build_disk_path(self): - if self.platform == "Windows": - print(self.projects[self.index].keys()) - print(self.projects[self.new_index]['disk']) - return self.projects[self.new_index]['disk'][ - 'name'], self.projects[self.new_index]['disk']['windows'] - else: - return self.projects[self.new_index]['disk'][ - 'name'], self.projects[self.new_index]['disk']['unix'] - - def _get_all_schemas(self): - schemas_list = [] - - for s in self.schemas_from_ft: - # print d.keys() - # if 'Pokus' in s['name']: - # continue - schemas_list.append('{}'.format(s['name'])) - print("\nschemas in ftrack: {}\n".format(schemas_list)) - return schemas_list - - def _get_all_disks(self): - disks_list = [] - for d in self.disks_from_ft: - # print d.keys() - if self.platform == "Windows": - if 'Local drive' in d['name']: - d['windows'] = os.path.join(d['windows'], - os.getenv('USERNAME') - or os.getenv('USER') - or os.getenv('LOGNAME')) - disks_list.append('"{}" at {}'.format(d['name'], d['windows'])) - else: - if 'Local drive' in d['name']: - d['unix'] = os.path.join(d['unix'], getpass.getuser()) - disks_list.append('"{}" at {}'.format(d['name'], d['unix'])) - return disks_list - - def _get_projects_ftrack(self): - - projects_lst = [] - for project in self.projects_from_ft: - # print project.keys() - projects_dict = {} - - for k in project.keys(): - ''' # TODO: delete this 
in production version ''' - - # if 'test' not in project['name']: - # continue - - # print '{}: {}\n'.format(k, project[k]) - - if '_link' == k: - # print project[k] - content = project[k] - for kc in content[0].keys(): - if content[0]['name']: - content[0][kc] = content[0][kc].encode( - 'ascii', 'ignore').decode('ascii') - print('{}: {}\n'.format(kc, content[0][kc])) - projects_dict[k] = content - print(project[k]) - print(projects_dict[k]) - elif 'root' == k: - print('{}: {}\n'.format(k, project[k])) - projects_dict[k] = project[k] - elif 'disk' == k: - print('{}: {}\n'.format(k, project[k])) - projects_dict[k] = project[k] - elif 'name' == k: - print('{}: {}\n'.format(k, project[k])) - projects_dict[k] = project[k].encode( - 'ascii', 'ignore').decode('ascii') - elif 'disk_id' == k: - print('{}: {}\n'.format(k, project[k])) - projects_dict[k] = project[k] - elif 'id' == k: - print('{}: {}\n'.format(k, project[k])) - projects_dict[k] = project[k] - elif 'full_name' == k: - print('{}: {}\n'.format(k, project[k])) - projects_dict[k] = project[k].encode( - 'ascii', 'ignore').decode('ascii') - elif 'project_schema_id' == k: - print('{}: {}\n'.format(k, project[k])) - projects_dict[k] = project[k] - elif 'project_schema' == k: - print('{}: {}\n'.format(k, project[k])) - projects_dict[k] = project[k] - elif 'custom_attributes' == k: - print('{}: {}\n'.format(k, project[k])) - projects_dict[k] = project[k] - else: - pass - - if projects_dict: - projects_lst.append(projects_dict) - - return projects_lst - - -class Project_name_get(Project_name_getUI): - def __init__(self, parent=None): - super(Project_name_get, self).__init__(parent) - # self.input_project_name.textChanged.connect(self.input_project_name.placeholderText) - - self.set_pushbutton.clicked.connect(lambda: self.execute()) - self.cancel_pushbutton.clicked.connect(self.close) - - self.listWidget.itemSelectionChanged.connect( - self._update_attributes_by_list_selection) - 
self.disk_combobox.currentIndexChanged.connect(self.update_disk) - self.schema_combobox.currentIndexChanged.connect(self.update_schema) - self.project_dir.textChanged.connect(self.update_disk) - self.fps.textChanged.connect(self.update_fps) - self.handles.textChanged.connect(self.update_handles) - self.resolution_w.textChanged.connect(self.update_resolution) - self.resolution_h.textChanged.connect(self.update_resolution) - - def update_handles(self): - self.projects[self.new_index]['custom_attributes']['handles'] = int( - self.handles.text()) - - def update_fps(self): - self.projects[self.new_index]['custom_attributes']['fps'] = int( - self.fps.text()) - - def update_schema(self): - self.projects[self.new_index]['project_schema'] = self.schemas_from_ft[ - self.schema_combobox.currentIndex()] - self.projects[self.new_index]['project_schema_id'] = self.projects[ - self.new_index]['project_schema']['id'] - - def execute(self): - # import ft_utils - # import hiero - # get the project which has been selected - print("well and what") - # set the project as context and create entity - # entity is task created with the name of user which is creating it - - # get the project_path and create dir if there is not any - print(self.projects[self.new_index]['project_path'].replace( - self.disk_combobox.currentText().split(' ')[-1].lower(), '')) - - # get the schema and recreate a starting project regarding the selection - # set_hiero_template(project_schema=self.projects[self.new_index][ - # 'project_schema']['name']) - - # set all project properities - # project = hiero.core.Project() - # project.setFramerate( - # int(self.projects[self.new_index]['custom_attributes']['fps'])) - # project.projectRoot() - # print 'handles: {}'.format(self.projects[self.new_index]['custom_attributes']['handles']) - # print 'resolution_width: {}'.format(self.projects[self.new_index]['custom_attributes']["resolutionWidth"]) - # print 'resolution_width: 
{}'.format(self.projects[self.new_index]['custom_attributes']["resolutionHeight"]) - # print "<< {}".format(self.projects[self.new_index]) - - # get path for the hrox file - # root = context.data('ftrackData')['Project']['root'] - # hrox_script_path = ft_utils.getPathsYaml(taskid, templateList=templates, root=root) - - # save the hrox into the correct path - self.session.commit() - self.close() - -# -# def set_hiero_template(project_schema=None): -# import hiero -# hiero.core.closeAllProjects() -# hiero_plugin_path = [ -# p for p in os.environ['HIERO_PLUGIN_PATH'].split(';') -# if 'hiero_plugin_path' in p -# ][0] -# path = os.path.normpath( -# os.path.join(hiero_plugin_path, 'Templates', project_schema + '.hrox')) -# print('---> path to template: {}'.format(path)) -# return hiero.core.openProject(path) - - -# def set_out_ft_session(): -# session = ftrack_api.Session() -# projects_to_ft = session.query('Project where status is active') - - -def main(): - import sys - app = QtWidgets.QApplication(sys.argv) - panel = Project_name_get() - panel.show() - - sys.exit(app.exec_()) - - -if __name__ == "__main__": - main() diff --git a/pyproject.toml b/pyproject.toml index 8d9eb8b050..e2e0409562 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.9.3" # OpenPype +version = "3.10.0-nightly.4" # OpenPype description = "Open VFX and Animation pipeline with support." 
authors = ["OpenPype Team "] license = "MIT License" diff --git a/repos/avalon-core b/repos/avalon-core deleted file mode 160000 index 2200525320..0000000000 --- a/repos/avalon-core +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 2200525320923f17df3b4c3b19ebd737c8a7e625 diff --git a/schema/session-3.0.json b/schema/session-3.0.json new file mode 100644 index 0000000000..9f785939e4 --- /dev/null +++ b/schema/session-3.0.json @@ -0,0 +1,81 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "openpype:session-3.0", + "description": "The Avalon environment", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "AVALON_PROJECT", + "AVALON_ASSET" + ], + + "properties": { + "AVALON_PROJECTS": { + "description": "Absolute path to root of project directories", + "type": "string", + "example": "/nas/projects" + }, + "AVALON_PROJECT": { + "description": "Name of project", + "type": "string", + "pattern": "^\\w*$", + "example": "Hulk" + }, + "AVALON_ASSET": { + "description": "Name of asset", + "type": "string", + "pattern": "^\\w*$", + "example": "Bruce" + }, + "AVALON_TASK": { + "description": "Name of task", + "type": "string", + "pattern": "^\\w*$", + "example": "modeling" + }, + "AVALON_APP": { + "description": "Name of host", + "type": "string", + "pattern": "^\\w*$", + "example": "maya2016" + }, + "AVALON_DB": { + "description": "Name of database", + "type": "string", + "pattern": "^\\w*$", + "example": "avalon", + "default": "avalon" + }, + "AVALON_LABEL": { + "description": "Nice name of Avalon, used in e.g. 
graphical user interfaces", + "type": "string", + "example": "Mindbender", + "default": "Avalon" + }, + "AVALON_TIMEOUT": { + "description": "Wherever there is a need for a timeout, this is the default value.", + "type": "string", + "pattern": "^[0-9]*$", + "default": "1000", + "example": "1000" + }, + "AVALON_INSTANCE_ID": { + "description": "Unique identifier for instances in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.instance", + "example": "avalon.instance" + }, + "AVALON_CONTAINER_ID": { + "description": "Unique identifier for a loaded representation in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.container", + "example": "avalon.container" + } + } +} diff --git a/setup.py b/setup.py index bf42602b52..8b5a545c16 100644 --- a/setup.py +++ b/setup.py @@ -106,7 +106,9 @@ install_requires = [ "dns", # Python defaults (cx_Freeze skip them by default) "dbm", - "sqlite3" + "sqlite3", + "dataclasses", + "timeit" ] includes = [] @@ -123,7 +125,6 @@ bin_includes = [ include_files = [ "igniter", "openpype", - "repos", "schema", "LICENSE", "README.md" diff --git a/start.py b/start.py index f8a01dd9ab..ace33ab92a 100644 --- a/start.py +++ b/start.py @@ -191,6 +191,51 @@ else: if os.getenv("OPENPYPE_HEADLESS_MODE") != "1": os.environ.pop("OPENPYPE_HEADLESS_MODE", None) +# Enabled logging debug mode when "--debug" is passed +if "--verbose" in sys.argv: + expected_values = ( + "Expected: notset, debug, info, warning, error, critical" + " or integer [0-50]." + ) + idx = sys.argv.index("--verbose") + sys.argv.pop(idx) + if idx < len(sys.argv): + value = sys.argv.pop(idx) + else: + raise RuntimeError(( + "Expect value after \"--verbose\" argument. 
{}" + ).format(expected_values)) + + log_level = None + low_value = value.lower() + if low_value.isdigit(): + log_level = int(low_value) + elif low_value == "notset": + log_level = 0 + elif low_value == "debug": + log_level = 10 + elif low_value == "info": + log_level = 20 + elif low_value == "warning": + log_level = 30 + elif low_value == "error": + log_level = 40 + elif low_value == "critical": + log_level = 50 + + if log_level is None: + raise RuntimeError(( + "Unexpected value after \"--verbose\" argument \"{}\". {}" + ).format(value, expected_values)) + + os.environ["OPENPYPE_LOG_LEVEL"] = str(log_level) + +# Enable debug mode, may affect log level if log level is not defined +if "--debug" in sys.argv: + sys.argv.remove("--debug") + os.environ["OPENPYPE_DEBUG"] = "1" + + import igniter # noqa: E402 from igniter import BootstrapRepos # noqa: E402 from igniter.tools import ( @@ -221,18 +266,9 @@ def set_openpype_global_environments() -> None: """Set global OpenPype's environments.""" import acre - try: - from openpype.settings import get_general_environments + from openpype.settings import get_general_environments - general_env = get_general_environments() - - except Exception: - # Backwards compatibility for OpenPype versions where - # `get_general_environments` does not exists yet - from openpype.settings import get_environments - - all_env = get_environments() - general_env = all_env["global"] + general_env = get_general_environments() merged_env = acre.merge( acre.parse(general_env), @@ -320,6 +356,7 @@ def run_disk_mapping_commands(settings): destination)) raise + def set_avalon_environments(): """Set avalon specific environments. @@ -327,28 +364,12 @@ def set_avalon_environments(): before avalon module is imported because avalon works with globals set with environment variables. 
""" - from openpype import PACKAGE_DIR - # Path to OpenPype's schema - schema_path = os.path.join( - os.path.dirname(PACKAGE_DIR), - "schema" - ) - # Avalon mongo URL - avalon_mongo_url = ( - os.environ.get("AVALON_MONGO") - or os.environ["OPENPYPE_MONGO"] - ) avalon_db = os.environ.get("AVALON_DB") or "avalon" # for tests os.environ.update({ - # Mongo url (use same as OpenPype has) - "AVALON_MONGO": avalon_mongo_url, - - "AVALON_SCHEMA": schema_path, # Mongo DB name where avalon docs are stored "AVALON_DB": avalon_db, # Name of config - "AVALON_CONFIG": "openpype", "AVALON_LABEL": "OpenPype" }) @@ -365,18 +386,6 @@ def set_modules_environments(): modules_manager = ModulesManager() module_envs = modules_manager.collect_global_environments() - publish_plugin_dirs = modules_manager.collect_plugin_paths()["publish"] - - # Set pyblish plugins paths if any module want to register them - if publish_plugin_dirs: - publish_paths_str = os.environ.get("PYBLISHPLUGINPATH") or "" - publish_paths = publish_paths_str.split(os.pathsep) - _publish_paths = { - os.path.normpath(path) for path in publish_paths if path - } - for path in publish_plugin_dirs: - _publish_paths.add(os.path.normpath(path)) - module_envs["PYBLISHPLUGINPATH"] = os.pathsep.join(_publish_paths) # Merge environments with current environments and update values if module_envs: @@ -838,17 +847,15 @@ def _bootstrap_from_code(use_version, use_staging): version_path = Path(_openpype_root) os.environ["OPENPYPE_REPOS_ROOT"] = _openpype_root - repos = os.listdir(os.path.join(_openpype_root, "repos")) - repos = [os.path.join(_openpype_root, "repos", repo) for repo in repos] - # add self to python paths - repos.insert(0, _openpype_root) - for repo in repos: - sys.path.insert(0, repo) + # add self to sys.path of current process + # NOTE: this seems to be duplicate of 'add_paths_from_directory' + sys.path.insert(0, _openpype_root) # add venv 'site-packages' to PYTHONPATH python_path = os.getenv("PYTHONPATH", "") split_paths 
= python_path.split(os.pathsep) - # Add repos as first in list - split_paths = repos + split_paths + # add self to python paths + split_paths.insert(0, _openpype_root) + # last one should be venv site-packages # this is slightly convoluted as we can get here from frozen code too # in case when we are running without any version installed. @@ -878,6 +885,56 @@ def _bootstrap_from_code(use_version, use_staging): return version_path +def _boot_validate_versions(use_version, local_version): + _print(f">>> Validating version [ {use_version} ]") + openpype_versions = bootstrap.find_openpype(include_zips=True, + staging=True) + openpype_versions += bootstrap.find_openpype(include_zips=True, + staging=False) + v: OpenPypeVersion + found = [v for v in openpype_versions if str(v) == use_version] + if not found: + _print(f"!!! Version [ {use_version} ] not found.") + list_versions(openpype_versions, local_version) + sys.exit(1) + + # print result + version_path = bootstrap.get_version_path_from_list( + use_version, openpype_versions + ) + valid, message = bootstrap.validate_openpype_version(version_path) + _print("{}{}".format(">>> " if valid else "!!! 
", message)) + + +def _boot_print_versions(use_staging, local_version, openpype_root): + if not use_staging: + _print("--- This will list only non-staging versions detected.") + _print(" To see staging versions, use --use-staging argument.") + else: + _print("--- This will list only staging versions detected.") + _print(" To see other version, omit --use-staging argument.") + + openpype_versions = bootstrap.find_openpype(include_zips=True, + staging=use_staging) + if getattr(sys, 'frozen', False): + local_version = bootstrap.get_version(Path(openpype_root)) + else: + local_version = OpenPypeVersion.get_installed_version_str() + + list_versions(openpype_versions, local_version) + + +def _boot_handle_missing_version(local_version, use_staging, message): + _print(message) + if os.environ.get("OPENPYPE_HEADLESS_MODE") == "1": + openpype_versions = bootstrap.find_openpype( + include_zips=True, staging=use_staging + ) + list_versions(openpype_versions, local_version) + else: + igniter.show_message_dialog("Version not found", message) + + def boot(): """Bootstrap OpenPype.""" @@ -927,6 +984,16 @@ def boot(): _print(">>> run disk mapping command ...") run_disk_mapping_commands(global_settings) + # Logging to server enabled/disabled + log_to_server = global_settings.get("log_to_server", True) + if log_to_server: + os.environ["OPENPYPE_LOG_TO_SERVER"] = "1" + log_to_server_msg = "ON" + else: + os.environ.pop("OPENPYPE_LOG_TO_SERVER", None) + log_to_server_msg = "OFF" + _print(f">>> Logging to server is turned {log_to_server_msg}") + # Get openpype path from database and set it to environment so openpype can # find its versions there and bootstrap them. 
openpype_path = get_openpype_path_from_settings(global_settings) @@ -937,30 +1004,7 @@ def boot(): local_version = OpenPypeVersion.get_installed_version_str() if "validate" in commands: - _print(f">>> Validating version [ {use_version} ]") - openpype_versions = bootstrap.find_openpype(include_zips=True, - staging=True) - openpype_versions += bootstrap.find_openpype(include_zips=True, - staging=False) - v: OpenPypeVersion - found = [v for v in openpype_versions if str(v) == use_version] - if not found: - _print(f"!!! Version [ {use_version} ] not found.") - list_versions(openpype_versions, local_version) - sys.exit(1) - - # print result - result = bootstrap.validate_openpype_version( - bootstrap.get_version_path_from_list( - use_version, openpype_versions)) - - _print("{}{}".format( - ">>> " if result[0] else "!!! ", - bootstrap.validate_openpype_version( - bootstrap.get_version_path_from_list( - use_version, openpype_versions) - )[1]) - ) + _boot_validate_versions(use_version, local_version) sys.exit(1) if not openpype_path: @@ -970,21 +1014,7 @@ def boot(): os.environ["OPENPYPE_PATH"] = openpype_path if "print_versions" in commands: - if not use_staging: - _print("--- This will list only non-staging versions detected.") - _print(" To see staging versions, use --use-staging argument.") - else: - _print("--- This will list only staging versions detected.") - _print(" To see other version, omit --use-staging argument.") - _openpype_root = OPENPYPE_ROOT - openpype_versions = bootstrap.find_openpype(include_zips=True, - staging=use_staging) - if getattr(sys, 'frozen', False): - local_version = bootstrap.get_version(Path(_openpype_root)) - else: - local_version = OpenPypeVersion.get_installed_version_str() - - list_versions(openpype_versions, local_version) + _boot_print_versions(use_staging, local_version, OPENPYPE_ROOT) sys.exit(1) # ------------------------------------------------------------------------ @@ -997,12 +1027,7 @@ def boot(): try: version_path = 
_find_frozen_openpype(use_version, use_staging) except OpenPypeVersionNotFound as exc: - message = str(exc) - _print(message) - if os.environ.get("OPENPYPE_HEADLESS_MODE") == "1": - list_versions(openpype_versions, local_version) - else: - igniter.show_message_dialog("Version not found", message) + _boot_handle_missing_version(local_version, use_staging, str(exc)) sys.exit(1) except RuntimeError as e: @@ -1021,12 +1046,7 @@ def boot(): version_path = _bootstrap_from_code(use_version, use_staging) except OpenPypeVersionNotFound as exc: - message = str(exc) - _print(message) - if os.environ.get("OPENPYPE_HEADLESS_MODE") == "1": - list_versions(openpype_versions, local_version) - else: - igniter.show_message_dialog("Version not found", message) + _boot_handle_missing_version(local_version, use_staging, str(exc)) sys.exit(1) # set this to point either to `python` from venv in case of live code diff --git a/tests/integration/conftest.py b/tests/conftest.py similarity index 100% rename from tests/integration/conftest.py rename to tests/conftest.py diff --git a/tests/lib/assert_classes.py b/tests/lib/assert_classes.py index 7f4d8efc10..9a94f89fd0 100644 --- a/tests/lib/assert_classes.py +++ b/tests/lib/assert_classes.py @@ -24,13 +24,14 @@ class DBAssert: else: args[key] = val + no_of_docs = dbcon.count_documents(args) + + msg = None args.pop("type") detail_str = " " if args: detail_str = " with '{}'".format(args) - msg = None - no_of_docs = dbcon.count_documents(args) if expected != no_of_docs: msg = "Not expected no of '{}'{}."\ "Expected {}, found {}".format(queried_type, diff --git a/tests/lib/testing_classes.py b/tests/lib/testing_classes.py index 0a9da1aca8..f991f02227 100644 --- a/tests/lib/testing_classes.py +++ b/tests/lib/testing_classes.py @@ -153,7 +153,7 @@ class ModuleUnitTest(BaseTest): Database prepared from dumps with 'db_setup' fixture. 
""" - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB dbcon = AvalonMongoDB() dbcon.Session["AVALON_PROJECT"] = self.TEST_PROJECT_NAME yield dbcon @@ -273,8 +273,6 @@ class PublishTest(ModuleUnitTest): ) os.environ["AVALON_SCHEMA"] = schema_path - import openpype - openpype.install() os.environ["OPENPYPE_EXECUTABLE"] = sys.executable from openpype.lib import ApplicationManager diff --git a/tests/unit/igniter/test_bootstrap_repos.py b/tests/unit/igniter/test_bootstrap_repos.py index 65cd5a2399..10278c4928 100644 --- a/tests/unit/igniter/test_bootstrap_repos.py +++ b/tests/unit/igniter/test_bootstrap_repos.py @@ -152,8 +152,6 @@ def test_install_live_repos(fix_bootstrap, printer, monkeypatch, pytestconfig): openpype_version = fix_bootstrap.create_version_from_live_code() sep = os.path.sep expected_paths = [ - f"{openpype_version.path}{sep}repos{sep}avalon-core", - f"{openpype_version.path}{sep}repos{sep}avalon-unreal-integration", f"{openpype_version.path}" ] printer("testing zip creation") diff --git a/tests/unit/openpype/modules/sync_server/test_module_api.py b/tests/unit/openpype/modules/sync_server/test_module_api.py new file mode 100644 index 0000000000..a484977758 --- /dev/null +++ b/tests/unit/openpype/modules/sync_server/test_module_api.py @@ -0,0 +1,64 @@ +"""Test file for Sync Server, tests API methods, currently for integrate_new + + File: + creates temporary directory and downloads .zip file from GDrive + unzips .zip file + uses content of .zip file (MongoDB's dumps) to import to new databases + with use of 'monkeypatch_session' modifies required env vars + temporarily + runs battery of tests checking that site operation for Sync Server + module are working + removes temporary folder + removes temporary databases (?) 
+""" +import pytest + +from tests.lib.testing_classes import ModuleUnitTest + + +class TestModuleApi(ModuleUnitTest): + + REPRESENTATION_ID = "60e578d0c987036c6a7b741d" + + TEST_FILES = [("1eCwPljuJeOI8A3aisfOIBKKjcmIycTEt", + "test_site_operations.zip", '')] + + @pytest.fixture(scope="module") + def setup_sync_server_module(self, dbcon): + """Get sync_server_module from ModulesManager""" + from openpype.modules import ModulesManager + + manager = ModulesManager() + sync_server = manager.modules_by_name["sync_server"] + yield sync_server + + def test_get_alt_site_pairs(self, setup_sync_server_module): + conf_sites = {"SFTP": {"alternative_sites": ["studio"]}, + "studio2": {"alternative_sites": ["studio"]}} + + ret = setup_sync_server_module._get_alt_site_pairs(conf_sites) + expected = {"SFTP": {"studio", "studio2"}, + "studio": {"SFTP", "studio2"}, + "studio2": {"studio", "SFTP"}} + assert ret == expected, "Not matching result" + + def test_get_alt_site_pairs_deep(self, setup_sync_server_module): + conf_sites = {"A": {"alternative_sites": ["C"]}, + "B": {"alternative_sites": ["C"]}, + "C": {"alternative_sites": ["D"]}, + "D": {"alternative_sites": ["A"]}, + "F": {"alternative_sites": ["G"]}, + "G": {"alternative_sites": ["F"]}, + } + + ret = setup_sync_server_module._get_alt_site_pairs(conf_sites) + expected = {"A": {"B", "C", "D"}, + "B": {"A", "C", "D"}, + "C": {"A", "B", "D"}, + "D": {"A", "B", "C"}, + "F": {"G"}, + "G": {"F"}} + assert ret == expected, "Not matching result" + + +test_case = TestModuleApi() diff --git a/website/docs/admin_openpype_commands.md b/website/docs/admin_openpype_commands.md index 74cb895ac9..53b4799d6e 100644 --- a/website/docs/admin_openpype_commands.md +++ b/website/docs/admin_openpype_commands.md @@ -24,7 +24,11 @@ openpype_console --use-version=3.0.0-foo+bar `--list-versions [--use-staging]` - to list available versions. 
-`--validate-version` to validate integrity of given version +`--validate-version` - to validate integrity of given version + +`--verbose` `` - change log verbose level of OpenPype loggers + +`--debug` - set debug flag affects logging For more information [see here](admin_use.md#run-openpype). @@ -47,13 +51,9 @@ For more information [see here](admin_use.md#run-openpype). --- ### `tray` arguments {#tray-arguments} -| Argument | Description | -| --- | --- | -| `--debug` | print verbose information useful for debugging (works with `openpype_console`) | -To launch Tray with debugging information: ```shell -openpype_console tray --debug +openpype_console tray ``` --- ### `launch` arguments {#eventserver-arguments} @@ -62,7 +62,6 @@ option to specify them. | Argument | Description | | --- | --- | -| `--debug` | print debug info | | `--ftrack-url` | URL to ftrack server (can be set with `FTRACK_SERVER`) | | `--ftrack-user` |user name to log in to ftrack (can be set with `FTRACK_API_USER`) | | `--ftrack-api-key` | ftrack api key (can be set with `FTRACK_API_KEY`) | @@ -98,12 +97,16 @@ pype launch --app python --project my_project --asset my_asset --task my_task --- ### `publish` arguments {#publish-arguments} +Run publishing based on metadata passed in json file e.g. on farm. + | Argument | Description | | --- | --- | -| `--debug` | print more verbose information | +| `--targets` | define publishing targets (e.g. 
"farm") | +| `--gui` (`-g`) | Show publishing | +| Positional argument | Path to metadata json file | ```shell -pype publish +openpype publish --targets farm ``` --- diff --git a/website/docs/admin_settings_project_anatomy.md b/website/docs/admin_settings_project_anatomy.md index 98003dc381..b98819cd8a 100644 --- a/website/docs/admin_settings_project_anatomy.md +++ b/website/docs/admin_settings_project_anatomy.md @@ -59,7 +59,7 @@ We have a few required anatomy templates for OpenPype to work properly, however | `asset` | Name of asset or shot | | `task[name]` | Name of task | | `task[type]` | Type of task | -| `task[short]` | Shortname of task | +| `task[short]` | Short name of task type (eg. 'Modeling' > 'mdl') | | `parent` | Name of hierarchical parent | | `version` | Version number | | `subset` | Subset name | @@ -105,5 +105,8 @@ We have a few required anatomy templates for OpenPype to work properly, however ## Task Types +Current state of default Task descriptors. + +![tasks](assets/settings/anatomy_tasks.png) ## Colour Management and Formats \ No newline at end of file diff --git a/website/docs/admin_use.md b/website/docs/admin_use.md index 178241ad19..f84905c486 100644 --- a/website/docs/admin_use.md +++ b/website/docs/admin_use.md @@ -69,6 +69,22 @@ stored in `checksums` file. Add `--headless` to run OpenPype without graphical UI (useful on server or on automated tasks, etc.) ::: +`--verbose` `` - change log verbose level of OpenPype loggers. + +Level value can be integer in range `0-50` or one of enum strings `"notset" (0)`, `"debug" (10)`, `"info" (20)`, `"warning" (30)`, `"error" (40)`, `"critical" (50)`. Value is stored to `OPENPYPE_LOG_LEVEL` environment variable for next processes. + +```shell +openpype_console --verbose debug +``` + +`--debug` - set debug flag affects logging + +Enable debug flag for OpenPype process. Change value of environment variable `OPENPYPE_DEBUG` to `"1"`. At this moment affects only OpenPype loggers. 
Argument `--verbose` or environment variable `OPENPYPE_LOG_LEVEL` are used in preference to affect log level. + +```shell +openpype_console --debug +``` + ### Details When you run OpenPype from executable, few check are made: diff --git a/website/docs/artist_hosts_hiero.md b/website/docs/artist_hosts_hiero.md index f516c3a6e0..dc6f1696e7 100644 --- a/website/docs/artist_hosts_hiero.md +++ b/website/docs/artist_hosts_hiero.md @@ -94,6 +94,8 @@ This tool will set any defined colorspace definition from OpenPype `Settings / P With OpenPype, you can use Hiero/NKS as a starting point for creating a project's **shots** as *assets* from timeline clips with its *hierarchycal parents* like **episodes**, **sequences**, **folders**, and its child **tasks**. Most importantly it will create **versions** of plate *subsets*, with or without **reference video**. Publishig is naturally creating clip's **thumbnails** and assigns it to shot *asset*. Hiero is also publishing **audio** *subset* and various **soft-effects** either as retiming component as part of published plates or **color-tranformations**, that will be evailable later on for compositor artists to use either as *viewport input-process* or *loaded nodes* in graph editor.



+ + ### Preparing timeline for conversion to instances Because we don't support on-fly data conversion so in case of working with raw camera sources or some other formats which need to be converted for 2D/3D work. We suggest to convert those before and reconform the timeline. Before any clips in timeline could be converted to publishable instances we recommend following. 1. Merge all tracks which supposed to be one and they are multiply only because of editor's style @@ -191,3 +193,12 @@ If you wish to change any individual properties of the shot then you are able to + +### Publishing Effects from Hiero to Nuke +This video shows a way to publish shot look as effect from Hiero to Nuke. + + + +### Assembling edit from published shot versions + + diff --git a/website/docs/artist_hosts_nuke_tut.md b/website/docs/artist_hosts_nuke_tut.md index eefb213dd2..296fdf44d5 100644 --- a/website/docs/artist_hosts_nuke_tut.md +++ b/website/docs/artist_hosts_nuke_tut.md @@ -89,6 +89,8 @@ This menu item will set correct Colorspace definitions for you. All has to be co - set preview LUT to your viewers - set correct colorspace to all discovered Read nodes (following expression set in settings) +See [Nuke Color Management](artist_hosts_nuke_tut.md#nuke-color-management) +
@@ -144,6 +146,8 @@ This tool will append all available subsets into an actual node graph. It will l This QuickStart is short introduction to what OpenPype can do for you. It attempts to make an overview for compositing artists, and simplifies processes that are better described in specific parts of the documentation. + + ### Launch Nuke - Shot and Task Context OpenPype has to know what shot and task you are working on. You need to run Nuke in context of the task, using Ftrack Action or OpenPype Launcher to select the task and run Nuke. @@ -226,6 +230,11 @@ This will create a Group with a Write node inside. You can configure write node parameters in **Studio Settings → Project → Anatomy → Color Management and Output Formats → Nuke → Nodes** ::: +### Create Prerender Node +Creating Prerender is very similar to creating OpenPype managed Write node. + + + #### What Nuke Publish Does From Artist perspective, Nuke publish gathers all the stuff found in the Nuke script with Publish checkbox set to on, exports stuff and raises the Nuke script (workfile) version. @@ -315,6 +324,8 @@ Main disadvantage of this approach is that you can render only one version of yo When making quick farm publishes, like making two versions with different color correction, care must be taken to let the first job (first version) completely finish before the second version starts rendering. + + ### Managing Versions ![Versionless](assets/nuke_tut/nuke_ManageVersion.png) @@ -323,15 +334,30 @@ OpenPype checks all the assets loaded to Nuke on script open. All out of date as Use Manage to switch versions for loaded assets. +### Loading Effects +This video show how to publish effect from Hiero / Nuke Studio, and use the effect in Nuke. + + + + + +### Nuke Color Management + + + ## Troubleshooting ### Fixing Validate Containers +If your Pyblish dialog fails on Validate Containers, you might have an old asset loaded. Use OpenPype - Manage... to switch the asset(s) to the latest version. 
+ ![Versionless](assets/nuke_tut/nuke_ValidateContainers.png) -If your Pyblish dialog fails on Validate Containers, you might have an old asset loaded. Use OpenPype - Manage... to switch the asset(s) to the latest version. + ### Fixing Validate Version If your Pyblish dialog fails on Validate Version, you might be trying to publish already published version. Rise your version in the OpenPype WorkFiles SaveAs. -Or maybe you accidentally copied write node from different shot to your current one. Check the write publishes on the left side of the Pyblish dialog. Typically you publish only one write. Locate and delete the stray write from other shot. \ No newline at end of file +Or maybe you accidentally copied write node from different shot to your current one. Check the write publishes on the left side of the Pyblish dialog. Typically you publish only one write. Locate and delete the stray write from other shot. + + diff --git a/website/docs/artist_hosts_photoshop.md b/website/docs/artist_hosts_photoshop.md index a140170c49..36670054ee 100644 --- a/website/docs/artist_hosts_photoshop.md +++ b/website/docs/artist_hosts_photoshop.md @@ -111,3 +111,67 @@ You can switch to a previous version of the image or update to the latest. ![Loader](assets/photoshop_manage_switch.gif) ![Loader](assets/photoshop_manage_update.gif) + + +### New Publisher + +All previous screenshots came from regular [pyblish](https://pyblish.com/) process, there is also a different UI available. This process extends existing implementation and adds new functionalities. + +To test this in Photoshop, the artist needs first to enable experimental `New publisher` in Settings. (Tray > Settings > Experimental tools) +![Settings](assets/experimental_tools_settings.png) + +New dialog opens after clicking on `Experimental tools` button in OpenPype extension menu. +![Menu](assets/experimental_tools_menu.png) + +After you click on this button, this dialog will show up. 
+ +![Menu](assets/artist_photoshop_new_publisher_workfile.png) + +You can see the first instance, called `workfileYourTaskName`. (Name depends on studio naming convention for Photoshop's workfiles.). This instance is so called "automatic", +it was created without instigation by the artist. You shouldn't delete this instance as it might hold necessary values for future publishing, but you can choose to skip it +from publishing (by toggling the pill button inside of the rectangular object denoting instance). + +New publisher allows publishing into different context, just click on a workfile instance, update `Variant`, `Asset` or `Task` in the form in the middle and don't forget to click on the 'Confirm' button. + +Similarly to the old publishing approach, you need to create instances for everything you want to publish. You will initiate by clicking on the '+' sign in the bottom left corner. + +![Instance creator](assets/artist_photoshop_new_publisher_instance.png) + +In this dialog you can select the family for the published layer or group. Currently only 'image' is implemented. + +On right hand side you can see creator attributes: +- `Create only for selected` - mimics `Use selected` option of regular publish +- `Create separate instance for each selected` - if separate instance should be created for each layer if multiple selected + +![Instance created](assets/artist_photoshop_new_publisher_instance_created.png) + +Here you can see a newly created instance of image family. (Name depends on studio naming convention for image family.) You can disable instance from publishing in the same fashion as a workfile instance. 
+You could also decide to delete an instance by selecting it and clicking on a trashcan icon (next to plus button on left button) + +Buttons on the bottom right are for: +- `Refresh publishing` - set publishing process to starting position - useful if previous publish failed, or you changed configuration of a publish +- `Stop/pause publishing` - if you would like to pause publishing process at any time +- `Validate` - if you would like to run only collecting and validating phases (nothing will be published yet) +- `Publish` - standard way to kick off full publishing process + +In the unfortunate case of some error during publishing, you would receive this kind of error dialog. + +![Publish failed](assets/artist_photoshop_new_publisher_publish_failed.png) + +In this case there is an issue that you are publishing two or more instances with the same subset name ('imageMaing'). If the error is recoverable by the artist, you should 
+ +If you would hit on unexpected behaviour with old instances, contact support first, then you could try some steps to recover your publish. Delete instances in New publisher UI, or try `Subset manager` in the extension menu. +Nuclear option is to purge workfile metadata in `File > File Info > Origin > Headline`. This is only for most determined daredevils though! diff --git a/website/docs/assets/artist_photoshop_new_publisher_instance.png b/website/docs/assets/artist_photoshop_new_publisher_instance.png new file mode 100644 index 0000000000..723a032c94 Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_instance.png differ diff --git a/website/docs/assets/artist_photoshop_new_publisher_instance_created.png b/website/docs/assets/artist_photoshop_new_publisher_instance_created.png new file mode 100644 index 0000000000..0cf6d1d18c Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_instance_created.png differ diff --git a/website/docs/assets/artist_photoshop_new_publisher_publish_failed.png b/website/docs/assets/artist_photoshop_new_publisher_publish_failed.png new file mode 100644 index 0000000000..e34497b77d Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_publish_failed.png differ diff --git a/website/docs/assets/artist_photoshop_new_publisher_workfile.png b/website/docs/assets/artist_photoshop_new_publisher_workfile.png new file mode 100644 index 0000000000..006206519f Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_workfile.png differ diff --git a/website/docs/assets/experimental_tools_menu.png b/website/docs/assets/experimental_tools_menu.png new file mode 100644 index 0000000000..79fa8d3655 Binary files /dev/null and b/website/docs/assets/experimental_tools_menu.png differ diff --git a/website/docs/assets/experimental_tools_settings.png b/website/docs/assets/experimental_tools_settings.png new file mode 100644 index 0000000000..4d514e8a8f 
Binary files /dev/null and b/website/docs/assets/experimental_tools_settings.png differ diff --git a/website/docs/assets/settings/anatomy_tasks.png b/website/docs/assets/settings/anatomy_tasks.png new file mode 100644 index 0000000000..16265cf8eb Binary files /dev/null and b/website/docs/assets/settings/anatomy_tasks.png differ diff --git a/website/src/pages/index.js b/website/src/pages/index.js index 791b309bbc..d9bbc3eaa0 100644 --- a/website/src/pages/index.js +++ b/website/src/pages/index.js @@ -144,6 +144,11 @@ const studios = [ title: "Ember Light", image: "/img/EmberLight_black.png", infoLink: "https://emberlight.se/", + }, + { + title: "IGG Canada", + image: "/img/igg-logo.png", + infoLink: "https://www.igg.com/", } ]; diff --git a/website/static/img/igg-logo.png b/website/static/img/igg-logo.png new file mode 100644 index 0000000000..3c7f7718f7 Binary files /dev/null and b/website/static/img/igg-logo.png differ diff --git a/website/yarn.lock b/website/yarn.lock index e01f0c4ef2..04b9dd658b 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -2311,9 +2311,9 @@ asap@~2.0.3: integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= async@^2.6.2: - version "2.6.3" - resolved "https://registry.yarnpkg.com/async/-/async-2.6.3.tgz#d72625e2344a3656e3a3ad4fa749fa83299d82ff" - integrity sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg== + version "2.6.4" + resolved "https://registry.yarnpkg.com/async/-/async-2.6.4.tgz#706b7ff6084664cd7eae713f6f965433b5504221" + integrity sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA== dependencies: lodash "^4.17.14"