diff --git a/.all-contributorsrc b/.all-contributorsrc new file mode 100644 index 0000000000..a3b85cae68 --- /dev/null +++ b/.all-contributorsrc @@ -0,0 +1,315 @@ +{ + "projectName": "OpenPype", + "projectOwner": "pypeclub", + "repoType": "github", + "repoHost": "https://github.com", + "files": [ + "README.md" + ], + "imageSize": 100, + "commit": true, + "commitConvention": "none", + "contributors": [ + { + "login": "mkolar", + "name": "Milan Kolar", + "avatar_url": "https://avatars.githubusercontent.com/u/3333008?v=4", + "profile": "http://pype.club/", + "contributions": [ + "code", + "doc", + "infra", + "business", + "content", + "fundingFinding", + "maintenance", + "projectManagement", + "review", + "mentoring", + "question" + ] + }, + { + "login": "jakubjezek001", + "name": "Jakub Ježek", + "avatar_url": "https://avatars.githubusercontent.com/u/40640033?v=4", + "profile": "https://www.linkedin.com/in/jakubjezek79", + "contributions": [ + "code", + "doc", + "infra", + "content", + "review", + "maintenance", + "mentoring", + "projectManagement", + "question" + ] + }, + { + "login": "antirotor", + "name": "Ondřej Samohel", + "avatar_url": "https://avatars.githubusercontent.com/u/33513211?v=4", + "profile": "https://github.com/antirotor", + "contributions": [ + "code", + "doc", + "infra", + "content", + "review", + "maintenance", + "mentoring", + "projectManagement", + "question" + ] + }, + { + "login": "iLLiCiTiT", + "name": "Jakub Trllo", + "avatar_url": "https://avatars.githubusercontent.com/u/43494761?v=4", + "profile": "https://github.com/iLLiCiTiT", + "contributions": [ + "code", + "doc", + "infra", + "review", + "maintenance", + "question" + ] + }, + { + "login": "kalisp", + "name": "Petr Kalis", + "avatar_url": "https://avatars.githubusercontent.com/u/4457962?v=4", + "profile": "https://github.com/kalisp", + "contributions": [ + "code", + "doc", + "infra", + "review", + "maintenance", + "question" + ] + }, + { + "login": "64qam", + "name": "64qam", + "avatar_url": "https://avatars.githubusercontent.com/u/26925793?v=4", + "profile": "https://github.com/64qam", + "contributions": [ + "code", + "review", + "doc", + "infra", + "projectManagement", + "maintenance", + "content", + "userTesting" + ] + }, + { + "login": "BigRoy", + "name": "Roy Nieterau", + "avatar_url": "https://avatars.githubusercontent.com/u/2439881?v=4", + "profile": "http://www.colorbleed.nl/", + "contributions": [ + "code", + "doc", + "review", + "mentoring", + "question" + ] + }, + { + "login": "tokejepsen", + "name": "Toke Jepsen", + "avatar_url": "https://avatars.githubusercontent.com/u/1860085?v=4", + "profile": "https://github.com/tokejepsen", + "contributions": [ + "code", + "doc", + "review", + "mentoring", + "question" + ] + }, + { + "login": "jrsndl", + "name": "Jiri Sindelar", + "avatar_url": "https://avatars.githubusercontent.com/u/45896205?v=4", + "profile": "https://github.com/jrsndl", + "contributions": [ + "code", + "review", + "doc", + "content", + "tutorial", + "userTesting" + ] + }, + { + "login": "simonebarbieri", + "name": "Simone Barbieri", + "avatar_url": "https://avatars.githubusercontent.com/u/1087869?v=4", + "profile": "https://barbierisimone.com/", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "karimmozilla", + "name": "karimmozilla", + "avatar_url": "https://avatars.githubusercontent.com/u/82811760?v=4", + "profile": "http://karimmozilla.xyz/", + "contributions": [ + "code" + ] + }, + { + "login": "Allan-I", + "name": "Allan I. 
A.", + "avatar_url": "https://avatars.githubusercontent.com/u/76656700?v=4", + "profile": "https://github.com/Allan-I", + "contributions": [ + "code" + ] + }, + { + "login": "m-u-r-p-h-y", + "name": "murphy", + "avatar_url": "https://avatars.githubusercontent.com/u/352795?v=4", + "profile": "https://www.linkedin.com/in/mmuurrpphhyy/", + "contributions": [ + "code", + "review", + "userTesting", + "doc", + "projectManagement" + ] + }, + { + "login": "aardschok", + "name": "Wijnand Koreman", + "avatar_url": "https://avatars.githubusercontent.com/u/26920875?v=4", + "profile": "https://github.com/aardschok", + "contributions": [ + "code" + ] + }, + { + "login": "zhoub", + "name": "Bo Zhou", + "avatar_url": "https://avatars.githubusercontent.com/u/1798206?v=4", + "profile": "http://jedimaster.cnblogs.com/", + "contributions": [ + "code" + ] + }, + { + "login": "ClementHector", + "name": "Clément Hector", + "avatar_url": "https://avatars.githubusercontent.com/u/7068597?v=4", + "profile": "https://www.linkedin.com/in/clementhector/", + "contributions": [ + "code", + "review" + ] + }, + { + "login": "davidlatwe", + "name": "David Lai", + "avatar_url": "https://avatars.githubusercontent.com/u/3357009?v=4", + "profile": "https://twitter.com/davidlatwe", + "contributions": [ + "code", + "review" + ] + }, + { + "login": "2-REC", + "name": "Derek ", + "avatar_url": "https://avatars.githubusercontent.com/u/42170307?v=4", + "profile": "https://github.com/2-REC", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "gabormarinov", + "name": "Gábor Marinov", + "avatar_url": "https://avatars.githubusercontent.com/u/8620515?v=4", + "profile": "https://github.com/gabormarinov", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "icyvapor", + "name": "icyvapor", + "avatar_url": "https://avatars.githubusercontent.com/u/1195278?v=4", + "profile": "https://github.com/icyvapor", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "jlorrain", + "name": "Jérôme LORRAIN", + "avatar_url": "https://avatars.githubusercontent.com/u/7955673?v=4", + "profile": "https://github.com/jlorrain", + "contributions": [ + "code" + ] + }, + { + "login": "dmo-j-cube", + "name": "David Morris-Oliveros", + "avatar_url": "https://avatars.githubusercontent.com/u/89823400?v=4", + "profile": "https://github.com/dmo-j-cube", + "contributions": [ + "code" + ] + }, + { + "login": "BenoitConnan", + "name": "BenoitConnan", + "avatar_url": "https://avatars.githubusercontent.com/u/82808268?v=4", + "profile": "https://github.com/BenoitConnan", + "contributions": [ + "code" + ] + }, + { + "login": "Malthaldar", + "name": "Malthaldar", + "avatar_url": "https://avatars.githubusercontent.com/u/33671694?v=4", + "profile": "https://github.com/Malthaldar", + "contributions": [ + "code" + ] + }, + { + "login": "svenneve", + "name": "Sven Neve", + "avatar_url": "https://avatars.githubusercontent.com/u/2472863?v=4", + "profile": "http://www.svenneve.com/", + "contributions": [ + "code" + ] + }, + { + "login": "zafrs", + "name": "zafrs", + "avatar_url": "https://avatars.githubusercontent.com/u/26890002?v=4", + "profile": "https://github.com/zafrs", + "contributions": [ + "code" + ] + } + ], + "contributorsPerLine": 7 +} \ No newline at end of file diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index d9b4d8089c..5acd20007c 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -80,7 +80,7 @@ jobs: git tag -a $tag_name -m "nightly build" - name: Push to protected 
main branch - uses: CasperWA/push-protected@v2 + uses: CasperWA/push-protected@v2.10.0 with: token: ${{ secrets.ADMIN_TOKEN }} branch: main diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 917e6c884c..85864b4442 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -68,7 +68,7 @@ jobs: - name: 🔏 Push to protected main branch if: steps.version.outputs.release_tag != 'skip' - uses: CasperWA/push-protected@v2 + uses: CasperWA/push-protected@v2.10.0 with: token: ${{ secrets.ADMIN_TOKEN }} branch: main diff --git a/.gitignore b/.gitignore index fa3fae1ad2..28cfb4b1e9 100644 --- a/.gitignore +++ b/.gitignore @@ -70,6 +70,8 @@ coverage.xml ################## node_modules package-lock.json +package.json +yarn.lock openpype/premiere/ppro/js/debug.log diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 9920ceaad6..0000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "repos/avalon-core"] - path = repos/avalon-core - url = https://github.com/pypeclub/avalon-core.git \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index f3c7820d8f..e2ff9f919c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,99 +1,174 @@ # Changelog -## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-17) +## [3.10.0-nightly.2](https://github.com/pypeclub/OpenPype/tree/HEAD) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.0...3.9.1) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...HEAD) + +### 📖 Documentation + +- Docs: add all-contributors config and initial list [\#3094](https://github.com/pypeclub/OpenPype/pull/3094) +- Nuke docs with videos [\#3052](https://github.com/pypeclub/OpenPype/pull/3052) **🚀 Enhancements** -- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907) -- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897) -- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892) -- Nuke: ExtractReviewSlate can handle more codes and profiles [\#2879](https://github.com/pypeclub/OpenPype/pull/2879) -- Flame: sequence used for reference video [\#2869](https://github.com/pypeclub/OpenPype/pull/2869) +- Standalone publisher: add support for bgeo and vdb [\#3080](https://github.com/pypeclub/OpenPype/pull/3080) +- Update collect\_render.py [\#3055](https://github.com/pypeclub/OpenPype/pull/3055) +- SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983) **🐛 Bug fixes** -- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904) -- Fixing gap detection in extract review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902) -- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899) -- SceneInventory: Fix import of load function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894) -- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891) -- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885) -- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884) -- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874) -- Maya: Deformer node ids validation plugin 
[\#2826](https://github.com/pypeclub/OpenPype/pull/2826) +- RoyalRender Control Submission - AVALON\_APP\_NAME default [\#3091](https://github.com/pypeclub/OpenPype/pull/3091) +- Ftrack: Update Create Folders action [\#3089](https://github.com/pypeclub/OpenPype/pull/3089) +- Project Manager: Avoid unnecessary updates of asset documents [\#3083](https://github.com/pypeclub/OpenPype/pull/3083) +- Standalone publisher: Fix plugins install [\#3077](https://github.com/pypeclub/OpenPype/pull/3077) +- General: Extract review sequence is not converted with same names [\#3076](https://github.com/pypeclub/OpenPype/pull/3076) +- Webpublisher: Use variant value [\#3068](https://github.com/pypeclub/OpenPype/pull/3068) +- Nuke: Add aov matching even for remainder and prerender [\#3060](https://github.com/pypeclub/OpenPype/pull/3060) **🔀 Refactored code** -- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889) -- General: Move loader logic from avalon to openpype [\#2886](https://github.com/pypeclub/OpenPype/pull/2886) +- General: Move host install [\#3009](https://github.com/pypeclub/OpenPype/pull/3009) + +**Merged pull requests:** + +- Nuke: added suspend\_publish knob [\#3078](https://github.com/pypeclub/OpenPype/pull/3078) +- Bump async from 2.6.3 to 2.6.4 in /website [\#3065](https://github.com/pypeclub/OpenPype/pull/3065) + +## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.4-nightly.2...3.9.4) + +### 📖 Documentation + +- Documentation: more info about Tasks [\#3062](https://github.com/pypeclub/OpenPype/pull/3062) +- Documentation: Python requirements to 3.7.9 [\#3035](https://github.com/pypeclub/OpenPype/pull/3035) +- Website Docs: Remove unused pages [\#2974](https://github.com/pypeclub/OpenPype/pull/2974) + +**🆕 New features** + +- General: Local overrides for environment variables [\#3045](https://github.com/pypeclub/OpenPype/pull/3045) + +**🚀 Enhancements** + +- TVPaint: Added init file for worker to triggers missing sound file dialog [\#3053](https://github.com/pypeclub/OpenPype/pull/3053) +- Ftrack: Custom attributes can be filled in slate values [\#3036](https://github.com/pypeclub/OpenPype/pull/3036) +- Resolve environment variable in google drive credential path [\#3008](https://github.com/pypeclub/OpenPype/pull/3008) + +**🐛 Bug fixes** + +- GitHub: Updated push-protected action in github workflow [\#3064](https://github.com/pypeclub/OpenPype/pull/3064) +- Nuke: Typos in imports from Nuke implementation [\#3061](https://github.com/pypeclub/OpenPype/pull/3061) +- Hotfix: fixing deadline job publishing [\#3059](https://github.com/pypeclub/OpenPype/pull/3059) +- General: Extract Review handle invalid characters for ffmpeg [\#3050](https://github.com/pypeclub/OpenPype/pull/3050) +- Slate Review: Support to keep format on slate concatenation [\#3049](https://github.com/pypeclub/OpenPype/pull/3049) +- Webpublisher: fix processing of workfile [\#3048](https://github.com/pypeclub/OpenPype/pull/3048) +- Ftrack: Integrate ftrack api fix [\#3044](https://github.com/pypeclub/OpenPype/pull/3044) +- Webpublisher - removed wrong hardcoded family [\#3043](https://github.com/pypeclub/OpenPype/pull/3043) +- LibraryLoader: Use current project for asset query in families filter [\#3042](https://github.com/pypeclub/OpenPype/pull/3042) +- SiteSync: Providers ignore that site is disabled [\#3041](https://github.com/pypeclub/OpenPype/pull/3041) +- Unreal: Creator 
import fixes [\#3040](https://github.com/pypeclub/OpenPype/pull/3040) +- Settings UI: Version column can be extended so version are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032) +- SiteSync: fix transitive alternate sites, fix dropdown in Local Settings [\#3018](https://github.com/pypeclub/OpenPype/pull/3018) + +**Merged pull requests:** + +- Deadline: reworked pools assignment [\#3051](https://github.com/pypeclub/OpenPype/pull/3051) +- Houdini: Avoid ImportError on `hdefereval` when Houdini runs without UI [\#2987](https://github.com/pypeclub/OpenPype/pull/2987) + +## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.3-nightly.2...3.9.3) + +### 📖 Documentation + +- Website Docs: Manager Ftrack fix broken links [\#2979](https://github.com/pypeclub/OpenPype/pull/2979) + +**🆕 New features** + +- Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027) +- Publishing textures for Unreal [\#2988](https://github.com/pypeclub/OpenPype/pull/2988) + +**🚀 Enhancements** + +- Ftrack: Add more options for note text of integrate ftrack note [\#3025](https://github.com/pypeclub/OpenPype/pull/3025) +- Console Interpreter: Changed how console splitter size are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016) +- Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015) +- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011) +- Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995) + +**🐛 Bug fixes** + +- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033) +- General: Fix validate asset docs plug-in filename and class name [\#3029](https://github.com/pypeclub/OpenPype/pull/3029) +- General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028) +- Harmony: Added creating subset name for workfile from template [\#3024](https://github.com/pypeclub/OpenPype/pull/3024) +- AfterEffects: Added creating subset name for workfile from template [\#3023](https://github.com/pypeclub/OpenPype/pull/3023) +- General: Add example addons to ignored [\#3022](https://github.com/pypeclub/OpenPype/pull/3022) +- Maya: Remove missing import [\#3017](https://github.com/pypeclub/OpenPype/pull/3017) +- Ftrack: multiple reviewable componets [\#3012](https://github.com/pypeclub/OpenPype/pull/3012) +- Tray publisher: Fixes after code movement [\#3010](https://github.com/pypeclub/OpenPype/pull/3010) +- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002) +- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996) + +**Merged pull requests:** + +- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030) +- General: adding limitations for pyright [\#2994](https://github.com/pypeclub/OpenPype/pull/2994) + +## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.2-nightly.4...3.9.2) + +### 📖 Documentation + +- Documentation: Added mention of adding My Drive as a root [\#2999](https://github.com/pypeclub/OpenPype/pull/2999) +- Docs: Added MongoDB requirements 
[\#2951](https://github.com/pypeclub/OpenPype/pull/2951) + +**🆕 New features** + +- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992) +- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978) + +**🚀 Enhancements** + +- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005) +- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001) +- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000) +- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985) +- General: `METADATA\_KEYS` constant as `frozenset` for optimal immutable lookup [\#2980](https://github.com/pypeclub/OpenPype/pull/2980) +- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975) +- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967) + +**🐛 Bug fixes** + +- Hosts: Remove path existence checks in 'add\_implementation\_envs' [\#3004](https://github.com/pypeclub/OpenPype/pull/3004) +- Fix - remove doubled dot in workfile created from template [\#2998](https://github.com/pypeclub/OpenPype/pull/2998) +- PS: fix renaming subset incorrectly in PS [\#2991](https://github.com/pypeclub/OpenPype/pull/2991) +- Fix: Disable setuptools auto discovery [\#2990](https://github.com/pypeclub/OpenPype/pull/2990) +- AEL: fix opening existing workfile if no scene opened [\#2989](https://github.com/pypeclub/OpenPype/pull/2989) +- Maya: Don't do hardlinks on windows for look publishing [\#2986](https://github.com/pypeclub/OpenPype/pull/2986) +- Settings UI: Fix version completer on linux [\#2981](https://github.com/pypeclub/OpenPype/pull/2981) +- Photoshop: Fix creation of subset names in PS review and workfile [\#2969](https://github.com/pypeclub/OpenPype/pull/2969) +- Slack: Added default for review\_upload\_limit for Slack [\#2965](https://github.com/pypeclub/OpenPype/pull/2965) +- General: OIIO conversion for ffmeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958) +- Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956) +- General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950) + +**Merged pull requests:** + +- Bump paramiko from 2.9.2 to 2.10.1 [\#2973](https://github.com/pypeclub/OpenPype/pull/2973) +- Bump minimist from 1.2.5 to 1.2.6 in /website [\#2954](https://github.com/pypeclub/OpenPype/pull/2954) +- Bump node-forge from 1.2.1 to 1.3.0 in /website [\#2953](https://github.com/pypeclub/OpenPype/pull/2953) +- Maya - added transparency into review creator [\#2952](https://github.com/pypeclub/OpenPype/pull/2952) + +## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.1-nightly.3...3.9.1) ## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.0-nightly.9...3.9.0) -**Deprecated:** - -- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845) - -### 📖 Documentation - -- Documentation: Change Photoshop & AfterEffects plugin path [\#2878](https://github.com/pypeclub/OpenPype/pull/2878) - -**🚀 Enhancements** - -- General: Subset name filtering in ExtractReview outpus [\#2872](https://github.com/pypeclub/OpenPype/pull/2872) -- 
NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867) -- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863) -- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859) -- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841) -- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837) -- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836) -- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822) -- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817) -- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812) -- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811) -- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805) - -**🐛 Bug fixes** - -- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877) -- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868) -- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866) -- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864) -- General: ffmpeg was crashing on slate merge [\#2860](https://github.com/pypeclub/OpenPype/pull/2860) -- WebPublisher: Video file was published with one too many frame [\#2858](https://github.com/pypeclub/OpenPype/pull/2858) -- New Publisher: Error dialog got right styles [\#2857](https://github.com/pypeclub/OpenPype/pull/2857) -- General: Fix getattr clalback on dynamic modules [\#2855](https://github.com/pypeclub/OpenPype/pull/2855) -- Nuke: slate resolution to input video resolution [\#2853](https://github.com/pypeclub/OpenPype/pull/2853) -- WebPublisher: Fix username stored in DB [\#2852](https://github.com/pypeclub/OpenPype/pull/2852) -- WebPublisher: Fix wrong number of frames for video file [\#2851](https://github.com/pypeclub/OpenPype/pull/2851) -- Nuke: Fix family test in validate\_write\_legacy to work with stillImage [\#2847](https://github.com/pypeclub/OpenPype/pull/2847) -- Nuke: fix multiple baking profile farm publishing [\#2842](https://github.com/pypeclub/OpenPype/pull/2842) -- Blender: Fixed parameters for FBX export of the camera [\#2840](https://github.com/pypeclub/OpenPype/pull/2840) -- Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832) -- Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828) -- Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827) -- Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825) -- Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824) -- General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821) -- Settings UI: Fix "Apply from" action [\#2820](https://github.com/pypeclub/OpenPype/pull/2820) -- Ftrack: Job killer with missing user [\#2819](https://github.com/pypeclub/OpenPype/pull/2819) -- Nuke: Use AVALON\_APP to get value for 
"app" key [\#2818](https://github.com/pypeclub/OpenPype/pull/2818) -- StandalonePublisher: use dynamic groups in subset names [\#2816](https://github.com/pypeclub/OpenPype/pull/2816) - -**🔀 Refactored code** - -- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876) -- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854) -- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848) -- General: Basic event system [\#2846](https://github.com/pypeclub/OpenPype/pull/2846) -- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839) -- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829) -- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823) -- General: Extract template formatting from anatomy [\#2766](https://github.com/pypeclub/OpenPype/pull/2766) - ## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.2-nightly.3...3.8.2) diff --git a/README.md b/README.md index 0e450fc48d..b6966adbc4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,7 @@ + +[![All Contributors](https://img.shields.io/badge/all_contributors-26-orange.svg?style=flat-square)](#contributors-) + OpenPype ==== @@ -283,3 +286,54 @@ Running tests To run tests, execute `.\tools\run_tests(.ps1|.sh)`. **Note that it needs existing virtual environment.** + +## Contributors ✨ + +Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Contributor | Contributions |
| --- | --- |
| [Milan Kolar](http://pype.club/) | 💻 📖 🚇 💼 🖋 🔍 🚧 📆 👀 🧑‍🏫 💬 |
| [Jakub Ježek](https://www.linkedin.com/in/jakubjezek79) | 💻 📖 🚇 🖋 👀 🚧 🧑‍🏫 📆 💬 |
| [Ondřej Samohel](https://github.com/antirotor) | 💻 📖 🚇 🖋 👀 🚧 🧑‍🏫 📆 💬 |
| [Jakub Trllo](https://github.com/iLLiCiTiT) | 💻 📖 🚇 👀 🚧 💬 |
| [Petr Kalis](https://github.com/kalisp) | 💻 📖 🚇 👀 🚧 💬 |
| [64qam](https://github.com/64qam) | 💻 👀 📖 🚇 📆 🚧 🖋 📓 |
| [Roy Nieterau](http://www.colorbleed.nl/) | 💻 📖 👀 🧑‍🏫 💬 |
| [Toke Jepsen](https://github.com/tokejepsen) | 💻 📖 👀 🧑‍🏫 💬 |
| [Jiri Sindelar](https://github.com/jrsndl) | 💻 👀 📖 🖋 ✅ 📓 |
| [Simone Barbieri](https://barbierisimone.com/) | 💻 📖 |
| [karimmozilla](http://karimmozilla.xyz/) | 💻 |
| [Allan I. A.](https://github.com/Allan-I) | 💻 |
| [murphy](https://www.linkedin.com/in/mmuurrpphhyy/) | 💻 👀 📓 📖 📆 |
| [Wijnand Koreman](https://github.com/aardschok) | 💻 |
| [Bo Zhou](http://jedimaster.cnblogs.com/) | 💻 |
| [Clément Hector](https://www.linkedin.com/in/clementhector/) | 💻 👀 |
| [David Lai](https://twitter.com/davidlatwe) | 💻 👀 |
| [Derek](https://github.com/2-REC) | 💻 📖 |
| [Gábor Marinov](https://github.com/gabormarinov) | 💻 📖 |
| [icyvapor](https://github.com/icyvapor) | 💻 📖 |
| [Jérôme LORRAIN](https://github.com/jlorrain) | 💻 |
| [David Morris-Oliveros](https://github.com/dmo-j-cube) | 💻 |
| [BenoitConnan](https://github.com/BenoitConnan) | 💻 |
| [Malthaldar](https://github.com/Malthaldar) | 💻 |
| [Sven Neve](http://www.svenneve.com/) | 💻 |
| [zafrs](https://github.com/zafrs) | 💻 |
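
The table above is generated from the `.all-contributorsrc` added at the top of this diff; the project regenerates it with the all-contributors CLI rather than editing README.md by hand. As a minimal illustration — not the project's actual tooling — the sketch below shows how the config's `contributors` entries drive the grid by rebuilding a plain-markdown version from the same file. The emoji mapping is the subset of the [emoji key](https://allcontributors.org/docs/en/emoji-key) that this particular config uses.

```python
# Minimal sketch, not the project's tooling: the real README section is
# regenerated by the all-contributors CLI from .all-contributorsrc. This
# only illustrates how the config maps to the contributor grid above.
import json

# Subset of the all-contributors emoji key used in this config.
EMOJI = {
    "code": "💻", "doc": "📖", "infra": "🚇", "business": "💼",
    "content": "🖋", "fundingFinding": "🔍", "maintenance": "🚧",
    "projectManagement": "📆", "review": "👀", "mentoring": "🧑‍🏫",
    "question": "💬", "tutorial": "✅", "userTesting": "📓",
}


def contributors_table(config_path=".all-contributorsrc"):
    """Render the configured contributors as a plain-markdown table."""
    with open(config_path, encoding="utf-8") as f:
        config = json.load(f)

    rows = ["| Contributor | Contributions |", "| --- | --- |"]
    for person in config["contributors"]:
        # Fall back to the raw key for any contribution type not mapped above.
        emojis = " ".join(EMOJI.get(c, c) for c in person["contributions"])
        rows.append("| [{}]({}) | {} |".format(
            person["name"].strip(), person["profile"], emojis))
    return "\n".join(rows)


if __name__ == "__main__":
    print(contributors_table())
```

Run against the config shown in this diff, it prints the same 26 rows as the table above.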
+ + + + + + +This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! \ No newline at end of file diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index ad49f868d5..08333885c0 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -627,8 +627,6 @@ class BootstrapRepos: Attributes: data_dir (Path): local OpenPype installation directory. - live_repo_dir (Path): path to repos directory if running live, - otherwise `None`. registry (OpenPypeSettingsRegistry): OpenPype registry object. zip_filter (list): List of files to exclude from zip openpype_filter (list): list of top level directories to @@ -654,7 +652,7 @@ class BootstrapRepos: self.registry = OpenPypeSettingsRegistry() self.zip_filter = [".pyc", "__pycache__"] self.openpype_filter = [ - "openpype", "repos", "schema", "LICENSE" + "openpype", "schema", "LICENSE" ] self._message = message @@ -667,11 +665,6 @@ class BootstrapRepos: progress_callback = empty_progress self._progress_callback = progress_callback - if getattr(sys, "frozen", False): - self.live_repo_dir = Path(sys.executable).parent / "repos" - else: - self.live_repo_dir = Path(Path(__file__).parent / ".." / "repos") - @staticmethod def get_version_path_from_list( version: str, version_list: list) -> Union[Path, None]: @@ -736,11 +729,12 @@ class BootstrapRepos: # if repo dir is not set, we detect local "live" OpenPype repository # version and use it as a source. Otherwise repo_dir is user # entered location. - if not repo_dir: - version = OpenPypeVersion.get_installed_version_str() - repo_dir = self.live_repo_dir - else: + if repo_dir: version = self.get_version(repo_dir) + else: + installed_version = OpenPypeVersion.get_installed_version() + version = str(installed_version) + repo_dir = installed_version.path if not version: self._print("OpenPype not found.", LOG_ERROR) @@ -756,7 +750,7 @@ class BootstrapRepos: Path(temp_dir) / f"openpype-v{version}.zip" self._print(f"creating zip: {temp_zip}") - self._create_openpype_zip(temp_zip, repo_dir.parent) + self._create_openpype_zip(temp_zip, repo_dir) if not os.path.exists(temp_zip): self._print("make archive failed.", LOG_ERROR) return None @@ -1057,27 +1051,11 @@ class BootstrapRepos: if not archive.is_file() and not archive.exists(): raise ValueError("Archive is not file.") - with ZipFile(archive, "r") as zip_file: - name_list = zip_file.namelist() - - roots = [] - paths = [] - for item in name_list: - if not item.startswith("repos/"): - continue - - root = item.split("/")[1] - - if root not in roots: - roots.append(root) - paths.append( - f"{archive}{os.path.sep}repos{os.path.sep}{root}") - sys.path.insert(0, paths[-1]) - - sys.path.insert(0, f"{archive}") + archive_path = str(archive) + sys.path.insert(0, archive_path) pythonpath = os.getenv("PYTHONPATH", "") python_paths = pythonpath.split(os.pathsep) - python_paths += paths + python_paths.insert(0, archive_path) os.environ["PYTHONPATH"] = os.pathsep.join(python_paths) @@ -1094,24 +1072,8 @@ class BootstrapRepos: directory (Path): path to directory. 
""" + sys.path.insert(0, directory.as_posix()) - directory /= "repos" - if not directory.exists() and not directory.is_dir(): - raise ValueError("directory is invalid") - - roots = [] - for item in directory.iterdir(): - if item.is_dir(): - root = item.as_posix() - if root not in roots: - roots.append(root) - sys.path.insert(0, root) - - pythonpath = os.getenv("PYTHONPATH", "") - paths = pythonpath.split(os.pathsep) - paths += roots - - os.environ["PYTHONPATH"] = os.pathsep.join(paths) @staticmethod def find_openpype_version(version, staging): @@ -1437,6 +1399,7 @@ class BootstrapRepos: # create destination parent directories even if they don't exist. destination.mkdir(parents=True) + remove_source_file = False # version is directory if openpype_version.path.is_dir(): # create zip inside temporary directory. @@ -1470,6 +1433,8 @@ class BootstrapRepos: self._progress_callback(35) openpype_version.path = self._copy_zip( openpype_version.path, destination) + # Mark zip to be deleted when done + remove_source_file = True # extract zip there self._print("extracting zip to destination ...") @@ -1478,6 +1443,10 @@ class BootstrapRepos: zip_ref.extractall(destination) self._progress_callback(100) + # Remove zip file copied to local app data + if remove_source_file: + os.remove(openpype_version.path) + return destination def _copy_zip(self, source: Path, destination: Path) -> Path: diff --git a/openpype/__init__.py b/openpype/__init__.py index 99629a4257..810664707a 100644 --- a/openpype/__init__.py +++ b/openpype/__init__.py @@ -1,154 +1,5 @@ -# -*- coding: utf-8 -*- -"""Pype module.""" import os -import platform -import functools -import logging - -from .settings import get_project_settings -from .lib import ( - Anatomy, - filter_pyblish_plugins, - set_plugin_attributes_from_settings, - change_timer_to_current_context, - register_event_callback, -) - -pyblish = avalon = _original_discover = None - -log = logging.getLogger(__name__) PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__)) PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") - -# Global plugin paths -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") - - -def import_wrapper(func): - """Wrap module imports to specific functions.""" - @functools.wraps(func) - def decorated(*args, **kwargs): - global pyblish - global avalon - global _original_discover - if pyblish is None: - from pyblish import api as pyblish - from avalon import api as avalon - - # we are monkey patching `avalon.api.discover()` to allow us to - # load plugin presets on plugins being discovered by avalon. - # Little bit of hacking, but it allows us to add out own features - # without need to modify upstream code. - - _original_discover = avalon.discover - - return func(*args, **kwargs) - - return decorated - - -@import_wrapper -def patched_discover(superclass): - """Patch `avalon.api.discover()`. - - Monkey patched version of :func:`avalon.api.discover()`. It allows - us to load presets on plugins being discovered. 
- """ - # run original discover and get plugins - plugins = _original_discover(superclass) - filtered_plugins = [ - plugin - for plugin in plugins - if issubclass(plugin, superclass) - ] - - set_plugin_attributes_from_settings(filtered_plugins, superclass) - - return filtered_plugins - - -@import_wrapper -def install(): - """Install Pype to Avalon.""" - from pyblish.lib import MessageHandler - from openpype.modules import load_modules - from openpype.pipeline import ( - LegacyCreator, - register_loader_plugin_path, - ) - from avalon import pipeline - - # Make sure modules are loaded - load_modules() - - def modified_emit(obj, record): - """Method replacing `emit` in Pyblish's MessageHandler.""" - record.msg = record.getMessage() - obj.records.append(record) - - MessageHandler.emit = modified_emit - - log.info("Registering global plug-ins..") - pyblish.register_plugin_path(PUBLISH_PATH) - pyblish.register_discovery_filter(filter_pyblish_plugins) - register_loader_plugin_path(LOAD_PATH) - - project_name = os.environ.get("AVALON_PROJECT") - - # Register studio specific plugins - if project_name: - anatomy = Anatomy(project_name) - anatomy.set_root_environments() - avalon.register_root(anatomy.roots) - - project_settings = get_project_settings(project_name) - platform_name = platform.system().lower() - project_plugins = ( - project_settings - .get("global", {}) - .get("project_plugins", {}) - .get(platform_name) - ) or [] - for path in project_plugins: - try: - path = str(path.format(**os.environ)) - except KeyError: - pass - - if not path or not os.path.exists(path): - continue - - pyblish.register_plugin_path(path) - register_loader_plugin_path(path) - avalon.register_plugin_path(LegacyCreator, path) - avalon.register_plugin_path(avalon.InventoryAction, path) - - # apply monkey patched discover to original one - log.info("Patching discovery") - - avalon.discover = patched_discover - pipeline.discover = patched_discover - - register_event_callback("taskChanged", _on_task_change) - - -def _on_task_change(): - change_timer_to_current_context() - - -@import_wrapper -def uninstall(): - """Uninstall Pype from Avalon.""" - from openpype.pipeline import deregister_loader_plugin_path - - log.info("Deregistering global plug-ins..") - pyblish.deregister_plugin_path(PUBLISH_PATH) - pyblish.deregister_discovery_filter(filter_pyblish_plugins) - deregister_loader_plugin_path(LOAD_PATH) - log.info("Global plug-ins unregistred") - - # restore original discover - avalon.discover = _original_discover diff --git a/openpype/cli.py b/openpype/cli.py index cbeb7fef9b..2aa4a46929 100644 --- a/openpype/cli.py +++ b/openpype/cli.py @@ -20,6 +20,10 @@ from .pype_commands import PypeCommands "to list staging versions.")) @click.option("--validate-version", expose_value=False, help="validate given version integrity") +@click.option("--debug", is_flag=True, expose_value=False, + help=("Enable debug")) +@click.option("--verbose", expose_value=False, + help=("Change OpenPype log level (debug - critical or 0-50)")) def main(ctx): """Pype is main command serving as entry point to pipeline system. @@ -49,18 +53,13 @@ def traypublisher(): @main.command() -@click.option("-d", "--debug", - is_flag=True, help=("Run pype tray in debug mode")) -def tray(debug=False): +def tray(): """Launch pype tray. Default action of pype command is to launch tray widget to control basic aspects of pype. See documentation for more information. 
- - Running pype with `--debug` will result in lot of information useful for - debugging to be shown in console. """ - PypeCommands().launch_tray(debug) + PypeCommands().launch_tray() @PypeCommands.add_modules @@ -75,7 +74,6 @@ def module(ctx): @main.command() -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("--ftrack-url", envvar="FTRACK_SERVER", help="Ftrack server url") @click.option("--ftrack-user", envvar="FTRACK_API_USER", @@ -88,8 +86,7 @@ def module(ctx): help="Clockify API key.") @click.option("--clockify-workspace", envvar="CLOCKIFY_WORKSPACE", help="Clockify workspace") -def eventserver(debug, - ftrack_url, +def eventserver(ftrack_url, ftrack_user, ftrack_api_key, legacy, @@ -100,8 +97,6 @@ def eventserver(debug, This should be ideally used by system service (such us systemd or upstart on linux and window service). """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().launch_eventservercli( ftrack_url, @@ -114,12 +109,11 @@ def eventserver(debug, @main.command() -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-h", "--host", help="Host", default=None) @click.option("-p", "--port", help="Port", default=None) @click.option("-e", "--executable", help="Executable") @click.option("-u", "--upload_dir", help="Upload dir") -def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None): +def webpublisherwebserver(executable, upload_dir, host=None, port=None): """Starts webserver for communication with Webpublish FR via command line OP must be congigured on a machine, eg. OPENPYPE_MONGO filled AND @@ -127,8 +121,6 @@ def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None): Expect "pype.club" user created on Ftrack. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().launch_webpublisher_webservercli( upload_dir=upload_dir, @@ -164,38 +156,34 @@ def extractenvironments(output_json_path, project, asset, task, app, envgroup): @main.command() @click.argument("paths", nargs=-1) -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-t", "--targets", help="Targets module", default=None, multiple=True) @click.option("-g", "--gui", is_flag=True, help="Show Publish UI", default=False) -def publish(debug, paths, targets, gui): +def publish(paths, targets, gui): """Start CLI publishing. Publish collects json from paths provided as an argument. More than one path is allowed. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands.publish(list(paths), targets, gui) @main.command() @click.argument("path") -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-h", "--host", help="Host") @click.option("-u", "--user", help="User email address") @click.option("-p", "--project", help="Project") @click.option("-t", "--targets", help="Targets", default=None, multiple=True) -def remotepublishfromapp(debug, project, path, host, user=None, targets=None): +def remotepublishfromapp(project, path, host, user=None, targets=None): """Start CLI publishing. Publish collects json from paths provided as an argument. More than one path is allowed. 
""" - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands.remotepublishfromapp( project, path, host, user, targets=targets ) @@ -203,24 +191,21 @@ def remotepublishfromapp(debug, project, path, host, user=None, targets=None): @main.command() @click.argument("path") -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-u", "--user", help="User email address") @click.option("-p", "--project", help="Project") @click.option("-t", "--targets", help="Targets", default=None, multiple=True) -def remotepublish(debug, project, path, user=None, targets=None): +def remotepublish(project, path, user=None, targets=None): """Start CLI publishing. Publish collects json from paths provided as an argument. More than one path is allowed. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands.remotepublish(project, path, user, targets=targets) @main.command() -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-p", "--project", required=True, help="name of project asset is under") @click.option("-a", "--asset", required=True, @@ -228,7 +213,7 @@ def remotepublish(debug, project, path, user=None, targets=None): @click.option("--path", required=True, help="path where textures are found", type=click.Path(exists=True)) -def texturecopy(debug, project, asset, path): +def texturecopy(project, asset, path): """Copy specified textures to provided asset path. It validates if project and asset exists. Then it will use speedcopy to @@ -239,8 +224,7 @@ def texturecopy(debug, project, asset, path): Result will be copied without directory structure so it will be flat then. Nothing is written to database. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands().texture_copy(project, asset, path) @@ -389,11 +373,9 @@ def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant, @main.command() -@click.option("-d", "--debug", - is_flag=True, help=("Run process in debug mode")) @click.option("-a", "--active_site", required=True, help="Name of active stie") -def syncserver(debug, active_site): +def syncserver(active_site): """Run sync site server in background. Some Site Sync use cases need to expose site to another one. @@ -408,8 +390,7 @@ def syncserver(debug, active_site): Settings (configured by starting OP Tray with env var OPENPYPE_LOCAL_ID set to 'active_site'. 
""" - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands().syncserver(active_site) diff --git a/openpype/hooks/pre_global_host_data.py b/openpype/hooks/pre_global_host_data.py index 4c85a511ed..ea5e290d6f 100644 --- a/openpype/hooks/pre_global_host_data.py +++ b/openpype/hooks/pre_global_host_data.py @@ -5,8 +5,7 @@ from openpype.lib import ( prepare_app_environments, prepare_context_environments ) - -import avalon.api +from openpype.pipeline import AvalonMongoDB class GlobalHostDataHook(PreLaunchHook): @@ -64,7 +63,7 @@ class GlobalHostDataHook(PreLaunchHook): self.data["anatomy"] = Anatomy(project_name) # Mongo connection - dbcon = avalon.api.AvalonMongoDB() + dbcon = AvalonMongoDB() dbcon.Session["AVALON_PROJECT"] = project_name dbcon.install() diff --git a/openpype/hooks/pre_python_2_prelaunch.py b/openpype/hooks/pre_python_2_prelaunch.py deleted file mode 100644 index 84272d2e5d..0000000000 --- a/openpype/hooks/pre_python_2_prelaunch.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -from openpype.lib import PreLaunchHook - - -class PrePython2Vendor(PreLaunchHook): - """Prepend python 2 dependencies for py2 hosts.""" - order = 10 - - def execute(self): - if not self.application.use_python_2: - return - - # Prepare vendor dir path - self.log.info("adding global python 2 vendor") - pype_root = os.getenv("OPENPYPE_REPOS_ROOT") - python_2_vendor = os.path.join( - pype_root, - "openpype", - "vendor", - "python", - "python_2" - ) - - # Add Python 2 modules - python_paths = [ - python_2_vendor - ] - - # Load PYTHONPATH from current launch context - python_path = self.launch_context.env.get("PYTHONPATH") - if python_path: - python_paths.append(python_path) - - # Set new PYTHONPATH to launch context environments - self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths) diff --git a/openpype/hosts/aftereffects/api/__init__.py b/openpype/hosts/aftereffects/api/__init__.py index cea1bdc023..2ad1255d27 100644 --- a/openpype/hosts/aftereffects/api/__init__.py +++ b/openpype/hosts/aftereffects/api/__init__.py @@ -16,7 +16,10 @@ from .pipeline import ( uninstall, list_instances, remove_instance, - containerise + containerise, + get_context_data, + update_context_data, + get_context_title ) from .workio import ( @@ -51,6 +54,9 @@ __all__ = [ "list_instances", "remove_instance", "containerise", + "get_context_data", + "update_context_data", + "get_context_title", "file_extensions", "has_unsaved_changes", diff --git a/openpype/hosts/aftereffects/api/extension.zxp b/openpype/hosts/aftereffects/api/extension.zxp index 389d74505d..0ed799991e 100644 Binary files a/openpype/hosts/aftereffects/api/extension.zxp and b/openpype/hosts/aftereffects/api/extension.zxp differ diff --git a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml index 668cb3fc24..a39f5781bb 100644 --- a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml +++ b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml @@ -1,5 +1,5 @@ - diff --git a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx index 8f82c9709d..91df433908 100644 --- a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx +++ b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx @@ -417,7 +417,9 @@ function getRenderInfo(){ var file_url = item.file.toString(); return JSON.stringify({ - "file_name": file_url + "file_name": file_url, + "width": render_item.comp.width, + "height": 
render_item.comp.height }) } diff --git a/openpype/hosts/aftereffects/api/launch_logic.py b/openpype/hosts/aftereffects/api/launch_logic.py index c549268978..30a3e1f1c3 100644 --- a/openpype/hosts/aftereffects/api/launch_logic.py +++ b/openpype/hosts/aftereffects/api/launch_logic.py @@ -12,9 +12,8 @@ from wsrpc_aiohttp import ( from Qt import QtCore +from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools - -from avalon import api from openpype.tools.adobe_webserver.app import WebServerTool from .ws_stub import AfterEffectsServerStub @@ -271,13 +270,13 @@ class AfterEffectsRoute(WebSocketRoute): log.info("Setting context change") log.info("project {} asset {} ".format(project, asset)) if project: - api.Session["AVALON_PROJECT"] = project + legacy_io.Session["AVALON_PROJECT"] = project os.environ["AVALON_PROJECT"] = project if asset: - api.Session["AVALON_ASSET"] = asset + legacy_io.Session["AVALON_ASSET"] = asset os.environ["AVALON_ASSET"] = asset if task: - api.Session["AVALON_TASK"] = task + legacy_io.Session["AVALON_TASK"] = task os.environ["AVALON_TASK"] = task async def read(self): diff --git a/openpype/hosts/aftereffects/api/lib.py b/openpype/hosts/aftereffects/api/lib.py index dac6b5d28f..ce4cbf09af 100644 --- a/openpype/hosts/aftereffects/api/lib.py +++ b/openpype/hosts/aftereffects/api/lib.py @@ -6,6 +6,7 @@ import logging from Qt import QtWidgets +from openpype.pipeline import install_host from openpype.lib.remote_publish import headless_publish from openpype.tools.utils import host_tools @@ -22,10 +23,9 @@ def safe_excepthook(*args): def main(*subprocess_args): sys.excepthook = safe_excepthook - import avalon.api from openpype.hosts.aftereffects import api - avalon.api.install(api) + install_host(api) os.environ["OPENPYPE_LOG_NO_COLORS"] = "False" app = QtWidgets.QApplication([]) diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py index 681f1c51a7..a428a1470d 100644 --- a/openpype/hosts/aftereffects/api/pipeline.py +++ b/openpype/hosts/aftereffects/api/pipeline.py @@ -4,15 +4,16 @@ import sys from Qt import QtWidgets import pyblish.api -import avalon.api -from avalon import io, pipeline from openpype import lib from openpype.api import Logger from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, + legacy_io, ) import openpype.hosts.aftereffects from openpype.lib import register_event_callback @@ -29,40 +30,6 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - - -def check_inventory(): - if not lib.any_outdated(): - return - - host = pyblish.api.registered_host() - outdated_containers = [] - for container in host.ls(): - representation = container['representation'] - representation_doc = io.find_one( - { - "_id": io.ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} - ) - if representation_doc and not lib.is_latest(representation_doc): - outdated_containers.append(container) - - # Warn about outdated containers. 
- print("Starting new QApplication..") - app = QtWidgets.QApplication(sys.argv) - - message_box = QtWidgets.QMessageBox() - message_box.setIcon(QtWidgets.QMessageBox.Warning) - msg = "There are outdated containers in the scene." - message_box.setText(msg) - message_box.exec_() - - -def application_launch(): - check_inventory() def install(): @@ -72,7 +39,7 @@ def install(): pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) log.info(PUBLISH_PATH) pyblish.api.register_callback( @@ -85,7 +52,12 @@ def install(): def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) + + +def application_launch(): + """Triggered after start of app""" + check_inventory() def on_pyblish_instance_toggled(instance, old_value, new_value): @@ -122,65 +94,6 @@ def get_asset_settings(): } -def containerise(name, - namespace, - comp, - context, - loader=None, - suffix="_CON"): - """ - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Creates dictionary payloads that gets saved into file metadata. Each - container contains of who loaded (loader) and members (single or multiple - in case of background). - - Arguments: - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - comp (Comp): Composition to containerise - context (dict): Asset information - loader (str, optional): Name of loader used to produce this container. - suffix (str, optional): Suffix of container, defaults to `_CON`. - - Returns: - container (str): Name of container assembly - """ - data = { - "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace, - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - "members": comp.members or [comp.id] - } - - stub = get_stub() - stub.imprint(comp, data) - - return comp - - -def _get_stub(): - """ - Handle pulling stub from PS to run operations on host - Returns: - (AEServerStub) or None - """ - try: - stub = get_stub() # only after Photoshop is up - except lib.ConnectionNotEstablishedYet: - print("Not connected yet, ignoring") - return - - if not stub.get_active_document_name(): - return - - return stub - - def ls(): """Yields containers from active AfterEffects document. @@ -221,6 +134,66 @@ def ls(): yield data +def check_inventory(): + """Checks loaded containers if they are of highest version""" + if not lib.any_outdated(): + return + + # Warn about outdated containers. + _app = QtWidgets.QApplication.instance() + if not _app: + print("Starting new QApplication..") + _app = QtWidgets.QApplication([]) + + message_box = QtWidgets.QMessageBox() + message_box.setIcon(QtWidgets.QMessageBox.Warning) + msg = "There are outdated containers in the scene." + message_box.setText(msg) + message_box.exec_() + + +def containerise(name, + namespace, + comp, + context, + loader=None, + suffix="_CON"): + """ + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Creates dictionary payloads that gets saved into file metadata. Each + container contains of who loaded (loader) and members (single or multiple + in case of background). 
+ + Arguments: + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + comp (AEItem): Composition to containerise + context (dict): Asset information + loader (str, optional): Name of loader used to produce this container. + suffix (str, optional): Suffix of container, defaults to `_CON`. + + Returns: + container (str): Name of container assembly + """ + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace, + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + "members": comp.members or [comp.id] + } + + stub = get_stub() + stub.imprint(comp.id, data) + + return comp + + +# created instances section def list_instances(): """ List all created instances from current workfile which @@ -241,16 +214,8 @@ def list_instances(): layers_meta = stub.get_metadata() for instance in layers_meta: - if instance.get("schema") and \ - "container" in instance.get("schema"): - continue - - uuid_val = instance.get("uuid") - if uuid_val: - instance['uuid'] = uuid_val - else: - instance['uuid'] = instance.get("members")[0] # legacy - instances.append(instance) + if instance.get("id") == "pyblish.avalon.instance": + instances.append(instance) return instances @@ -271,8 +236,59 @@ def remove_instance(instance): if not stub: return - stub.remove_instance(instance.get("uuid")) - item = stub.get_item(instance.get("uuid")) - if item: - stub.rename_item(item.id, - item.name.replace(stub.PUBLISH_ICON, '')) + inst_id = instance.get("instance_id") or instance.get("uuid") # legacy + if not inst_id: + log.warning("No instance identifier for {}".format(instance)) + return + + stub.remove_instance(inst_id) + + if instance.get("members"): + item = stub.get_item(instance["members"][0]) + if item: + stub.rename_item(item.id, + item.name.replace(stub.PUBLISH_ICON, '')) + + +# new publisher section +def get_context_data(): + meta = _get_stub().get_metadata() + for item in meta: + if item.get("id") == "publish_context": + item.pop("id") + return item + + return {} + + +def update_context_data(data, changes): + item = data + item["id"] = "publish_context" + _get_stub().imprint(item["id"], item) + + +def get_context_title(): + """Returns title for Creator window""" + + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + return "{}/{}/{}".format(project_name, asset_name, task_name) + + +def _get_stub(): + """ + Handle pulling stub from PS to run operations on host + Returns: + (AEServerStub) or None + """ + try: + stub = get_stub() # only after Photoshop is up + except lib.ConnectionNotEstablishedYet: + print("Not connected yet, ignoring") + return + + if not stub.get_active_document_name(): + return + + return stub diff --git a/openpype/hosts/aftereffects/api/workio.py b/openpype/hosts/aftereffects/api/workio.py index 04c7834d8f..d6c732285a 100644 --- a/openpype/hosts/aftereffects/api/workio.py +++ b/openpype/hosts/aftereffects/api/workio.py @@ -1,20 +1,12 @@ """Host API required Work Files tool""" import os +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS from .launch_logic import get_stub -from avalon import api - - -def _active_document(): - document_name = get_stub().get_active_document_name() - if not document_name: - return None - - return document_name def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["aftereffects"] + return HOST_WORKFILE_EXTENSIONS["aftereffects"] def 
has_unsaved_changes(): @@ -39,7 +31,8 @@ def current_file(): full_name = get_stub().get_active_document_full_name() if full_name and full_name != "null": return os.path.normpath(full_name).replace("\\", "/") - except Exception: + except ValueError: + print("Nothing opened") pass return None @@ -47,3 +40,15 @@ def current_file(): def work_root(session): return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") + + +def _active_document(): + # TODO merge with current_file - even in extension + document_name = None + try: + document_name = get_stub().get_active_document_name() + except ValueError: + print("Nothing opened") + pass + + return document_name diff --git a/openpype/hosts/aftereffects/api/ws_stub.py b/openpype/hosts/aftereffects/api/ws_stub.py index b0893310c1..8719a8f46e 100644 --- a/openpype/hosts/aftereffects/api/ws_stub.py +++ b/openpype/hosts/aftereffects/api/ws_stub.py @@ -28,6 +28,9 @@ class AEItem(object): workAreaDuration = attr.ib(default=None) frameRate = attr.ib(default=None) file_name = attr.ib(default=None) + instance_id = attr.ib(default=None) # New Publisher + width = attr.ib(default=None) + height = attr.ib(default=None) class AfterEffectsServerStub(): @@ -110,11 +113,11 @@ class AfterEffectsServerStub(): self.log.debug("Couldn't find layer metadata") - def imprint(self, item, data, all_items=None, items_meta=None): + def imprint(self, item_id, data, all_items=None, items_meta=None): """ Save item metadata to Label field of metadata of active document Args: - item (AEItem): + item_id (int|str): id of FootageItem or instance_id for workfiles data(string): json representation for single layer all_items (list of item): for performance, could be injected for usage in loop, if not, single call will be @@ -132,8 +135,9 @@ class AfterEffectsServerStub(): is_new = True for item_meta in items_meta: - if item_meta.get('members') \ - and str(item.id) == str(item_meta.get('members')[0]): + if ((item_meta.get('members') and + str(item_id) == str(item_meta.get('members')[0])) or + item_meta.get("instance_id") == item_id): is_new = False if data: item_meta.update(data) @@ -153,10 +157,12 @@ class AfterEffectsServerStub(): item_ids = [int(item.id) for item in all_items] cleaned_data = [] for meta in result_meta: - # for creation of instance OR loaded container - if 'instance' in meta.get('id') or \ - int(meta.get('members')[0]) in item_ids: - cleaned_data.append(meta) + # do not added instance with nonexistend item id + if meta.get("members"): + if int(meta["members"][0]) not in item_ids: + continue + + cleaned_data.append(meta) payload = json.dumps(cleaned_data, indent=4) @@ -167,7 +173,7 @@ class AfterEffectsServerStub(): def get_active_document_full_name(self): """ - Returns just a name of active document via ws call + Returns absolute path of active document via ws call Returns(string): file name """ res = self.websocketserver.call(self.client.call( @@ -314,15 +320,13 @@ class AfterEffectsServerStub(): Keep matching item in file though. 
Args: - instance_id(string): instance uuid + instance_id(string): instance id """ cleaned_data = [] for instance in self.get_metadata(): - uuid_val = instance.get("uuid") - if not uuid_val: - uuid_val = instance.get("members")[0] # legacy - if uuid_val != instance_id: + inst_id = instance.get("instance_id") or instance.get("uuid") + if inst_id != instance_id: cleaned_data.append(instance) payload = json.dumps(cleaned_data, indent=4) @@ -357,7 +361,7 @@ class AfterEffectsServerStub(): item_id (int): Returns: - (namedtuple) + (AEItem) """ res = self.websocketserver.call(self.client.call @@ -418,7 +422,7 @@ class AfterEffectsServerStub(): """ Get render queue info for render purposes Returns: - (namedtuple): with 'file_name' field + (AEItem): with 'file_name' field """ res = self.websocketserver.call(self.client.call ('AfterEffects.get_render_info')) @@ -606,7 +610,10 @@ class AfterEffectsServerStub(): d.get('workAreaStart'), d.get('workAreaDuration'), d.get('frameRate'), - d.get('file_name')) + d.get('file_name'), + d.get("instance_id"), + d.get("width"), + d.get("height")) ret.append(item) return ret diff --git a/openpype/hosts/aftereffects/plugins/create/create_local_render.py b/openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py similarity index 64% rename from openpype/hosts/aftereffects/plugins/create/create_local_render.py rename to openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py index 9d2cdcd7be..04413acbcf 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_local_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py @@ -1,7 +1,7 @@ -from openpype.hosts.aftereffects.plugins.create import create_render +from openpype.hosts.aftereffects.plugins.create import create_legacy_render -class CreateLocalRender(create_render.CreateRender): +class CreateLocalRender(create_legacy_render.CreateRender): """ Creator to render locally. Created only after default render on farm. So family 'render.local' is diff --git a/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py b/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py new file mode 100644 index 0000000000..e4fbb47a33 --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py @@ -0,0 +1,62 @@ +from openpype.pipeline import create +from openpype.pipeline import CreatorError +from openpype.hosts.aftereffects.api import ( + get_stub, + list_instances +) + + +class CreateRender(create.LegacyCreator): + """Render folder for publish. + + Creates subsets in format 'familyTaskSubsetname', + eg 'renderCompositingMain'. + + Create only single instance from composition at a time. + """ + + name = "renderDefault" + label = "Render on Farm" + family = "render" + defaults = ["Main"] + + def process(self): + stub = get_stub() # only after After Effects is up + items = [] + if (self.options or {}).get("useSelection"): + items = stub.get_selected_items( + comps=True, folders=False, footages=False + ) + if len(items) > 1: + raise CreatorError( + "Please select only single composition at time." + ) + + if not items: + raise CreatorError(( + "Nothing to create. Select composition " + "if 'useSelection' or create at least " + "one composition." 
+ )) + + existing_subsets = [ + instance['subset'].lower() + for instance in list_instances() + ] + + item = items.pop() + if self.name.lower() in existing_subsets: + txt = "Instance with name \"{}\" already exists.".format(self.name) + raise CreatorError(txt) + + self.data["members"] = [item.id] + self.data["uuid"] = item.id # for SubsetManager + self.data["subset"] = ( + self.data["subset"] + .replace(stub.PUBLISH_ICON, '') + .replace(stub.LOADED_ICON, '') + ) + + stub.imprint(item, self.data) + stub.set_label_color(item.id, 14) # Cyan options 0 - 16 + stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"]) diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index 41efb4b0ba..215c148f37 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -1,36 +1,70 @@ -from openpype.pipeline import create -from openpype.pipeline import CreatorError -from openpype.hosts.aftereffects.api import ( - get_stub, - list_instances +from openpype import resources +from openpype.lib import BoolDef, UISeparatorDef +from openpype.hosts.aftereffects import api +from openpype.pipeline import ( + Creator, + CreatedInstance, + CreatorError, + legacy_io, ) -class CreateRender(create.LegacyCreator): - """Render folder for publish. - - Creates subsets in format 'familyTaskSubsetname', - eg 'renderCompositingMain'. - - Create only single instance from composition at a time. - """ - - name = "renderDefault" - label = "Render on Farm" +class RenderCreator(Creator): + identifier = "render" + label = "Render" family = "render" - defaults = ["Main"] + description = "Render creator" - def process(self): - stub = get_stub() # only after After Effects is up - if (self.options or {}).get("useSelection"): + create_allow_context_change = True + + def __init__( + self, create_context, system_settings, project_settings, headless=False + ): + super(RenderCreator, self).__init__(create_context, system_settings, + project_settings, headless) + self._default_variants = (project_settings["aftereffects"] + ["create"] + ["RenderCreator"] + ["defaults"]) + + def get_icon(self): + return resources.get_openpype_splash_filepath() + + def collect_instances(self): + for instance_data in api.list_instances(): + # legacy instances have family=='render' or 'renderLocal', use them + creator_id = (instance_data.get("creator_identifier") or + instance_data.get("family", '').replace("Local", '')) + if creator_id == self.identifier: + instance_data = self._handle_legacy(instance_data) + instance = CreatedInstance.from_existing( + instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + api.get_stub().imprint(created_inst.get("instance_id"), + created_inst.data_to_store()) + + def remove_instances(self, instances): + for instance in instances: + api.remove_instance(instance) + self._remove_instance_from_context(instance) + + def create(self, subset_name, data, pre_create_data): + stub = api.get_stub() # only after After Effects is up + if pre_create_data.get("use_selection"): items = stub.get_selected_items( comps=True, folders=False, footages=False ) + else: + items = stub.get_items(comps=True, folders=False, footages=False) + if len(items) > 1: raise CreatorError( "Please select only single composition at time." ) - if not items: raise CreatorError(( "Nothing to create. 
Select composition " @@ -38,24 +72,54 @@ class CreateRender(create.LegacyCreator): "one composition." )) - existing_subsets = [ - instance['subset'].lower() - for instance in list_instances() + for inst in self.create_context.instances: + if subset_name == inst.subset_name: + raise CreatorError("{} already exists".format( + inst.subset_name)) + + data["members"] = [items[0].id] + new_instance = CreatedInstance(self.family, subset_name, data, self) + if "farm" in pre_create_data: + use_farm = pre_create_data["farm"] + new_instance.creator_attributes["farm"] = use_farm + + api.get_stub().imprint(new_instance.id, + new_instance.data_to_store()) + self._add_instance_to_context(new_instance) + + def get_default_variants(self): + return self._default_variants + + def get_instance_attr_defs(self): + return [BoolDef("farm", label="Render on farm")] + + def get_pre_create_attr_defs(self): + output = [ + BoolDef("use_selection", default=True, label="Use selection"), + UISeparatorDef(), + BoolDef("farm", label="Render on farm") ] + return output - item = items.pop() - if self.name.lower() in existing_subsets: - txt = "Instance with name \"{}\" already exists.".format(self.name) - raise CreatorError(txt) + def get_detail_description(self): + return """Creator for Render instances""" - self.data["members"] = [item.id] - self.data["uuid"] = item.id # for SubsetManager - self.data["subset"] = ( - self.data["subset"] - .replace(stub.PUBLISH_ICON, '') - .replace(stub.LOADED_ICON, '') - ) + def _handle_legacy(self, instance_data): + """Converts old instances to new format.""" + if not instance_data.get("members"): + instance_data["members"] = [instance_data.get("uuid")] - stub.imprint(item, self.data) - stub.set_label_color(item.id, 14) # Cyan options 0 - 16 - stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"]) + if instance_data.get("uuid"): + # uuid not needed, replaced with unique instance_id + api.get_stub().remove_instance(instance_data.get("uuid")) + instance_data.pop("uuid") + + if not instance_data.get("task"): + instance_data["task"] = legacy_io.Session.get("AVALON_TASK") + + if not instance_data.get("creator_attributes"): + is_old_farm = instance_data["family"] != "renderLocal" + instance_data["creator_attributes"] = {"farm": is_old_farm} + instance_data["family"] = self.family + + return instance_data diff --git a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py new file mode 100644 index 0000000000..88e55e21b5 --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py @@ -0,0 +1,80 @@ +import openpype.hosts.aftereffects.api as api +from openpype.pipeline import ( + AutoCreator, + CreatedInstance, + legacy_io, +) + + +class AEWorkfileCreator(AutoCreator): + identifier = "workfile" + family = "workfile" + + def get_instance_attr_defs(self): + return [] + + def collect_instances(self): + for instance_data in api.list_instances(): + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + subset_name = instance_data["subset"] + instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + # nothing to change on workfiles + pass + + def create(self, options=None): + existing_instance = None + for instance in self.create_context.instances: + if instance.family == self.family: + existing_instance = instance + break + + variant = '' + project_name = 
legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] + + if existing_instance is None: + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": variant + } + data.update(self.get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name + )) + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(new_instance) + + api.get_stub().imprint(new_instance.get("instance_id"), + new_instance.data_to_store()) + + elif ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name diff --git a/openpype/hosts/aftereffects/plugins/load/load_background.py b/openpype/hosts/aftereffects/plugins/load/load_background.py index be43cae44e..d346df504a 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_background.py +++ b/openpype/hosts/aftereffects/plugins/load/load_background.py @@ -90,7 +90,7 @@ class BackgroundLoader(AfterEffectsLoader): container["namespace"] = comp_name container["members"] = comp.members - stub.imprint(comp, container) + stub.imprint(comp.id, container) def remove(self, container): """ @@ -99,10 +99,9 @@ class BackgroundLoader(AfterEffectsLoader): Args: container (dict): container to be removed - used to get layer_id """ - print("!!!! 
container:: {}".format(container)) stub = self.get_stub() layer = container.pop("layer") - stub.imprint(layer, {}) + stub.imprint(layer.id, {}) stub.delete_item(layer.id) def switch(self, container, representation): diff --git a/openpype/hosts/aftereffects/plugins/load/load_file.py b/openpype/hosts/aftereffects/plugins/load/load_file.py index 9eb9e80a2c..6ab69c6bfa 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_file.py +++ b/openpype/hosts/aftereffects/plugins/load/load_file.py @@ -96,9 +96,9 @@ class FileLoader(AfterEffectsLoader): # with aftereffects.maintained_selection(): # TODO stub.replace_item(layer.id, path, stub.LOADED_ICON + layer_name) stub.imprint( - layer, {"representation": str(representation["_id"]), - "name": context["subset"], - "namespace": layer_name} + layer.id, {"representation": str(representation["_id"]), + "name": context["subset"], + "namespace": layer_name} ) def remove(self, container): @@ -109,7 +109,7 @@ class FileLoader(AfterEffectsLoader): """ stub = self.get_stub() layer = container.pop("layer") - stub.imprint(layer, {}) + stub.imprint(layer.id, {}) stub.delete_item(layer.id) def switch(self, container, representation): diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_audio.py b/openpype/hosts/aftereffects/plugins/publish/collect_audio.py index 80679725e6..8647ba498b 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_audio.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_audio.py @@ -17,12 +17,11 @@ class CollectAudio(pyblish.api.ContextPlugin): def process(self, context): for instance in context: - if instance.data["family"] == 'render.farm': + if 'render.farm' in instance.data.get("families", []): comp_id = instance.data["comp_id"] if not comp_id: self.log.debug("No comp_id filled in instance") - # @iLLiCiTiT QUESTION Should return or continue? 
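# Note on the change just below: `return` ended process() for the whole
# context, so a single 'render.farm' instance without a comp_id also skipped
# audio collection for every remaining instance; `continue` skips only the
# broken one and lets the rest of the context be processed.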
- return + continue context.data["audioFile"] = os.path.normpath( get_stub().get_audio_url(comp_id) ).replace("\\", "/") diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index 2a4b773681..fa23bf92b0 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -21,135 +21,129 @@ class AERenderInstance(RenderInstance): projectEntity = attr.ib(default=None) stagingDir = attr.ib(default=None) app_version = attr.ib(default=None) + publish_attributes = attr.ib(default=None) + file_name = attr.ib(default=None) class CollectAERender(abstract_collect_render.AbstractCollectRender): - order = pyblish.api.CollectorOrder + 0.498 + order = pyblish.api.CollectorOrder + 0.405 label = "Collect After Effects Render Layers" hosts = ["aftereffects"] - # internal - family_remapping = { - "render": ("render.farm", "farm"), # (family, label) - "renderLocal": ("render", "local") - } padding_width = 6 rendered_extension = 'png' - stub = get_stub() + _stub = None + + @classmethod + def get_stub(cls): + if not cls._stub: + cls._stub = get_stub() + return cls._stub def get_instances(self, context): instances = [] + instances_to_remove = [] - app_version = self.stub.get_app_version() + app_version = CollectAERender.get_stub().get_app_version() app_version = app_version[0:4] current_file = context.data["currentFile"] version = context.data["version"] - asset_entity = context.data["assetEntity"] + project_entity = context.data["projectEntity"] - compositions = self.stub.get_items(True) + compositions = CollectAERender.get_stub().get_items(True) compositions_by_id = {item.id: item for item in compositions} - for inst in self.stub.get_metadata(): - schema = inst.get('schema') - # loaded asset container skip it - if schema and 'container' in schema: + for inst in context: + if not inst.data.get("active", True): continue - if not inst["members"]: - raise ValueError("Couldn't find id, unable to publish. 
" + - "Please recreate instance.") - item_id = inst["members"][0] + family = inst.data["family"] + if family not in ["render", "renderLocal"]: # legacy + continue - work_area_info = self.stub.get_work_area(int(item_id)) + item_id = inst.data["members"][0] + + work_area_info = CollectAERender.get_stub().get_work_area( + int(item_id)) if not work_area_info: self.log.warning("Orphaned instance, deleting metadata") - self.stub.remove_instance(int(item_id)) + inst_id = inst.get("instance_id") or item_id + CollectAERender.get_stub().remove_instance(inst_id) continue - frameStart = work_area_info.workAreaStart - - frameEnd = round(work_area_info.workAreaStart + - float(work_area_info.workAreaDuration) * - float(work_area_info.frameRate)) - 1 + frame_start = work_area_info.workAreaStart + frame_end = round(work_area_info.workAreaStart + + float(work_area_info.workAreaDuration) * + float(work_area_info.frameRate)) - 1 fps = work_area_info.frameRate # TODO add resolution when supported by extension - if inst["family"] in self.family_remapping.keys() \ - and inst["active"]: - remapped_family = self.family_remapping[inst["family"]] - instance = AERenderInstance( - family=remapped_family[0], - families=[remapped_family[0]], - version=version, - time="", - source=current_file, - label="{} - {}".format(inst["subset"], remapped_family[1]), - subset=inst["subset"], - asset=context.data["assetEntity"]["name"], - attachTo=False, - setMembers='', - publish=True, - renderer='aerender', - name=inst["subset"], - resolutionWidth=asset_entity["data"].get( - "resolutionWidth", - project_entity["data"]["resolutionWidth"]), - resolutionHeight=asset_entity["data"].get( - "resolutionHeight", - project_entity["data"]["resolutionHeight"]), - pixelAspect=1, - tileRendering=False, - tilesX=0, - tilesY=0, - frameStart=frameStart, - frameEnd=frameEnd, - frameStep=1, - toBeRenderedOn='deadline', - fps=fps, - app_version=app_version - ) + task_name = inst.data.get("task") # legacy - comp = compositions_by_id.get(int(item_id)) - if not comp: - raise ValueError("There is no composition for item {}". - format(item_id)) - instance.comp_name = comp.name - instance.comp_id = item_id - instance._anatomy = context.data["anatomy"] - instance.anatomyData = context.data["anatomyData"] + render_q = CollectAERender.get_stub().get_render_info() + if not render_q: + raise ValueError("No file extension set in Render Queue") - instance.outputDir = self._get_output_dir(instance) - instance.context = context + subset_name = inst.data["subset"] + instance = AERenderInstance( + family=family, + families=inst.data.get("families", []), + version=version, + time="", + source=current_file, + label="{} - {}".format(subset_name, family), + subset=subset_name, + asset=inst.data["asset"], + task=task_name, + attachTo=False, + setMembers='', + publish=True, + renderer='aerender', + name=subset_name, + resolutionWidth=render_q.width, + resolutionHeight=render_q.height, + pixelAspect=1, + tileRendering=False, + tilesX=0, + tilesY=0, + frameStart=frame_start, + frameEnd=frame_end, + frameStep=1, + toBeRenderedOn='deadline', + fps=fps, + app_version=app_version, + publish_attributes=inst.data.get("publish_attributes"), + file_name=render_q.file_name + ) - settings = get_project_settings(os.getenv("AVALON_PROJECT")) - reviewable_subset_filter = \ - (settings["deadline"] - ["publish"] - ["ProcessSubmittedJobOnFarm"] - ["aov_filter"]) + comp = compositions_by_id.get(int(item_id)) + if not comp: + raise ValueError("There is no composition for item {}". 
+ format(item_id)) + instance.outputDir = self._get_output_dir(instance) + instance.comp_name = comp.name + instance.comp_id = item_id - if inst["family"] == "renderLocal": - # for local renders - instance.anatomyData["version"] = instance.version - instance.anatomyData["subset"] = instance.subset - instance.stagingDir = tempfile.mkdtemp() - instance.projectEntity = project_entity + is_local = "renderLocal" in inst.data["family"] # legacy + if inst.data.get("creator_attributes"): + is_local = not inst.data["creator_attributes"].get("farm") + if is_local: + # for local renders + instance = self._update_for_local(instance, project_entity) + else: + fam = "render.farm" + if fam not in instance.families: + instance.families.append(fam) - if self.hosts[0] in reviewable_subset_filter.keys(): - for aov_pattern in \ - reviewable_subset_filter[self.hosts[0]]: - if re.match(aov_pattern, instance.subset): - instance.families.append("review") - instance.review = True - break - - self.log.info("New instance:: {}".format(instance)) - instances.append(instance) + instances.append(instance) + instances_to_remove.append(inst) + for instance in instances_to_remove: + context.remove(instance) return instances def get_expected_files(self, render_instance): @@ -168,15 +162,11 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): start = render_instance.frameStart end = render_instance.frameEnd - # pull file name from Render Queue Output module - render_q = self.stub.get_render_info() - if not render_q: - raise ValueError("No file extension set in Render Queue") - _, ext = os.path.splitext(os.path.basename(render_q.file_name)) + _, ext = os.path.splitext(os.path.basename(render_instance.file_name)) base_dir = self._get_output_dir(render_instance) expected_files = [] - if "#" not in render_q.file_name: # single frame (mov)W + if "#" not in render_instance.file_name: # single frame (mov)W path = os.path.join(base_dir, "{}_{}_{}.{}".format( render_instance.asset, render_instance.subset, @@ -216,3 +206,24 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): # for submit_publish_job return base_dir + + def _update_for_local(self, instance, project_entity): + """Update old saved instances to current publishing format""" + instance.stagingDir = tempfile.mkdtemp() + instance.projectEntity = project_entity + fam = "render.local" + if fam not in instance.families: + instance.families.append(fam) + + settings = get_project_settings(os.getenv("AVALON_PROJECT")) + reviewable_subset_filter = (settings["deadline"] + ["publish"] + ["ProcessSubmittedJobOnFarm"] + ["aov_filter"].get(self.hosts[0])) + for aov_pattern in reviewable_subset_filter: + if re.match(aov_pattern, instance.subset): + instance.families.append("review") + instance.review = True + break + + return instance diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py index c1c2be4855..9cb6900b0a 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py @@ -1,6 +1,8 @@ import os -from avalon import api + import pyblish.api +from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline import legacy_io class CollectWorkfile(pyblish.api.ContextPlugin): @@ -10,16 +12,45 @@ class CollectWorkfile(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder + 0.1 def process(self, context): - task = api.Session["AVALON_TASK"] + 
existing_instance = None + for instance in context: + if instance.data["family"] == "workfile": + self.log.debug("Workfile instance found, won't create new") + existing_instance = instance + break + current_file = context.data["currentFile"] staging_dir = os.path.dirname(current_file) scene_file = os.path.basename(current_file) + if existing_instance is None: # old publish + instance = self._get_new_instance(context, scene_file) + else: + instance = existing_instance + + # creating representation + representation = { + 'name': 'aep', + 'ext': 'aep', + 'files': scene_file, + "stagingDir": staging_dir, + } + + if not instance.data.get("representations"): + instance.data["representations"] = [] + instance.data["representations"].append(representation) + + instance.data["publish"] = instance.data["active"] # for DL + + def _get_new_instance(self, context, scene_file): + task = legacy_io.Session["AVALON_TASK"] version = context.data["version"] asset_entity = context.data["assetEntity"] project_entity = context.data["projectEntity"] - shared_instance_data = { + instance_data = { + "active": True, "asset": asset_entity["name"], + "task": task, "frameStart": asset_entity["data"]["frameStart"], "frameEnd": asset_entity["data"]["frameEnd"], "handleStart": asset_entity["data"]["handleStart"], @@ -38,7 +69,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin): # workfile instance family = "workfile" - subset = family + task.capitalize() + subset = get_subset_name_with_asset_doc( + family, + "", + context.data["anatomyData"]["task"]["name"], + context.data["assetEntity"], + context.data["anatomyData"]["project"]["name"], + host_name=context.data["hostName"] + ) # Create instance instance = context.create_instance(subset) @@ -51,20 +89,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin): "representations": list() }) - # adding basic script data - instance.data.update(shared_instance_data) + instance.data.update(instance_data) - # creating representation - representation = { - 'name': 'aep', - 'ext': 'aep', - 'files': scene_file, - "stagingDir": staging_dir, - } - - instance.data["representations"].append(representation) - - self.log.info('Publishing After Effects workfile') - - for i in context: - self.log.debug(f"{i.data['families']}") + return instance diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py index b738068a7b..7323a0b125 100644 --- a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py @@ -12,7 +12,7 @@ class ExtractLocalRender(openpype.api.Extractor): order = openpype.api.Extractor.order - 0.47 label = "Extract Local Render" hosts = ["aftereffects"] - families = ["render"] + families = ["renderLocal", "render.local"] def process(self, instance): stub = get_stub() diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py b/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py index e20598b311..eb2977309f 100644 --- a/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py +++ b/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py @@ -1,15 +1,16 @@ +import pyblish.api + import openpype.api from openpype.hosts.aftereffects.api import get_stub -class ExtractSaveScene(openpype.api.Extractor): +class ExtractSaveScene(pyblish.api.ContextPlugin): """Save scene before extraction.""" order = openpype.api.Extractor.order - 0.48 label = "Extract Save Scene" 
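# Becoming a ContextPlugin (and dropping the "workfile" family filter) makes
# the scene save run exactly once per publish, before any extraction,
# instead of being tied to the presence of a matching workfile instance.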
hosts = ["aftereffects"] - families = ["workfile"] - def process(self, instance): + def process(self, context): stub = get_stub() stub.save() diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml index 36fa90456e..0591020ed3 100644 --- a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml +++ b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml @@ -12,6 +12,8 @@ One of the settings in a scene doesn't match to asset settings in database. ### How to repair? Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there. + + In the scene, right-click the published composition > `Composition Settings`. ### __Detailed Info__ (optional) diff --git a/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py b/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py new file mode 100644 index 0000000000..03ec184524 --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py @@ -0,0 +1,54 @@ +import json +import pyblish.api +from openpype.hosts.aftereffects.api import list_instances + + +class PreCollectRender(pyblish.api.ContextPlugin): + """ + Checks if a render instance is of the old type and fills its families + so that both existing collectors work the same way. + + Could be removed in the future when no one uses the old publish. + """ + + label = "PreCollect Render" + order = pyblish.api.CollectorOrder + 0.400 + hosts = ["aftereffects"] + + family_remapping = { + "render": ("render.farm", "farm"), # (family, label) + "renderLocal": ("render.local", "local") + } + + def process(self, context): + if context.data.get("newPublishing"): + self.log.debug("Not applicable for New Publisher, skip") + return + + for inst in list_instances(): + if inst.get("creator_attributes"): + raise ValueError("Instance created in New publisher, " + "cannot be published in Pyblish.\n" + "Please publish in New Publisher " + "or recreate instances with legacy Creators") + + if inst["family"] not in self.family_remapping.keys(): + continue
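# "members" links a stored instance to its composition id; without it the
# comp cannot be resolved, hence the hard stop below. Note also the order:
# CollectorOrder + 0.400 runs just ahead of CollectAERender at + 0.405.
+ + if not inst["members"]: + raise ValueError("Couldn't find id, unable to publish. 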
" + + "Please recreate instance.") + + instance = context.create_instance(inst["subset"]) + inst["families"] = [self.family_remapping[inst["family"]][0]] + instance.data.update(inst) + + self._debug_log(instance) + + def _debug_log(self, instance): + def _default_json(value): + return str(value) + + self.log.info( + json.dumps(instance.data, indent=4, default=_default_json) + ) diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py index 37cecfbcc4..7a9356f020 100644 --- a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py +++ b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py @@ -1,7 +1,10 @@ -from avalon import api import pyblish.api + import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + legacy_io, +) from openpype.hosts.aftereffects.api import get_stub @@ -27,8 +30,8 @@ class ValidateInstanceAssetRepair(pyblish.api.Action): for instance in instances: data = stub.read(instance[0]) - data["asset"] = api.Session["AVALON_ASSET"] - stub.imprint(instance[0], data) + data["asset"] = legacy_io.Session["AVALON_ASSET"] + stub.imprint(instance[0].instance_id, data) class ValidateInstanceAsset(pyblish.api.InstancePlugin): @@ -51,7 +54,7 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin): def process(self, instance): instance_asset = instance.data["asset"] - current_asset = api.Session["AVALON_ASSET"] + current_asset = legacy_io.Session["AVALON_ASSET"] msg = ( f"Instance asset {instance_asset} is not the same " f"as current context {current_asset}." diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py index 273ccd295e..14e224fdc2 100644 --- a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py @@ -5,11 +5,15 @@ import re import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) from openpype.hosts.aftereffects.api import get_asset_settings -class ValidateSceneSettings(pyblish.api.InstancePlugin): +class ValidateSceneSettings(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): """ Ensures that Composition Settings (right mouse on comp) are same as in FTrack on task. 
@@ -59,15 +63,20 @@ def process(self, instance): """Plugin entry point.""" + # Skip the instance if it is not marked as active in its data + if not self.is_active(instance.data): + return + expected_settings = get_asset_settings() self.log.info("config from DB::{}".format(expected_settings)) - if any(re.search(pattern, os.getenv('AVALON_TASK')) + task_name = instance.data["anatomyData"]["task"]["name"] + if any(re.search(pattern, task_name) for pattern in self.skip_resolution_check): expected_settings.pop("resolutionWidth") expected_settings.pop("resolutionHeight") - if any(re.search(pattern, os.getenv('AVALON_TASK')) + if any(re.search(pattern, task_name) for pattern in self.skip_timelines_check): expected_settings.pop('fps', None) expected_settings.pop('frameStart', None) @@ -87,10 +96,14 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): duration = instance.data.get("frameEndHandle") - \ instance.data.get("frameStartHandle") + 1 - self.log.debug("filtered config::{}".format(expected_settings)) + self.log.debug("validated items::{}".format(expected_settings)) current_settings = { "fps": fps, + "frameStart": instance.data.get("frameStart"), + "frameEnd": instance.data.get("frameEnd"), + "handleStart": instance.data.get("handleStart"), + "handleEnd": instance.data.get("handleEnd"), "frameStartHandle": instance.data.get("frameStartHandle"), "frameEndHandle": instance.data.get("frameEndHandle"), "resolutionWidth": instance.data.get("resolutionWidth"), @@ -103,24 +116,22 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): invalid_keys = set() for key, value in expected_settings.items(): if value != current_settings[key]: - invalid_settings.append( - "{} expected: {} found: {}".format(key, value, - current_settings[key]) - ) + msg = "'{}' expected: '{}' found: '{}'".format( + key, value, current_settings[key]) + + if key == "duration" and expected_settings.get("handleStart"): + msg += " Handles included in calculation. Remove " \ + "handles in DB or extend frame range in " \ + "Composition Setting." + + invalid_settings.append(msg) invalid_keys.add(key) - if ((expected_settings.get("handleStart") - or expected_settings.get("handleEnd")) - and invalid_settings): - msg = "Handles included in calculation. Remove handles in DB " +\ - "or extend frame range in Composition Setting." - invalid_settings[-1]["reason"] = msg - - msg = "Found invalid settings:\n{}".format( - "\n".join(invalid_settings) - ) - if invalid_settings: + msg = "Found invalid settings:\n{}".format( + "\n".join(invalid_settings) + ) + invalid_keys_str = ",".join(invalid_keys) + break_str = "<br/>" + invalid_setting_str = "Found invalid settings:<br/>
{}".\ diff --git a/openpype/hosts/blender/__init__.py b/openpype/hosts/blender/__init__.py index 3081d3c9ba..0f27882c7e 100644 --- a/openpype/hosts/blender/__init__.py +++ b/openpype/hosts/blender/__init__.py @@ -29,12 +29,12 @@ def add_implementation_envs(env, _app): env.get("OPENPYPE_BLENDER_USER_SCRIPTS") or "" ) for path in openpype_blender_user_scripts.split(os.pathsep): - if path and os.path.exists(path): + if path: previous_user_scripts.add(os.path.normpath(path)) blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or "" for path in blender_user_scripts.split(os.pathsep): - if path and os.path.exists(path): + if path: previous_user_scripts.add(os.path.normpath(path)) # Remove implementation path from user script paths as is set to diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py index 3069c3e1c9..c1b5add518 100644 --- a/openpype/hosts/blender/api/ops.py +++ b/openpype/hosts/blender/api/ops.py @@ -15,9 +15,9 @@ from Qt import QtWidgets, QtCore import bpy import bpy.utils.previews -import avalon.api -from openpype.tools.utils import host_tools from openpype import style +from openpype.pipeline import legacy_io +from openpype.tools.utils import host_tools from .workio import OpenFileCacher @@ -279,7 +279,7 @@ class LaunchLoader(LaunchQtApp): def before_window_show(self): self._window.set_context( - {"asset": avalon.api.Session["AVALON_ASSET"]}, + {"asset": legacy_io.Session["AVALON_ASSET"]}, refresh=True ) @@ -327,9 +327,8 @@ class LaunchWorkFiles(LaunchQtApp): def execute(self, context): result = super().execute(context) self._window.set_context({ - "asset": avalon.api.Session["AVALON_ASSET"], - "silo": avalon.api.Session["AVALON_SILO"], - "task": avalon.api.Session["AVALON_TASK"] + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] }) return result @@ -359,8 +358,8 @@ class TOPBAR_MT_avalon(bpy.types.Menu): else: pyblish_menu_icon_id = 0 - asset = avalon.api.Session['AVALON_ASSET'] - task = avalon.api.Session['AVALON_TASK'] + asset = legacy_io.Session['AVALON_ASSET'] + task = legacy_io.Session['AVALON_TASK'] context_label = f"{asset}, {task}" context_label_item = layout.row() context_label_item.operator( diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py index 07a7509dd7..5b81764644 100644 --- a/openpype/hosts/blender/api/pipeline.py +++ b/openpype/hosts/blender/api/pipeline.py @@ -1,6 +1,5 @@ import os import sys -import importlib import traceback from typing import Callable, Dict, Iterator, List, Optional @@ -10,14 +9,15 @@ from . import lib from . 
import ops import pyblish.api -import avalon.api -from avalon import io, schema -from avalon.pipeline import AVALON_CONTAINER_ID from openpype.pipeline import ( - LegacyCreator, + schema, + legacy_io, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, ) from openpype.api import Logger from openpype.lib import ( @@ -31,7 +31,6 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") ORIGINAL_EXCEPTHOOK = sys.excepthook @@ -55,7 +54,7 @@ def install(): pyblish.api.register_plugin_path(str(PUBLISH_PATH)) register_loader_plugin_path(str(LOAD_PATH)) - avalon.api.register_plugin_path(LegacyCreator, str(CREATE_PATH)) + register_creator_plugin_path(str(CREATE_PATH)) lib.append_user_scripts() @@ -77,15 +76,15 @@ def uninstall(): pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) deregister_loader_plugin_path(str(LOAD_PATH)) - avalon.api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH)) + deregister_creator_plugin_path(str(CREATE_PATH)) if not IS_HEADLESS: ops.unregister() def set_start_end_frames(): - asset_name = io.Session["AVALON_ASSET"] - asset_doc = io.find_one({ + asset_name = legacy_io.Session["AVALON_ASSET"] + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -189,7 +188,7 @@ def _on_task_changed(): # `directory` attribute, so it opens in that directory (does it?). # https://docs.blender.org/api/blender2.8/bpy.types.Operator.html#calling-a-file-selector # https://docs.blender.org/api/blender2.8/bpy.types.WindowManager.html#bpy.types.WindowManager.fileselect_add - workdir = avalon.api.Session["AVALON_WORKDIR"] + workdir = legacy_io.Session["AVALON_WORKDIR"] log.debug("New working directory: %s", workdir) @@ -200,27 +199,6 @@ def _register_events(): log.info("Installed event callback for 'taskChanged'...") -def reload_pipeline(*args): - """Attempt to reload pipeline at run-time. - - Warning: - This is primarily for development and debugging purposes and not well - tested. 
- - """ - - avalon.api.uninstall() - - for module in ( - "avalon.io", - "avalon.lib", - "avalon.pipeline", - "avalon.api", - ): - module = importlib.import_module(module) - importlib.reload(module) - - def _discover_gui() -> Optional[Callable]: """Return the most desirable of the currently registered GUIs""" diff --git a/openpype/hosts/blender/api/workio.py b/openpype/hosts/blender/api/workio.py index fd68761982..5eb9f82999 100644 --- a/openpype/hosts/blender/api/workio.py +++ b/openpype/hosts/blender/api/workio.py @@ -4,7 +4,8 @@ from pathlib import Path from typing import List, Optional import bpy -from avalon import api + +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS class OpenFileCacher: @@ -77,7 +78,7 @@ def has_unsaved_changes() -> bool: def file_extensions() -> List[str]: """Return the supported file extensions for Blender scene files.""" - return api.HOST_WORKFILE_EXTENSIONS["blender"] + return HOST_WORKFILE_EXTENSIONS["blender"] def work_root(session: dict) -> str: diff --git a/openpype/hosts/blender/blender_addon/startup/init.py b/openpype/hosts/blender/blender_addon/startup/init.py index e43373bc6c..13a4b8a7a1 100644 --- a/openpype/hosts/blender/blender_addon/startup/init.py +++ b/openpype/hosts/blender/blender_addon/startup/init.py @@ -1,4 +1,4 @@ -from avalon import pipeline +from openpype.pipeline import install_host from openpype.hosts.blender import api -pipeline.install(api) +install_host(api) diff --git a/openpype/hosts/blender/plugins/create/create_action.py b/openpype/hosts/blender/plugins/create/create_action.py index 5f66f5da6e..54b3a501a7 100644 --- a/openpype/hosts/blender/plugins/create/create_action.py +++ b/openpype/hosts/blender/plugins/create/create_action.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io import openpype.hosts.blender.api.plugin from openpype.hosts.blender.api import lib @@ -22,7 +22,7 @@ class CreateAction(openpype.hosts.blender.api.plugin.Creator): name = openpype.hosts.blender.api.plugin.asset_name(asset, subset) collection = bpy.data.collections.new(name=name) bpy.context.scene.collection.children.link(collection) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(collection, self.data) if (self.options or {}).get("useSelection"): diff --git a/openpype/hosts/blender/plugins/create/create_animation.py b/openpype/hosts/blender/plugins/create/create_animation.py index b88010ae90..a0e9e5e399 100644 --- a/openpype/hosts/blender/plugins/create/create_animation.py +++ b/openpype/hosts/blender/plugins/create/create_animation.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin, lib, ops from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES @@ -37,7 +37,7 @@ class CreateAnimation(plugin.Creator): # asset_group.empty_display_type = 'SINGLE_ARROW' asset_group = bpy.data.collections.new(name=name) instances.children.link(asset_group) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(asset_group, self.data) if (self.options or {}).get("useSelection"): diff --git a/openpype/hosts/blender/plugins/create/create_camera.py b/openpype/hosts/blender/plugins/create/create_camera.py index cc796d464d..1a3c008069 100644 --- a/openpype/hosts/blender/plugins/create/create_camera.py +++ b/openpype/hosts/blender/plugins/create/create_camera.py @@ -2,7 +2,7 @@ import bpy 
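# The remaining Blender creator hunks repeat this exact one-line migration:
# the deprecated `avalon.api.Session` lookup becomes `legacy_io.Session`, e.g.
#     from openpype.pipeline import legacy_io
#     self.data['task'] = legacy_io.Session.get('AVALON_TASK')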
-from avalon import api +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin, lib, ops from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES @@ -40,7 +40,7 @@ class CreateCamera(plugin.Creator): asset_group = bpy.data.objects.new(name=name, object_data=None) asset_group.empty_display_type = 'SINGLE_ARROW' instances.objects.link(asset_group) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') print(f"self.data: {self.data}") lib.imprint(asset_group, self.data) diff --git a/openpype/hosts/blender/plugins/create/create_layout.py b/openpype/hosts/blender/plugins/create/create_layout.py index f62cbc52ba..5949a4b86e 100644 --- a/openpype/hosts/blender/plugins/create/create_layout.py +++ b/openpype/hosts/blender/plugins/create/create_layout.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin, lib, ops from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES @@ -34,7 +34,7 @@ class CreateLayout(plugin.Creator): asset_group = bpy.data.objects.new(name=name, object_data=None) asset_group.empty_display_type = 'SINGLE_ARROW' instances.objects.link(asset_group) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(asset_group, self.data) # Add selected objects to instance diff --git a/openpype/hosts/blender/plugins/create/create_model.py b/openpype/hosts/blender/plugins/create/create_model.py index 75c90f9bb1..fedc708943 100644 --- a/openpype/hosts/blender/plugins/create/create_model.py +++ b/openpype/hosts/blender/plugins/create/create_model.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin, lib, ops from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES @@ -34,7 +34,7 @@ class CreateModel(plugin.Creator): asset_group = bpy.data.objects.new(name=name, object_data=None) asset_group.empty_display_type = 'SINGLE_ARROW' instances.objects.link(asset_group) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(asset_group, self.data) # Add selected objects to instance diff --git a/openpype/hosts/blender/plugins/create/create_pointcache.py b/openpype/hosts/blender/plugins/create/create_pointcache.py index bf5a84048f..38707fd3b1 100644 --- a/openpype/hosts/blender/plugins/create/create_pointcache.py +++ b/openpype/hosts/blender/plugins/create/create_pointcache.py @@ -2,7 +2,7 @@ import bpy -from avalon import api +from openpype.pipeline import legacy_io import openpype.hosts.blender.api.plugin from openpype.hosts.blender.api import lib @@ -22,7 +22,7 @@ class CreatePointcache(openpype.hosts.blender.api.plugin.Creator): name = openpype.hosts.blender.api.plugin.asset_name(asset, subset) collection = bpy.data.collections.new(name=name) bpy.context.scene.collection.children.link(collection) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(collection, self.data) if (self.options or {}).get("useSelection"): diff --git a/openpype/hosts/blender/plugins/create/create_rig.py b/openpype/hosts/blender/plugins/create/create_rig.py index 65f5061924..0abd306c6b 100644 --- a/openpype/hosts/blender/plugins/create/create_rig.py +++ b/openpype/hosts/blender/plugins/create/create_rig.py @@ -2,7 +2,7 @@ import bpy -from 
avalon import api +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin, lib, ops from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES @@ -34,7 +34,7 @@ class CreateRig(plugin.Creator): asset_group = bpy.data.objects.new(name=name, object_data=None) asset_group.empty_display_type = 'SINGLE_ARROW' instances.objects.link(asset_group) - self.data['task'] = api.Session.get('AVALON_TASK') + self.data['task'] = legacy_io.Session.get('AVALON_TASK') lib.imprint(asset_group, self.data) # Add selected objects to instance diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/openpype/hosts/blender/plugins/load/load_abc.py index 3daaeceffe..1b2e800769 100644 --- a/openpype/hosts/blender/plugins/load/load_abc.py +++ b/openpype/hosts/blender/plugins/load/load_abc.py @@ -6,11 +6,14 @@ from typing import Dict, List, Optional import bpy -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) + from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) from openpype.hosts.blender.api import plugin, lib diff --git a/openpype/hosts/blender/plugins/load/load_audio.py b/openpype/hosts/blender/plugins/load/load_audio.py index b95c5db270..3f4fcc17de 100644 --- a/openpype/hosts/blender/plugins/load/load_audio.py +++ b/openpype/hosts/blender/plugins/load/load_audio.py @@ -6,12 +6,14 @@ from typing import Dict, List, Optional import bpy -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) diff --git a/openpype/hosts/blender/plugins/load/load_camera_blend.py b/openpype/hosts/blender/plugins/load/load_camera_blend.py index 6ed2e8a575..f00027f0b4 100644 --- a/openpype/hosts/blender/plugins/load/load_camera_blend.py +++ b/openpype/hosts/blender/plugins/load/load_camera_blend.py @@ -7,12 +7,14 @@ from typing import Dict, List, Optional import bpy -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) logger = logging.getLogger("openpype").getChild( diff --git a/openpype/hosts/blender/plugins/load/load_camera_fbx.py b/openpype/hosts/blender/plugins/load/load_camera_fbx.py index 626ed44f08..97f844e610 100644 --- a/openpype/hosts/blender/plugins/load/load_camera_fbx.py +++ b/openpype/hosts/blender/plugins/load/load_camera_fbx.py @@ -6,12 +6,14 @@ from typing import Dict, List, Optional import bpy -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin, lib from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) diff --git a/openpype/hosts/blender/plugins/load/load_fbx.py b/openpype/hosts/blender/plugins/load/load_fbx.py index 2d249ef647..ee2e7d175c 100644 --- a/openpype/hosts/blender/plugins/load/load_fbx.py +++ b/openpype/hosts/blender/plugins/load/load_fbx.py @@ -6,12 +6,14 @@ from typing import Dict, List, Optional import bpy -from openpype.pipeline import 
get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin, lib from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) diff --git a/openpype/hosts/blender/plugins/load/load_layout_blend.py b/openpype/hosts/blender/plugins/load/load_layout_blend.py index d87df3c010..cf8e89ed1f 100644 --- a/openpype/hosts/blender/plugins/load/load_layout_blend.py +++ b/openpype/hosts/blender/plugins/load/load_layout_blend.py @@ -10,12 +10,12 @@ from openpype import lib from openpype.pipeline import ( legacy_create, get_representation_path, + AVALON_CONTAINER_ID, ) from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) diff --git a/openpype/hosts/blender/plugins/load/load_layout_json.py b/openpype/hosts/blender/plugins/load/load_layout_json.py index 0693937fec..a0580af4a0 100644 --- a/openpype/hosts/blender/plugins/load/load_layout_json.py +++ b/openpype/hosts/blender/plugins/load/load_layout_json.py @@ -13,12 +13,12 @@ from openpype.pipeline import ( load_container, get_representation_path, loaders_from_representation, + AVALON_CONTAINER_ID, ) from openpype.hosts.blender.api.pipeline import ( AVALON_INSTANCES, AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) from openpype.hosts.blender.api import plugin diff --git a/openpype/hosts/blender/plugins/load/load_model.py b/openpype/hosts/blender/plugins/load/load_model.py index 18d01dcb29..0a5d98ffa0 100644 --- a/openpype/hosts/blender/plugins/load/load_model.py +++ b/openpype/hosts/blender/plugins/load/load_model.py @@ -6,12 +6,14 @@ from typing import Dict, List, Optional import bpy -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) diff --git a/openpype/hosts/blender/plugins/load/load_rig.py b/openpype/hosts/blender/plugins/load/load_rig.py index cec088076c..4dfa96167f 100644 --- a/openpype/hosts/blender/plugins/load/load_rig.py +++ b/openpype/hosts/blender/plugins/load/load_rig.py @@ -10,6 +10,7 @@ from openpype import lib from openpype.pipeline import ( legacy_create, get_representation_path, + AVALON_CONTAINER_ID, ) from openpype.hosts.blender.api import ( plugin, @@ -18,7 +19,6 @@ from openpype.hosts.blender.api import ( from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) diff --git a/openpype/hosts/blender/plugins/publish/extract_layout.py b/openpype/hosts/blender/plugins/publish/extract_layout.py index cc7c90f4c8..8ecc78a2c6 100644 --- a/openpype/hosts/blender/plugins/publish/extract_layout.py +++ b/openpype/hosts/blender/plugins/publish/extract_layout.py @@ -1,11 +1,13 @@ import os import json +from bson.objectid import ObjectId + import bpy import bpy_extras import bpy_extras.anim_utils -from avalon import io +from openpype.pipeline import legacy_io from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY import openpype.api @@ -137,10 +139,10 @@ class ExtractLayout(openpype.api.Extractor): self.log.debug("Parent: {}".format(parent)) # Get blend reference - blend = io.find_one( + blend = legacy_io.find_one( { "type": "representation", - 
"parent": io.ObjectId(parent), + "parent": ObjectId(parent), "name": "blend" }, projection={"_id": True}) @@ -148,10 +150,10 @@ class ExtractLayout(openpype.api.Extractor): if blend: blend_id = blend["_id"] # Get fbx reference - fbx = io.find_one( + fbx = legacy_io.find_one( { "type": "representation", - "parent": io.ObjectId(parent), + "parent": ObjectId(parent), "name": "fbx" }, projection={"_id": True}) @@ -159,10 +161,10 @@ class ExtractLayout(openpype.api.Extractor): if fbx: fbx_id = fbx["_id"] # Get abc reference - abc = io.find_one( + abc = legacy_io.find_one( { "type": "representation", - "parent": io.ObjectId(parent), + "parent": ObjectId(parent), "name": "abc" }, projection={"_id": True}) diff --git a/openpype/hosts/blender/plugins/publish/integrate_animation.py b/openpype/hosts/blender/plugins/publish/integrate_animation.py index 90e94a4aac..d9a85bc79b 100644 --- a/openpype/hosts/blender/plugins/publish/integrate_animation.py +++ b/openpype/hosts/blender/plugins/publish/integrate_animation.py @@ -1,6 +1,5 @@ import json -from avalon import io import pyblish.api diff --git a/openpype/hosts/celaction/api/cli.py b/openpype/hosts/celaction/api/cli.py index bc1e3eaf89..8c7b3a2e74 100644 --- a/openpype/hosts/celaction/api/cli.py +++ b/openpype/hosts/celaction/api/cli.py @@ -3,8 +3,6 @@ import sys import copy import argparse -from avalon import io - import pyblish.api import pyblish.util @@ -13,6 +11,8 @@ import openpype import openpype.hosts.celaction from openpype.hosts.celaction import api as celaction from openpype.tools.utils import host_tools +from openpype.pipeline import install_openpype_plugins + log = Logger().get_logger("Celaction_cli_publisher") @@ -21,9 +21,6 @@ publish_host = "celaction" HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.celaction.__file__)) PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") def cli(): @@ -74,7 +71,7 @@ def main(): _prepare_publish_environments() # Registers pype's Global pyblish plugins - openpype.install() + install_openpype_plugins() if os.path.exists(PUBLISH_PATH): log.info(f"Registering path: {PUBLISH_PATH}") diff --git a/openpype/hosts/celaction/plugins/publish/collect_audio.py b/openpype/hosts/celaction/plugins/publish/collect_audio.py index 80c1c37d7e..8acda5fc7c 100644 --- a/openpype/hosts/celaction/plugins/publish/collect_audio.py +++ b/openpype/hosts/celaction/plugins/publish/collect_audio.py @@ -1,10 +1,10 @@ import os import collections +from pprint import pformat import pyblish.api -from avalon import io -from pprint import pformat +from openpype.pipeline import legacy_io class AppendCelactionAudio(pyblish.api.ContextPlugin): @@ -60,7 +60,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): """ # Query all subsets for asset - subset_docs = io.find({ + subset_docs = legacy_io.find({ "type": "subset", "parent": asset_doc["_id"] }) @@ -93,7 +93,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): }} ] last_versions_by_subset_id = dict() - for doc in io.aggregate(pipeline): + for doc in legacy_io.aggregate(pipeline): doc["parent"] = doc["_id"] doc["_id"] = doc.pop("_version_id") last_versions_by_subset_id[doc["parent"]] = doc @@ -102,7 +102,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): for version_doc in last_versions_by_subset_id.values(): version_docs_by_id[version_doc["_id"]] = version_doc - 
repre_docs = io.find({
+        repre_docs = legacy_io.find({
             "type": "representation",
             "parent": {"$in": list(version_docs_by_id.keys())},
             "name": {"$in": representations}
diff --git a/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py b/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py
index f393e471c4..1d2d9da1af 100644
--- a/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py
+++ b/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py
@@ -1,6 +1,6 @@
 import os
-from avalon import api
 import pyblish.api
+from openpype.pipeline import legacy_io


 class CollectCelactionInstances(pyblish.api.ContextPlugin):
@@ -10,7 +10,7 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
     order = pyblish.api.CollectorOrder + 0.1

     def process(self, context):
-        task = api.Session["AVALON_TASK"]
+        task = legacy_io.Session["AVALON_TASK"]
         current_file = context.data["currentFile"]
         staging_dir = os.path.dirname(current_file)
         scene_file = os.path.basename(current_file)
diff --git a/openpype/hosts/flame/api/__init__.py b/openpype/hosts/flame/api/__init__.py
index f210c27f87..2c461e5f16 100644
--- a/openpype/hosts/flame/api/__init__.py
+++ b/openpype/hosts/flame/api/__init__.py
@@ -11,10 +11,8 @@ from .constants import (
 from .lib import (
     CTX,
     FlameAppFramework,
-    get_project_manager,
     get_current_project,
     get_current_sequence,
-    create_bin,
     create_segment_data_marker,
     get_segment_data_marker,
     set_segment_data_marker,
@@ -29,7 +27,10 @@ from .lib import (
     get_frame_from_filename,
     get_padding_from_filename,
     maintained_object_duplication,
-    get_clip_segment
+    maintained_temp_file_path,
+    get_clip_segment,
+    get_batch_group_from_desktop,
+    MediaInfoFile
 )
 from .utils import (
     setup,
@@ -56,7 +57,6 @@ from .plugin import (
     PublishableClip,
     ClipLoader,
     OpenClipSolver
-
 )
 from .workio import (
     open_file,
@@ -71,6 +71,10 @@ from .render_utils import (
     get_preset_path_by_xml_name,
     modify_preset_file
 )
+from .batch_utils import (
+    create_batch_group,
+    create_batch_group_content
+)

 __all__ = [
     # constants
@@ -83,10 +87,8 @@ __all__ = [
     # lib
     "CTX",
     "FlameAppFramework",
-    "get_project_manager",
     "get_current_project",
     "get_current_sequence",
-    "create_bin",
     "create_segment_data_marker",
     "get_segment_data_marker",
     "set_segment_data_marker",
@@ -101,7 +103,10 @@ __all__ = [
     "get_frame_from_filename",
     "get_padding_from_filename",
     "maintained_object_duplication",
+    "maintained_temp_file_path",
     "get_clip_segment",
+    "get_batch_group_from_desktop",
+    "MediaInfoFile",

     # pipeline
     "install",
@@ -142,5 +147,9 @@ __all__ = [
     # render utils
     "export_clip",
     "get_preset_path_by_xml_name",
-    "modify_preset_file"
+    "modify_preset_file",
+
+    # batch utils
+    "create_batch_group",
+    "create_batch_group_content"
 ]
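Before the new module itself, a usage sketch helps: the following hypothetical snippet shows the node and link dictionary shapes the two exported `batch_utils` helpers consume. It needs a live Flame session (the module imports `flame` at the top level), and every name and value here is illustrative only.

```python
import openpype.hosts.flame.api as opfapi

# create a batch group spanning the shot range (handles widen it)
bgroup = opfapi.create_batch_group(
    "sh010_compositing",          # hypothetical batch group name
    frame_start=1001,
    frame_duration=48,
    schematic_reels=["OP_LoadedReel"],
    shelf_reels=["ShelfReel1"],
    handleStart=10,
    handleEnd=10,
)

# populate it: node dicts need "id", "type" and "properties";
# link dicts wire connectors between node ids
nodes = [
    {"id": "comp01", "type": "comp", "properties": {}},
    {"id": "write01", "type": "Write File",
     "properties": {"name": "sh010_render"}},
]
links = [{
    "from_node": {"id": "comp01", "connector": "Result"},
    "to_node": {"id": "write01", "connector": "Front"},
}]
opfapi.create_batch_group_content(nodes, links, bgroup)
```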
diff --git a/openpype/hosts/flame/api/batch_utils.py b/openpype/hosts/flame/api/batch_utils.py
new file mode 100644
index 0000000000..9d419a4a90
--- /dev/null
+++ b/openpype/hosts/flame/api/batch_utils.py
@@ -0,0 +1,151 @@
+import flame
+
+
+def create_batch_group(
+    name,
+    frame_start,
+    frame_duration,
+    update_batch_group=None,
+    **kwargs
+):
+    """Create Batch Group in active project's Desktop
+
+    Args:
+        name (str): name of batch group to be created
+        frame_start (int): start frame of batch
+        frame_duration (int): duration of batch in frames
+        update_batch_group (PyBatch)[optional]: batch group to update
+
+    Return:
+        PyBatch: active flame batch group
+    """
+    # make sure some batch obj is present
+    batch_group = update_batch_group or flame.batch
+
+    schematic_reels = kwargs.get("schematic_reels") or ['LoadedReel1']
+    shelf_reels = kwargs.get("shelf_reels") or ['ShelfReel1']
+
+    handle_start = kwargs.get("handleStart") or 0
+    handle_end = kwargs.get("handleEnd") or 0
+
+    frame_start -= handle_start
+    frame_duration += handle_start + handle_end
+
+    if not update_batch_group:
+        # Create batch group with name, start_frame value, duration value,
+        # set of schematic reel names, set of shelf reel names
+        batch_group = batch_group.create_batch_group(
+            name,
+            start_frame=frame_start,
+            duration=frame_duration,
+            reels=schematic_reels,
+            shelf_reels=shelf_reels
+        )
+    else:
+        batch_group.name = name
+        batch_group.start_frame = frame_start
+        batch_group.duration = frame_duration
+
+        # add reels to batch group
+        _add_reels_to_batch_group(
+            batch_group, schematic_reels, shelf_reels)
+
+        # TODO: also update write node if there is any
+        # TODO: also update loaders to start from correct frameStart
+
+    if kwargs.get("switch_batch_tab"):
+        # use this command to switch to the batch tab
+        batch_group.go_to()
+
+    return batch_group
+
+
+def _add_reels_to_batch_group(batch_group, reels, shelf_reels):
+    # update or create defined reels
+    # helper variables
+    reel_names = [
+        r.name.get_value()
+        for r in batch_group.reels
+    ]
+    shelf_reel_names = [
+        r.name.get_value()
+        for r in batch_group.shelf_reels
+    ]
+    # add schematic reels
+    for _r in reels:
+        if _r in reel_names:
+            continue
+        batch_group.create_reel(_r)
+
+    # add shelf reels
+    for _sr in shelf_reels:
+        if _sr in shelf_reel_names:
+            continue
+        batch_group.create_shelf_reel(_sr)
+
+
+def create_batch_group_content(batch_nodes, batch_links, batch_group=None):
+    """Create nodes and links inside a batch group
+
+    Args:
+        batch_nodes (list of dict): each dict is node definition
+        batch_links (list of dict): each dict is link definition
+        batch_group (PyBatch, optional): batch group. Defaults to None.
+ + Return: + dict: all batch nodes {name or id: PyNode} + """ + # make sure some batch obj is present + batch_group = batch_group or flame.batch + all_batch_nodes = { + b.name.get_value(): b + for b in batch_group.nodes + } + for node in batch_nodes: + # NOTE: node_props needs to be ideally OrederDict type + node_id, node_type, node_props = ( + node["id"], node["type"], node["properties"]) + + # get node name for checking if exists + node_name = node_props.pop("name", None) or node_id + + if all_batch_nodes.get(node_name): + # update existing batch node + batch_node = all_batch_nodes[node_name] + else: + # create new batch node + batch_node = batch_group.create_node(node_type) + + # set name + batch_node.name.set_value(node_name) + + # set attributes found in node props + for key, value in node_props.items(): + if not hasattr(batch_node, key): + continue + setattr(batch_node, key, value) + + # add created node for possible linking + all_batch_nodes[node_id] = batch_node + + # link nodes to each other + for link in batch_links: + _from_n, _to_n = link["from_node"], link["to_node"] + + # check if all linking nodes are available + if not all([ + all_batch_nodes.get(_from_n["id"]), + all_batch_nodes.get(_to_n["id"]) + ]): + continue + + # link nodes in defined link + batch_group.connect_nodes( + all_batch_nodes[_from_n["id"]], _from_n["connector"], + all_batch_nodes[_to_n["id"]], _to_n["connector"] + ) + + # sort batch nodes + batch_group.organize() + + return all_batch_nodes diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py index 74d9e7607a..c7c444c1fb 100644 --- a/openpype/hosts/flame/api/lib.py +++ b/openpype/hosts/flame/api/lib.py @@ -3,7 +3,12 @@ import os import re import json import pickle +import tempfile +import itertools import contextlib +import xml.etree.cElementTree as cET +from copy import deepcopy +from xml.etree import ElementTree as ET from pprint import pformat from .constants import ( MARKER_COLOR, @@ -12,12 +17,14 @@ from .constants import ( COLOR_MAP, MARKER_PUBLISH_DEFAULT ) -from openpype.api import Logger -log = Logger.get_logger(__name__) +import openpype.api as openpype + +log = openpype.Logger.get_logger(__name__) FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]") + class CTX: # singleton used for passing data between api modules app_framework = None @@ -226,16 +233,6 @@ class FlameAppFramework(object): return True -def get_project_manager(): - # TODO: get_project_manager - return - - -def get_media_storage(): - # TODO: get_media_storage - return - - def get_current_project(): import flame return flame.project.current_project @@ -265,11 +262,6 @@ def get_current_sequence(selection): return process_timeline -def create_bin(name, root=None): - # TODO: create_bin - return - - def rescan_hooks(): import flame try: @@ -279,6 +271,7 @@ def rescan_hooks(): def get_metadata(project_name, _log=None): + # TODO: can be replaced by MediaInfoFile class method from adsk.libwiretapPythonClientAPI import ( WireTapClient, WireTapServerHandle, @@ -538,9 +531,17 @@ def get_segment_attributes(segment): # head and tail with forward compatibility if segment.head: - clip_data["segment_head"] = int(segment.head) + # `infinite` can be also returned + if isinstance(segment.head, str): + clip_data["segment_head"] = 0 + else: + clip_data["segment_head"] = int(segment.head) if segment.tail: - clip_data["segment_tail"] = int(segment.tail) + # `infinite` can be also returned + if isinstance(segment.tail, str): + clip_data["segment_tail"] = 0 + else: + 
clip_data["segment_tail"] = int(segment.tail)

     # add all available shot tokens
     shot_tokens = _get_shot_tokens_values(segment, [
@@ -695,6 +696,25 @@ def maintained_object_duplication(item):
         flame.delete(duplicate)


+@contextlib.contextmanager
+def maintained_temp_file_path(suffix=None):
+    _suffix = suffix or ""
+
+    try:
+        # generate a temporary file path (the file itself is created
+        # by whoever consumes the yielded path)
+        temporary_file = tempfile.mktemp(
+            suffix=_suffix, prefix="flame_maintained_")
+        yield temporary_file.replace("\\", "/")
+
+    except IOError as _error:
+        raise IOError(
+            "Not able to create temp file: {}".format(_error))
+
+    finally:
+        # remove the temporary file if it was created
+        if os.path.isfile(temporary_file):
+            os.remove(temporary_file)
+
+
 def get_clip_segment(flame_clip):
     name = flame_clip.name.get_value()
     version = flame_clip.versions[0]
@@ -708,3 +728,213 @@ def get_clip_segment(flame_clip):
         raise ValueError("Clip `{}` has too many segments!".format(name))

     return segments[0]
+
+
+def get_batch_group_from_desktop(name):
+    project = get_current_project()
+    project_desktop = project.current_workspace.desktop
+
+    for bgroup in project_desktop.batch_groups:
+        if bgroup.name.get_value() in name:
+            return bgroup
+
+
+class MediaInfoFile(object):
+    """Class to get media info file clip data
+
+    Raises:
+        IOError: MEDIA_SCRIPT_PATH path doesn't exist
+        TypeError: Not able to generate clip xml data file
+        ET.ParseError: Missing clip in xml clip data
+        IOError: Not able to save xml clip data to file
+
+    Attributes:
+        str: `MEDIA_SCRIPT_PATH` path to the `dl_get_media_info` binary
+        logging.Logger: `log` logger
+
+    TODO: add method for getting metadata to dict
+    """
+    MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info"
+
+    log = log
+
+    _clip_data = None
+    _start_frame = None
+    _fps = None
+    _drop_mode = None
+
+    def __init__(self, path, **kwargs):
+
+        # replace log if any
+        if kwargs.get("logger"):
+            self.log = kwargs["logger"]
+
+        # test if `dl_get_media_info` path exists
+        self._validate_media_script_path()
+
+        # derive other feed variables
+        self.feed_basename = os.path.basename(path)
+        self.feed_dir = os.path.dirname(path)
+        self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()
+
+        with maintained_temp_file_path(".clip") as tmp_path:
+            self.log.info("Temp File: {}".format(tmp_path))
+            self._generate_media_info_file(tmp_path)
+
+            # get clip data; if multiple clips are present,
+            # reduce them to the single matching one
+            xml_data = self._make_single_clip_media_info(tmp_path)
+            self.log.debug("xml_data: {}".format(xml_data))
+            self.log.debug("type: {}".format(type(xml_data)))
+
+            # get all time related data and assign them
+            self._get_time_info_from_origin(xml_data)
+            self.log.debug("start_frame: {}".format(self.start_frame))
+            self.log.debug("fps: {}".format(self.fps))
+            self.log.debug("drop frame: {}".format(self.drop_mode))
+            self.clip_data = xml_data
+
+    @property
+    def clip_data(self):
+        """Clip's xml clip data
+
+        Returns:
+            xml.etree.ElementTree: xml data
+        """
+        return self._clip_data
+
+    @clip_data.setter
+    def clip_data(self, data):
+        self._clip_data = data
+
+    @property
+    def start_frame(self):
+        """ Clip's starting frame found in timecode
+
+        Returns:
+            int: number of frames
+        """
+        return self._start_frame
+
+    @start_frame.setter
+    def start_frame(self, number):
+        self._start_frame = int(number)
+
+    @property
+    def fps(self):
+        """ Clip's frame rate
+
+        Returns:
+            float: frame rate
+        """
+        return self._fps
+
+    @fps.setter
+    def fps(self, fl_number):
+        self._fps = float(fl_number)
+
+    @property
+    def drop_mode(self):
+        """ Clip's drop frame mode
+
+        Returns:
+            str: drop frame flag
+        """
+        return self._drop_mode
+
+    @drop_mode.setter
+    def drop_mode(self, text):
+        self._drop_mode = str(text)
+
+    def _validate_media_script_path(self):
+        if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
+            raise IOError("Media Script does not exist: `{}`".format(
+                self.MEDIA_SCRIPT_PATH))
+
+    def _generate_media_info_file(self, fpath):
+        # Create cmd arguments for getting the xml media info file
+        cmd_args = [
+            self.MEDIA_SCRIPT_PATH,
+            "-e", self.feed_ext,
+            "-o", fpath,
+            self.feed_dir
+        ]
+
+        try:
+            # execute creation of clip xml template data
+            openpype.run_subprocess(cmd_args)
+        except TypeError as error:
+            raise TypeError(
+                "Error creating `{}` due to: {}".format(fpath, error))
+
+    def _make_single_clip_media_info(self, fpath):
+        with open(fpath) as f:
+            lines = f.readlines()
+            # wrap all clip elements into a single root element
+            # so the file parses as one XML document
+            _added_root = itertools.chain(
+                "<root>", deepcopy(lines)[1:], "</root>")
+            new_root = ET.fromstringlist(_added_root)
+
+        # find the clip which is matching to my input name
+        xml_clips = new_root.findall("clip")
+        matching_clip = None
+        for xml_clip in xml_clips:
+            if xml_clip.find("name").text in self.feed_basename:
+                matching_clip = xml_clip
+
+        if matching_clip is None:
+            # raise if the expected clip is missing from the generated data
+            raise ET.ParseError(
+                "Missing clip in `{}`. Available clips {}".format(
+                    self.feed_basename, [
+                        xml_clip.find("name").text
+                        for xml_clip in xml_clips
+                    ]
+                ))
+
+        return matching_clip
+
+    def _get_time_info_from_origin(self, xml_data):
+        try:
+            for out_track in xml_data.iter('track'):
+                for out_feed in out_track.iter('feed'):
+                    # start frame
+                    out_feed_nb_ticks_obj = out_feed.find(
+                        'startTimecode/nbTicks')
+                    self.start_frame = out_feed_nb_ticks_obj.text
+
+                    # fps
+                    out_feed_fps_obj = out_feed.find(
+                        'startTimecode/rate')
+                    self.fps = out_feed_fps_obj.text
+
+                    # drop frame mode
+                    out_feed_drop_mode_obj = out_feed.find(
+                        'startTimecode/dropMode')
+                    self.drop_mode = out_feed_drop_mode_obj.text
+                    break
+                else:
+                    continue
+        except Exception as msg:
+            self.log.warning(msg)
+
+    @staticmethod
+    def write_clip_data_to_file(fpath, xml_element_data):
+        """ Write xml element of clip data to file
+
+        Args:
+            fpath (string): file path
+            xml_element_data (xml.etree.ElementTree.Element): xml data
+
+        Raises:
+            IOError: If data could not be written to file
+        """
+        try:
+            # save it as new file
+            tree = cET.ElementTree(xml_element_data)
+            tree.write(
+                fpath, xml_declaration=True,
+                method='xml', encoding='UTF-8'
+            )
+        except IOError as error:
+            raise IOError(
+                "Not able to write data to file: {}".format(error))
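The `MediaInfoFile` class added above wraps the whole `dl_get_media_info` round-trip: generate the clip XML into a maintained temp file, pick the matching clip element, and expose its timecode data. A minimal sketch of the intended use (the media path is hypothetical, and the class only works on a machine with Flame's `/opt/Autodesk/mio` tools installed):

```python
from openpype.hosts.flame.api import MediaInfoFile

# parse media info for a rendered plate (hypothetical path)
media_info = MediaInfoFile("/mnt/projects/sh010/plate/sh010.1001.exr")

print(media_info.start_frame)  # timecode-derived start frame, as int
print(media_info.fps)          # frame rate, as float
print(media_info.drop_mode)    # drop frame flag, as str

# the matched clip element can be written back out as an open clip file
MediaInfoFile.write_clip_data_to_file(
    "/tmp/sh010.clip", media_info.clip_data)
```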
diff --git a/openpype/hosts/flame/api/pipeline.py b/openpype/hosts/flame/api/pipeline.py
index 930c6abe29..da44be1b15 100644
--- a/openpype/hosts/flame/api/pipeline.py
+++ b/openpype/hosts/flame/api/pipeline.py
@@ -3,14 +3,15 @@ Basic avalon integration
 """
 import os
 import contextlib
-from avalon import api as avalon
-from avalon.pipeline import AVALON_CONTAINER_ID
 from pyblish import api as pyblish
+
 from openpype.api import Logger
 from openpype.pipeline import (
-    LegacyCreator,
     register_loader_plugin_path,
+    register_creator_plugin_path,
     deregister_loader_plugin_path,
+    deregister_creator_plugin_path,
+    AVALON_CONTAINER_ID,
 )
 from .lib import (
     set_segment_data_marker,
@@ -26,7 +27,6 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
 PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
 LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
 CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
-INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")

 AVALON_CONTAINERS = "AVALON_CONTAINERS"

@@ -34,12 +34,10 @@ log = Logger.get_logger(__name__)


 def install():
-
     pyblish.register_host("flame")
     pyblish.register_plugin_path(PUBLISH_PATH)
     register_loader_plugin_path(LOAD_PATH)
-    avalon.register_plugin_path(LegacyCreator, CREATE_PATH)
-    avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
+    register_creator_plugin_path(CREATE_PATH)
     log.info("OpenPype Flame plug-ins registered ...")

     # register callback for switching publishable
@@ -47,14 +45,14 @@ def install():

     log.info("OpenPype Flame host installed ...")

+
 def uninstall():
     pyblish.deregister_host("flame")

     log.info("Deregistering Flame plug-ins..")
     pyblish.deregister_plugin_path(PUBLISH_PATH)
     deregister_loader_plugin_path(LOAD_PATH)
-    avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH)
-    avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
+    deregister_creator_plugin_path(CREATE_PATH)

     # register callback for switching publishable
     pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py
index 4c9d3c5383..c87445fdd3 100644
--- a/openpype/hosts/flame/api/plugin.py
+++ b/openpype/hosts/flame/api/plugin.py
@@ -1,24 +1,19 @@
 import os
 import re
 import shutil
-import sys
-from xml.etree import ElementTree as ET
-import six
-import qargparse
-from Qt import QtWidgets, QtCore
-import openpype.api as openpype
-from openpype.pipeline import (
-    LegacyCreator,
-    LoaderPlugin,
-)
-from openpype import style
-from . import (
-    lib as flib,
-    pipeline as fpipeline,
-    constants
-)
-
 from copy import deepcopy
+from xml.etree import ElementTree as ET
+
+from Qt import QtCore, QtWidgets
+
+import openpype.api as openpype
+import qargparse
+from openpype import style
+from openpype.pipeline import LegacyCreator, LoaderPlugin
+
+from . import constants
+from . import lib as flib
+from . 
import pipeline as fpipeline log = openpype.Logger.get_logger(__name__) @@ -660,8 +655,8 @@ class PublishableClip: # Publishing plugin functions -# Loader plugin functions +# Loader plugin functions class ClipLoader(LoaderPlugin): """A basic clip loader for Flame @@ -681,50 +676,52 @@ class ClipLoader(LoaderPlugin): ] -class OpenClipSolver: - media_script_path = "/opt/Autodesk/mio/current/dl_get_media_info" - tmp_name = "_tmp.clip" - tmp_file = None +class OpenClipSolver(flib.MediaInfoFile): create_new_clip = False - out_feed_nb_ticks = None - out_feed_fps = None - out_feed_drop_mode = None - log = log def __init__(self, openclip_file_path, feed_data): - # test if media script paht exists - self._validate_media_script_path() + self.out_file = openclip_file_path # new feed variables: - feed_path = feed_data["path"] + feed_path = feed_data.pop("path") + + # initialize parent class + super(OpenClipSolver, self).__init__( + feed_path, + **feed_data + ) + + # get other metadata self.feed_version_name = feed_data["version"] self.feed_colorspace = feed_data.get("colorspace") - - if feed_data.get("logger"): - self.log = feed_data["logger"] + self.log.debug("feed_version_name: {}".format(self.feed_version_name)) # derivate other feed variables self.feed_basename = os.path.basename(feed_path) self.feed_dir = os.path.dirname(feed_path) self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower() - - if not os.path.isfile(openclip_file_path): - # openclip does not exist yet and will be created - self.tmp_file = self.out_file = openclip_file_path + self.log.debug("feed_ext: {}".format(self.feed_ext)) + self.log.debug("out_file: {}".format(self.out_file)) + if not self._is_valid_tmp_file(self.out_file): self.create_new_clip = True - else: - # output a temp file - self.out_file = openclip_file_path - self.tmp_file = os.path.join(self.feed_dir, self.tmp_name) - self._clear_tmp_file() + def _is_valid_tmp_file(self, file): + # check if file exists + if os.path.isfile(file): + # test also if file is not empty + with open(file) as f: + lines = f.readlines() - self.log.info("Temp File: {}".format(self.tmp_file)) + if len(lines) > 2: + return True + + # file is probably corrupted + os.remove(file) + return False def make(self): - self._generate_media_info_file() if self.create_new_clip: # New openClip @@ -732,42 +729,17 @@ class OpenClipSolver: else: self._update_open_clip() - def _validate_media_script_path(self): - if not os.path.isfile(self.media_script_path): - raise IOError("Media Scirpt does not exist: `{}`".format( - self.media_script_path)) - - def _generate_media_info_file(self): - # Create cmd arguments for gettig xml file info file - cmd_args = [ - self.media_script_path, - "-e", self.feed_ext, - "-o", self.tmp_file, - self.feed_dir - ] - - # execute creation of clip xml template data - try: - openpype.run_subprocess(cmd_args) - except TypeError: - self.log.error("Error creating self.tmp_file") - six.reraise(*sys.exc_info()) - - def _clear_tmp_file(self): - if os.path.isfile(self.tmp_file): - os.remove(self.tmp_file) - def _clear_handler(self, xml_object): for handler in xml_object.findall("./handler"): - self.log.debug("Handler found") + self.log.info("Handler found") xml_object.remove(handler) def _create_new_open_clip(self): self.log.info("Building new openClip") + self.log.debug(">> self.clip_data: {}".format(self.clip_data)) - tmp_xml = ET.parse(self.tmp_file) - - tmp_xml_feeds = tmp_xml.find('tracks/track/feeds') + # clip data comming from MediaInfoFile + tmp_xml_feeds = 
self.clip_data.find('tracks/track/feeds') tmp_xml_feeds.set('currentVersion', self.feed_version_name) for tmp_feed in tmp_xml_feeds: tmp_feed.set('vuid', self.feed_version_name) @@ -778,46 +750,48 @@ class OpenClipSolver: self._clear_handler(tmp_feed) - tmp_xml_versions_obj = tmp_xml.find('versions') + tmp_xml_versions_obj = self.clip_data.find('versions') tmp_xml_versions_obj.set('currentVersion', self.feed_version_name) for xml_new_version in tmp_xml_versions_obj: xml_new_version.set('uid', self.feed_version_name) xml_new_version.set('type', 'version') - xml_data = self._fix_xml_data(tmp_xml) + self._clear_handler(self.clip_data) self.log.info("Adding feed version: {}".format(self.feed_basename)) - self._write_result_xml_to_file(xml_data) - - self.log.info("openClip Updated: {}".format(self.tmp_file)) + self.write_clip_data_to_file(self.out_file, self.clip_data) def _update_open_clip(self): self.log.info("Updating openClip ..") out_xml = ET.parse(self.out_file) - tmp_xml = ET.parse(self.tmp_file) + out_xml = out_xml.getroot() self.log.debug(">> out_xml: {}".format(out_xml)) - self.log.debug(">> tmp_xml: {}".format(tmp_xml)) + self.log.debug(">> self.clip_data: {}".format(self.clip_data)) # Get new feed from tmp file - tmp_xml_feed = tmp_xml.find('tracks/track/feeds/feed') + tmp_xml_feed = self.clip_data.find('tracks/track/feeds/feed') self._clear_handler(tmp_xml_feed) - self._get_time_info_from_origin(out_xml) - if self.out_feed_fps: + # update fps from MediaInfoFile class + if self.fps: tmp_feed_fps_obj = tmp_xml_feed.find( "startTimecode/rate") - tmp_feed_fps_obj.text = self.out_feed_fps - if self.out_feed_nb_ticks: + tmp_feed_fps_obj.text = str(self.fps) + + # update start_frame from MediaInfoFile class + if self.start_frame: tmp_feed_nb_ticks_obj = tmp_xml_feed.find( "startTimecode/nbTicks") - tmp_feed_nb_ticks_obj.text = self.out_feed_nb_ticks - if self.out_feed_drop_mode: + tmp_feed_nb_ticks_obj.text = str(self.start_frame) + + # update drop_mode from MediaInfoFile class + if self.drop_mode: tmp_feed_drop_mode_obj = tmp_xml_feed.find( "startTimecode/dropMode") - tmp_feed_drop_mode_obj.text = self.out_feed_drop_mode + tmp_feed_drop_mode_obj.text = str(self.drop_mode) new_path_obj = tmp_xml_feed.find( "spans/span/path") @@ -850,7 +824,7 @@ class OpenClipSolver: "version", {"type": "version", "uid": self.feed_version_name}) out_xml_versions_obj.insert(0, new_version_obj) - xml_data = self._fix_xml_data(out_xml) + self._clear_handler(out_xml) # fist create backup self._create_openclip_backup_file(self.out_file) @@ -858,30 +832,9 @@ class OpenClipSolver: self.log.info("Adding feed version: {}".format( self.feed_version_name)) - self._write_result_xml_to_file(xml_data) + self.write_clip_data_to_file(self.out_file, out_xml) - self.log.info("openClip Updated: {}".format(self.out_file)) - - self._clear_tmp_file() - - def _get_time_info_from_origin(self, xml_data): - try: - for out_track in xml_data.iter('track'): - for out_feed in out_track.iter('feed'): - out_feed_nb_ticks_obj = out_feed.find( - 'startTimecode/nbTicks') - self.out_feed_nb_ticks = out_feed_nb_ticks_obj.text - out_feed_fps_obj = out_feed.find( - 'startTimecode/rate') - self.out_feed_fps = out_feed_fps_obj.text - out_feed_drop_mode_obj = out_feed.find( - 'startTimecode/dropMode') - self.out_feed_drop_mode = out_feed_drop_mode_obj.text - break - else: - continue - except Exception as msg: - self.log.warning(msg) + self.log.debug("OpenClip Updated: {}".format(self.out_file)) def _feed_exists(self, xml_data, path): # loop all 
available feed paths and check if
@@ -892,15 +845,6 @@ class OpenClipSolver:
                 "Not appending file as it already is in .clip file")
             return True

-    def _fix_xml_data(self, xml_data):
-        xml_root = xml_data.getroot()
-        self._clear_handler(xml_root)
-        return ET.tostring(xml_root).decode('utf-8')
-
-    def _write_result_xml_to_file(self, xml_data):
-        with open(self.out_file, "w") as f:
-            f.write(xml_data)
-
     def _create_openclip_backup_file(self, file):
         bck_file = "{}.bak".format(file)
         # if backup does not exist
diff --git a/openpype/hosts/flame/api/scripts/wiretap_com.py b/openpype/hosts/flame/api/scripts/wiretap_com.py
index ee906c2608..4825ff4386 100644
--- a/openpype/hosts/flame/api/scripts/wiretap_com.py
+++ b/openpype/hosts/flame/api/scripts/wiretap_com.py
@@ -185,7 +185,9 @@ class WireTapCom(object):

         exit_code = subprocess.call(
             project_create_cmd,
-            cwd=os.path.expanduser('~'))
+            cwd=os.path.expanduser('~'),
+            preexec_fn=_subprocess_preexec_fn
+        )

         if exit_code != 0:
             RuntimeError("Cannot create project in flame db")
@@ -254,7 +256,7 @@ class WireTapCom(object):

         filtered_users = [user for user in used_names if user_name in user]
         if filtered_users:
-            # todo: need to find lastly created following regex pattern for
+            # TODO: need to find the most recently created one, matching the regex pattern for
             # date used in name
             return filtered_users.pop()
@@ -422,7 +424,13 @@ class WireTapCom(object):
         color_policy = color_policy or "Legacy"

         # check if the colour policy in custom dir
-        if not os.path.exists(color_policy):
+        if "/" in color_policy:
+            # strip the prefix from full paths so it is not duplicated
+            color_policy = color_policy.replace("/syncolor/policies/", "")
+            # expecting input is `Shared/NameOfPolicy`
+            color_policy = "/syncolor/policies/{}".format(
+                color_policy)
+        else:
             color_policy = "/syncolor/policies/Autodesk/{}".format(
                 color_policy)
@@ -442,7 +450,9 @@ class WireTapCom(object):

         exit_code = subprocess.call(
             project_colorspace_cmd,
-            cwd=os.path.expanduser('~'))
+            cwd=os.path.expanduser('~'),
+            preexec_fn=_subprocess_preexec_fn
+        )

         if exit_code != 0:
             RuntimeError("Cannot set colorspace {} on project {}".format(
@@ -450,6 +460,15 @@ class WireTapCom(object):
             ))


+def _subprocess_preexec_fn():
+    """Helper for subprocess calls
+
+    Starts a new process group and clears the umask (files get 0777).
+    """
+    os.setpgrp()
+    os.umask(0o000)
+
+
 if __name__ == "__main__":
     # get json exchange data
     json_path = sys.argv[-1]
diff --git a/openpype/hosts/flame/otio/flame_export.py b/openpype/hosts/flame/otio/flame_export.py
index 8c240fc9d5..4fe05ec1d8 100644
--- a/openpype/hosts/flame/otio/flame_export.py
+++ b/openpype/hosts/flame/otio/flame_export.py
@@ -11,8 +11,6 @@ from . 
import utils import flame from pprint import pformat -reload(utils) # noqa - log = logging.getLogger(__name__) @@ -260,24 +258,15 @@ def create_otio_markers(otio_item, item): otio_item.markers.append(otio_marker) -def create_otio_reference(clip_data): +def create_otio_reference(clip_data, fps=None): metadata = _get_metadata(clip_data) # get file info for path and start frame frame_start = 0 - fps = CTX.get_fps() + fps = fps or CTX.get_fps() path = clip_data["fpath"] - reel_clip = None - match_reel_clip = [ - clip for clip in CTX.clips - if clip["fpath"] == path - ] - if match_reel_clip: - reel_clip = match_reel_clip.pop() - fps = reel_clip["fps"] - file_name = os.path.basename(path) file_head, extension = os.path.splitext(file_name) @@ -339,13 +328,22 @@ def create_otio_reference(clip_data): def create_otio_clip(clip_data): + from openpype.hosts.flame.api import MediaInfoFile + segment = clip_data["PySegment"] - # create media reference - media_reference = create_otio_reference(clip_data) - # calculate source in - first_frame = utils.get_frame_from_filename(clip_data["fpath"]) or 0 + media_info = MediaInfoFile(clip_data["fpath"]) + media_timecode_start = media_info.start_frame + media_fps = media_info.fps + + # create media reference + media_reference = create_otio_reference(clip_data, media_fps) + + # define first frame + first_frame = media_timecode_start or utils.get_frame_from_filename( + clip_data["fpath"]) or 0 + source_in = int(clip_data["source_in"]) - int(first_frame) # creatae source range @@ -378,38 +376,6 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps): ) -def get_clips_in_reels(project): - output_clips = [] - project_desktop = project.current_workspace.desktop - - for reel_group in project_desktop.reel_groups: - for reel in reel_group.reels: - for clip in reel.clips: - clip_data = { - "PyClip": clip, - "fps": float(str(clip.frame_rate)[:-4]) - } - - attrs = [ - "name", "width", "height", - "ratio", "sample_rate", "bit_depth" - ] - - for attr in attrs: - val = getattr(clip, attr) - clip_data[attr] = val - - version = clip.versions[-1] - track = version.tracks[-1] - for segment in track.segments: - segment_data = _get_segment_attributes(segment) - clip_data.update(segment_data) - - output_clips.append(clip_data) - - return output_clips - - def _get_colourspace_policy(): output = {} @@ -493,9 +459,6 @@ def _get_shot_tokens_values(clip, tokens): old_value = None output = {} - if not clip.shot_name: - return output - old_value = clip.shot_name.get_value() for token in tokens: @@ -513,15 +476,21 @@ def _get_shot_tokens_values(clip, tokens): def _get_segment_attributes(segment): - # log.debug(dir(segment)) - if str(segment.name)[1:-1] == "": + log.debug("Segment name|hidden: {}|{}".format( + segment.name.get_value(), segment.hidden + )) + if ( + segment.name.get_value() == "" + or segment.hidden.get_value() + ): return None # Add timeline segment to tree clip_data = { "segment_name": segment.name.get_value(), "segment_comment": segment.comment.get_value(), + "shot_name": segment.shot_name.get_value(), "tape_name": segment.tape_name, "source_name": segment.source_name, "fpath": segment.file_path, @@ -529,9 +498,10 @@ def _get_segment_attributes(segment): } # add all available shot tokens - shot_tokens = _get_shot_tokens_values(segment, [ - "", "", "", "", - ]) + shot_tokens = _get_shot_tokens_values( + segment, + ["", "", "", ""] + ) clip_data.update(shot_tokens) # populate shot source metadata @@ -561,11 +531,6 @@ def create_otio_timeline(sequence): 
    log.info(sequence.attributes)

    CTX.project = get_current_flame_project()
-    CTX.clips = get_clips_in_reels(CTX.project)
-
-    log.debug(pformat(
-        CTX.clips
-    ))

    # get current timeline
    CTX.set_fps(
@@ -583,8 +548,13 @@ def create_otio_timeline(sequence):
    # create otio tracks and clips
    for ver in sequence.versions:
        for track in ver.tracks:
-            if len(track.segments) == 0 and track.hidden:
-                return None
+            # skip empty tracks and hidden tracks
+            if (
+                len(track.segments) == 0
+                or track.hidden.get_value()
+            ):
+                continue

            # convert track to otio
            otio_track = create_otio_track(
@@ -597,11 +567,7 @@ def create_otio_timeline(sequence):
                continue
            all_segments.append(clip_data)

-    segments_ordered = {
-        itemindex: clip_data
-        for itemindex, clip_data in enumerate(
-            all_segments)
-    }
+    segments_ordered = dict(enumerate(all_segments))
    log.debug("_ segments_ordered: {}".format(
        pformat(segments_ordered)
    ))
@@ -612,15 +578,11 @@ def create_otio_timeline(sequence):
        log.debug("_ itemindex: {}".format(itemindex))

        # Add Gap if needed
-        if itemindex == 0:
-            # if it is first track item at track then add
-            # it to previous item
-            prev_item = segment_data
-
-        else:
-            # get previous item
-            prev_item = segments_ordered[itemindex - 1]
-
+        prev_item = (
+            segment_data
+            if itemindex == 0
+            else segments_ordered[itemindex - 1]
+        )
        log.debug("_ segment_data: {}".format(segment_data))

        # calculate clip frame range difference from each other
diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py
index 8980f72cb8..e0a7297381 100644
--- a/openpype/hosts/flame/plugins/load/load_clip.py
+++ b/openpype/hosts/flame/plugins/load/load_clip.py
@@ -22,7 +22,7 @@ class LoadClip(opfapi.ClipLoader):
     # settings
     reel_group_name = "OpenPype_Reels"
     reel_name = "Loaded"
-    clip_name_template = "{asset}_{subset}_{representation}"
+    clip_name_template = "{asset}_{subset}_{output}"

     def load(self, context, name, namespace, options):

@@ -39,7 +39,7 @@ class LoadClip(opfapi.ClipLoader):
         clip_name = self.clip_name_template.format(
             **context["representation"]["context"])

-        # todo: settings in imageio
+        # TODO: settings in imageio
         # convert colorspace with ocio to flame mapping
         # in imageio flame section
         colorspace = colorspace
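Both clip loaders now build clip names from the `{output}` token, falling back to `{representation}` when the context carries no output name. A standalone sketch of that fallback with hypothetical context data (note that `str.replace` returns a new string, which is why the batch loader below reassigns the result):

```python
clip_name_template = "{asset}_{subset}_{output}"

repre_context = {
    "asset": "sh010",
    "subset": "plateMain",
    "representation": "exr",
}  # no "output" key present

if not repre_context.get("output"):
    # reassignment is required; str.replace does not mutate in place
    clip_name_template = clip_name_template.replace(
        "output", "representation")

print(clip_name_template.format(**repre_context))
# sh010_plateMain_exr
```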
diff --git a/openpype/hosts/flame/plugins/load/load_clip_batch.py b/openpype/hosts/flame/plugins/load/load_clip_batch.py
new file mode 100644
index 0000000000..5de3226035
--- /dev/null
+++ b/openpype/hosts/flame/plugins/load/load_clip_batch.py
@@ -0,0 +1,139 @@
+import os
+import flame
+from pprint import pformat
+import openpype.hosts.flame.api as opfapi
+
+
+class LoadClipBatch(opfapi.ClipLoader):
+    """Load a subset to timeline as clip
+
+    Place clip to timeline on its asset origin timings collected
+    during conforming to project
+    """
+
+    families = ["render2d", "source", "plate", "render", "review"]
+    representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]
+
+    label = "Load as clip to current batch"
+    order = -10
+    icon = "code-fork"
+    color = "orange"
+
+    # settings
+    reel_name = "OP_LoadedReel"
+    clip_name_template = "{asset}_{subset}_{output}"
+
+    def load(self, context, name, namespace, options):
+
+        # get flame objects
+        self.batch = options.get("batch") or flame.batch
+
+        # load clip to timeline and get main variables
+        namespace = namespace
+        version = context['version']
+        version_data = version.get("data", {})
+        version_name = version.get("name", None)
+        colorspace = version_data.get("colorspace", None)
+
+        # in case output is not in context, replace the key
+        # with representation
+        if not context["representation"]["context"].get("output"):
+            self.clip_name_template = self.clip_name_template.replace(
+                "output", "representation")
+
+        clip_name = self.clip_name_template.format(
+            **context["representation"]["context"])
+
+        # TODO: settings in imageio
+        # convert colorspace with ocio to flame mapping
+        # in imageio flame section
+        colorspace = colorspace
+
+        # create workfile path
+        workfile_dir = options.get("workdir") or os.environ["AVALON_WORKDIR"]
+        openclip_dir = os.path.join(
+            workfile_dir, clip_name
+        )
+        openclip_path = os.path.join(
+            openclip_dir, clip_name + ".clip"
+        )
+        if not os.path.exists(openclip_dir):
+            os.makedirs(openclip_dir)
+
+        # prepare clip data from context and send it to openClipLoader
+        loading_context = {
+            "path": self.fname.replace("\\", "/"),
+            "colorspace": colorspace,
+            "version": "v{:0>3}".format(version_name),
+            "logger": self.log
+
+        }
+        self.log.debug(pformat(
+            loading_context
+        ))
+        self.log.debug(openclip_path)
+
+        # make openpype clip file
+        opfapi.OpenClipSolver(openclip_path, loading_context).make()
+
+        # prepare Reel group in actual desktop
+        opc = self._get_clip(
+            clip_name,
+            openclip_path
+        )
+
+        # additional metadata from the version to imprint into avalon data
+        add_keys = [
+            "frameStart", "frameEnd", "source", "author",
+            "fps", "handleStart", "handleEnd"
+        ]
+
+        # move all version data keys to tag data
+        data_imprint = {
+            key: version_data.get(key, str(None))
+            for key in add_keys
+        }
+        # add variables related to version context
+        data_imprint.update({
+            "version": version_name,
+            "colorspace": colorspace,
+            "objectName": clip_name
+        })
+
+        # TODO: finish the containerisation
+        # opc_segment = opfapi.get_clip_segment(opc)
+
+        # return opfapi.containerise(
+        #     opc_segment,
+        #     name, namespace, context,
+        #     self.__class__.__name__,
+        #     data_imprint)
+
+        return opc
+
+    def _get_clip(self, name, clip_path):
+        reel = self._get_reel()
+
+        # with maintained openclip as opc
+        matching_clip = None
+        for cl in reel.clips:
+            if cl.name.get_value() != name:
+                continue
+            matching_clip = cl
+
+        if not matching_clip:
+            created_clips = flame.import_clips(str(clip_path), reel)
+            return created_clips.pop()
+
+        return matching_clip
+
+    def _get_reel(self):
+
+        matching_reel = [
+            rg for rg in self.batch.reels
+            if rg.name.get_value() == self.reel_name
+        ]
+
+        return (
+            matching_reel.pop()
+            if matching_reel
+            else self.batch.create_reel(str(self.reel_name))
+        )
diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py
index 70340ad7a2..95c2002bd9 100644
--- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py
+++ b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py
@@ -21,132 +21,135 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):

     audio_track_items = []

-    # TODO: add to settings
     # settings
-    xml_preset_attrs_from_comments = {
-        "width": "number",
-        "height": "number",
-        "pixelRatio": "float",
-        "resizeType": "string",
-        "resizeFilter": "string"
-    }
+    xml_preset_attrs_from_comments = []
+    add_tasks = []

     def process(self, context):
         project = context.data["flameProject"]
-        sequence = context.data["flameSequence"]
+        selected_segments = context.data["flameSelectedSegments"]
+        self.log.debug("__ selected_segments: {}".format(selected_segments))
+
+        self.otio_timeline = context.data["otioTimeline"]
         self.clips_in_reels = opfapi.get_clips_in_reels(project)
         self.fps = context.data["fps"]

        # process all selected
-
with opfapi.maintained_segment_selection(sequence) as segments: - for segment in segments: - comment_attributes = self._get_comment_attributes(segment) - self.log.debug("_ comment_attributes: {}".format( - pformat(comment_attributes))) + for segment in selected_segments: + # get openpype tag data + marker_data = opfapi.get_segment_data_marker(segment) + self.log.debug("__ marker_data: {}".format( + pformat(marker_data))) - clip_data = opfapi.get_segment_attributes(segment) - clip_name = clip_data["segment_name"] - self.log.debug("clip_name: {}".format(clip_name)) + if not marker_data: + continue - # get openpype tag data - marker_data = opfapi.get_segment_data_marker(segment) - self.log.debug("__ marker_data: {}".format( - pformat(marker_data))) + if marker_data.get("id") != "pyblish.avalon.instance": + continue - if not marker_data: - continue + self.log.debug("__ segment.name: {}".format( + segment.name + )) - if marker_data.get("id") != "pyblish.avalon.instance": - continue + comment_attributes = self._get_comment_attributes(segment) - # get file path - file_path = clip_data["fpath"] + self.log.debug("_ comment_attributes: {}".format( + pformat(comment_attributes))) - # get source clip - source_clip = self._get_reel_clip(file_path) + clip_data = opfapi.get_segment_attributes(segment) + clip_name = clip_data["segment_name"] + self.log.debug("clip_name: {}".format(clip_name)) - first_frame = opfapi.get_frame_from_filename(file_path) or 0 + # get file path + file_path = clip_data["fpath"] - head, tail = self._get_head_tail(clip_data, first_frame) + # get source clip + source_clip = self._get_reel_clip(file_path) - # solve handles length - marker_data["handleStart"] = min( - marker_data["handleStart"], head) - marker_data["handleEnd"] = min( - marker_data["handleEnd"], tail) + first_frame = opfapi.get_frame_from_filename(file_path) or 0 - with_audio = bool(marker_data.pop("audio")) + head, tail = self._get_head_tail(clip_data, first_frame) - # add marker data to instance data - inst_data = dict(marker_data.items()) + # solve handles length + marker_data["handleStart"] = min( + marker_data["handleStart"], abs(head)) + marker_data["handleEnd"] = min( + marker_data["handleEnd"], abs(tail)) - asset = marker_data["asset"] - subset = marker_data["subset"] + with_audio = bool(marker_data.pop("audio")) - # insert family into families - family = marker_data["family"] - families = [str(f) for f in marker_data["families"]] - families.insert(0, str(family)) + # add marker data to instance data + inst_data = dict(marker_data.items()) - # form label - label = asset - if asset != clip_name: - label += " ({})".format(clip_name) - label += " {}".format(subset) - label += " {}".format("[" + ", ".join(families) + "]") + asset = marker_data["asset"] + subset = marker_data["subset"] - inst_data.update({ - "name": "{}_{}".format(asset, subset), - "label": label, - "asset": asset, - "item": segment, - "families": families, - "publish": marker_data["publish"], - "fps": self.fps, - "flameSourceClip": source_clip, - "sourceFirstFrame": int(first_frame), - "path": file_path - }) + # insert family into families + family = marker_data["family"] + families = [str(f) for f in marker_data["families"]] + families.insert(0, str(family)) - # get otio clip data - otio_data = self._get_otio_clip_instance_data(clip_data) or {} - self.log.debug("__ otio_data: {}".format(pformat(otio_data))) + # form label + label = asset + if asset != clip_name: + label += " ({})".format(clip_name) + label += " {} [{}]".format(subset, ", 
".join(families)) - # add to instance data - inst_data.update(otio_data) - self.log.debug("__ inst_data: {}".format(pformat(inst_data))) + inst_data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "asset": asset, + "item": segment, + "families": families, + "publish": marker_data["publish"], + "fps": self.fps, + "flameSourceClip": source_clip, + "sourceFirstFrame": int(first_frame), + "path": file_path, + "flameAddTasks": self.add_tasks, + "tasks": { + task["name"]: {"type": task["type"]} + for task in self.add_tasks} + }) - # add resolution - self._get_resolution_to_data(inst_data, context) + # get otio clip data + otio_data = self._get_otio_clip_instance_data(clip_data) or {} + self.log.debug("__ otio_data: {}".format(pformat(otio_data))) - # add comment attributes if any - inst_data.update(comment_attributes) + # add to instance data + inst_data.update(otio_data) + self.log.debug("__ inst_data: {}".format(pformat(inst_data))) - # create instance - instance = context.create_instance(**inst_data) + # add resolution + self._get_resolution_to_data(inst_data, context) - # add colorspace data - instance.data.update({ - "versionData": { - "colorspace": clip_data["colour_space"], - } - }) + # add comment attributes if any + inst_data.update(comment_attributes) - # create shot instance for shot attributes create/update - self._create_shot_instance(context, clip_name, **inst_data) + # create instance + instance = context.create_instance(**inst_data) - self.log.info("Creating instance: {}".format(instance)) - self.log.info( - "_ instance.data: {}".format(pformat(instance.data))) + # add colorspace data + instance.data.update({ + "versionData": { + "colorspace": clip_data["colour_space"], + } + }) - if not with_audio: - continue + # create shot instance for shot attributes create/update + self._create_shot_instance(context, clip_name, **inst_data) - # add audioReview attribute to plate instance data - # if reviewTrack is on - if marker_data.get("reviewTrack") is not None: - instance.data["reviewAudio"] = True + self.log.info("Creating instance: {}".format(instance)) + self.log.info( + "_ instance.data: {}".format(pformat(instance.data))) + + if not with_audio: + continue + + # add audioReview attribute to plate instance data + # if reviewTrack is on + if marker_data.get("reviewTrack") is not None: + instance.data["reviewAudio"] = True def _get_comment_attributes(self, segment): comment = segment.comment.get_value() @@ -181,14 +184,17 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): # split to key and value key, value = split.split(":") - for a_name, a_type in self.xml_preset_attrs_from_comments.items(): + for attr_data in self.xml_preset_attrs_from_comments: + a_name = attr_data["name"] + a_type = attr_data["type"] + # exclude all not related attributes if a_name.lower() not in key.lower(): continue # get pattern defined by type pattern = TXT_PATERN - if a_type in ("number" , "float"): + if a_type in ("number", "float"): pattern = NUM_PATERN res_goup = pattern.findall(value) @@ -241,6 +247,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): head = clip_data.get("segment_head") tail = clip_data.get("segment_tail") + # HACK: it is here to serve for versions bellow 2021.1 if not head: head = int(clip_data["source_in"]) - int(first_frame) if not tail: diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py index faa5be9d68..f2ae1f62a9 100644 --- 
diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py
index faa5be9d68..f2ae1f62a9 100644
--- a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py
+++ b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py
@@ -1,6 +1,7 @@
 import pyblish.api
-import avalon.api as avalon
+
 import openpype.lib as oplib
+from openpype.pipeline import legacy_io
 import openpype.hosts.flame.api as opfapi
 from openpype.hosts.flame.otio import flame_export

@@ -18,7 +19,7 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin):
         # main
         asset_doc = context.data["assetEntity"]
-        task_name = avalon.Session["AVALON_TASK"]
+        task_name = legacy_io.Session["AVALON_TASK"]
         project = opfapi.get_current_project()
         sequence = opfapi.get_current_sequence(opfapi.CTX.selection)

@@ -31,27 +32,28 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin):
         )

         # adding otio timeline to context
-        with opfapi.maintained_segment_selection(sequence):
+        with opfapi.maintained_segment_selection(sequence) as selected_seg:
             otio_timeline = flame_export.create_otio_timeline(sequence)

-            instance_data = {
-                "name": subset_name,
-                "asset": asset_doc["name"],
-                "subset": subset_name,
-                "family": "workfile"
-            }
+        instance_data = {
+            "name": subset_name,
+            "asset": asset_doc["name"],
+            "subset": subset_name,
+            "family": "workfile"
+        }

-            # create instance with workfile
-            instance = context.create_instance(**instance_data)
-            self.log.info("Creating instance: {}".format(instance))
+        # create instance with workfile
+        instance = context.create_instance(**instance_data)
+        self.log.info("Creating instance: {}".format(instance))

-            # update context with main project attributes
-            context.data.update({
-                "flameProject": project,
-                "flameSequence": sequence,
-                "otioTimeline": otio_timeline,
-                "currentFile": "Flame/{}/{}".format(
-                    project.name, sequence.name
-                ),
-                "fps": float(str(sequence.frame_rate)[:-4])
-            })
+        # update context with main project attributes
+        context.data.update({
+            "flameProject": project,
+            "flameSequence": sequence,
+            "otioTimeline": otio_timeline,
+            "currentFile": "Flame/{}/{}".format(
+                project.name, sequence.name
+            ),
+            "flameSelectedSegments": selected_seg,
+            "fps": float(str(sequence.frame_rate)[:-4])
+        })
diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
index 32f6b9508f..a780f8c9e5 100644
--- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
+++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
@@ -61,9 +61,13 @@ class ExtractSubsetResources(openpype.api.Extractor):

         # flame objects
         segment = instance.data["item"]
+        segment_name = segment.name.get_value()
         sequence_clip = instance.context.data["flameSequence"]
         clip_data = instance.data["flameSourceClip"]
-        clip = clip_data["PyClip"]
+
+        reel_clip = None
+        if clip_data:
+            reel_clip = clip_data["PyClip"]

         # segment's parent track name
         s_track_name = segment.parent.name.get_value()
@@ -108,6 +112,16 @@ class ExtractSubsetResources(openpype.api.Extractor):
             ignore_comment_attrs = preset_config["ignore_comment_attrs"]
             color_out = preset_config["colorspace_out"]

+            # get attributes related to loading in integrate_batch_group
+            load_to_batch_group = preset_config.get(
+                "load_to_batch_group")
+            batch_group_loader_name = preset_config.get(
+                "batch_group_loader_name")
+
+            # convert to None if empty string
+            if batch_group_loader_name == "":
+                batch_group_loader_name = None
+
             # get frame range with handles for representation range
             frame_start_handle = frame_start - handle_start
             source_duration_handles = (
@@ -117,8 +131,20 @@ class ExtractSubsetResources(openpype.api.Extractor):
             in_mark = (source_start_handles - source_first_frame) + 1
             out_mark = in_mark + source_duration_handles

+            # skip this preset if it needs a reel clip and none is available
+            if (
+                not reel_clip
+                and export_type != "Sequence Publish"
+            ):
+                self.log.warning((
+                    "Skipping preset {}. No reel clip "
+                    "available for {}").format(
+                        preset_file, segment_name
+                ))
+                continue
+
             # by default export source clips
-            exporting_clip = clip
+            exporting_clip = reel_clip

             if export_type == "Sequence Publish":
                 # change export clip to sequence
@@ -150,7 +176,7 @@ class ExtractSubsetResources(openpype.api.Extractor):

             if export_type == "Sequence Publish":
                 # only keep visible layer where instance segment is child
-                self.hide_other_tracks(duplclip, s_track_name)
+                self.hide_others(duplclip, segment_name, s_track_name)

             # validate xml preset file is filled
             if preset_file == "":
@@ -211,7 +237,9 @@ class ExtractSubsetResources(openpype.api.Extractor):
                 "tags": repre_tags,
                 "data": {
                     "colorspace": color_out
-                }
+                },
+                "load_to_batch_group": load_to_batch_group,
+                "batch_group_loader_name": batch_group_loader_name
             }

             # collect all available content of export dir
@@ -322,18 +350,26 @@ class ExtractSubsetResources(openpype.api.Extractor):

         return new_stage_dir, new_files_list

-    def hide_other_tracks(self, sequence_clip, track_name):
+    def hide_others(self, sequence_clip, segment_name, track_name):
         """Helper method used only if sequence clip is used

         Args:
             sequence_clip (flame.Clip): sequence clip
+            segment_name (str): segment name
             track_name (str): track name
         """
         # create otio tracks and clips
         for ver in sequence_clip.versions:
             for track in ver.tracks:
-                if len(track.segments) == 0 and track.hidden:
+                if len(track.segments) == 0 and track.hidden.get_value():
                     continue
+
                 # hide tracks which are not parent track
                 if track.name.get_value() != track_name:
                     track.hidden = True
+                    continue
+
+                # hide all other segments
+                for segment in track.segments:
+                    if segment.name.get_value() != segment_name:
+                        segment.hidden = True
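The two `load_to_batch_group` / `batch_group_loader_name` keys attached to each representation above are what the integrator added below keys on when deciding which loader to run. A small sketch of that hand-off, with dictionary shapes taken from this diff and hypothetical values:

```python
# what ExtractSubsetResources attaches to a representation
representation = {
    "name": "exr",
    "tags": [],
    "data": {"colorspace": "ACES - ACEScg"},
    "load_to_batch_group": True,
    "batch_group_loader_name": None,  # empty string is normalized to None
}

default_loader = "LoadClip"  # IntegrateBatchGroup.default_loader

# IntegrateBatchGroup falls back to the default loader
# when the per-preset setting is empty
loader_name = representation["batch_group_loader_name"] or default_loader
print(loader_name)  # LoadClip
```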
diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py
new file mode 100644
index 0000000000..da9553cc2a
--- /dev/null
+++ b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py
@@ -0,0 +1,328 @@
+import os
+import copy
+from collections import OrderedDict
+from pprint import pformat
+import pyblish
+from openpype.lib import get_workdir
+import openpype.hosts.flame.api as opfapi
+import openpype.pipeline as op_pipeline
+
+
+class IntegrateBatchGroup(pyblish.api.InstancePlugin):
+    """Integrate published shot to batch group"""
+
+    order = pyblish.api.IntegratorOrder + 0.45
+    label = "Integrate Batch Groups"
+    hosts = ["flame"]
+    families = ["clip"]
+
+    # settings
+    default_loader = "LoadClip"
+
+    def process(self, instance):
+        add_tasks = instance.data["flameAddTasks"]
+
+        # iterate all tasks from settings
+        for task_data in add_tasks:
+            # skip tasks that are not set to create a batch group
+            if not task_data["create_batch_group"]:
+                continue
+
+            # create or get already created batch group
+            bgroup = self._get_batch_group(instance, task_data)
+
+            # add batch group content
+            all_batch_nodes = self._add_nodes_to_batch_with_links(
+                instance, task_data, bgroup)
+
+            for name, node in all_batch_nodes.items():
+                self.log.debug("name: {}, dir: {}".format(
+                    name, dir(node)
+                ))
+                self.log.debug("__ node.attributes: {}".format(
+                    node.attributes
+                ))
+
+            # load plate to batch group
+            self.log.info("Loading subset `{}` into batch `{}`".format(
+                instance.data["subset"], bgroup.name.get_value()
+            ))
+            self._load_clip_to_context(instance, bgroup)
+
+    def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group):
+        # get Write File node properties; OrderedDict because order does matter
+        write_pref_data = self._get_write_prefs(instance, task_data)
+
+        batch_nodes = [
+            {
+                "type": "comp",
+                "properties": {},
+                "id": "comp_node01"
+            },
+            {
+                "type": "Write File",
+                "properties": write_pref_data,
+                "id": "write_file_node01"
+            }
+        ]
+        batch_links = [
+            {
+                "from_node": {
+                    "id": "comp_node01",
+                    "connector": "Result"
+                },
+                "to_node": {
+                    "id": "write_file_node01",
+                    "connector": "Front"
+                }
+            }
+        ]
+
+        # add nodes into batch group
+        return opfapi.create_batch_group_content(
+            batch_nodes, batch_links, batch_group)
+
+    def _load_clip_to_context(self, instance, bgroup):
+        # get all loaders for host
+        loaders_by_name = {
+            loader.__name__: loader
+            for loader in op_pipeline.discover_loader_plugins()
+        }
+
+        # get all published representations
+        published_representations = instance.data["published_representations"]
+        repres_db_id_by_name = {
+            repre_info["representation"]["name"]: repre_id
+            for repre_id, repre_info in published_representations.items()
+        }
+
+        # get all loadable representations
+        repres_by_name = {
+            repre["name"]: repre for repre in instance.data["representations"]
+        }
+
+        # get repre_id for the loadable representations
+        loader_name_by_repre_id = {
+            repres_db_id_by_name[repr_name]: {
+                "loader": repr_data["batch_group_loader_name"],
+                # add repre data for exception logging
+                "_repre_data": repr_data
+            }
+            for repr_name, repr_data in repres_by_name.items()
+            if repr_data.get("load_to_batch_group")
+        }
+
+        self.log.debug("__ loader_name_by_repre_id: {}".format(pformat(
+            loader_name_by_repre_id)))
+
+        # get representation context from the repre_id
+        repre_contexts = op_pipeline.load.get_repres_contexts(
+            loader_name_by_repre_id.keys())
+
+        self.log.debug("__ repre_contexts: {}".format(pformat(
+            repre_contexts)))
+
+        # loop all returned repres from repre_context dict
+        for repre_id, repre_context in repre_contexts.items():
+            self.log.debug("__ repre_id: {}".format(repre_id))
+            # get loader name by representation id
+            loader_name = (
+                loader_name_by_repre_id[repre_id]["loader"]
+                # if nothing was added to settings fallback to default
+                or self.default_loader
+            )
+
+            # get loader plugin
+            loader_plugin = loaders_by_name.get(loader_name)
+            if loader_plugin:
+                # load to flame by representation context
+                try:
+                    op_pipeline.load.load_with_repre_context(
+                        loader_plugin, repre_context, **{
+                            "data": {
+                                "workdir": self.task_workdir,
+                                "batch": bgroup
+                            }
+                        })
+                except op_pipeline.load.IncompatibleLoaderError as msg:
+                    self.log.error(
+                        "Check allowed representations for Loader `{}` "
+                        "in settings > error: {}".format(
+                            loader_plugin.__name__, msg))
+                    self.log.error(
+                        "Representation context >>{}<< is not compatible "
+                        "with loader `{}`".format(
+                            pformat(repre_context), loader_plugin.__name__
+                        )
+                    )
+            else:
+                self.log.warning(
+                    "Something went wrong and no Loader was found for "
+                    "the following data: {}".format(
+                        pformat(loader_name_by_repre_id))
+                )
+
+    def _get_batch_group(self, instance, task_data):
+        frame_start = instance.data["frameStart"]
+        frame_end = instance.data["frameEnd"]
+        handle_start = instance.data["handleStart"]
+        handle_end = instance.data["handleEnd"]
+        frame_duration = (frame_end - frame_start) + 1
+        asset_name = instance.data["asset"]
+
+        task_name = task_data["name"]
+        batchgroup_name = "{}_{}".format(asset_name, task_name)
+
+        batch_data = {
+            "schematic_reels": [
+                "OP_LoadedReel"
+            ],
+            "handleStart": handle_start,
+            "handleEnd": handle_end
+        }
+        self.log.debug(
+            "__ batch_data: {}".format(pformat(batch_data)))
+
+        # check if the batch group already exists
+        bgroup = opfapi.get_batch_group_from_desktop(batchgroup_name)
+
+        if not bgroup:
+            self.log.info(
+                "Creating new batch group: {}".format(batchgroup_name))
+            # create batch with utils
+            bgroup = opfapi.create_batch_group(
+                batchgroup_name,
+                frame_start,
+                frame_duration,
+                **batch_data
+            )
+
+        else:
+            self.log.info(
+                "Updating batch group: {}".format(batchgroup_name))
+            # update already created batch group
+            bgroup = opfapi.create_batch_group(
+                batchgroup_name,
+                frame_start,
+                frame_duration,
+                update_batch_group=bgroup,
+                **batch_data
+            )
+
+        return bgroup
+
+    def _get_anatomy_data_with_current_task(self, instance, task_data):
+        anatomy_data = copy.deepcopy(instance.data["anatomyData"])
+        task_name = task_data["name"]
+        task_type = task_data["type"]
+        anatomy_obj = instance.context.data["anatomy"]
+
+        # update task data in anatomy data
+        project_task_types = anatomy_obj["tasks"]
+        task_code = project_task_types.get(task_type, {}).get("short_name")
+        anatomy_data.update({
+            "task": {
+                "name": task_name,
+                "type": task_type,
+                "short": task_code
+            }
+        })
+        return anatomy_data
+
+    def _get_write_prefs(self, instance, task_data):
+        # update task in anatomy data
+        anatomy_data = self._get_anatomy_data_with_current_task(
+            instance, task_data)
+
+        self.task_workdir = self._get_shot_task_dir_path(
+            instance, task_data)
+        self.log.debug("__ task_workdir: {}".format(
+            self.task_workdir))
+
+        # TODO: this might be done with template in settings
+        render_dir_path = os.path.join(
+            self.task_workdir, "render", "flame")
+
+        if not os.path.exists(render_dir_path):
+            os.makedirs(render_dir_path, mode=0o777)
+
+        # TODO: add most of these to `imageio/flame/batch/write_node`
+        name = "{project[code]}_{asset}_{task[name]}".format(
+            **anatomy_data
+        )
+
+        # The path attribute where the rendered clip is exported
+        # /path/to/file.[0001-0010].exr
+        media_path = render_dir_path
+        # name of file represented by tokens
+        media_path_pattern = (
+            "_v/_v.")
+        # The Create Open Clip attribute of the Write File node.
+        # Determines if an Open Clip is created by the Write File node.
+        create_clip = True
+        # The Include Setup attribute of the Write File node.
+        # Determines if a Batch Setup file is created by the Write File node.
+        include_setup = True
+        # The path attribute where the Open Clip file is exported by
+        # the Write File node.
+        create_clip_path = ""
+        # The path attribute where the Batch setup file
+        # is exported by the Write File node.
+        include_setup_path = "./_v"
+        # The file type for the files written by the Write File node.
+        # Setting this attribute also overwrites format_extension,
+        # bit_depth and compress_mode to match the defaults for
+        # this file type.
+        file_type = "OpenEXR"
+        # The file extension for the files written by the Write File node.
+        # This attribute resets to match file_type whenever file_type
+        # is set. If you require a specific extension, you must
+        # set format_extension after setting file_type.
+        format_extension = "exr"
+        # The bit depth for the files written by the Write File node.
+        # This attribute resets to match file_type whenever file_type is set.
+        bit_depth = "16"
+        # The compression attribute for the files exported by the Write
+        # File node. 
Only relevant when file_type in 'OpenEXR', 'Sgi', 'Tiff' + compress = True + # The compression format attribute for the specific file types + # exported by the Write File node. You must set compress_mode + # after setting file_type. + compress_mode = "DWAB" + # The frame index mode attribute of the Write File node. + # Value range: `Use Timecode` or `Use Start Frame` + frame_index_mode = "Use Start Frame" + frame_padding = 6 + # The versioning mode of the Open Clip exported by the Write File node. + # Only available if create_clip = True. + version_mode = "Follow Iteration" + version_name = "v" + version_padding = 3 + + # need to make sure the order of keys is correct + return OrderedDict(( + ("name", name), + ("media_path", media_path), + ("media_path_pattern", media_path_pattern), + ("create_clip", create_clip), + ("include_setup", include_setup), + ("create_clip_path", create_clip_path), + ("include_setup_path", include_setup_path), + ("file_type", file_type), + ("format_extension", format_extension), + ("bit_depth", bit_depth), + ("compress", compress), + ("compress_mode", compress_mode), + ("frame_index_mode", frame_index_mode), + ("frame_padding", frame_padding), + ("version_mode", version_mode), + ("version_name", version_name), + ("version_padding", version_padding) + )) + + def _get_shot_task_dir_path(self, instance, task_data): + project_doc = instance.data["projectEntity"] + asset_entity = instance.data["assetEntity"] + + return get_workdir( + project_doc, asset_entity, task_data["name"], "flame") diff --git a/openpype/hosts/flame/plugins/publish/validate_source_clip.py b/openpype/hosts/flame/plugins/publish/validate_source_clip.py index 9ff015f628..345c00e05a 100644 --- a/openpype/hosts/flame/plugins/publish/validate_source_clip.py +++ b/openpype/hosts/flame/plugins/publish/validate_source_clip.py @@ -9,6 +9,8 @@ class ValidateSourceClip(pyblish.api.InstancePlugin): label = "Validate Source Clip" hosts = ["flame"] families = ["clip"] + optional = True + active = False def process(self, instance): flame_source_clip = instance.data["flameSourceClip"] diff --git a/openpype/hosts/flame/startup/openpype_in_flame.py b/openpype/hosts/flame/startup/openpype_in_flame.py index 931c5a1b79..f2ac23b19e 100644 --- a/openpype/hosts/flame/startup/openpype_in_flame.py +++ b/openpype/hosts/flame/startup/openpype_in_flame.py @@ -3,18 +3,19 @@ import sys from Qt import QtWidgets from pprint import pformat import atexit -import openpype -import avalon + import openpype.hosts.flame.api as opfapi +from openpype.pipeline import ( + install_host, + registered_host, +) def openpype_install(): """Registering OpenPype in context """ - openpype.install() - avalon.api.install(opfapi) - print("Avalon registered hosts: {}".format( - avalon.api.registered_host())) + install_host(opfapi) + print("Registered host: {}".format(registered_host())) # Exception handler diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py index 2bb5ea8aae..29f3a3a3eb 100644 --- a/openpype/hosts/fusion/api/lib.py +++ b/openpype/hosts/fusion/api/lib.py @@ -3,10 +3,13 @@ import sys import re import contextlib +from bson.objectid import ObjectId from Qt import QtGui -from avalon import io -from openpype.pipeline import switch_container +from openpype.pipeline import ( + switch_container, + legacy_io, +) from .pipeline import get_current_comp, comp_lock_and_undo_chunk self = sys.modules[__name__] @@ -92,9 +95,11 @@ def switch_item(container, # Collect any of current asset, subset and representation if not provided
# so we can use the original name from those. if any(not x for x in [asset_name, subset_name, representation_name]): - _id = io.ObjectId(container["representation"]) - representation = io.find_one({"type": "representation", "_id": _id}) - version, subset, asset, project = io.parenthood(representation) + _id = ObjectId(container["representation"]) + representation = legacy_io.find_one({ + "type": "representation", "_id": _id + }) + version, subset, asset, project = legacy_io.parenthood(representation) if asset_name is None: asset_name = asset["name"] @@ -106,14 +111,14 @@ def switch_item(container, representation_name = representation["name"] # Find the new one - asset = io.find_one({ + asset = legacy_io.find_one({ "name": asset_name, "type": "asset" }) assert asset, ("Could not find asset in the database with the name " "'%s'" % asset_name) - subset = io.find_one({ + subset = legacy_io.find_one({ "name": subset_name, "type": "subset", "parent": asset["_id"] @@ -121,7 +126,7 @@ def switch_item(container, assert subset, ("Could not find subset in the database with the name " "'%s'" % subset_name) - version = io.find_one( + version = legacy_io.find_one( { "type": "version", "parent": subset["_id"] @@ -133,7 +138,7 @@ def switch_item(container, asset_name, subset_name ) - representation = io.find_one({ + representation = legacy_io.find_one({ "name": representation_name, "type": "representation", "parent": version["_id"]} diff --git a/openpype/hosts/fusion/api/pipeline.py b/openpype/hosts/fusion/api/pipeline.py index 92e54ad6f5..0867b464d5 100644 --- a/openpype/hosts/fusion/api/pipeline.py +++ b/openpype/hosts/fusion/api/pipeline.py @@ -7,14 +7,16 @@ import logging import contextlib import pyblish.api -import avalon.api -from avalon.pipeline import AVALON_CONTAINER_ID from openpype.api import Logger from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, + register_inventory_action_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, + deregister_inventory_action_path, + AVALON_CONTAINER_ID, ) import openpype.hosts.fusion @@ -68,8 +70,8 @@ def install(): log.info("Registering Fusion plug-ins..") register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) + register_creator_plugin_path(CREATE_PATH) + register_inventory_action_path(INVENTORY_PATH) pyblish.api.register_callback( "instanceToggled", on_pyblish_instance_toggled @@ -92,10 +94,8 @@ def uninstall(): log.info("Deregistering Fusion plug-ins..") deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.deregister_plugin_path( - avalon.api.InventoryAction, INVENTORY_PATH - ) + deregister_creator_plugin_path(CREATE_PATH) + deregister_inventory_action_path(INVENTORY_PATH) pyblish.api.deregister_callback( "instanceToggled", on_pyblish_instance_toggled diff --git a/openpype/hosts/fusion/api/workio.py b/openpype/hosts/fusion/api/workio.py index ec9ac7481a..a1710c6e3a 100644 --- a/openpype/hosts/fusion/api/workio.py +++ b/openpype/hosts/fusion/api/workio.py @@ -1,12 +1,14 @@ """Host API required Work Files tool""" import sys import os -from avalon import api + +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS + from .pipeline import get_current_comp def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["fusion"] + return HOST_WORKFILE_EXTENSIONS["fusion"] def has_unsaved_changes(): diff 
--git a/openpype/hosts/fusion/plugins/create/create_exr_saver.py b/openpype/hosts/fusion/plugins/create/create_exr_saver.py index ff8bdb21ef..8bab5ee9b1 100644 --- a/openpype/hosts/fusion/plugins/create/create_exr_saver.py +++ b/openpype/hosts/fusion/plugins/create/create_exr_saver.py @@ -1,13 +1,13 @@ import os -from openpype.pipeline import create +from openpype.pipeline import LegacyCreator from openpype.hosts.fusion.api import ( get_current_comp, comp_lock_and_undo_chunk ) -class CreateOpenEXRSaver(create.LegacyCreator): +class CreateOpenEXRSaver(LegacyCreator): name = "openexrDefault" label = "Create OpenEXR Saver" diff --git a/openpype/hosts/fusion/plugins/inventory/select_containers.py b/openpype/hosts/fusion/plugins/inventory/select_containers.py index 294c134505..d554b73a5b 100644 --- a/openpype/hosts/fusion/plugins/inventory/select_containers.py +++ b/openpype/hosts/fusion/plugins/inventory/select_containers.py @@ -1,7 +1,7 @@ -from avalon import api +from openpype.pipeline import InventoryAction -class FusionSelectContainers(api.InventoryAction): +class FusionSelectContainers(InventoryAction): label = "Select Containers" icon = "mouse-pointer" diff --git a/openpype/hosts/fusion/plugins/inventory/set_tool_color.py b/openpype/hosts/fusion/plugins/inventory/set_tool_color.py index 2f5ae4d241..c7530ce674 100644 --- a/openpype/hosts/fusion/plugins/inventory/set_tool_color.py +++ b/openpype/hosts/fusion/plugins/inventory/set_tool_color.py @@ -1,6 +1,6 @@ -from avalon import api from Qt import QtGui, QtWidgets +from openpype.pipeline import InventoryAction from openpype import style from openpype.hosts.fusion.api import ( get_current_comp, @@ -8,7 +8,7 @@ from openpype.hosts.fusion.api import ( ) -class FusionSetToolColor(api.InventoryAction): +class FusionSetToolColor(InventoryAction): """Update the color of the selected tools""" label = "Set Tool Color" diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py index 075820de35..b860abd88b 100644 --- a/openpype/hosts/fusion/plugins/load/load_sequence.py +++ b/openpype/hosts/fusion/plugins/load/load_sequence.py @@ -1,10 +1,9 @@ import os import contextlib -from avalon import io - from openpype.pipeline import ( load, + legacy_io, get_representation_path, ) from openpype.hosts.fusion.api import ( @@ -212,8 +211,10 @@ class FusionLoadSequence(load.LoaderPlugin): path = self._get_first_image(root) # Get start frame from version data - version = io.find_one({"type": "version", - "_id": representation["parent"]}) + version = legacy_io.find_one({ + "type": "version", + "_id": representation["parent"] + }) start = version["data"].get("frameStart") if start is None: self.log.warning("Missing start frame for updated version" diff --git a/openpype/hosts/fusion/plugins/publish/submit_deadline.py b/openpype/hosts/fusion/plugins/publish/submit_deadline.py index 9da99dd9e2..8570c759bc 100644 --- a/openpype/hosts/fusion/plugins/publish/submit_deadline.py +++ b/openpype/hosts/fusion/plugins/publish/submit_deadline.py @@ -4,10 +4,10 @@ import getpass import requests -from avalon import api - import pyblish.api +from openpype.pipeline import legacy_io + class FusionSubmitDeadline(pyblish.api.InstancePlugin): """Submit current Comp to Deadline @@ -133,7 +133,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin): "FUSION9_MasterPrefs" ] environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) 
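# Hedged sketch of the entries the update below emits (keys and values # illustrative only): #   "EnvironmentKeyValue0": "AVALON_PROJECT=demo" #   "EnvironmentKeyValue1": "FUSION9_MasterPrefs=/prefs/Master.prefs" # Merging legacy_io.Session in lets the farm job reproduce the artist's # project context.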
payload["JobInfo"].update({ + "EnvironmentKeyValue%d" % index: "{key}={value}".format( @@ -146,7 +146,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin): self.log.info(json.dumps(payload, indent=4, sort_keys=True)) # E.g. http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(DEADLINE_REST_URL) + url = "{}/api/jobs".format(deadline_url) response = requests.post(url, json=payload) if not response.ok: raise Exception(response.text) diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py index ca7efb9136..704f420796 100644 --- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py +++ b/openpype/hosts/fusion/scripts/fusion_switch_shot.py @@ -4,9 +4,11 @@ import sys import logging # Pipeline imports -import avalon.api -from avalon import io - +from openpype.pipeline import ( + legacy_io, + install_host, + registered_host, +) from openpype.lib import version_up from openpype.hosts.fusion import api from openpype.hosts.fusion.api import lib @@ -163,7 +165,7 @@ def update_frame_range(comp, representations): """ version_ids = [r["parent"] for r in representations] - versions = io.find({"type": "version", "_id": {"$in": version_ids}}) + versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}}) versions = list(versions) versions = [v for v in versions @@ -201,12 +203,11 @@ def switch(asset_name, filepath=None, new=True): # Assert asset name exists # It is better to do this here than to wait till switch_shot does it - asset = io.find_one({"type": "asset", "name": asset_name}) + asset = legacy_io.find_one({"type": "asset", "name": asset_name}) assert asset, "Could not find '%s' in the database" % asset_name # Get current project - self._project = io.find_one({"type": "project", - "name": avalon.api.Session["AVALON_PROJECT"]}) + self._project = legacy_io.find_one({"type": "project"}) # Go to comp if not filepath: @@ -218,7 +219,7 @@ def switch(asset_name, filepath=None, new=True): assert current_comp is not None, ( "Fusion could not load '{}'").format(filepath) - host = avalon.api.registered_host() + host = registered_host() containers = list(host.ls()) assert containers, "Nothing to update" @@ -237,7 +238,7 @@ def switch(asset_name, filepath=None, new=True): current_comp.Print(message) # Build the session to switch to - switch_to_session = avalon.api.Session.copy() + switch_to_session = legacy_io.Session.copy() switch_to_session["AVALON_ASSET"] = asset['name'] if new: @@ -279,7 +280,7 @@ if __name__ == '__main__': args, unknown = parser.parse_args() - avalon.api.install(api) + install_host(api) switch(args.asset_name, args.file_path) sys.exit(0) diff --git a/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py b/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py index 4b5e8f91a0..de8fc4b3b4 100644 --- a/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py +++ b/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py @@ -1,24 +1,23 @@ import os import sys -import openpype from openpype.api import Logger +from openpype.pipeline import ( + install_host, + registered_host, +) log = Logger().get_logger(__name__) def main(env): - import avalon.api from openpype.hosts.fusion import api from openpype.hosts.fusion.api import menu - # Registers pype's Global pyblish plugins - openpype.install() - # activate resolve from pype - avalon.api.install(api) + install_host(api) - log.info(f"Avalon registered hosts: {avalon.api.registered_host()}") + log.info(f"Registered host: {registered_host()}")
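# install_host(api) above consolidates what the removed openpype.install() # and avalon.api.install(api) pair did: host registration and global plugin # installation happen in a single call.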
menu.launch_openpype_menu() diff --git a/openpype/hosts/fusion/utility_scripts/switch_ui.py b/openpype/hosts/fusion/utility_scripts/switch_ui.py index d9eeae25ea..70eb3d0a19 100644 --- a/openpype/hosts/fusion/utility_scripts/switch_ui.py +++ b/openpype/hosts/fusion/utility_scripts/switch_ui.py @@ -1,14 +1,17 @@ import os +import sys import glob import logging from Qt import QtWidgets, QtCore -import avalon.api -from avalon import io import qtawesome as qta from openpype import style +from openpype.pipeline import ( + install_host, + legacy_io, +) from openpype.hosts.fusion import api from openpype.lib.avalon_context import get_workdir_from_session @@ -163,7 +166,7 @@ class App(QtWidgets.QWidget): return items def collect_assets(self): - return list(io.find({"type": "asset"}, {"name": True})) + return list(legacy_io.find({"type": "asset"}, {"name": True})) def populate_comp_box(self, files): """Ensure we display the filename only but the path is stored as well @@ -181,8 +184,7 @@ class App(QtWidgets.QWidget): if __name__ == '__main__': - import sys - avalon.api.install(api) + install_host(api) app = QtWidgets.QApplication(sys.argv) window = App() diff --git a/openpype/hosts/harmony/api/README.md b/openpype/hosts/harmony/api/README.md index e8d354e1e6..dd45eb14dd 100644 --- a/openpype/hosts/harmony/api/README.md +++ b/openpype/hosts/harmony/api/README.md @@ -419,7 +419,6 @@ class ExtractImage(pyblish.api.InstancePlugin): ```python import os -from avalon import api, io import openpype.hosts.harmony.api as harmony signature = str(uuid4()).replace("-", "_") @@ -611,7 +610,7 @@ class ImageSequenceLoader(load.LoaderPlugin): def update(self, container, representation): node = container.pop("node") - version = io.find_one({"_id": representation["parent"]}) + version = legacy_io.find_one({"_id": representation["parent"]}) files = [] for f in version["data"]["files"]: files.append( diff --git a/openpype/hosts/harmony/api/lib.py b/openpype/hosts/harmony/api/lib.py index 66eeac1e3a..53fd0f07dd 100644 --- a/openpype/hosts/harmony/api/lib.py +++ b/openpype/hosts/harmony/api/lib.py @@ -183,10 +183,10 @@ def launch(application_path, *args): application_path (str): Path to Harmony. 
""" - from avalon import api + from openpype.pipeline import install_host from openpype.hosts.harmony import api as harmony - api.install(harmony) + install_host(harmony) ProcessContext.port = random.randrange(49152, 65535) os.environ["AVALON_HARMONY_PORT"] = str(ProcessContext.port) diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index f967da15ca..b953d0e984 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -2,18 +2,18 @@ import os from pathlib import Path import logging +from bson.objectid import ObjectId import pyblish.api -from avalon import io -import avalon.api -from avalon.pipeline import AVALON_CONTAINER_ID - from openpype import lib from openpype.lib import register_event_callback from openpype.pipeline import ( - LegacyCreator, + legacy_io, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, ) import openpype.hosts.harmony import openpype.hosts.harmony.api as harmony @@ -107,13 +107,12 @@ def check_inventory(): if not lib.any_outdated(): return - host = avalon.api.registered_host() outdated_containers = [] - for container in host.ls(): + for container in ls(): representation = container['representation'] - representation_doc = io.find_one( + representation_doc = legacy_io.find_one( { - "_id": io.ObjectId(representation), + "_id": ObjectId(representation), "type": "representation" }, projection={"parent": True} @@ -185,7 +184,7 @@ def install(): pyblish.api.register_host("harmony") pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) log.info(PUBLISH_PATH) # Register callbacks. @@ -199,7 +198,7 @@ def install(): def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) def on_pyblish_instance_toggled(instance, old_value, new_value): diff --git a/openpype/hosts/harmony/api/workio.py b/openpype/hosts/harmony/api/workio.py index 38a00ae414..ab1cb9b1a9 100644 --- a/openpype/hosts/harmony/api/workio.py +++ b/openpype/hosts/harmony/api/workio.py @@ -2,20 +2,21 @@ import os import shutil +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS + from .lib import ( ProcessContext, get_local_harmony_path, zip_and_move, launch_zip_file ) -from avalon import api # used to lock saving until previous save is done. 
save_disabled = False def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["harmony"] + return HOST_WORKFILE_EXTENSIONS["harmony"] def has_unsaved_changes(): diff --git a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py index 35b123f97d..f5bf051243 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py @@ -3,13 +3,13 @@ from pathlib import Path import attr -from avalon import api -from openpype.lib import get_formatted_current_time -import openpype.lib.abstract_collect_render -import openpype.hosts.harmony.api as harmony -from openpype.lib.abstract_collect_render import RenderInstance import openpype.lib +import openpype.lib.abstract_collect_render +from openpype.lib.abstract_collect_render import RenderInstance +from openpype.lib import get_formatted_current_time +from openpype.pipeline import legacy_io +import openpype.hosts.harmony.api as harmony @attr.s @@ -143,7 +143,7 @@ class CollectFarmRender(openpype.lib.abstract_collect_render. source=context.data["currentFile"], label=node.split("/")[1], subset=subset_name, - asset=api.Session["AVALON_ASSET"], + asset=legacy_io.Session["AVALON_ASSET"], attachTo=False, setMembers=[node], publish=info[4], diff --git a/openpype/hosts/harmony/plugins/publish/collect_workfile.py b/openpype/hosts/harmony/plugins/publish/collect_workfile.py index 63bfd5929b..c0493315a4 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_workfile.py +++ b/openpype/hosts/harmony/plugins/publish/collect_workfile.py @@ -3,6 +3,8 @@ import pyblish.api import os +from openpype.lib import get_subset_name_with_asset_doc + class CollectWorkfile(pyblish.api.ContextPlugin): """Collect current script for publish.""" @@ -14,10 +16,15 @@ class CollectWorkfile(pyblish.api.ContextPlugin): def process(self, context): """Plugin entry point.""" family = "workfile" - task = os.getenv("AVALON_TASK", None) - sanitized_task_name = task[0].upper() + task[1:] basename = os.path.basename(context.data["currentFile"]) - subset = "{}{}".format(family, sanitized_task_name) + subset = get_subset_name_with_asset_doc( + family, + "", + context.data["anatomyData"]["task"]["name"], + context.data["assetEntity"], + context.data["anatomyData"]["project"]["name"], + host_name=context.data["hostName"] + ) # Create instance instance = context.create_instance(subset) diff --git a/openpype/hosts/hiero/__init__.py b/openpype/hosts/hiero/__init__.py index 2d674b3fa7..d2ac82391b 100644 --- a/openpype/hosts/hiero/__init__.py +++ b/openpype/hosts/hiero/__init__.py @@ -10,7 +10,7 @@ def add_implementation_envs(env, _app): ] old_hiero_path = env.get("HIERO_PLUGIN_PATH") or "" for path in old_hiero_path.split(os.pathsep): - if not path or not os.path.exists(path): + if not path: continue norm_path = os.path.normpath(path) diff --git a/openpype/hosts/hiero/api/events.py b/openpype/hosts/hiero/api/events.py index 9439199933..7fab3edfc8 100644 --- a/openpype/hosts/hiero/api/events.py +++ b/openpype/hosts/hiero/api/events.py @@ -1,12 +1,12 @@ import os import hiero.core.events from openpype.api import Logger +from openpype.lib import register_event_callback from .lib import ( sync_avalon_data_to_workfile, launch_workfiles_app, selection_changed_timeline, before_project_save, - register_event_callback ) from .tags import add_tags_to_workfile from .menu import update_menu_task_label diff --git a/openpype/hosts/hiero/api/lib.py 
b/openpype/hosts/hiero/api/lib.py index a9467ae5a4..0e64ddcaf5 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -8,9 +8,11 @@ import platform import ast import shutil import hiero + from Qt import QtWidgets -import avalon.api as avalon -import avalon.io +from bson.objectid import ObjectId + +from openpype.pipeline import legacy_io from openpype.api import (Logger, Anatomy, get_anatomy_settings) from . import tags @@ -35,8 +37,6 @@ self.pype_tag_name = "openpypeData" self.default_sequence_name = "openpypeSequence" self.default_bin_name = "openpypeBin" -AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") - def flatten(_list): for item in _list: @@ -46,6 +46,7 @@ def flatten(_list): else: yield item + def get_current_project(remove_untitled=False): projects = flatten(hiero.core.projects()) if not remove_untitled: @@ -381,7 +382,7 @@ def get_publish_attribute(tag): def sync_avalon_data_to_workfile(): # import session to get project dir - project_name = avalon.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] anatomy = Anatomy(project_name) work_template = anatomy.templates["work"]["path"] @@ -406,7 +407,7 @@ def sync_avalon_data_to_workfile(): project.setProjectRoot(active_project_root) # get project data from avalon db - project_doc = avalon.io.find_one({"type": "project"}) + project_doc = legacy_io.find_one({"type": "project"}) project_data = project_doc["data"] log.debug("project_data: {}".format(project_data)) @@ -992,7 +993,6 @@ def check_inventory_versions(): it to red. """ from . import parse_container - from avalon import io # presets clip_color_last = "green" @@ -1004,19 +1004,19 @@ def check_inventory_versions(): if container: # get representation from io - representation = io.find_one({ + representation = legacy_io.find_one({ "type": "representation", - "_id": io.ObjectId(container["representation"]) + "_id": ObjectId(container["representation"]) }) # Get start frame from version data - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py index de20b86f30..e262abec00 100644 --- a/openpype/hosts/hiero/api/menu.py +++ b/openpype/hosts/hiero/api/menu.py @@ -1,14 +1,16 @@ import os import sys + import hiero.core -from openpype.api import Logger -from openpype.tools.utils import host_tools -from avalon.api import Session from hiero.ui import findMenuAction +from openpype.api import Logger +from openpype.pipeline import legacy_io +from openpype.tools.utils import host_tools + from . 
import tags -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) self = sys.modules[__name__] self._change_context_menu = None @@ -24,8 +26,10 @@ def update_menu_task_label(): log.warning("Can't find menuItem: {}".format(object_name)) return - label = "{}, {}".format(Session["AVALON_ASSET"], - Session["AVALON_TASK"]) + label = "{}, {}".format( + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"] + ) menu = found_menu.menu() self._change_context_menu = label @@ -51,7 +55,8 @@ def menu_install(): menu_name = os.environ['AVALON_LABEL'] context_label = "{0}, {1}".format( - Session["AVALON_ASSET"], Session["AVALON_TASK"] + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"] ) self._change_context_menu = context_label diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index eff126c0b6..8025ebff05 100644 --- a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -4,23 +4,22 @@ Basic avalon integration import os import contextlib from collections import OrderedDict -from avalon.pipeline import AVALON_CONTAINER_ID -from avalon import api as avalon -from avalon import schema + from pyblish import api as pyblish from openpype.api import Logger from openpype.pipeline import ( - LegacyCreator, + schema, + register_creator_plugin_path, register_loader_plugin_path, + deregister_creator_plugin_path, deregister_loader_plugin_path, + AVALON_CONTAINER_ID, ) from openpype.tools.utils import host_tools from . import lib, menu, events log = Logger().get_logger(__name__) -AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") - # plugin paths API_DIR = os.path.dirname(os.path.abspath(__file__)) HOST_DIR = os.path.dirname(API_DIR) @@ -28,20 +27,12 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish").replace("\\", "/") LOAD_PATH = os.path.join(PLUGINS_DIR, "load").replace("\\", "/") CREATE_PATH = os.path.join(PLUGINS_DIR, "create").replace("\\", "/") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory").replace("\\", "/") AVALON_CONTAINERS = ":AVALON_CONTAINERS" def install(): - """ - Installing Hiero integration for avalon - - Args: - config (obj): avalon config module `pype` in our case, it is not - used but required by avalon.api.install() - - """ + """Installing Hiero integration.""" # adding all events events.register_events() @@ -50,8 +41,7 @@ def install(): pyblish.register_host("hiero") pyblish.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) + register_creator_plugin_path(CREATE_PATH) # register callback for switching publishable pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) @@ -72,7 +62,7 @@ def uninstall(): pyblish.deregister_host("hiero") pyblish.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) # register callback for switching publishable pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) @@ -255,15 +245,10 @@ def reload_config(): import importlib for module in ( - "avalon", - "avalon.lib", - "avalon.pipeline", - "pyblish", - "pypeapp", - "{}.api".format(AVALON_CONFIG), - "{}.hosts.hiero.lib".format(AVALON_CONFIG), - "{}.hosts.hiero.menu".format(AVALON_CONFIG), - 
"{}.hosts.hiero.tags".format(AVALON_CONFIG) + "openpype.api", + "openpype.hosts.hiero.lib", + "openpype.hosts.hiero.menu", + "openpype.hosts.hiero.tags" ): log.info("Reloading module: {}...".format(module)) try: diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py b/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py index 21c21cd7c3..2e638c2088 100644 --- a/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py +++ b/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py @@ -1,9 +1,9 @@ import traceback # activate hiero from pype -import avalon.api +from openpype.pipeline import install_host import openpype.hosts.hiero.api as phiero -avalon.api.install(phiero) +install_host(phiero) try: __import__("openpype.hosts.hiero.api") diff --git a/openpype/hosts/hiero/api/tags.py b/openpype/hosts/hiero/api/tags.py index fe5c0d5257..e15e3119a6 100644 --- a/openpype/hosts/hiero/api/tags.py +++ b/openpype/hosts/hiero/api/tags.py @@ -3,9 +3,9 @@ import os import hiero from openpype.api import Logger -from avalon import io +from openpype.pipeline import legacy_io -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) def tag_data(): @@ -141,7 +141,7 @@ def add_tags_to_workfile(): nks_pres_tags = tag_data() # Get project task types. - tasks = io.find_one({"type": "project"})["config"]["tasks"] + tasks = legacy_io.find_one({"type": "project"})["config"]["tasks"] nks_pres_tags["[Tasks]"] = {} log.debug("__ tasks: {}".format(tasks)) for task_type in tasks.keys(): @@ -159,7 +159,7 @@ def add_tags_to_workfile(): # asset builds and shots. if int(os.getenv("TAG_ASSETBUILD_STARTUP", 0)) == 1: nks_pres_tags["[AssetBuilds]"] = {} - for asset in io.find({"type": "asset"}): + for asset in legacy_io.find({"type": "asset"}): if asset["data"]["entityType"] == "AssetBuild": nks_pres_tags["[AssetBuilds]"][asset["name"]] = { "editable": "1", diff --git a/openpype/hosts/hiero/api/workio.py b/openpype/hosts/hiero/api/workio.py index dacb11624f..394cb5e2ab 100644 --- a/openpype/hosts/hiero/api/workio.py +++ b/openpype/hosts/hiero/api/workio.py @@ -1,14 +1,14 @@ import os import hiero -from avalon import api + from openpype.api import Logger +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS - -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["hiero"] + return HOST_WORKFILE_EXTENSIONS["hiero"] def has_unsaved_changes(): diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py index d3908695a2..da4326c8c1 100644 --- a/openpype/hosts/hiero/plugins/load/load_clip.py +++ b/openpype/hosts/hiero/plugins/load/load_clip.py @@ -1,5 +1,7 @@ -from avalon import io -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + legacy_io, + get_representation_path, +) import openpype.hosts.hiero.api as phiero # from openpype.hosts.hiero.api import plugin, lib # reload(lib) @@ -105,7 +107,7 @@ class LoadClip(phiero.SequenceLoader): namespace = container['namespace'] track_item = phiero.get_track_items( track_item_name=namespace) - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -174,7 +176,7 @@ class LoadClip(phiero.SequenceLoader): # define version name version_name = version.get("name", None) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git 
a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py index d48d6949bd..29c0397f79 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py @@ -1,12 +1,15 @@ import os -import pyblish.api -import hiero.ui -from openpype.hosts.hiero import api as phiero -from avalon import api as avalon -from pprint import pformat -from openpype.hosts.hiero.api.otio import hiero_export -from Qt.QtGui import QPixmap import tempfile +from pprint import pformat + +import pyblish.api +from Qt.QtGui import QPixmap + +import hiero.ui + +from openpype.pipeline import legacy_io +from openpype.hosts.hiero import api as phiero +from openpype.hosts.hiero.api.otio import hiero_export class PrecollectWorkfile(pyblish.api.ContextPlugin): @@ -17,7 +20,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): def process(self, context): - asset = avalon.Session["AVALON_ASSET"] + asset = legacy_io.Session["AVALON_ASSET"] subset = "workfile" project = phiero.get_current_project() active_timeline = hiero.ui.activeSequence() diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py index a90856c6fd..10baf25803 100644 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py @@ -1,5 +1,5 @@ from pyblish import api -from avalon import io +from openpype.pipeline import legacy_io class CollectAssetBuilds(api.ContextPlugin): @@ -18,7 +18,7 @@ class CollectAssetBuilds(api.ContextPlugin): def process(self, context): asset_builds = {} - for asset in io.find({"type": "asset"}): + for asset in legacy_io.find({"type": "asset"}): if asset["data"]["entityType"] == "AssetBuild": self.log.debug("Found \"{}\" in database.".format(asset)) asset_builds[asset["name"]] = asset diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py index ef7d07421b..693e151f6f 100644 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py @@ -1,7 +1,7 @@ import os import pyblish.api from openpype.hosts.hiero import api as phiero -from avalon import api as avalon +from openpype.pipeline import legacy_io class PreCollectWorkfile(pyblish.api.ContextPlugin): @@ -11,7 +11,7 @@ class PreCollectWorkfile(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder - 0.51 def process(self, context): - asset = avalon.Session["AVALON_ASSET"] + asset = legacy_io.Session["AVALON_ASSET"] subset = "workfile" project = phiero.get_current_project() diff --git a/openpype/hosts/houdini/__init__.py b/openpype/hosts/houdini/__init__.py index 8c12d13c81..a3ee38db8d 100644 --- a/openpype/hosts/houdini/__init__.py +++ b/openpype/hosts/houdini/__init__.py @@ -15,7 +15,7 @@ def add_implementation_envs(env, _app): old_houdini_menu_path = env.get("HOUDINI_MENU_PATH") or "" for path in old_houdini_path.split(os.pathsep): - if not path or not os.path.exists(path): + if not path: continue norm_path = os.path.normpath(path) @@ -23,7 +23,7 @@ def add_implementation_envs(env, _app): new_houdini_path.append(norm_path) for path in old_houdini_menu_path.split(os.pathsep): - if not path or not os.path.exists(path): + if not path: continue 
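# (Hedged note: only empty entries are skipped now; the former # os.path.exists() filter was dropped, presumably because these paths may # be composed before they exist on disk.)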
norm_path = os.path.normpath(path) diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index bd41618856..603519069a 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -4,8 +4,8 @@ from contextlib import contextmanager import six -from avalon import api, io from openpype.api import get_asset +from openpype.pipeline import legacy_io import hou @@ -75,9 +75,13 @@ def generate_ids(nodes, asset_id=None): if asset_id is None: # Get the asset ID from the database for the asset of current context - asset_data = io.find_one({"type": "asset", - "name": api.Session["AVALON_ASSET"]}, - projection={"_id": True}) + asset_data = legacy_io.find_one( + { + "type": "asset", + "name": legacy_io.Session["AVALON_ASSET"] + }, + projection={"_id": True} + ) assert asset_data, "No current asset found in Session" asset_id = asset_data['_id'] @@ -155,7 +159,7 @@ def validate_fps(): if parent is None: pass else: - dialog = popup.Popup(parent=parent) + dialog = popup.PopupUpdateKeys(parent=parent) dialog.setModal(True) dialog.setWindowTitle("Houdini scene does not match project FPS") dialog.setMessage("Scene %i FPS does not match project %i FPS" % @@ -163,7 +167,7 @@ def validate_fps(): dialog.setButtonText("Fix") # on_show is the Fix button clicked callback - dialog.on_clicked.connect(lambda: set_scene_fps(fps)) + dialog.on_clicked_state.connect(lambda: set_scene_fps(fps)) dialog.show() @@ -424,8 +428,8 @@ def maintained_selection(): def reset_framerange(): """Set frame range to current asset""" - asset_name = api.Session["AVALON_ASSET"] - asset = io.find_one({"name": asset_name, "type": "asset"}) + asset_name = legacy_io.Session["AVALON_ASSET"] + asset = legacy_io.find_one({"name": asset_name, "type": "asset"}) frame_start = asset["data"].get("frameStart") frame_end = asset["data"].get("frameEnd") diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py index 7d4e58efb7..7048accceb 100644 --- a/openpype/hosts/houdini/api/pipeline.py +++ b/openpype/hosts/houdini/api/pipeline.py @@ -4,16 +4,13 @@ import logging import contextlib import hou -import hdefereval import pyblish.api -import avalon.api -from avalon.pipeline import AVALON_CONTAINER_ID -from avalon.lib import find_submodule from openpype.pipeline import ( - LegacyCreator, + register_creator_plugin_path, register_loader_plugin_path, + AVALON_CONTAINER_ID, ) import openpype.hosts.houdini from openpype.hosts.houdini.api import lib @@ -54,7 +51,7 @@ def install(): pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) log.info("Installing callbacks ... 
") # register_event_callback("init", on_init) @@ -215,24 +212,12 @@ def ls(): "pyblish.mindbender.container"): containers += lib.lsattr("id", identifier) - has_metadata_collector = False - config_host = find_submodule(avalon.api.registered_config(), "houdini") - if hasattr(config_host, "collect_container_metadata"): - has_metadata_collector = True - for container in sorted(containers, # Hou 19+ Python 3 hou.ObjNode are not # sortable due to not supporting greater # than comparisons key=lambda node: node.path()): - data = parse_container(container) - - # Collect custom data if attribute is present - if has_metadata_collector: - metadata = config_host.collect_container_metadata(container) - data.update(metadata) - - yield data + yield parse_container(container) def before_save(): @@ -305,7 +290,13 @@ def on_new(): start = hou.playbar.playbackRange()[0] hou.setFrame(start) - hdefereval.executeDeferred(_enforce_start_frame) + if hou.isUIAvailable(): + import hdefereval + hdefereval.executeDeferred(_enforce_start_frame) + else: + # Run without execute deferred when no UI is available because + # without UI `hdefereval` is not available to import + _enforce_start_frame() def _set_context_settings(): diff --git a/openpype/hosts/houdini/api/usd.py b/openpype/hosts/houdini/api/usd.py index a992f1d082..e9991e38ec 100644 --- a/openpype/hosts/houdini/api/usd.py +++ b/openpype/hosts/houdini/api/usd.py @@ -1,11 +1,12 @@ """Houdini-specific USD Library functions.""" import contextlib - import logging + from Qt import QtWidgets, QtCore, QtGui -from avalon import io + from openpype import style +from openpype.pipeline import legacy_io from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from pxr import Sdf @@ -20,11 +21,12 @@ class SelectAssetDialog(QtWidgets.QWidget): Args: parm: Parameter where selected asset name is set. 
""" + def __init__(self, parm): self.setWindowTitle("Pick Asset") self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup) - assets_widget = SingleSelectAssetsWidget(io, parent=self) + assets_widget = SingleSelectAssetsWidget(legacy_io, parent=self) layout = QtWidgets.QHBoxLayout(self) layout.addWidget(assets_widget) @@ -44,7 +46,7 @@ class SelectAssetDialog(QtWidgets.QWidget): select_id = None name = self._parm.eval() if name: - db_asset = io.find_one( + db_asset = legacy_io.find_one( {"name": name, "type": "asset"}, {"_id": True} ) diff --git a/openpype/hosts/houdini/api/workio.py b/openpype/hosts/houdini/api/workio.py index e7310163ea..e0213023fd 100644 --- a/openpype/hosts/houdini/api/workio.py +++ b/openpype/hosts/houdini/api/workio.py @@ -2,11 +2,11 @@ import os import hou -from avalon import api +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["houdini"] + return HOST_WORKFILE_EXTENSIONS["houdini"] def has_unsaved_changes(): diff --git a/openpype/hosts/houdini/plugins/create/create_hda.py b/openpype/hosts/houdini/plugins/create/create_hda.py index 0a9c1bad1e..5fc78c7539 100644 --- a/openpype/hosts/houdini/plugins/create/create_hda.py +++ b/openpype/hosts/houdini/plugins/create/create_hda.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import hou -from avalon import io + +from openpype.pipeline import legacy_io from openpype.hosts.houdini.api import lib from openpype.hosts.houdini.api import plugin @@ -22,13 +23,16 @@ class CreateHDA(plugin.Creator): # type: (str) -> bool """Check if existing subset name versions already exists.""" # Get all subsets of the current asset - asset_id = io.find_one({"name": self.data["asset"], "type": "asset"}, - projection={"_id": True})['_id'] - subset_docs = io.find( + asset_id = legacy_io.find_one( + {"name": self.data["asset"], "type": "asset"}, + projection={"_id": True} + )['_id'] + subset_docs = legacy_io.find( { "type": "subset", "parent": asset_id - }, {"name": 1} + }, + {"name": 1} ) existing_subset_names = set(subset_docs.distinct("name")) existing_subset_names_low = { diff --git a/openpype/hosts/houdini/plugins/load/load_image.py b/openpype/hosts/houdini/plugins/load/load_image.py index bd9ea3eee3..671f08f18f 100644 --- a/openpype/hosts/houdini/plugins/load/load_image.py +++ b/openpype/hosts/houdini/plugins/load/load_image.py @@ -3,6 +3,7 @@ import os from openpype.pipeline import ( load, get_representation_path, + AVALON_CONTAINER_ID, ) from openpype.hosts.houdini.api import lib, pipeline @@ -73,7 +74,7 @@ class ImageLoader(load.LoaderPlugin): # Imprint it manually data = { "schema": "avalon-core:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "name": node_name, "namespace": namespace, "loader": str(self.__class__.__name__), diff --git a/openpype/hosts/houdini/plugins/load/load_usd_layer.py b/openpype/hosts/houdini/plugins/load/load_usd_layer.py index d803e6abfe..48580fc3aa 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_layer.py +++ b/openpype/hosts/houdini/plugins/load/load_usd_layer.py @@ -1,8 +1,9 @@ from openpype.pipeline import ( load, get_representation_path, + AVALON_CONTAINER_ID, ) -from openpype.hosts.houdini.api import lib, pipeline +from openpype.hosts.houdini.api import lib class USDSublayerLoader(load.LoaderPlugin): @@ -43,7 +44,7 @@ class USDSublayerLoader(load.LoaderPlugin): # Imprint it manually data = { "schema": "avalon-core:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": 
AVALON_CONTAINER_ID, "name": node_name, "namespace": namespace, "loader": str(self.__class__.__name__), diff --git a/openpype/hosts/houdini/plugins/load/load_usd_reference.py b/openpype/hosts/houdini/plugins/load/load_usd_reference.py index fdb443f4cf..6851c77e6d 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_reference.py +++ b/openpype/hosts/houdini/plugins/load/load_usd_reference.py @@ -1,8 +1,9 @@ from openpype.pipeline import ( load, get_representation_path, + AVALON_CONTAINER_ID, ) -from openpype.hosts.houdini.api import lib, pipeline +from openpype.hosts.houdini.api import lib class USDReferenceLoader(load.LoaderPlugin): @@ -43,7 +44,7 @@ class USDReferenceLoader(load.LoaderPlugin): # Imprint it manually data = { "schema": "avalon-core:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "name": node_name, "namespace": namespace, "loader": str(self.__class__.__name__), diff --git a/openpype/hosts/houdini/plugins/publish/collect_inputs.py b/openpype/hosts/houdini/plugins/publish/collect_inputs.py index 39e2737e8c..8c7098c710 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_inputs.py +++ b/openpype/hosts/houdini/plugins/publish/collect_inputs.py @@ -1,6 +1,7 @@ -import avalon.api as api import pyblish.api +from openpype.pipeline import registered_host + def collect_input_containers(nodes): """Collect containers that contain any of the node in `nodes`. @@ -18,7 +19,7 @@ def collect_input_containers(nodes): lookup = frozenset(nodes) containers = [] - host = api.registered_host() + host = registered_host() for container in host.ls(): node = container["node"] diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py index 66dfba64df..3f0d10e0ba 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py +++ b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py @@ -1,6 +1,6 @@ import pyblish.api -from avalon import io +from openpype.pipeline import legacy_io import openpype.lib.usdlib as usdlib @@ -50,7 +50,10 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin): self.log.debug("Add bootstrap for: %s" % bootstrap) - asset = io.find_one({"name": instance.data["asset"], "type": "asset"}) + asset = legacy_io.find_one({ + "name": instance.data["asset"], + "type": "asset" + }) assert asset, "Asset must exist: %s" % asset # Check which are not about to be created and don't exist yet @@ -104,7 +107,8 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin): # Or, if they already exist in the database we can # skip them too. 
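# (The {"_id": True} projection added below makes this a pure existence # check; the full subset document is never fetched.)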
return bool( - io.find_one( - {"name": subset, "type": "subset", "parent": asset["_id"]} + legacy_io.find_one( + {"name": subset, "type": "subset", "parent": asset["_id"]}, + {"_id": True} ) ) diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py index 3e842ae766..bfcd93c1cb 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py +++ b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py @@ -7,7 +7,10 @@ from collections import deque import pyblish.api import openpype.api -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + legacy_io, +) import openpype.hosts.houdini.api.usd as hou_usdlib from openpype.hosts.houdini.api.lib import render_rop @@ -266,8 +269,6 @@ class ExtractUSDLayered(openpype.api.Extractor): instance.data["files"].append(fname) def _compare_with_latest_publish(self, dependency, new_file): - - from avalon import api, io import filecmp _, ext = os.path.splitext(new_file) @@ -275,10 +276,10 @@ class ExtractUSDLayered(openpype.api.Extractor): # Compare this dependency with the latest published version # to detect whether we should make this into a new publish # version. If not, skip it. - asset = io.find_one( + asset = legacy_io.find_one( {"name": dependency.data["asset"], "type": "asset"} ) - subset = io.find_one( + subset = legacy_io.find_one( { "name": dependency.data["subset"], "type": "subset", @@ -290,7 +291,7 @@ class ExtractUSDLayered(openpype.api.Extractor): self.log.debug("No existing subset..") return False - version = io.find_one( + version = legacy_io.find_one( {"type": "version", "parent": subset["_id"], }, sort=[("name", -1)] ) @@ -298,7 +299,7 @@ class ExtractUSDLayered(openpype.api.Extractor): self.log.debug("No existing version..") return False - representation = io.find_one( + representation = legacy_io.find_one( { "name": ext.lstrip("."), "type": "representation", diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py index 31c2954ee7..c5cacd1880 100644 --- a/openpype/hosts/houdini/plugins/publish/increment_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py @@ -1,8 +1,8 @@ import pyblish.api -import avalon.api from openpype.api import version_up from openpype.action import get_errored_plugins_from_data +from openpype.pipeline import registered_host class IncrementCurrentFile(pyblish.api.InstancePlugin): @@ -41,7 +41,7 @@ class IncrementCurrentFile(pyblish.api.InstancePlugin): ) # Filename must not have changed since collecting - host = avalon.api.registered_host() + host = registered_host() current_file = host.current_file() assert ( context.data["currentFile"] == current_file diff --git a/openpype/hosts/houdini/plugins/publish/save_scene.py b/openpype/hosts/houdini/plugins/publish/save_scene.py index fe5962fbd3..6128c7af77 100644 --- a/openpype/hosts/houdini/plugins/publish/save_scene.py +++ b/openpype/hosts/houdini/plugins/publish/save_scene.py @@ -1,5 +1,6 @@ import pyblish.api -import avalon.api + +from openpype.pipeline import registered_host class SaveCurrentScene(pyblish.api.ContextPlugin): @@ -12,7 +13,7 @@ class SaveCurrentScene(pyblish.api.ContextPlugin): def process(self, context): # Filename must not have changed since collecting - host = avalon.api.registered_host() + host = registered_host() current_file = host.current_file() assert 
context.data['currentFile'] == current_file, ( "Collected filename from current scene name." diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py index fcfbf6b22d..44719ae488 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py @@ -1,9 +1,9 @@ import re import pyblish.api -import openpype.api -from avalon import io +import openpype.api +from openpype.pipeline import legacy_io class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin): @@ -23,16 +23,20 @@ class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin): shade_subset = subset.split(".", 1)[0] model_subset = re.sub("^usdShade", "usdModel", shade_subset) - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = legacy_io.find_one( + {"name": asset, "type": "asset"}, + {"_id": True} + ) if not asset_doc: raise RuntimeError("Asset does not exist: %s" % asset) - subset_doc = io.find_one( + subset_doc = legacy_io.find_one( { "name": model_subset, "type": "subset", "parent": asset_doc["_id"], - } + }, + {"_id": True} ) if not subset_doc: raise RuntimeError( diff --git a/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py b/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py index eb33b49759..afadbffd3e 100644 --- a/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py +++ b/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py @@ -1,10 +1,10 @@ -import avalon.api +from openpype.pipeline import install_host from openpype.hosts.houdini import api def main(): print("Installing OpenPype ...") - avalon.api.install(api) + install_host(api) main() diff --git a/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py b/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py index eb33b49759..afadbffd3e 100644 --- a/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py +++ b/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py @@ -1,10 +1,10 @@ -import avalon.api +from openpype.pipeline import install_host from openpype.hosts.houdini import api def main(): print("Installing OpenPype ...") - avalon.api.install(api) + install_host(api) main() diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py index 4071eb3e0c..01a29472e7 100644 --- a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py +++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py @@ -1,17 +1,21 @@ +import os import hou import husdoutputprocessors.base as base -import os -import re -import logging import colorbleed.usdlib as usdlib +from openpype.pipeline import ( + legacy_io, + registered_root, +) + def _get_project_publish_template(): """Return publish template from database for current project""" - from avalon import io - project = io.find_one({"type": "project"}, - projection={"config.template.publish": True}) + project = legacy_io.find_one( + {"type": "project"}, + projection={"config.template.publish": True} + ) return project["config"]["template"]["publish"] @@ -133,19 +137,18 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase): """ - from avalon import api, io - - PROJECT = api.Session["AVALON_PROJECT"] - asset_doc = io.find_one({"name": asset, - "type": "asset"}) + PROJECT = legacy_io.Session["AVALON_PROJECT"] + asset_doc = legacy_io.find_one({ + 
"name": asset, + "type": "asset" + }) if not asset_doc: raise RuntimeError("Invalid asset name: '%s'" % asset) - root = api.registered_root() + root = registered_root() path = self._template.format(**{ "root": root, "project": PROJECT, - "silo": asset_doc["silo"], "asset": asset_doc["name"], "subset": subset, "representation": ext, @@ -165,4 +168,3 @@ output_processor = AvalonURIOutputProcessor() def usdOutputProcessor(): return output_processor - diff --git a/openpype/hosts/maya/__init__.py b/openpype/hosts/maya/__init__.py index b7d26a7818..c1c82c62e5 100644 --- a/openpype/hosts/maya/__init__.py +++ b/openpype/hosts/maya/__init__.py @@ -9,7 +9,7 @@ def add_implementation_envs(env, _app): ] old_python_path = env.get("PYTHONPATH") or "" for path in old_python_path.split(os.pathsep): - if not path or not os.path.exists(path): + if not path: continue norm_path = os.path.normpath(path) diff --git a/openpype/hosts/maya/api/action.py b/openpype/hosts/maya/api/action.py index ab26748c8a..ca1006b6aa 100644 --- a/openpype/hosts/maya/api/action.py +++ b/openpype/hosts/maya/api/action.py @@ -2,8 +2,8 @@ from __future__ import absolute_import import pyblish.api -from avalon import io +from openpype.pipeline import legacy_io from openpype.api import get_errored_instances_from_context @@ -75,8 +75,10 @@ class GenerateUUIDsOnInvalidAction(pyblish.api.Action): from . import lib asset = instance.data['asset'] - asset_id = io.find_one({"name": asset, "type": "asset"}, - projection={"_id": True})['_id'] + asset_id = legacy_io.find_one( + {"name": asset, "type": "asset"}, + projection={"_id": True} + )['_id'] for node, _id in lib.generate_ids(nodes, asset_id=asset_id): lib.set_id(node, _id, overwrite=True) diff --git a/openpype/hosts/maya/api/commands.py b/openpype/hosts/maya/api/commands.py index a1e0be2cfe..dd616b6dd6 100644 --- a/openpype/hosts/maya/api/commands.py +++ b/openpype/hosts/maya/api/commands.py @@ -1,7 +1,8 @@ # -*- coding: utf-8 -*- """OpenPype script commands to be used directly in Maya.""" from maya import cmds -from avalon import api, io + +from openpype.pipeline import legacy_io class ToolWindows: @@ -73,13 +74,13 @@ def reset_frame_range(): 59.94: '59.94fps', 44100: '44100fps', 48000: '48000fps' - }.get(float(api.Session.get("AVALON_FPS", 25)), "pal") + }.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal") cmds.currentUnit(time=fps) # Set frame start/end - asset_name = api.Session["AVALON_ASSET"] - asset = io.find_one({"name": asset_name, "type": "asset"}) + asset_name = legacy_io.Session["AVALON_ASSET"] + asset = legacy_io.find_one({"name": asset_name, "type": "asset"}) frame_start = asset["data"].get("frameStart") frame_end = asset["data"].get("frameEnd") @@ -144,8 +145,8 @@ def reset_resolution(): resolution_height = 1080 # Get resolution from asset - asset_name = api.Session["AVALON_ASSET"] - asset_doc = io.find_one({"name": asset_name, "type": "asset"}) + asset_name = legacy_io.Session["AVALON_ASSET"] + asset_doc = legacy_io.find_one({"name": asset_name, "type": "asset"}) resolution = _resolution_from_document(asset_doc) # Try get resolution from project if resolution is None: @@ -154,7 +155,7 @@ def reset_resolution(): "Asset \"{}\" does not have set resolution." 
" Trying to get resolution from project" ).format(asset_name)) - project_doc = io.find_one({"type": "project"}) + project_doc = legacy_io.find_one({"type": "project"}) resolution = _resolution_from_document(project_doc) if resolution is None: diff --git a/openpype/hosts/maya/api/fbx.py b/openpype/hosts/maya/api/fbx.py new file mode 100644 index 0000000000..260241f5fc --- /dev/null +++ b/openpype/hosts/maya/api/fbx.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +"""Tools to work with FBX.""" +import logging + +from pyblish.api import Instance + +from maya import cmds # noqa +import maya.mel as mel # noqa + + +class FBXExtractor: + """Extract FBX from Maya. + + This extracts reproducible FBX exports ignoring any of the settings set + on the local machine in the FBX export options window. + + All export settings are applied with the `FBXExport*` commands prior + to the `FBXExport` call itself. The options can be overridden with + their + nice names as seen in the "options" property on this class. + + For more information on FBX exports see: + - https://knowledge.autodesk.com/support/maya/learn-explore/caas + /CloudHelp/cloudhelp/2016/ENU/Maya/files/GUID-6CCE943A-2ED4-4CEE-96D4 + -9CB19C28F4E0-htm.html + - http://forums.cgsociety.org/archive/index.php?t-1032853.html + - https://groups.google.com/forum/#!msg/python_inside_maya/cLkaSo361oE + /LKs9hakE28kJ + + """ + @property + def options(self): + """Overridable options for FBX Export + + Given in the following format + - {NAME: EXPECTED TYPE} + + If the overridden option's type does not match, + the option is not included and a warning is logged. + + """ + + return { + "cameras": bool, + "smoothingGroups": bool, + "hardEdges": bool, + "tangents": bool, + "smoothMesh": bool, + "instances": bool, + # "referencedContainersContent": bool, # deprecated in Maya 2016+ + "bakeComplexAnimation": int, + "bakeComplexStart": int, + "bakeComplexEnd": int, + "bakeComplexStep": int, + "bakeResampleAnimation": bool, + "animationOnly": bool, + "useSceneName": bool, + "quaternion": str, # "euler" + "shapes": bool, + "skins": bool, + "constraints": bool, + "lights": bool, + "embeddedTextures": bool, + "inputConnections": bool, + "upAxis": str, # x, y or z, + "triangulate": bool + } + + @property + def default_options(self): + """The default options for FBX extraction. + + This includes shapes, skins, constraints, lights and incoming + connections and exports with the Y-axis as up-axis. + + By default this uses the time sliders start and end time. 
+ + """ + + start_frame = int(cmds.playbackOptions(query=True, + animationStartTime=True)) + end_frame = int(cmds.playbackOptions(query=True, + animationEndTime=True)) + + return { + "cameras": False, + "smoothingGroups": True, + "hardEdges": False, + "tangents": False, + "smoothMesh": True, + "instances": False, + "bakeComplexAnimation": True, + "bakeComplexStart": start_frame, + "bakeComplexEnd": end_frame, + "bakeComplexStep": 1, + "bakeResampleAnimation": True, + "animationOnly": False, + "useSceneName": False, + "quaternion": "euler", + "shapes": True, + "skins": True, + "constraints": False, + "lights": True, + "embeddedTextures": False, + "inputConnections": True, + "upAxis": "y", + "triangulate": False + } + + def __init__(self, log=None): + # Ensure FBX plug-in is loaded + self.log = log or logging.getLogger(self.__class__.__name__) + cmds.loadPlugin("fbxmaya", quiet=True) + + def parse_overrides(self, instance, options): + """Inspect data of instance to determine overridden options + + An instance may supply any of the overridable options + as data, the option is then added to the extraction. + + """ + + for key in instance.data: + if key not in self.options: + continue + + # Ensure the data is of correct type + value = instance.data[key] + if not isinstance(value, self.options[key]): + self.log.warning( + "Overridden attribute {key} was of " + "the wrong type: {invalid_type} " + "- should have been {valid_type}".format( + key=key, + invalid_type=type(value).__name__, + valid_type=self.options[key].__name__)) + continue + + options[key] = value + + return options + + def set_options_from_instance(self, instance): + # type: (Instance) -> None + """Sets FBX export options from data in the instance. + + Args: + instance (Instance): Instance data. + + """ + # Parse export options + options = self.default_options + options = self.parse_overrides(instance, options) + self.log.info("Export options: {0}".format(options)) + + # Collect the start and end including handles + start = instance.data.get("frameStartHandle") or \ + instance.context.data.get("frameStartHandle") + end = instance.data.get("frameEndHandle") or \ + instance.context.data.get("frameEndHandle") + + options['bakeComplexStart'] = start + options['bakeComplexEnd'] = end + + # First apply the default export settings to be fully consistent + # each time for successive publishes + mel.eval("FBXResetExport") + + # Apply the FBX overrides through MEL since the commands + # only work correctly in MEL according to online + # available discussions on the topic + _iteritems = getattr(options, "iteritems", options.items) + for option, value in _iteritems(): + key = option[0].upper() + option[1:] # uppercase first letter + + # Boolean must be passed as lower-case strings + # as to MEL standards + if isinstance(value, bool): + value = str(value).lower() + + template = "FBXExport{0} {1}" if key == "UpAxis" else \ + "FBXExport{0} -v {1}" # noqa + cmd = template.format(key, value) + self.log.info(cmd) + mel.eval(cmd) + + # Never show the UI or generate a log + mel.eval("FBXExportShowUI -v false") + mel.eval("FBXExportGenerateLog -v false") + + @staticmethod + def export(members, path): + # type: (list, str) -> None + """Export members as FBX with given path. + + Args: + members (list): List of members to export. + path (str): Path to use for export. 
+ + """ + cmds.select(members, r=True, noExpand=True) + mel.eval('FBXExport -f "{}" -s'.format(path)) diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 376c033d46..088304ab05 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -17,15 +17,15 @@ import bson from maya import cmds, mel import maya.api.OpenMaya as om -from avalon import api, io - from openpype import lib from openpype.api import get_anatomy_settings from openpype.pipeline import ( + legacy_io, discover_loader_plugins, loaders_from_representation, get_representation_path, load_container, + registered_host, ) from .commands import reset_frame_range @@ -1387,9 +1387,13 @@ def generate_ids(nodes, asset_id=None): if asset_id is None: # Get the asset ID from the database for the asset of current context - asset_data = io.find_one({"type": "asset", - "name": api.Session["AVALON_ASSET"]}, - projection={"_id": True}) + asset_data = legacy_io.find_one( + { + "type": "asset", + "name": legacy_io.Session["AVALON_ASSET"] + }, + projection={"_id": True} + ) assert asset_data, "No current asset found in Session" asset_id = asset_data['_id'] @@ -1511,7 +1515,7 @@ def get_container_members(container): members = cmds.sets(container, query=True) or [] members = cmds.ls(members, long=True, objectsOnly=True) or [] - members = set(members) + all_members = set(members) # Include any referenced nodes from any reference in the container # This is required since we've removed adding ALL nodes of a reference @@ -1530,9 +1534,9 @@ def get_container_members(container): reference_members = cmds.ls(reference_members, long=True, objectsOnly=True) - members.update(reference_members) + all_members.update(reference_members) - return members + return list(all_members) # region LOOKDEV @@ -1544,9 +1548,11 @@ def list_looks(asset_id): # # get all subsets with look leading in # the name associated with the asset - subset = io.find({"parent": bson.ObjectId(asset_id), - "type": "subset", - "name": {"$regex": "look*"}}) + subset = legacy_io.find({ + "parent": bson.ObjectId(asset_id), + "type": "subset", + "name": {"$regex": "look*"} + }) return list(subset) @@ -1565,16 +1571,20 @@ def assign_look_by_version(nodes, version_id): """ # Get representations of shader file and relationships - look_representation = io.find_one({"type": "representation", - "parent": version_id, - "name": "ma"}) + look_representation = legacy_io.find_one({ + "type": "representation", + "parent": version_id, + "name": "ma" + }) - json_representation = io.find_one({"type": "representation", - "parent": version_id, - "name": "json"}) + json_representation = legacy_io.find_one({ + "type": "representation", + "parent": version_id, + "name": "json" + }) # See if representation is already loaded, if so reuse it. 
- host = api.registered_host() + host = registered_host() representation_id = str(look_representation['_id']) for container in host.ls(): if (container['loader'] == "LookLoader" and @@ -1636,9 +1646,11 @@ def assign_look(nodes, subset="lookDefault"): except bson.errors.InvalidId: log.warning("Asset ID is not compatible with bson") continue - subset_data = io.find_one({"type": "subset", - "name": subset, - "parent": asset_id}) + subset_data = legacy_io.find_one({ + "type": "subset", + "name": subset, + "parent": asset_id + }) if not subset_data: log.warning("No subset '{}' found for {}".format(subset, asset_id)) @@ -1646,13 +1658,18 @@ def assign_look(nodes, subset="lookDefault"): # get last version # with backwards compatibility - version = io.find_one({"parent": subset_data['_id'], - "type": "version", - "data.families": - {"$in": ["look"]} - }, - sort=[("name", -1)], - projection={"_id": True, "name": True}) + version = legacy_io.find_one( + { + "parent": subset_data['_id'], + "type": "version", + "data.families": {"$in": ["look"]} + }, + sort=[("name", -1)], + projection={ + "_id": True, + "name": True + } + ) log.debug("Assigning look '{}' ".format(subset, version["name"])) @@ -2135,7 +2152,7 @@ def reset_scene_resolution(): None """ - project_doc = io.find_one({"type": "project"}) + project_doc = legacy_io.find_one({"type": "project"}) project_data = project_doc["data"] asset_data = lib.get_asset()["data"] @@ -2168,13 +2185,13 @@ def set_context_settings(): """ # Todo (Wijnand): apply renderer and resolution of project - project_doc = io.find_one({"type": "project"}) + project_doc = legacy_io.find_one({"type": "project"}) project_data = project_doc["data"] asset_data = lib.get_asset()["data"] # Set project fps fps = asset_data.get("fps", project_data.get("fps", 25)) - api.Session["AVALON_FPS"] = str(fps) + legacy_io.Session["AVALON_FPS"] = str(fps) set_scene_fps(fps) reset_scene_resolution() @@ -2209,15 +2226,17 @@ def validate_fps(): parent = get_main_window() - dialog = popup.Popup2(parent=parent) + dialog = popup.PopupUpdateKeys(parent=parent) dialog.setModal(True) - dialog.setWindowTitle("Maya scene not in line with project") - dialog.setMessage("The FPS is out of sync, please fix") + dialog.setWindowTitle("Maya scene does not match project FPS") + dialog.setMessage("Scene %i FPS does not match project %i FPS" % + (current_fps, fps)) + dialog.setButtonText("Fix") # Set new text for button (add optional argument for the popup?) 
        toggle = dialog.widgets["toggle"]
        update = toggle.isChecked()
-        dialog.on_show.connect(lambda: set_scene_fps(fps, update))
+        dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update))
 
         dialog.show()
 
@@ -2612,7 +2631,7 @@ def get_attr_in_layer(attr, layer):
 
 def fix_incompatible_containers():
     """Backwards compatibility: old containers to use new ReferenceLoader"""
-    host = api.registered_host()
+    host = registered_host()
 
     for container in host.ls():
         loader = container['loader']
@@ -2934,7 +2953,7 @@ def update_content_on_context_change():
     This will update scene content to match new asset on context change
     """
     scene_sets = cmds.listSets(allSets=True)
-    new_asset = api.Session["AVALON_ASSET"]
+    new_asset = legacy_io.Session["AVALON_ASSET"]
     new_data = lib.get_asset()["data"]
     for s in scene_sets:
         try:
@@ -3138,11 +3157,20 @@ def set_colorspace():
 
 @contextlib.contextmanager
-def root_parent(nodes):
-    # type: (list) -> list
+def parent_nodes(nodes, parent=None):
+    # type: (list, str) -> list
     """Context manager to un-parent provided nodes and return them back."""
    import pymel.core as pm  # noqa
 
+    parent_node = None
+    delete_parent = False
+
+    if parent:
+        if not cmds.objExists(parent):
+            parent_node = pm.createNode("transform", n=parent, ss=False)
+            delete_parent = True
+        else:
+            parent_node = pm.PyNode(parent)
     node_parents = []
     for node in nodes:
         n = pm.PyNode(node)
@@ -3153,9 +3181,14 @@
     try:
         for node in node_parents:
-            node[0].setParent(world=True)
+            if not parent:
+                node[0].setParent(world=True)
+            else:
+                node[0].setParent(parent_node)
         yield
     finally:
         for node in node_parents:
             if node[1]:
                 node[0].setParent(node[1])
+        if delete_parent:
+            pm.delete(parent_node)
diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py
index 0c34998874..5956cc482c 100644
--- a/openpype/hosts/maya/api/lib_renderproducts.py
+++ b/openpype/hosts/maya/api/lib_renderproducts.py
@@ -79,6 +79,7 @@ IMAGE_PREFIXES = {
     "redshift": "defaultRenderGlobals.imageFilePrefix",
 }
 
+RENDERMAN_IMAGE_DIR = "maya/<scene>/<layer>"
 
 @attr.s
 class LayerMetadata(object):
@@ -1054,6 +1055,8 @@ class RenderProductsRenderman(ARenderProducts):
             :func:`ARenderProducts.get_render_products()`
 
         """
+        from rfm2.api.displays import get_displays  # noqa
+
         cameras = [
             self.sanitize_camera_name(c)
             for c in self.get_renderable_cameras()
@@ -1066,42 +1069,56 @@ class RenderProductsRenderman(ARenderProducts):
         ]
 
         products = []
-        default_ext = "exr"
-        displays = cmds.listConnections("rmanGlobals.displays")
 
-        for aov in displays:
-            enabled = self._get_attr(aov, "enabled")
+        # NOTE: This is guessing extensions from renderman display types.
+        #       Some of them are just framebuffers, d_texture format can be
+        #       set in display setting. We set those now to None, but it
+        #       should be handled more gracefully.
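# [Editor's note, illustrative sketch only -- not part of this patch.]
# Because the framebuffer-style drivers are stored with an explicit None
# value, dict.get() returns that stored None rather than the "exr" fallback:
#     display_types.get("d_openexr", "exr")  # -> "exr"
#     display_types.get("d_it", "exr")       # -> None (no file on disk)
#     display_types.get("d_unknown", "exr")  # -> "exr" (true fallback;
#                                            #    "d_unknown" is hypothetical)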
+        display_types = {
+            "d_deepexr": "exr",
+            "d_it": None,
+            "d_null": None,
+            "d_openexr": "exr",
+            "d_png": "png",
+            "d_pointcloud": "ptc",
+            "d_targa": "tga",
+            "d_texture": None,
+            "d_tiff": "tif"
+        }
+
+        displays = get_displays()["displays"]
+        for name, display in displays.items():
+            enabled = display["params"]["enable"]["value"]
             if not enabled:
                 continue
 
-            aov_name = str(aov)
+            aov_name = name
             if aov_name == "rmanDefaultDisplay":
                 aov_name = "beauty"
 
+            extensions = display_types.get(
+                display["driverNode"]["type"], "exr")
+
             for camera in cameras:
                 product = RenderProduct(productName=aov_name,
-                                        ext=default_ext,
+                                        ext=extensions,
                                         camera=camera)
                 products.append(product)
 
         return products
 
-    def get_files(self, product, camera):
+    def get_files(self, product):
         """Get expected files.
 
-        In renderman we hack it with prepending path. This path would
-        normally be translated from `rmanGlobals.imageOutputDir`. We skip
-        this and hardcode prepend path we expect. There is no place for user
-        to mess around with this settings anyway and it is enforced in
-        render settings validator.
         """
-        files = super(RenderProductsRenderman, self).get_files(product, camera)
+        files = super(RenderProductsRenderman, self).get_files(product)
 
         layer_data = self.layer_data
         new_files = []
+
+        resolved_image_dir = re.sub("<scene>", layer_data.sceneName, RENDERMAN_IMAGE_DIR, flags=re.IGNORECASE)  # noqa: E501
+        resolved_image_dir = re.sub("<layer>", layer_data.layerName, resolved_image_dir, flags=re.IGNORECASE)  # noqa: E501
         for file in files:
-            new_file = "{}/{}/{}".format(
-                layer_data["sceneName"], layer_data["layerName"], file
-            )
+            new_file = "{}/{}".format(resolved_image_dir, file)
             new_files.append(new_file)
 
         return new_files
diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py
index 5f0fc39bf3..97f06c43af 100644
--- a/openpype/hosts/maya/api/menu.py
+++ b/openpype/hosts/maya/api/menu.py
@@ -6,10 +6,9 @@ from Qt import QtWidgets, QtGui
 import maya.utils
 import maya.cmds as cmds
 
-import avalon.api
-
 from openpype.api import BuildWorkfile
 from openpype.settings import get_project_settings
+from openpype.pipeline import legacy_io
 from openpype.tools.utils import host_tools
 from openpype.hosts.maya.api import lib
 from .lib import get_main_window, IS_HEADLESS
@@ -40,15 +39,15 @@ def install():
     parent_widget = get_main_window()
     cmds.menu(
         MENU_NAME,
-        label=avalon.api.Session["AVALON_LABEL"],
+        label=legacy_io.Session["AVALON_LABEL"],
         tearOff=True,
         parent="MayaWindow"
     )
 
     # Create context menu
     context_label = "{}, {}".format(
-        avalon.api.Session["AVALON_ASSET"],
-        avalon.api.Session["AVALON_TASK"]
+        legacy_io.Session["AVALON_ASSET"],
+        legacy_io.Session["AVALON_TASK"]
     )
     cmds.menuItem(
         "currentContext",
@@ -211,7 +210,7 @@ def update_menu_task_label():
         return
 
     label = "{}, {}".format(
-        avalon.api.Session["AVALON_ASSET"],
-        avalon.api.Session["AVALON_TASK"]
+        legacy_io.Session["AVALON_ASSET"],
+        legacy_io.Session["AVALON_TASK"]
     )
     cmds.menuItem(object_name, edit=True, label=label)
diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py
index 5cdc3ff4fd..b0e8fac635 100644
--- a/openpype/hosts/maya/api/pipeline.py
+++ b/openpype/hosts/maya/api/pipeline.py
@@ -7,10 +7,6 @@ from maya import utils, cmds, OpenMaya
 import maya.api.OpenMaya as om
 
 import pyblish.api
-import avalon.api
-
-from avalon.lib import find_submodule
-from avalon.pipeline import AVALON_CONTAINER_ID
 
 import openpype.hosts.maya
 from openpype.tools.utils import host_tools
@@ -21,9 +17,14 @@ from openpype.lib import (
 )
 from
openpype.lib.path_tools import HostDirmap from openpype.pipeline import ( - LegacyCreator, + legacy_io, register_loader_plugin_path, + register_inventory_action_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_inventory_action_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, ) from openpype.hosts.maya.lib import copy_workspace_mel from . import menu, lib @@ -58,8 +59,8 @@ def install(): pyblish.api.register_host("maya") register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) + register_creator_plugin_path(CREATE_PATH) + register_inventory_action_path(INVENTORY_PATH) log.info(PUBLISH_PATH) log.info("Installing callbacks ... ") @@ -92,7 +93,7 @@ def _set_project(): None """ - workdir = avalon.api.Session["AVALON_WORKDIR"] + workdir = legacy_io.Session["AVALON_WORKDIR"] try: os.makedirs(workdir) @@ -187,10 +188,8 @@ def uninstall(): pyblish.api.deregister_host("maya") deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.deregister_plugin_path( - avalon.api.InventoryAction, INVENTORY_PATH - ) + deregister_creator_plugin_path(CREATE_PATH) + deregister_inventory_action_path(INVENTORY_PATH) menu.uninstall() @@ -268,21 +267,8 @@ def ls(): """ container_names = _ls() - - has_metadata_collector = False - config_host = find_submodule(avalon.api.registered_config(), "maya") - if hasattr(config_host, "collect_container_metadata"): - has_metadata_collector = True - for container in sorted(container_names): - data = parse_container(container) - - # Collect custom data if attribute is present - if has_metadata_collector: - metadata = config_host.collect_container_metadata(container) - data.update(metadata) - - yield data + yield parse_container(container) def containerise(name, @@ -462,7 +448,7 @@ def on_open(): dialog.setWindowTitle("Maya scene has outdated content") dialog.setMessage("There are outdated containers in " "your Maya scene.") - dialog.on_show.connect(_on_show_inventory) + dialog.on_clicked.connect(_on_show_inventory) dialog.show() @@ -487,7 +473,7 @@ def on_task_changed(): # Run menu.update_menu_task_label() - workdir = avalon.api.Session["AVALON_WORKDIR"] + workdir = legacy_io.Session["AVALON_WORKDIR"] if os.path.exists(workdir): log.info("Updating Maya workspace for task change to %s", workdir) @@ -508,9 +494,9 @@ def on_task_changed(): lib.update_content_on_context_change() msg = " project: {}\n asset: {}\n task:{}".format( - avalon.api.Session["AVALON_PROJECT"], - avalon.api.Session["AVALON_ASSET"], - avalon.api.Session["AVALON_TASK"] + legacy_io.Session["AVALON_PROJECT"], + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"] ) lib.show_message( diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 84379bc145..3721868823 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -4,11 +4,11 @@ from maya import cmds import qargparse -from avalon.pipeline import AVALON_CONTAINER_ID from openpype.pipeline import ( LegacyCreator, LoaderPlugin, get_representation_path, + AVALON_CONTAINER_ID, ) from .pipeline import containerise diff --git a/openpype/hosts/maya/api/setdress.py b/openpype/hosts/maya/api/setdress.py index 96a9700b88..f8d3ed79b8 100644 --- a/openpype/hosts/maya/api/setdress.py +++ b/openpype/hosts/maya/api/setdress.py @@ -6,10 +6,13 @@ import contextlib import copy 
import six +from bson.objectid import ObjectId + from maya import cmds -from avalon import io from openpype.pipeline import ( + schema, + legacy_io, discover_loader_plugins, loaders_from_representation, load_container, @@ -251,7 +254,6 @@ def get_contained_containers(container): """ - import avalon.schema from .pipeline import parse_container # Get avalon containers in this package setdress container @@ -261,7 +263,7 @@ def get_contained_containers(container): try: member_container = parse_container(node) containers.append(member_container) - except avalon.schema.ValidationError: + except schema.ValidationError: pass return containers @@ -281,21 +283,23 @@ def update_package_version(container, version): """ # Versioning (from `core.maya.pipeline`) - current_representation = io.find_one({ - "_id": io.ObjectId(container["representation"]) + current_representation = legacy_io.find_one({ + "_id": ObjectId(container["representation"]) }) assert current_representation is not None, "This is a bug" - version_, subset, asset, project = io.parenthood(current_representation) + version_, subset, asset, project = legacy_io.parenthood( + current_representation + ) if version == -1: - new_version = io.find_one({ + new_version = legacy_io.find_one({ "type": "version", "parent": subset["_id"] }, sort=[("name", -1)]) else: - new_version = io.find_one({ + new_version = legacy_io.find_one({ "type": "version", "parent": subset["_id"], "name": version, @@ -304,7 +308,7 @@ def update_package_version(container, version): assert new_version is not None, "This is a bug" # Get the new representation (new file) - new_representation = io.find_one({ + new_representation = legacy_io.find_one({ "type": "representation", "parent": new_version["_id"], "name": current_representation["name"] @@ -326,8 +330,8 @@ def update_package(set_container, representation): """ # Load the original package data - current_representation = io.find_one({ - "_id": io.ObjectId(set_container['representation']), + current_representation = legacy_io.find_one({ + "_id": ObjectId(set_container['representation']), "type": "representation" }) @@ -477,11 +481,11 @@ def update_scene(set_container, containers, current_data, new_data, new_file): # Check whether the conversion can be done by the Loader. # They *must* use the same asset, subset and Loader for # `update_container` to make sense. 
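# [Editor's note, illustrative sketch only -- not part of this patch.]
# compare_representations() is not shown in this diff; judging from the
# comment above, it presumably checks the representation "context" data,
# along these hypothetical lines:
#     same_asset = old["context"]["asset"] == new["context"]["asset"]
#     same_subset = old["context"]["subset"] == new["context"]["subset"]
#     is_valid = same_asset and same_subset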
-        old = io.find_one({
-            "_id": io.ObjectId(representation_current)
+        old = legacy_io.find_one({
+            "_id": ObjectId(representation_current)
         })
-        new = io.find_one({
-            "_id": io.ObjectId(representation_new)
+        new = legacy_io.find_one({
+            "_id": ObjectId(representation_new)
         })
         is_valid = compare_representations(old=old, new=new)
         if not is_valid:
diff --git a/openpype/hosts/maya/api/workio.py b/openpype/hosts/maya/api/workio.py
index 698c48e81e..fd4961c4bf 100644
--- a/openpype/hosts/maya/api/workio.py
+++ b/openpype/hosts/maya/api/workio.py
@@ -1,11 +1,12 @@
 """Host API required Work Files tool"""
 import os
 from maya import cmds
-from avalon import api
+
+from openpype.pipeline import HOST_WORKFILE_EXTENSIONS
 
 
 def file_extensions():
-    return api.HOST_WORKFILE_EXTENSIONS["maya"]
+    return HOST_WORKFILE_EXTENSIONS["maya"]
 
 
 def has_unsaved_changes():
diff --git a/openpype/hosts/maya/plugins/create/create_look.py b/openpype/hosts/maya/plugins/create/create_look.py
index 56e2640919..44e439fe1f 100644
--- a/openpype/hosts/maya/plugins/create/create_look.py
+++ b/openpype/hosts/maya/plugins/create/create_look.py
@@ -22,4 +22,6 @@ class CreateLook(plugin.Creator):
         self.data["maketx"] = self.make_tx
 
         # Enable users to force a copy.
+        # - on Windows, "forceCopy" is always set to `True` because of the
+        #   Windows implementation of hardlinks
         self.data["forceCopy"] = False
diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py
new file mode 100644
index 0000000000..b2266e5a57
--- /dev/null
+++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py
@@ -0,0 +1,51 @@
+from openpype.hosts.maya.api import plugin, lib
+
+
+class CreateMultiverseUsd(plugin.Creator):
+    """Multiverse USD data"""
+
+    name = "usdMain"
+    label = "Multiverse USD"
+    family = "usd"
+    icon = "cubes"
+
+    def __init__(self, *args, **kwargs):
+        super(CreateMultiverseUsd, self).__init__(*args, **kwargs)
+
+        # Add animation data first, since it maintains order.
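# [Editor's note, illustrative sketch only -- not part of this patch.]
# lib.collect_animation_data(True) is expected to seed the instance with
# an ordered mapping roughly like this (example values; fps is included
# because of the True argument):
#     {"frameStart": 1001, "frameEnd": 1100,
#      "handleStart": 0, "handleEnd": 0,
#      "step": 1.0, "fps": 25.0}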
+ self.data.update(lib.collect_animation_data(True)) + + self.data["stripNamespaces"] = False + self.data["mergeTransformAndShape"] = False + self.data["writeAncestors"] = True + self.data["flattenParentXforms"] = False + self.data["writeSparseOverrides"] = False + self.data["useMetaPrimPath"] = False + self.data["customRootPath"] = '' + self.data["customAttributes"] = '' + self.data["nodeTypesToIgnore"] = '' + self.data["writeMeshes"] = True + self.data["writeCurves"] = True + self.data["writeParticles"] = True + self.data["writeCameras"] = False + self.data["writeLights"] = False + self.data["writeJoints"] = False + self.data["writeCollections"] = False + self.data["writePositions"] = True + self.data["writeNormals"] = True + self.data["writeUVs"] = True + self.data["writeColorSets"] = False + self.data["writeTangents"] = False + self.data["writeRefPositions"] = False + self.data["writeBlendShapes"] = False + self.data["writeDisplayColor"] = False + self.data["writeSkinWeights"] = False + self.data["writeMaterialAssignment"] = False + self.data["writeHardwareShader"] = False + self.data["writeShadingNetworks"] = False + self.data["writeTransformMatrix"] = True + self.data["writeUsdAttributes"] = False + self.data["timeVaryingTopology"] = False + self.data["customMaterialNamespace"] = '' + self.data["numTimeSamples"] = 1 + self.data["timeSamplesSpan"] = 0.0 diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py new file mode 100644 index 0000000000..77b808c459 --- /dev/null +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py @@ -0,0 +1,23 @@ +from openpype.hosts.maya.api import plugin, lib + + +class CreateMultiverseUsdComp(plugin.Creator): + """Create Multiverse USD Composition""" + + name = "usdCompositionMain" + label = "Multiverse USD Composition" + family = "usdComposition" + icon = "cubes" + + def __init__(self, *args, **kwargs): + super(CreateMultiverseUsdComp, self).__init__(*args, **kwargs) + + # Add animation data first, since it maintains order. + self.data.update(lib.collect_animation_data(True)) + + self.data["stripNamespaces"] = False + self.data["mergeTransformAndShape"] = False + self.data["flattenContent"] = False + self.data["writePendingOverrides"] = False + self.data["numTimeSamples"] = 1 + self.data["timeSamplesSpan"] = 0.0 diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py new file mode 100644 index 0000000000..bb82ab2039 --- /dev/null +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py @@ -0,0 +1,28 @@ +from openpype.hosts.maya.api import plugin, lib + + +class CreateMultiverseUsdOver(plugin.Creator): + """Multiverse USD data""" + + name = "usdOverrideMain" + label = "Multiverse USD Override" + family = "usdOverride" + icon = "cubes" + + def __init__(self, *args, **kwargs): + super(CreateMultiverseUsdOver, self).__init__(*args, **kwargs) + + # Add animation data first, since it maintains order. 
+        self.data.update(lib.collect_animation_data(True))
+
+        self.data["writeAll"] = False
+        self.data["writeTransforms"] = True
+        self.data["writeVisibility"] = True
+        self.data["writeAttributes"] = True
+        self.data["writeMaterials"] = True
+        self.data["writeVariants"] = True
+        self.data["writeVariantsDefinition"] = True
+        self.data["writeActiveState"] = True
+        self.data["writeNamespaces"] = False
+        self.data["numTimeSamples"] = 1
+        self.data["timeSamplesSpan"] = 0.0
diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py
index 9002ae3876..8e9bd0e22b 100644
--- a/openpype/hosts/maya/plugins/create/create_render.py
+++ b/openpype/hosts/maya/plugins/create/create_render.py
@@ -4,8 +4,6 @@ import os
 import json
 import appdirs
 import requests
-import six
-import sys
 
 from maya import cmds
 import maya.app.renderSetup.model.renderSetup as renderSetup
@@ -14,14 +12,16 @@ from openpype.hosts.maya.api import (
     lib,
     plugin
 )
+from openpype.lib import requests_get
 from openpype.api import (
     get_system_settings,
     get_project_settings,
     get_asset)
 from openpype.modules import ModulesManager
-from openpype.pipeline import CreatorError
-
-from avalon.api import Session
+from openpype.pipeline import (
+    CreatorError,
+    legacy_io,
+)
 
 
 class CreateRender(plugin.Creator):
@@ -76,7 +76,7 @@ class CreateRender(plugin.Creator):
         'mentalray': 'defaultRenderGlobals.imageFilePrefix',
         'vray': 'vraySettings.fileNamePrefix',
         'arnold': 'defaultRenderGlobals.imageFilePrefix',
-        'renderman': 'defaultRenderGlobals.imageFilePrefix',
+        'renderman': 'rmanGlobals.imageFileFormat',
         'redshift': 'defaultRenderGlobals.imageFilePrefix'
     }
 
@@ -84,7 +84,9 @@ class CreateRender(plugin.Creator):
         'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>',  # noqa
         'vray': 'maya/<Scene>/<Layer>/<Layer>',
         'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>',  # noqa
-        'renderman': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>',
+        # this needs `imageOutputDir`
+        # (<ws>/renders/maya/<scene>) set separately
+        'renderman': '<layer>_<aov>.<f4>.<ext>',
         'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>'  # noqa
     }
 
@@ -104,7 +106,7 @@ class CreateRender(plugin.Creator):
             self.deadline_servers = {}
             return
         self._project_settings = get_project_settings(
-            Session["AVALON_PROJECT"])
+            legacy_io.Session["AVALON_PROJECT"])
 
         # project_settings/maya/create/CreateRender/aov_separator
         try:
@@ -117,6 +119,8 @@ class CreateRender(plugin.Creator):
         except KeyError:
             self.aov_separator = "_"
 
+        manager = ModulesManager()
+        self.deadline_module = manager.modules_by_name["deadline"]
         try:
             default_servers = deadline_settings["deadline_urls"]
             project_servers = (
@@ -133,10 +137,8 @@ class CreateRender(plugin.Creator):
         except AttributeError:
             # Handle situation were we had only one url for deadline.
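# [Editor's note, illustrative sketch only -- not part of this patch.]
# deadline_module.deadline_urls presumably maps a server alias to its
# webservice URL, matching the multi-server settings shape, e.g.
# (example values only):
#     {"default": "http://localhost:8082"}
# so the single-url fallback keeps the same alias -> url structure.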
-            manager = ModulesManager()
-            deadline_module = manager.modules_by_name["deadline"]
             # get default deadline webservice url from deadline module
-            self.deadline_servers = deadline_module.deadline_urls
+            self.deadline_servers = self.deadline_module.deadline_urls
 
     def process(self):
         """Entry point."""
@@ -205,53 +207,37 @@ class CreateRender(plugin.Creator):
     def _deadline_webservice_changed(self):
         """Refresh Deadline server dependent options."""
         # get selected server
-        from maya import cmds
         webservice = self.deadline_servers[
             self.server_aliases[
                 cmds.getAttr("{}.deadlineServers".format(self.instance))
             ]
         ]
-        pools = self._get_deadline_pools(webservice)
+        pools = self.deadline_module.get_deadline_pools(webservice, self.log)
         cmds.deleteAttr("{}.primaryPool".format(self.instance))
         cmds.deleteAttr("{}.secondaryPool".format(self.instance))
+
+        pool_setting = (self._project_settings["deadline"]
+                        ["publish"]
+                        ["CollectDeadlinePools"])
+
+        primary_pool = pool_setting["primary_pool"]
+        sorted_pools = self._set_default_pool(list(pools), primary_pool)
         cmds.addAttr(self.instance, longName="primaryPool",
                      attributeType="enum",
-                     enumName=":".join(pools))
-        cmds.addAttr(self.instance, longName="secondaryPool",
+                     enumName=":".join(sorted_pools))
+
+        pools = ["-"] + pools
+        secondary_pool = pool_setting["secondary_pool"]
+        sorted_pools = self._set_default_pool(list(pools), secondary_pool)
+        cmds.addAttr(self.instance, longName="secondaryPool",
                      attributeType="enum",
-                     enumName=":".join(["-"] + pools))
-
-    def _get_deadline_pools(self, webservice):
-        # type: (str) -> list
-        """Get pools from Deadline.
-        Args:
-            webservice (str): Server url.
-        Returns:
-            list: Pools.
-        Throws:
-            RuntimeError: If deadline webservice is unreachable.
-
-        """
-        argument = "{}/api/pools?NamesOnly=true".format(webservice)
-        try:
-            response = self._requests_get(argument)
-        except requests.exceptions.ConnectionError as exc:
-            msg = 'Cannot connect to deadline web service'
-            self.log.error(msg)
-            six.reraise(
-                RuntimeError,
-                RuntimeError('{} - {}'.format(msg, exc)),
-                sys.exc_info()[2])
-        if not response.ok:
-            self.log.warning("No pools retrieved")
-            return []
-
-        return response.json()
+                     enumName=":".join(sorted_pools))
 
     def _create_render_settings(self):
         """Create instance settings."""
         # get pools
         pool_names = []
+        default_priority = 50
 
         self.server_aliases = list(self.deadline_servers.keys())
         self.data["deadlineServers"] = self.server_aliases
@@ -260,7 +246,8 @@ class CreateRender(plugin.Creator):
         self.data["extendFrames"] = False
         self.data["overrideExistingFrame"] = True
         # self.data["useLegacyRenderLayers"] = True
-        self.data["priority"] = 50
+        self.data["priority"] = default_priority
+        self.data["tile_priority"] = default_priority
         self.data["framesPerTask"] = 1
         self.data["whitelist"] = False
         self.data["machineList"] = ""
@@ -293,7 +280,18 @@ class CreateRender(plugin.Creator):
             # use first one for initial list of pools.
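# [Editor's note, illustrative sketch only -- not part of this patch.]
# _set_default_pool() (defined further below in this file) only reorders
# the list so the configured default comes first, e.g.:
#     self._set_default_pool(["local", "farm", "gpu"], "farm")
#     # -> ["farm", "local", "gpu"]
#     self._set_default_pool(["local", "farm"], "")
#     # -> ["local", "farm"] (unknown or empty default keeps the order)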
deadline_url = next(iter(self.deadline_servers.values())) - pool_names = self._get_deadline_pools(deadline_url) + pool_names = self.deadline_module.get_deadline_pools(deadline_url, + self.log) + maya_submit_dl = self._project_settings.get( + "deadline", {}).get( + "publish", {}).get( + "MayaSubmitDeadline", {}) + priority = maya_submit_dl.get("priority", default_priority) + self.data["priority"] = priority + + tile_priority = maya_submit_dl.get("tile_priority", + default_priority) + self.data["tile_priority"] = tile_priority if muster_enabled: self.log.info(">>> Loading Muster credentials ...") @@ -314,12 +312,27 @@ class CreateRender(plugin.Creator): self.log.info(" - pool: {}".format(pool["name"])) pool_names.append(pool["name"]) - self.data["primaryPool"] = pool_names + pool_setting = (self._project_settings["deadline"] + ["publish"] + ["CollectDeadlinePools"]) + primary_pool = pool_setting["primary_pool"] + self.data["primaryPool"] = self._set_default_pool(pool_names, + primary_pool) # We add a string "-" to allow the user to not # set any secondary pools - self.data["secondaryPool"] = ["-"] + pool_names + pool_names = ["-"] + pool_names + secondary_pool = pool_setting["secondary_pool"] + self.data["secondaryPool"] = self._set_default_pool(pool_names, + secondary_pool) self.options = {"useSelection": False} # Force no content + def _set_default_pool(self, pool_names, pool_value): + """Reorder pool names, default should come first""" + if pool_value and pool_value in pool_names: + pool_names.remove(pool_value) + pool_names = [pool_value] + pool_names + return pool_names + def _load_credentials(self): """Load Muster credentials. @@ -354,7 +367,7 @@ class CreateRender(plugin.Creator): """ params = {"authToken": self._token} api_entry = "/api/pools/list" - response = self._requests_get(self.MUSTER_REST_URL + api_entry, + response = requests_get(self.MUSTER_REST_URL + api_entry, params=params) if response.status_code != 200: if response.status_code == 401: @@ -380,45 +393,11 @@ class CreateRender(plugin.Creator): api_url = "{}/muster/show_login".format( os.environ["OPENPYPE_WEBSERVER_URL"]) self.log.debug(api_url) - login_response = self._requests_get(api_url, timeout=1) + login_response = requests_get(api_url, timeout=1) if login_response.status_code != 200: self.log.error("Cannot show login form to Muster") raise Exception("Cannot show login form to Muster") - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if "verify" not in kwargs: - kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """Wrap request get method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. 
-
-        """
-        if "verify" not in kwargs:
-            kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True)
-        return requests.get(*args, **kwargs)
-
     def _set_default_renderer_settings(self, renderer):
         """Set basic settings based on renderer.
 
@@ -463,6 +442,10 @@ class CreateRender(plugin.Creator):
 
         self._set_global_output_settings()
 
+        if renderer == "renderman":
+            cmds.setAttr("rmanGlobals.imageOutputDir",
+                         "maya/<scene>/<layer>", type="string")
+
     def _set_vray_settings(self, asset):
         # type: (dict) -> None
         """Sets important settings for Vray."""
diff --git a/openpype/hosts/maya/plugins/create/create_review.py b/openpype/hosts/maya/plugins/create/create_review.py
index 14a21d28ca..fbf3399f61 100644
--- a/openpype/hosts/maya/plugins/create/create_review.py
+++ b/openpype/hosts/maya/plugins/create/create_review.py
@@ -15,6 +15,14 @@ class CreateReview(plugin.Creator):
     keepImages = False
     isolate = False
     imagePlane = True
+    transparency = [
+        "preset",
+        "simple",
+        "object sorting",
+        "weighted average",
+        "depth peeling",
+        "alpha cut"
+    ]
 
     def __init__(self, *args, **kwargs):
         super(CreateReview, self).__init__(*args, **kwargs)
@@ -28,5 +36,6 @@
         data["isolate"] = self.isolate
         data["keepImages"] = self.keepImages
         data["imagePlane"] = self.imagePlane
+        data["transparency"] = self.transparency
 
         self.data = data
diff --git a/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py b/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py
new file mode 100644
index 0000000000..1a8e84c80d
--- /dev/null
+++ b/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+"""Creator for Unreal Skeletal Meshes."""
+from openpype.hosts.maya.api import plugin, lib
+from openpype.pipeline import legacy_io
+from maya import cmds  # noqa
+
+
+class CreateUnrealSkeletalMesh(plugin.Creator):
+    """Unreal Static Meshes with collisions."""
+    name = "staticMeshMain"
+    label = "Unreal - Skeletal Mesh"
+    family = "skeletalMesh"
+    icon = "thumbs-up"
+    dynamic_subset_keys = ["asset"]
+
+    joint_hints = []
+
+    def __init__(self, *args, **kwargs):
+        """Constructor."""
+        super(CreateUnrealSkeletalMesh, self).__init__(*args, **kwargs)
+
+    @classmethod
+    def get_dynamic_data(
+        cls, variant, task_name, asset_id, project_name, host_name
+    ):
+        dynamic_data = super(CreateUnrealSkeletalMesh, cls).get_dynamic_data(
+            variant, task_name, asset_id, project_name, host_name
+        )
+        dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET")
+        return dynamic_data
+
+    def process(self):
+        self.name = "{}_{}".format(self.family, self.name)
+        with lib.undo_chunk():
+            instance = super(CreateUnrealSkeletalMesh, self).process()
+            content = cmds.sets(instance, query=True)
+
+            # empty set and process its former content
+            cmds.sets(content, rm=instance)
+            geometry_set = cmds.sets(name="geometry_SET", empty=True)
+            joints_set = cmds.sets(name="joints_SET", empty=True)
+
+            cmds.sets([geometry_set, joints_set], forceElement=instance)
+            members = cmds.ls(content) or []
+
+            for node in members:
+                if node in self.joint_hints:
+                    cmds.sets(node, forceElement=joints_set)
+                else:
+                    cmds.sets(node, forceElement=geometry_set)
diff --git a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py b/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py
index 9ad560ab7c..4e4417ff34 100644
--- a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py
+++ b/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
"""Creator for Unreal Static Meshes.""" from openpype.hosts.maya.api import plugin, lib -from avalon.api import Session from openpype.api import get_project_settings +from openpype.pipeline import legacy_io from maya import cmds # noqa @@ -10,7 +10,7 @@ class CreateUnrealStaticMesh(plugin.Creator): """Unreal Static Meshes with collisions.""" name = "staticMeshMain" label = "Unreal - Static Mesh" - family = "unrealStaticMesh" + family = "staticMesh" icon = "cube" dynamic_subset_keys = ["asset"] @@ -18,7 +18,7 @@ class CreateUnrealStaticMesh(plugin.Creator): """Constructor.""" super(CreateUnrealStaticMesh, self).__init__(*args, **kwargs) self._project_settings = get_project_settings( - Session["AVALON_PROJECT"]) + legacy_io.Session["AVALON_PROJECT"]) @classmethod def get_dynamic_data( @@ -27,11 +27,11 @@ class CreateUnrealStaticMesh(plugin.Creator): dynamic_data = super(CreateUnrealStaticMesh, cls).get_dynamic_data( variant, task_name, asset_id, project_name, host_name ) - dynamic_data["asset"] = Session.get("AVALON_ASSET") - + dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET") return dynamic_data def process(self): + self.name = "{}_{}".format(self.family, self.name) with lib.undo_chunk(): instance = super(CreateUnrealStaticMesh, self).process() content = cmds.sets(instance, query=True) diff --git a/openpype/hosts/maya/plugins/create/create_vrayscene.py b/openpype/hosts/maya/plugins/create/create_vrayscene.py index fa9c59e016..45c4b7e443 100644 --- a/openpype/hosts/maya/plugins/create/create_vrayscene.py +++ b/openpype/hosts/maya/plugins/create/create_vrayscene.py @@ -4,8 +4,6 @@ import os import json import appdirs import requests -import six -import sys from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup @@ -19,11 +17,13 @@ from openpype.api import ( get_project_settings ) -from openpype.pipeline import CreatorError +from openpype.lib import requests_get +from openpype.pipeline import ( + CreatorError, + legacy_io, +) from openpype.modules import ModulesManager -from avalon.api import Session - class CreateVRayScene(plugin.Creator): """Create Vray Scene.""" @@ -40,11 +40,15 @@ class CreateVRayScene(plugin.Creator): self._rs = renderSetup.instance() self.data["exportOnFarm"] = False deadline_settings = get_system_settings()["modules"]["deadline"] + + manager = ModulesManager() + self.deadline_module = manager.modules_by_name["deadline"] + if not deadline_settings["enabled"]: self.deadline_servers = {} return self._project_settings = get_project_settings( - Session["AVALON_PROJECT"]) + legacy_io.Session["AVALON_PROJECT"]) try: default_servers = deadline_settings["deadline_urls"] @@ -62,10 +66,8 @@ class CreateVRayScene(plugin.Creator): except AttributeError: # Handle situation were we had only one url for deadline. 
- manager = ModulesManager() - deadline_module = manager.modules_by_name["deadline"] # get default deadline webservice url from deadline module - self.deadline_servers = deadline_module.deadline_urls + self.deadline_servers = self.deadline_module.deadline_urls def process(self): """Entry point.""" @@ -128,7 +130,7 @@ class CreateVRayScene(plugin.Creator): cmds.getAttr("{}.deadlineServers".format(self.instance)) ] ] - pools = self._get_deadline_pools(webservice) + pools = self.deadline_module.get_deadline_pools(webservice) cmds.deleteAttr("{}.primaryPool".format(self.instance)) cmds.deleteAttr("{}.secondaryPool".format(self.instance)) cmds.addAttr(self.instance, longName="primaryPool", @@ -138,33 +140,6 @@ class CreateVRayScene(plugin.Creator): attributeType="enum", enumName=":".join(["-"] + pools)) - def _get_deadline_pools(self, webservice): - # type: (str) -> list - """Get pools from Deadline. - Args: - webservice (str): Server url. - Returns: - list: Pools. - Throws: - RuntimeError: If deadline webservice is unreachable. - - """ - argument = "{}/api/pools?NamesOnly=true".format(webservice) - try: - response = self._requests_get(argument) - except requests.exceptions.ConnectionError as exc: - msg = 'Cannot connect to deadline web service' - self.log.error(msg) - six.reraise( - CreatorError, - CreatorError('{} - {}'.format(msg, exc)), - sys.exc_info()[2]) - if not response.ok: - self.log.warning("No pools retrieved") - return [] - - return response.json() - def _create_vray_instance_settings(self): # get pools pools = [] @@ -195,7 +170,7 @@ class CreateVRayScene(plugin.Creator): for k in self.deadline_servers.keys() ][0] - pool_names = self._get_deadline_pools(deadline_url) + pool_names = self.deadline_module.get_deadline_pools(deadline_url) if muster_enabled: self.log.info(">>> Loading Muster credentials ...") @@ -259,8 +234,8 @@ class CreateVRayScene(plugin.Creator): """ params = {"authToken": self._token} api_entry = "/api/pools/list" - response = self._requests_get(self.MUSTER_REST_URL + api_entry, - params=params) + response = requests_get(self.MUSTER_REST_URL + api_entry, + params=params) if response.status_code != 200: if response.status_code == 401: self.log.warning("Authentication token expired.") @@ -285,45 +260,7 @@ class CreateVRayScene(plugin.Creator): api_url = "{}/muster/show_login".format( os.environ["OPENPYPE_WEBSERVER_URL"]) self.log.debug(api_url) - login_response = self._requests_get(api_url, timeout=1) + login_response = requests_get(api_url, timeout=1) if login_response.status_code != 200: self.log.error("Cannot show login form to Muster") raise CreatorError("Cannot show login form to Muster") - - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if "verify" not in kwargs: - kwargs["verify"] = ( - False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True - ) # noqa - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """Wrap request get method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. 
This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if "verify" not in kwargs: - kwargs["verify"] = ( - False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True - ) # noqa - return requests.get(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/inventory/import_modelrender.py b/openpype/hosts/maya/plugins/inventory/import_modelrender.py index c5d3d0c8f4..a5367f16e5 100644 --- a/openpype/hosts/maya/plugins/inventory/import_modelrender.py +++ b/openpype/hosts/maya/plugins/inventory/import_modelrender.py @@ -1,8 +1,10 @@ import json -from avalon import api, io +from bson.objectid import ObjectId + from openpype.pipeline import ( + InventoryAction, get_representation_context, - get_representation_path_from_context, + legacy_io, ) from openpype.hosts.maya.api.lib import ( maintained_selection, @@ -10,7 +12,7 @@ from openpype.hosts.maya.api.lib import ( ) -class ImportModelRender(api.InventoryAction): +class ImportModelRender(InventoryAction): label = "Import Model Render Sets" icon = "industry" @@ -38,8 +40,8 @@ class ImportModelRender(api.InventoryAction): else: nodes.append(n) - repr_doc = io.find_one({ - "_id": io.ObjectId(container["representation"]), + repr_doc = legacy_io.find_one({ + "_id": ObjectId(container["representation"]), }) version_id = repr_doc["parent"] @@ -62,7 +64,7 @@ class ImportModelRender(api.InventoryAction): from maya import cmds # Get representations of shader file and relationships - look_repr = io.find_one({ + look_repr = legacy_io.find_one({ "type": "representation", "parent": version_id, "name": {"$regex": self.scene_type_regex}, @@ -71,17 +73,17 @@ class ImportModelRender(api.InventoryAction): print("No model render sets for this model version..") return - json_repr = io.find_one({ + json_repr = legacy_io.find_one({ "type": "representation", "parent": version_id, "name": self.look_data_type, }) context = get_representation_context(look_repr["_id"]) - maya_file = get_representation_path_from_context(context) + maya_file = self.filepath_from_context(context) context = get_representation_context(json_repr["_id"]) - json_file = get_representation_path_from_context(context) + json_file = self.filepath_from_context(context) # Import the look file with maintained_selection(): diff --git a/openpype/hosts/maya/plugins/inventory/import_reference.py b/openpype/hosts/maya/plugins/inventory/import_reference.py index 2fa132a867..afb1e0e17f 100644 --- a/openpype/hosts/maya/plugins/inventory/import_reference.py +++ b/openpype/hosts/maya/plugins/inventory/import_reference.py @@ -1,11 +1,10 @@ from maya import cmds -from avalon import api - +from openpype.pipeline import InventoryAction from openpype.hosts.maya.api.plugin import get_reference_node -class ImportReference(api.InventoryAction): +class ImportReference(InventoryAction): """Imports selected reference to inside of the file.""" label = "Import Reference" diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py index d8844ffea6..ce814e1299 100644 --- a/openpype/hosts/maya/plugins/load/load_audio.py +++ b/openpype/hosts/maya/plugins/load/load_audio.py @@ -1,8 +1,9 @@ from maya import cmds, mel -from avalon import io + from openpype.pipeline import ( + legacy_io, load, - get_representation_path + 
get_representation_path, ) from openpype.hosts.maya.api.pipeline import containerise from openpype.hosts.maya.api.lib import unique_namespace @@ -64,9 +65,9 @@ class AudioLoader(load.LoaderPlugin): ) # Set frame range. - version = io.find_one({"_id": representation["parent"]}) - subset = io.find_one({"_id": version["parent"]}) - asset = io.find_one({"_id": subset["parent"]}) + version = legacy_io.find_one({"_id": representation["parent"]}) + subset = legacy_io.find_one({"_id": version["parent"]}) + asset = legacy_io.find_one({"_id": subset["parent"]}) audio_node.sourceStart.set(1 - asset["data"]["frameStart"]) audio_node.sourceEnd.set(asset["data"]["frameEnd"]) diff --git a/openpype/hosts/maya/plugins/load/load_image_plane.py b/openpype/hosts/maya/plugins/load/load_image_plane.py index b250986489..b67c2cb209 100644 --- a/openpype/hosts/maya/plugins/load/load_image_plane.py +++ b/openpype/hosts/maya/plugins/load/load_image_plane.py @@ -1,7 +1,7 @@ from Qt import QtWidgets, QtCore -from avalon import io from openpype.pipeline import ( + legacy_io, load, get_representation_path ) @@ -216,9 +216,9 @@ class ImagePlaneLoader(load.LoaderPlugin): ) # Set frame range. - version = io.find_one({"_id": representation["parent"]}) - subset = io.find_one({"_id": version["parent"]}) - asset = io.find_one({"_id": subset["parent"]}) + version = legacy_io.find_one({"_id": representation["parent"]}) + subset = legacy_io.find_one({"_id": version["parent"]}) + asset = legacy_io.find_one({"_id": subset["parent"]}) start_frame = asset["data"]["frameStart"] end_frame = asset["data"]["frameEnd"] image_plane_shape.frameOffset.set(1 - start_frame) diff --git a/openpype/hosts/maya/plugins/load/load_look.py b/openpype/hosts/maya/plugins/load/load_look.py index 8f02ed59b8..80eac8e0b5 100644 --- a/openpype/hosts/maya/plugins/load/load_look.py +++ b/openpype/hosts/maya/plugins/load/load_look.py @@ -5,8 +5,10 @@ from collections import defaultdict from Qt import QtWidgets -from avalon import io -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + legacy_io, + get_representation_path, +) import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api import lib from openpype.widgets.message_window import ScrollMessageBox @@ -71,7 +73,7 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): shader_nodes = cmds.ls(members, type='shadingEngine') nodes = set(self._get_nodes_with_shader(shader_nodes)) - json_representation = io.find_one({ + json_representation = legacy_io.find_one({ "type": "representation", "parent": representation['parent'], "name": "json" diff --git a/openpype/hosts/maya/plugins/load/load_multiverse_usd.py b/openpype/hosts/maya/plugins/load/load_multiverse_usd.py new file mode 100644 index 0000000000..c03f2c5d92 --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_multiverse_usd.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +import maya.cmds as cmds + +from openpype.pipeline import ( + load, + get_representation_path +) +from openpype.hosts.maya.api.lib import ( + maintained_selection, + namespaced, + unique_namespace +) +from openpype.hosts.maya.api.pipeline import containerise + + +class MultiverseUsdLoader(load.LoaderPlugin): + """Load the USD by Multiverse""" + + families = ["model", "usd", "usdComposition", "usdOverride", + "pointcache", "animation"] + representations = ["usd", "usda", "usdc", "usdz", "abc"] + + label = "Read USD by Multiverse" + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, 
namespace=None, options=None): + + asset = context['asset']['name'] + namespace = namespace or unique_namespace( + asset + "_", + prefix="_" if asset[0].isdigit() else "", + suffix="_", + ) + + # Create the shape + cmds.loadPlugin("MultiverseForMaya", quiet=True) + + shape = None + transform = None + with maintained_selection(): + cmds.namespace(addNamespace=namespace) + with namespaced(namespace, new=False): + import multiverse + shape = multiverse.CreateUsdCompound(self.fname) + transform = cmds.listRelatives( + shape, parent=True, fullPath=True)[0] + + # Lock the shape node so the user cannot delete it. + cmds.lockNode(shape, lock=True) + + nodes = [transform, shape] + self[:] = nodes + + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__) + + def update(self, container, representation): + # type: (dict, dict) -> None + """Update container with specified representation.""" + node = container['objectName'] + assert cmds.objExists(node), "Missing container" + + members = cmds.sets(node, query=True) or [] + shapes = cmds.ls(members, type="mvUsdCompoundShape") + assert shapes, "Cannot find mvUsdCompoundShape in container" + + path = get_representation_path(representation) + + import multiverse + for shape in shapes: + multiverse.SetUsdCompoundAssetPaths(shape, [path]) + + cmds.setAttr("{}.representation".format(node), + str(representation["_id"]), + type="string") + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + # type: (dict) -> None + """Remove loaded container.""" + # Delete container and its contents + if cmds.objExists(container['objectName']): + members = cmds.sets(container['objectName'], query=True) or [] + cmds.delete([container['objectName']] + members) + + # Remove the namespace, if empty + namespace = container['namespace'] + if cmds.namespace(exists=namespace): + members = cmds.namespaceInfo(namespace, listNamespace=True) + if not members: + cmds.namespace(removeNamespace=namespace) + else: + self.log.warning("Namespace not deleted because it " + "still has members: %s", namespace) diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index 04a25f6493..a8875cf216 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ b/openpype/hosts/maya/plugins/load/load_reference.py @@ -1,10 +1,12 @@ import os from maya import cmds -from avalon import api from openpype.api import get_project_settings from openpype.lib import get_creator_by_name -from openpype.pipeline import legacy_create +from openpype.pipeline import ( + legacy_io, + legacy_create, +) import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api.lib import maintained_selection @@ -22,7 +24,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): "camera", "rig", "camerarig", - "xgen"] + "xgen", + "staticMesh"] representations = ["ma", "abc", "fbx", "mb"] label = "Reference" @@ -142,7 +145,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): roots = cmds.ls(self[:], assemblies=True, long=True) assert roots, "No root nodes in rig, this is a bug." 
- asset = api.Session["AVALON_ASSET"] + asset = legacy_io.Session["AVALON_ASSET"] dependency = str(context["representation"]["_id"]) self.log.info("Creating subset: {}".format(namespace)) diff --git a/openpype/hosts/maya/plugins/load/load_vrayproxy.py b/openpype/hosts/maya/plugins/load/load_vrayproxy.py index 5b79b1efb3..22d56139f6 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayproxy.py +++ b/openpype/hosts/maya/plugins/load/load_vrayproxy.py @@ -7,11 +7,13 @@ loader will use them instead of native vray vrmesh format. """ import os +from bson.objectid import ObjectId + import maya.cmds as cmds -from avalon import io from openpype.api import get_project_settings from openpype.pipeline import ( + legacy_io, load, get_representation_path ) @@ -183,12 +185,11 @@ class VRayProxyLoader(load.LoaderPlugin): """ self.log.debug( "Looking for abc in published representations of this version.") - abc_rep = io.find_one( - { - "type": "representation", - "parent": io.ObjectId(version_id), - "name": "abc" - }) + abc_rep = legacy_io.find_one({ + "type": "representation", + "parent": ObjectId(version_id), + "name": "abc" + }) if abc_rep: self.log.debug("Found, we'll link alembic to vray proxy.") diff --git a/openpype/hosts/maya/plugins/load/load_yeti_cache.py b/openpype/hosts/maya/plugins/load/load_yeti_cache.py index c64e1c540b..fb903785ae 100644 --- a/openpype/hosts/maya/plugins/load/load_yeti_cache.py +++ b/openpype/hosts/maya/plugins/load/load_yeti_cache.py @@ -7,9 +7,9 @@ from pprint import pprint from maya import cmds -from avalon import io from openpype.api import get_project_settings from openpype.pipeline import ( + legacy_io, load, get_representation_path ) @@ -111,11 +111,11 @@ class YetiCacheLoader(load.LoaderPlugin): def update(self, container, representation): - io.install() + legacy_io.install() namespace = container["namespace"] container_node = container["objectName"] - fur_settings = io.find_one( + fur_settings = legacy_io.find_one( {"parent": representation["parent"], "name": "fursettings"} ) diff --git a/openpype/hosts/maya/plugins/publish/clean_nodes.py b/openpype/hosts/maya/plugins/publish/clean_nodes.py deleted file mode 100644 index 03995cdabe..0000000000 --- a/openpype/hosts/maya/plugins/publish/clean_nodes.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -"""Cleanup leftover nodes.""" -from maya import cmds # noqa -import pyblish.api - - -class CleanNodesUp(pyblish.api.InstancePlugin): - """Cleans up the staging directory after a successful publish. - - This will also clean published renders and delete their parent directories. 
- - """ - - order = pyblish.api.IntegratorOrder + 10 - label = "Clean Nodes" - optional = True - active = True - - def process(self, instance): - if not instance.data.get("cleanNodes"): - self.log.info("Nothing to clean.") - return - - nodes_to_clean = instance.data.pop("cleanNodes", []) - self.log.info("Removing {} nodes".format(len(nodes_to_clean))) - for node in nodes_to_clean: - try: - cmds.delete(node) - except ValueError: - # object might be already deleted, don't complain about it - pass diff --git a/openpype/hosts/maya/plugins/publish/collect_ass.py b/openpype/hosts/maya/plugins/publish/collect_ass.py index 8e6691120a..7c9a1b76fb 100644 --- a/openpype/hosts/maya/plugins/publish/collect_ass.py +++ b/openpype/hosts/maya/plugins/publish/collect_ass.py @@ -1,23 +1,16 @@ from maya import cmds -import pymel.core as pm import pyblish.api -import avalon.api + class CollectAssData(pyblish.api.InstancePlugin): - """Collect Ass data - - """ + """Collect Ass data.""" order = pyblish.api.CollectorOrder + 0.2 label = 'Collect Ass' families = ["ass"] def process(self, instance): - - - context = instance.context - objsets = instance.data['setMembers'] for objset in objsets: diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index a525b562f3..912fe179dd 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -49,8 +49,8 @@ import maya.app.renderSetup.model.renderSetup as renderSetup import pyblish.api -from avalon import api from openpype.lib import get_formatted_current_time +from openpype.pipeline import legacy_io from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products # noqa: E501 from openpype.hosts.maya.api import lib @@ -93,7 +93,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): render_globals = render_instance collected_render_layers = render_instance.data["setMembers"] filepath = context.data["currentFile"].replace("\\", "/") - asset = api.Session["AVALON_ASSET"] + asset = legacy_io.Session["AVALON_ASSET"] workspace = context.data["workspaceDir"] deadline_settings = ( @@ -208,6 +208,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin): product) }) + has_cameras = any(product.camera for product in render_products) + assert has_cameras, "No render cameras found." 
+ self.log.info("multipart: {}".format( multipart)) assert exp_files, "no file names were generated, this is bug" @@ -386,6 +389,12 @@ class CollectMayaRender(pyblish.api.ContextPlugin): overrides = self.parse_options(str(render_globals)) data.update(**overrides) + # get string values for pools + primary_pool = overrides["renderGlobals"]["Pool"] + secondary_pool = overrides["renderGlobals"].get("SecondaryPool") + data["primaryPool"] = primary_pool + data["secondaryPool"] = secondary_pool + # Define nice label label = "{0} ({1})".format(expected_layer_name, data["asset"]) label += " [{0}-{1}]".format( diff --git a/openpype/hosts/maya/plugins/publish/collect_review.py b/openpype/hosts/maya/plugins/publish/collect_review.py index 60183341f9..1af92c3bfc 100644 --- a/openpype/hosts/maya/plugins/publish/collect_review.py +++ b/openpype/hosts/maya/plugins/publish/collect_review.py @@ -2,7 +2,8 @@ from maya import cmds, mel import pymel.core as pm import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io class CollectReview(pyblish.api.InstancePlugin): @@ -19,7 +20,7 @@ class CollectReview(pyblish.api.InstancePlugin): self.log.debug('instance: {}'.format(instance)) - task = avalon.api.Session["AVALON_TASK"] + task = legacy_io.Session["AVALON_TASK"] # get cameras members = instance.data['setMembers'] diff --git a/openpype/hosts/maya/plugins/publish/collect_unreal_skeletalmesh.py b/openpype/hosts/maya/plugins/publish/collect_unreal_skeletalmesh.py new file mode 100644 index 0000000000..79693bb35e --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_unreal_skeletalmesh.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +from maya import cmds # noqa +import pyblish.api + + +class CollectUnrealSkeletalMesh(pyblish.api.InstancePlugin): + """Collect Unreal Skeletal Mesh.""" + + order = pyblish.api.CollectorOrder + 0.2 + label = "Collect Unreal Skeletal Meshes" + families = ["skeletalMesh"] + + def process(self, instance): + frame = cmds.currentTime(query=True) + instance.data["frameStart"] = frame + instance.data["frameEnd"] = frame + + geo_sets = [ + i for i in instance[:] + if i.lower().startswith("geometry_set") + ] + + joint_sets = [ + i for i in instance[:] + if i.lower().startswith("joints_set") + ] + + instance.data["geometry"] = [] + instance.data["joints"] = [] + + for geo_set in geo_sets: + geo_content = cmds.ls(cmds.sets(geo_set, query=True), long=True) + if geo_content: + instance.data["geometry"] += geo_content + + for join_set in joint_sets: + join_content = cmds.ls(cmds.sets(join_set, query=True), long=True) + if join_content: + instance.data["joints"] += join_content diff --git a/openpype/hosts/maya/plugins/publish/collect_unreal_staticmesh.py b/openpype/hosts/maya/plugins/publish/collect_unreal_staticmesh.py index b1fb0542f2..79d0856fa0 100644 --- a/openpype/hosts/maya/plugins/publish/collect_unreal_staticmesh.py +++ b/openpype/hosts/maya/plugins/publish/collect_unreal_staticmesh.py @@ -1,38 +1,36 @@ # -*- coding: utf-8 -*- -from maya import cmds +from maya import cmds # noqa import pyblish.api +from pprint import pformat class CollectUnrealStaticMesh(pyblish.api.InstancePlugin): - """Collect Unreal Static Mesh - - Ensures always only a single frame is extracted (current frame). This - also sets correct FBX options for later extraction. 
- - """ + """Collect Unreal Static Mesh.""" order = pyblish.api.CollectorOrder + 0.2 label = "Collect Unreal Static Meshes" - families = ["unrealStaticMesh"] + families = ["staticMesh"] def process(self, instance): - # add fbx family to trigger fbx extractor - instance.data["families"].append("fbx") - # take the name from instance (without the `S_` prefix) - instance.data["staticMeshCombinedName"] = instance.name[2:] - - geometry_set = [i for i in instance if i == "geometry_SET"] - instance.data["membersToCombine"] = cmds.sets( + geometry_set = [ + i for i in instance + if i.startswith("geometry_SET") + ] + instance.data["geometryMembers"] = cmds.sets( geometry_set, query=True) - collision_set = [i for i in instance if i == "collisions_SET"] + self.log.info("geometry: {}".format( + pformat(instance.data.get("geometryMembers")))) + + collision_set = [ + i for i in instance + if i.startswith("collisions_SET") + ] instance.data["collisionMembers"] = cmds.sets( collision_set, query=True) - # set fbx overrides on instance - instance.data["smoothingGroups"] = True - instance.data["smoothMesh"] = True - instance.data["triangulate"] = True + self.log.info("collisions: {}".format( + pformat(instance.data.get("collisionMembers")))) frame = cmds.currentTime(query=True) instance.data["frameStart"] = frame diff --git a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py index 327fc836dc..afdb570cbc 100644 --- a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py +++ b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py @@ -6,7 +6,8 @@ import maya.app.renderSetup.model.renderSetup as renderSetup from maya import cmds import pyblish.api -from avalon import api + +from openpype.pipeline import legacy_io from openpype.lib import get_formatted_current_time from openpype.hosts.maya.api import lib @@ -117,7 +118,7 @@ class CollectVrayScene(pyblish.api.InstancePlugin): # instance subset "family": "vrayscene_layer", "families": ["vrayscene_layer"], - "asset": api.Session["AVALON_ASSET"], + "asset": legacy_io.Session["AVALON_ASSET"], "time": get_formatted_current_time(), "author": context.data["user"], # Add source to allow tracing back to the scene from diff --git a/openpype/hosts/maya/plugins/publish/collect_workfile.py b/openpype/hosts/maya/plugins/publish/collect_workfile.py index ee676f50d0..12d86869ea 100644 --- a/openpype/hosts/maya/plugins/publish/collect_workfile.py +++ b/openpype/hosts/maya/plugins/publish/collect_workfile.py @@ -1,7 +1,8 @@ -import pyblish.api -import avalon.api import os +import pyblish.api + from maya import cmds +from openpype.pipeline import legacy_io class CollectWorkfile(pyblish.api.ContextPlugin): @@ -19,7 +20,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): folder, file = os.path.split(current_file) filename, ext = os.path.splitext(file) - task = avalon.api.Session["AVALON_TASK"] + task = legacy_io.Session["AVALON_TASK"] data = {} diff --git a/openpype/hosts/maya/plugins/publish/extract_fbx.py b/openpype/hosts/maya/plugins/publish/extract_fbx.py index a2adcb3091..fbbe8e06b0 100644 --- a/openpype/hosts/maya/plugins/publish/extract_fbx.py +++ b/openpype/hosts/maya/plugins/publish/extract_fbx.py @@ -5,152 +5,29 @@ from maya import cmds # noqa import maya.mel as mel # noqa import pyblish.api import openpype.api -from openpype.hosts.maya.api.lib import ( - root_parent, - maintained_selection -) +from openpype.hosts.maya.api.lib import maintained_selection + +from openpype.hosts.maya.api import fbx class 
ExtractFBX(openpype.api.Extractor): """Extract FBX from Maya. - This extracts reproducible FBX exports ignoring any of the settings set - on the local machine in the FBX export options window. - - All export settings are applied with the `FBXExport*` commands prior - to the `FBXExport` call itself. The options can be overridden with their - nice names as seen in the "options" property on this class. - - For more information on FBX exports see: - - https://knowledge.autodesk.com/support/maya/learn-explore/caas - /CloudHelp/cloudhelp/2016/ENU/Maya/files/GUID-6CCE943A-2ED4-4CEE-96D4 - -9CB19C28F4E0-htm.html - - http://forums.cgsociety.org/archive/index.php?t-1032853.html - - https://groups.google.com/forum/#!msg/python_inside_maya/cLkaSo361oE - /LKs9hakE28kJ + This extracts reproducible FBX exports ignoring any of the + settings set on the local machine in the FBX export options window. """ - order = pyblish.api.ExtractorOrder label = "Extract FBX" families = ["fbx"] - @property - def options(self): - """Overridable options for FBX Export - - Given in the following format - - {NAME: EXPECTED TYPE} - - If the overridden option's type does not match, - the option is not included and a warning is logged. - - """ - - return { - "cameras": bool, - "smoothingGroups": bool, - "hardEdges": bool, - "tangents": bool, - "smoothMesh": bool, - "instances": bool, - # "referencedContainersContent": bool, # deprecated in Maya 2016+ - "bakeComplexAnimation": int, - "bakeComplexStart": int, - "bakeComplexEnd": int, - "bakeComplexStep": int, - "bakeResampleAnimation": bool, - "animationOnly": bool, - "useSceneName": bool, - "quaternion": str, # "euler" - "shapes": bool, - "skins": bool, - "constraints": bool, - "lights": bool, - "embeddedTextures": bool, - "inputConnections": bool, - "upAxis": str, # x, y or z, - "triangulate": bool - } - - @property - def default_options(self): - """The default options for FBX extraction. - - This includes shapes, skins, constraints, lights and incoming - connections and exports with the Y-axis as up-axis. - - By default this uses the time sliders start and end time. - - """ - - start_frame = int(cmds.playbackOptions(query=True, - animationStartTime=True)) - end_frame = int(cmds.playbackOptions(query=True, - animationEndTime=True)) - - return { - "cameras": False, - "smoothingGroups": False, - "hardEdges": False, - "tangents": False, - "smoothMesh": False, - "instances": False, - "bakeComplexAnimation": True, - "bakeComplexStart": start_frame, - "bakeComplexEnd": end_frame, - "bakeComplexStep": 1, - "bakeResampleAnimation": True, - "animationOnly": False, - "useSceneName": False, - "quaternion": "euler", - "shapes": True, - "skins": True, - "constraints": False, - "lights": True, - "embeddedTextures": True, - "inputConnections": True, - "upAxis": "y", - "triangulate": False - } - - def parse_overrides(self, instance, options): - """Inspect data of instance to determine overridden options - - An instance may supply any of the overridable options - as data, the option is then added to the extraction. 
- - """ - - for key in instance.data: - if key not in self.options: - continue - - # Ensure the data is of correct type - value = instance.data[key] - if not isinstance(value, self.options[key]): - self.log.warning( - "Overridden attribute {key} was of " - "the wrong type: {invalid_type} " - "- should have been {valid_type}".format( - key=key, - invalid_type=type(value).__name__, - valid_type=self.options[key].__name__)) - continue - - options[key] = value - - return options - def process(self, instance): - - # Ensure FBX plug-in is loaded - cmds.loadPlugin("fbxmaya", quiet=True) + fbx_exporter = fbx.FBXExtractor(log=self.log) # Define output path - stagingDir = self.staging_dir(instance) + staging_dir = self.staging_dir(instance) filename = "{0}.fbx".format(instance.name) - path = os.path.join(stagingDir, filename) + path = os.path.join(staging_dir, filename) # The export requires forward slashes because we need # to format it into a string in a mel expression @@ -162,54 +39,13 @@ class ExtractFBX(openpype.api.Extractor): self.log.info("Members: {0}".format(members)) self.log.info("Instance: {0}".format(instance[:])) - # Parse export options - options = self.default_options - options = self.parse_overrides(instance, options) - self.log.info("Export options: {0}".format(options)) - - # Collect the start and end including handles - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - options['bakeComplexStart'] = start - options['bakeComplexEnd'] = end - - # First apply the default export settings to be fully consistent - # each time for successive publishes - mel.eval("FBXResetExport") - - # Apply the FBX overrides through MEL since the commands - # only work correctly in MEL according to online - # available discussions on the topic - _iteritems = getattr(options, "iteritems", options.items) - for option, value in _iteritems(): - key = option[0].upper() + option[1:] # uppercase first letter - - # Boolean must be passed as lower-case strings - # as to MEL standards - if isinstance(value, bool): - value = str(value).lower() - - template = "FBXExport{0} {1}" if key == "UpAxis" else "FBXExport{0} -v {1}" # noqa - cmd = template.format(key, value) - self.log.info(cmd) - mel.eval(cmd) - - # Never show the UI or generate a log - mel.eval("FBXExportShowUI -v false") - mel.eval("FBXExportGenerateLog -v false") + fbx_exporter.set_options_from_instance(instance) # Export - if "unrealStaticMesh" in instance.data["families"]: - with maintained_selection(): - with root_parent(members): - self.log.info("Un-parenting: {}".format(members)) - cmds.select(members, r=1, noExpand=True) - mel.eval('FBXExport -f "{}" -s'.format(path)) - else: - with maintained_selection(): - cmds.select(members, r=1, noExpand=True) - mel.eval('FBXExport -f "{}" -s'.format(path)) + with maintained_selection(): + fbx_exporter.export(members, path) + cmds.select(members, r=1, noExpand=True) + mel.eval('FBXExport -f "{}" -s'.format(path)) if "representations" not in instance.data: instance.data["representations"] = [] @@ -218,7 +54,7 @@ class ExtractFBX(openpype.api.Extractor): 'name': 'fbx', 'ext': 'fbx', 'files': filename, - "stagingDir": stagingDir, + "stagingDir": staging_dir, } instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index a8893072d0..881705b92c 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ 
b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -4,6 +4,7 @@ import os import sys import json import tempfile +import platform import contextlib import subprocess from collections import OrderedDict @@ -11,9 +12,9 @@ from collections import OrderedDict from maya import cmds # noqa import pyblish.api -from avalon import io import openpype.api +from openpype.pipeline import legacy_io from openpype.hosts.maya.api import lib # Modes for transfer @@ -39,7 +40,7 @@ def find_paths_by_hash(texture_hash): """ key = "data.sourceHashes.{0}".format(texture_hash) - return io.distinct(key, {"type": "version"}) + return legacy_io.distinct(key, {"type": "version"}) def maketx(source, destination, *args): @@ -334,7 +335,14 @@ class ExtractLook(openpype.api.Extractor): transfers = [] hardlinks = [] hashes = {} - force_copy = instance.data.get("forceCopy", False) + # Temporary fix to NOT create hardlinks on windows machines + if platform.system().lower() == "windows": + self.log.info( + "Forcing copy instead of hardlink due to issues on Windows..." + ) + force_copy = True + else: + force_copy = instance.data.get("forceCopy", False) for filepath in files_metadata: diff --git a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py index 389995d30c..3a47cdadb5 100644 --- a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py +++ b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py @@ -6,7 +6,7 @@ from maya import cmds import openpype.api from openpype.hosts.maya.api.lib import maintained_selection -from avalon.pipeline import AVALON_CONTAINER_ID +from openpype.pipeline import AVALON_CONTAINER_ID class ExtractMayaSceneRaw(openpype.api.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py new file mode 100644 index 0000000000..4e4efdc32c --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py @@ -0,0 +1,210 @@ +import os +import six + +from maya import cmds + +import openpype.api +from openpype.hosts.maya.api.lib import maintained_selection + + +class ExtractMultiverseUsd(openpype.api.Extractor): + """Extractor for USD by Multiverse.""" + + label = "Extract Multiverse USD" + hosts = ["maya"] + families = ["usd"] + + @property + def options(self): + """Overridable options for Multiverse USD Export + + Given in the following format + - {NAME: EXPECTED TYPE} + + If the overridden option's type does not match, + the option is not included and a warning is logged. 
+ + """ + + return { + "stripNamespaces": bool, + "mergeTransformAndShape": bool, + "writeAncestors": bool, + "flattenParentXforms": bool, + "writeSparseOverrides": bool, + "useMetaPrimPath": bool, + "customRootPath": str, + "customAttributes": str, + "nodeTypesToIgnore": str, + "writeMeshes": bool, + "writeCurves": bool, + "writeParticles": bool, + "writeCameras": bool, + "writeLights": bool, + "writeJoints": bool, + "writeCollections": bool, + "writePositions": bool, + "writeNormals": bool, + "writeUVs": bool, + "writeColorSets": bool, + "writeTangents": bool, + "writeRefPositions": bool, + "writeBlendShapes": bool, + "writeDisplayColor": bool, + "writeSkinWeights": bool, + "writeMaterialAssignment": bool, + "writeHardwareShader": bool, + "writeShadingNetworks": bool, + "writeTransformMatrix": bool, + "writeUsdAttributes": bool, + "timeVaryingTopology": bool, + "customMaterialNamespace": str, + "numTimeSamples": int, + "timeSamplesSpan": float + } + + @property + def default_options(self): + """The default options for Multiverse USD extraction.""" + + return { + "stripNamespaces": False, + "mergeTransformAndShape": False, + "writeAncestors": True, + "flattenParentXforms": False, + "writeSparseOverrides": False, + "useMetaPrimPath": False, + "customRootPath": str(), + "customAttributes": str(), + "nodeTypesToIgnore": str(), + "writeMeshes": True, + "writeCurves": True, + "writeParticles": True, + "writeCameras": False, + "writeLights": False, + "writeJoints": False, + "writeCollections": False, + "writePositions": True, + "writeNormals": True, + "writeUVs": True, + "writeColorSets": False, + "writeTangents": False, + "writeRefPositions": False, + "writeBlendShapes": False, + "writeDisplayColor": False, + "writeSkinWeights": False, + "writeMaterialAssignment": False, + "writeHardwareShader": False, + "writeShadingNetworks": False, + "writeTransformMatrix": True, + "writeUsdAttributes": False, + "timeVaryingTopology": False, + "customMaterialNamespace": str(), + "numTimeSamples": 1, + "timeSamplesSpan": 0.0 + } + + def parse_overrides(self, instance, options): + """Inspect data of instance to determine overridden options""" + + for key in instance.data: + if key not in self.options: + continue + + # Ensure the data is of correct type + value = instance.data[key] + if isinstance(value, six.text_type): + value = str(value) + if not isinstance(value, self.options[key]): + self.log.warning( + "Overridden attribute {key} was of " + "the wrong type: {invalid_type} " + "- should have been {valid_type}".format( + key=key, + invalid_type=type(value).__name__, + valid_type=self.options[key].__name__)) + continue + + options[key] = value + + return options + + def process(self, instance): + # Load plugin firstly + cmds.loadPlugin("MultiverseForMaya", quiet=True) + + # Define output file path + staging_dir = self.staging_dir(instance) + file_name = "{}.usd".format(instance.name) + file_path = os.path.join(staging_dir, file_name) + file_path = file_path.replace('\\', '/') + + # Parse export options + options = self.default_options + options = self.parse_overrides(instance, options) + self.log.info("Export options: {0}".format(options)) + + # Perform extraction + self.log.info("Performing extraction ...") + + with maintained_selection(): + members = instance.data("setMembers") + members = cmds.ls(members, + dag=True, + shapes=True, + type=("mesh"), + noIntermediate=True, + long=True) + self.log.info('Collected object {}'.format(members)) + + import multiverse + + time_opts = None + frame_start = 
instance.data['frameStart'] + frame_end = instance.data['frameEnd'] + handle_start = instance.data['handleStart'] + handle_end = instance.data['handleEnd'] + step = instance.data['step'] + fps = instance.data['fps'] + if frame_end != frame_start: + time_opts = multiverse.TimeOptions() + + time_opts.writeTimeRange = True + time_opts.frameRange = ( + frame_start - handle_start, frame_end + handle_end) + time_opts.frameIncrement = step + time_opts.numTimeSamples = instance.data["numTimeSamples"] + time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"] + time_opts.framePerSecond = fps + + asset_write_opts = multiverse.AssetWriteOptions(time_opts) + options_discard_keys = { + 'numTimeSamples', + 'timeSamplesSpan', + 'frameStart', + 'frameEnd', + 'handleStart', + 'handleEnd', + 'step', + 'fps' + } + for key, value in options.items(): + if key in options_discard_keys: + continue + setattr(asset_write_opts, key, value) + + multiverse.WriteAsset(file_path, members, asset_write_opts) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'usd', + 'ext': 'usd', + 'files': file_name, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation) + + self.log.info("Extracted instance {} to {}".format( + instance.name, file_path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py new file mode 100644 index 0000000000..8fccc412e6 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py @@ -0,0 +1,151 @@ +import os + +from maya import cmds + +import openpype.api +from openpype.hosts.maya.api.lib import maintained_selection + + +class ExtractMultiverseUsdComposition(openpype.api.Extractor): + """Extractor of Multiverse USD Composition.""" + + label = "Extract Multiverse USD Composition" + hosts = ["maya"] + families = ["usdComposition"] + + @property + def options(self): + """Overridable options for Multiverse USD Export + + Given in the following format + - {NAME: EXPECTED TYPE} + + If the overridden option's type does not match, + the option is not included and a warning is logged. 
+ + """ + + return { + "stripNamespaces": bool, + "mergeTransformAndShape": bool, + "flattenContent": bool, + "writePendingOverrides": bool, + "numTimeSamples": int, + "timeSamplesSpan": float + } + + @property + def default_options(self): + """The default options for Multiverse USD extraction.""" + + return { + "stripNamespaces": True, + "mergeTransformAndShape": False, + "flattenContent": False, + "writePendingOverrides": False, + "numTimeSamples": 1, + "timeSamplesSpan": 0.0 + } + + def parse_overrides(self, instance, options): + """Inspect data of instance to determine overridden options""" + + for key in instance.data: + if key not in self.options: + continue + + # Ensure the data is of correct type + value = instance.data[key] + if not isinstance(value, self.options[key]): + self.log.warning( + "Overridden attribute {key} was of " + "the wrong type: {invalid_type} " + "- should have been {valid_type}".format( + key=key, + invalid_type=type(value).__name__, + valid_type=self.options[key].__name__)) + continue + + options[key] = value + + return options + + def process(self, instance): + # Load plugin firstly + cmds.loadPlugin("MultiverseForMaya", quiet=True) + + # Define output file path + staging_dir = self.staging_dir(instance) + file_name = "{}.usd".format(instance.name) + file_path = os.path.join(staging_dir, file_name) + file_path = file_path.replace('\\', '/') + + # Parse export options + options = self.default_options + options = self.parse_overrides(instance, options) + self.log.info("Export options: {0}".format(options)) + + # Perform extraction + self.log.info("Performing extraction ...") + + with maintained_selection(): + members = instance.data("setMembers") + members = cmds.ls(members, + dag=True, + shapes=True, + type="mvUsdCompoundShape", + noIntermediate=True, + long=True) + self.log.info('Collected object {}'.format(members)) + + import multiverse + + time_opts = None + frame_start = instance.data['frameStart'] + frame_end = instance.data['frameEnd'] + handle_start = instance.data['handleStart'] + handle_end = instance.data['handleEnd'] + step = instance.data['step'] + fps = instance.data['fps'] + if frame_end != frame_start: + time_opts = multiverse.TimeOptions() + + time_opts.writeTimeRange = True + time_opts.frameRange = ( + frame_start - handle_start, frame_end + handle_end) + time_opts.frameIncrement = step + time_opts.numTimeSamples = instance.data["numTimeSamples"] + time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"] + time_opts.framePerSecond = fps + + comp_write_opts = multiverse.CompositionWriteOptions() + options_discard_keys = { + 'numTimeSamples', + 'timeSamplesSpan', + 'frameStart', + 'frameEnd', + 'handleStart', + 'handleEnd', + 'step', + 'fps' + } + for key, value in options.items(): + if key in options_discard_keys: + continue + setattr(comp_write_opts, key, value) + + multiverse.WriteComposition(file_path, members, comp_write_opts) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'usd', + 'ext': 'usd', + 'files': file_name, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation) + + self.log.info("Extracted instance {} to {}".format( + instance.name, file_path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py new file mode 100644 index 0000000000..ce0e8a392a --- /dev/null +++ 
b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py @@ -0,0 +1,139 @@ +import os + +import openpype.api +from openpype.hosts.maya.api.lib import maintained_selection + +from maya import cmds + + +class ExtractMultiverseUsdOverride(openpype.api.Extractor): + """Extractor for USD Override by Multiverse.""" + + label = "Extract Multiverse USD Override" + hosts = ["maya"] + families = ["usdOverride"] + + @property + def options(self): + """Overridable options for Multiverse USD Export + + Given in the following format + - {NAME: EXPECTED TYPE} + + If the overridden option's type does not match, + the option is not included and a warning is logged. + + """ + + return { + "writeAll": bool, + "writeTransforms": bool, + "writeVisibility": bool, + "writeAttributes": bool, + "writeMaterials": bool, + "writeVariants": bool, + "writeVariantsDefinition": bool, + "writeActiveState": bool, + "writeNamespaces": bool, + "numTimeSamples": int, + "timeSamplesSpan": float + } + + @property + def default_options(self): + """The default options for Multiverse USD extraction.""" + + return { + "writeAll": False, + "writeTransforms": True, + "writeVisibility": True, + "writeAttributes": True, + "writeMaterials": True, + "writeVariants": True, + "writeVariantsDefinition": True, + "writeActiveState": True, + "writeNamespaces": False, + "numTimeSamples": 1, + "timeSamplesSpan": 0.0 + } + + def process(self, instance): + # Load plugin firstly + cmds.loadPlugin("MultiverseForMaya", quiet=True) + + # Define output file path + staging_dir = self.staging_dir(instance) + file_name = "{}.usda".format(instance.name) + file_path = os.path.join(staging_dir, file_name) + file_path = file_path.replace("\\", "/") + + # Parse export options + options = self.default_options + self.log.info("Export options: {0}".format(options)) + + # Perform extraction + self.log.info("Performing extraction ...") + + with maintained_selection(): + members = instance.data("setMembers") + members = cmds.ls(members, + dag=True, + shapes=True, + type="mvUsdCompoundShape", + noIntermediate=True, + long=True) + self.log.info("Collected object {}".format(members)) + + # TODO: Deal with asset, composition, overide with options. 
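
All three Multiverse extractors in this PR build their time options the same way: a time range is written only when the instance spans more than one frame, and that range is widened by the handles. A condensed sketch of that shared logic, using the instance.data keys and multiverse.TimeOptions attributes exactly as they appear in the calls that follow (the helper function itself is hypothetical):

def build_time_options(data, multiverse):
    """Sketch of the time-range setup shared by the Multiverse extractors."""
    if data["frameEnd"] == data["frameStart"]:
        # Single frame: no time range is written at all.
        return None

    time_opts = multiverse.TimeOptions()
    time_opts.writeTimeRange = True
    # Widen the range by the handles, as the extractors do below.
    time_opts.frameRange = (
        data["frameStart"] - data["handleStart"],
        data["frameEnd"] + data["handleEnd"],
    )
    time_opts.frameIncrement = data["step"]
    time_opts.numTimeSamples = data["numTimeSamples"]
    time_opts.timeSamplesSpan = data["timeSamplesSpan"]
    time_opts.framePerSecond = data["fps"]
    return time_opts
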
+ import multiverse + + time_opts = None + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + step = instance.data["step"] + fps = instance.data["fps"] + if frame_end != frame_start: + time_opts = multiverse.TimeOptions() + + time_opts.writeTimeRange = True + time_opts.frameRange = ( + frame_start - handle_start, frame_end + handle_end) + time_opts.frameIncrement = step + time_opts.numTimeSamples = instance.data["numTimeSamples"] + time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"] + time_opts.framePerSecond = fps + + over_write_opts = multiverse.OverridesWriteOptions(time_opts) + options_discard_keys = { + "numTimeSamples", + "timeSamplesSpan", + "frameStart", + "frameEnd", + "handleStart", + "handleEnd", + "step", + "fps" + } + for key, value in options.items(): + if key in options_discard_keys: + continue + setattr(over_write_opts, key, value) + + for member in members: + multiverse.WriteOverrides(file_path, member, over_write_opts) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "usd", + "ext": "usd", + "files": file_name, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation) + + self.log.info("Extracted instance {} to {}".format( + instance.name, file_path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py index b233a57453..bb1ecf279d 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -73,6 +73,11 @@ class ExtractPlayblast(openpype.api.Extractor): pm.currentTime(refreshFrameInt - 1, edit=True) pm.currentTime(refreshFrameInt, edit=True) + # Override transparency if requested. + transparency = instance.data.get("transparency", 0) + if transparency != 0: + preset["viewport2_options"]["transparencyAlgorithm"] = transparency + # Isolate view is requested by having objects in the set besides a # camera. if preset.pop("isolate_view", False) and instance.data.get("isolate"): diff --git a/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh.py b/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh.py new file mode 100644 index 0000000000..7ef7f2f181 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +"""Create Unreal Skeletal Mesh data to be extracted as FBX.""" +import os +from contextlib import contextmanager + +from maya import cmds # noqa + +import pyblish.api +import openpype.api +from openpype.hosts.maya.api import fbx + + +@contextmanager +def renamed(original_name, renamed_name): + # type: (str, str) -> None + try: + cmds.rename(original_name, renamed_name) + yield + finally: + cmds.rename(renamed_name, original_name) + + +class ExtractUnrealSkeletalMesh(openpype.api.Extractor): + """Extract Unreal Skeletal Mesh as FBX from Maya. 
""" + + order = pyblish.api.ExtractorOrder - 0.1 + label = "Extract Unreal Skeletal Mesh" + families = ["skeletalMesh"] + + def process(self, instance): + fbx_exporter = fbx.FBXExtractor(log=self.log) + + # Define output path + staging_dir = self.staging_dir(instance) + filename = "{0}.fbx".format(instance.name) + path = os.path.join(staging_dir, filename) + + geo = instance.data.get("geometry") + joints = instance.data.get("joints") + + to_extract = geo + joints + + # The export requires forward slashes because we need + # to format it into a string in a mel expression + path = path.replace('\\', '/') + + self.log.info("Extracting FBX to: {0}".format(path)) + self.log.info("Members: {0}".format(to_extract)) + self.log.info("Instance: {0}".format(instance[:])) + + fbx_exporter.set_options_from_instance(instance) + + # This magic is done for variants. To let Unreal merge correctly + # existing data, top node must have the same name. So for every + # variant we extract we need to rename top node of the rig correctly. + # It is finally done in context manager so it won't affect current + # scene. + + # we rely on hierarchy under one root. + original_parent = to_extract[0].split("|")[1] + + parent_node = instance.data.get("asset") + + renamed_to_extract = [] + for node in to_extract: + node_path = node.split("|") + node_path[1] = parent_node + renamed_to_extract.append("|".join(node_path)) + + with renamed(original_parent, parent_node): + self.log.info("Extracting: {}".format(renamed_to_extract, path)) + fbx_exporter.export(renamed_to_extract, path) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'fbx', + 'ext': 'fbx', + 'files': filename, + "stagingDir": staging_dir, + } + instance.data["representations"].append(representation) + + self.log.info("Extract FBX successful to: {0}".format(path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py b/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py index 32dc9d1d1c..69d51f9ff1 100644 --- a/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py +++ b/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py @@ -1,33 +1,61 @@ # -*- coding: utf-8 -*- """Create Unreal Static Mesh data to be extracted as FBX.""" -import openpype.api -import pyblish.api +import os + from maya import cmds # noqa +import pyblish.api +import openpype.api +from openpype.hosts.maya.api.lib import ( + parent_nodes, + maintained_selection +) +from openpype.hosts.maya.api import fbx + class ExtractUnrealStaticMesh(openpype.api.Extractor): - """Extract FBX from Maya. """ + """Extract Unreal Static Mesh as FBX from Maya. 
""" order = pyblish.api.ExtractorOrder - 0.1 label = "Extract Unreal Static Mesh" - families = ["unrealStaticMesh"] + families = ["staticMesh"] def process(self, instance): - to_combine = instance.data.get("membersToCombine") - static_mesh_name = instance.data.get("staticMeshCombinedName") - self.log.info( - "merging {} into {}".format( - " + ".join(to_combine), static_mesh_name)) - duplicates = cmds.duplicate(to_combine, ic=True) - cmds.polyUnite( - *duplicates, - n=static_mesh_name, ch=False) + members = instance.data.get("geometryMembers", []) + if instance.data.get("collisionMembers"): + members = members + instance.data.get("collisionMembers") - if not instance.data.get("cleanNodes"): - instance.data["cleanNodes"] = [] + fbx_exporter = fbx.FBXExtractor(log=self.log) - instance.data["cleanNodes"].append(static_mesh_name) - instance.data["cleanNodes"] += duplicates + # Define output path + staging_dir = self.staging_dir(instance) + filename = "{0}.fbx".format(instance.name) + path = os.path.join(staging_dir, filename) - instance.data["setMembers"] = [static_mesh_name] - instance.data["setMembers"] += instance.data["collisionMembers"] + # The export requires forward slashes because we need + # to format it into a string in a mel expression + path = path.replace('\\', '/') + + self.log.info("Extracting FBX to: {0}".format(path)) + self.log.info("Members: {0}".format(members)) + self.log.info("Instance: {0}".format(instance[:])) + + fbx_exporter.set_options_from_instance(instance) + + with maintained_selection(): + with parent_nodes(members): + self.log.info("Un-parenting: {}".format(members)) + fbx_exporter.export(members, path) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'fbx', + 'ext': 'fbx', + 'files': filename, + "stagingDir": staging_dir, + } + instance.data["representations"].append(representation) + + self.log.info("Extract FBX successful to: {0}".format(path)) diff --git a/openpype/hosts/maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml b/openpype/hosts/maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml new file mode 100644 index 0000000000..d30c4cb69d --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml @@ -0,0 +1,14 @@ + + + +Skeletal Mesh Top Node +## Skeletal meshes needs common root + +Skeletal meshes and their joints must be under one common root. + +### How to repair? + +Make sure all geometry and joints resides under same root. 
+ + + diff --git a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py index f852904580..c4250a20bd 100644 --- a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py +++ b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py @@ -4,14 +4,13 @@ import getpass import platform import appdirs -import requests from maya import cmds -from avalon import api - import pyblish.api +from openpype.lib import requests_post from openpype.hosts.maya.api import lib +from openpype.pipeline import legacy_io from openpype.api import get_system_settings @@ -184,7 +183,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "select": "name" } api_entry = '/api/templates/list' - response = self._requests_post( + response = requests_post( self.MUSTER_REST_URL + api_entry, params=params) if response.status_code != 200: self.log.error( @@ -235,7 +234,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "name": "submit" } api_entry = '/api/queue/actions' - response = self._requests_post( + response = requests_post( self.MUSTER_REST_URL + api_entry, params=params, json=payload) if response.status_code != 200: @@ -489,7 +488,6 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "MAYA_RENDER_DESC_PATH", "MAYA_MODULE_PATH", "ARNOLD_PLUGIN_PATH", - "AVALON_SCHEMA", "FTRACK_API_KEY", "FTRACK_API_USER", "FTRACK_SERVER", @@ -503,7 +501,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "TOOL_ENV" ] environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) # self.log.debug("enviro: {}".format(pprint(environment))) for path in os.environ: if path.lower().startswith('pype_'): @@ -548,17 +546,3 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "%f=%d was rounded off to nearest integer" % (value, int(value)) ) - - def _requests_post(self, *args, **kwargs): - """ Wrapper for requests, disabling SSL certificate validation if - DONT_VERIFY_SSL environment variable is found. This is useful when - Deadline or Muster server are running with self-signed certificates - and their certificate is not added to trusted certificates on - client machines. - - WARNING: disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. 
- """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa - return requests.post(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/publish/validate_camera_contents.py b/openpype/hosts/maya/plugins/publish/validate_camera_contents.py index d9e88edaac..20af8d2315 100644 --- a/openpype/hosts/maya/plugins/publish/validate_camera_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_camera_contents.py @@ -40,7 +40,14 @@ class ValidateCameraContents(pyblish.api.InstancePlugin): # list when there are no actual cameras results in # still an empty 'invalid' list if len(cameras) < 1: - raise RuntimeError("No cameras in instance.") + if members: + # If there are members in the instance return all of + # them as 'invalid' so the user can still select invalid + cls.log.error("No cameras found in instance " + "members: {}".format(members)) + return members + + raise RuntimeError("No cameras found in empty instance.") # non-camera shapes valid_shapes = cmds.ls(shapes, type=('camera', 'locator'), long=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_model_name.py b/openpype/hosts/maya/plugins/publish/validate_model_name.py index 3757e13a9b..50acf2b8b7 100644 --- a/openpype/hosts/maya/plugins/publish/validate_model_name.py +++ b/openpype/hosts/maya/plugins/publish/validate_model_name.py @@ -1,16 +1,17 @@ # -*- coding: utf-8 -*- """Validate model nodes names.""" +import os +import re from maya import cmds import pyblish.api + import openpype.api -import avalon.api +from openpype.pipeline import legacy_io import openpype.hosts.maya.api.action from openpype.hosts.maya.api.shader_definition_editor import ( DEFINITION_FILENAME) from openpype.lib.mongo import OpenPypeMongoConnection import gridfs -import re -import os class ValidateModelName(pyblish.api.InstancePlugin): @@ -68,7 +69,7 @@ class ValidateModelName(pyblish.api.InstancePlugin): invalid.append(top_group) else: if "asset" in r.groupindex: - if m.group("asset") != avalon.api.Session["AVALON_ASSET"]: + if m.group("asset") != legacy_io.Session["AVALON_ASSET"]: cls.log.error("Invalid asset name in top level group.") return top_group if "subset" in r.groupindex: @@ -76,7 +77,7 @@ class ValidateModelName(pyblish.api.InstancePlugin): cls.log.error("Invalid subset name in top level group.") return top_group if "project" in r.groupindex: - if m.group("project") != avalon.api.Session["AVALON_PROJECT"]: + if m.group("project") != legacy_io.Session["AVALON_PROJECT"]: cls.log.error("Invalid project name in top level group.") return top_group diff --git a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py b/openpype/hosts/maya/plugins/publish/validate_muster_connection.py index af32c82f97..6dc7bd3bc4 100644 --- a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py +++ b/openpype/hosts/maya/plugins/publish/validate_muster_connection.py @@ -2,9 +2,9 @@ import os import json import appdirs -import requests import pyblish.api +from openpype.lib import requests_get from openpype.plugin import contextplugin_should_run import openpype.hosts.maya.api.action @@ -51,7 +51,7 @@ class ValidateMusterConnection(pyblish.api.ContextPlugin): 'authToken': self._token } api_entry = '/api/pools/list' - response = self._requests_get( + response = requests_get( MUSTER_REST_URL + api_entry, params=params) assert response.status_code == 200, "invalid response from server" assert response.json()['ResponseData'], "invalid data in response" @@ -88,35 +88,7 @@ class 
ValidateMusterConnection(pyblish.api.ContextPlugin): api_url = "{}/muster/show_login".format( os.environ["OPENPYPE_WEBSERVER_URL"]) cls.log.debug(api_url) - response = cls._requests_get(api_url, timeout=1) + response = requests_get(api_url, timeout=1) if response.status_code != 200: cls.log.error('Cannot show login form to Muster') raise Exception('Cannot show login form to Muster') - - def _requests_post(self, *args, **kwargs): - """ Wrapper for requests, disabling SSL certificate validation if - DONT_VERIFY_SSL environment variable is found. This is useful when - Deadline or Muster server are running with self-signed certificates - and their certificate is not added to trusted certificates on - client machines. - - WARNING: disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """ Wrapper for requests, disabling SSL certificate validation if - DONT_VERIFY_SSL environment variable is found. This is useful when - Deadline or Muster server are running with self-signed certificates - and their certificate is not added to trusted certificates on - client machines. - - WARNING: disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa - return requests.get(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py index c5f675c8ca..068d6b38a1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py @@ -1,8 +1,7 @@ import pyblish.api -from avalon import io - import openpype.api +from openpype.pipeline import legacy_io import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib @@ -43,7 +42,7 @@ class ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin): nodes=instance[:]) # check ids against database ids - db_asset_ids = io.find({"type": "asset"}).distinct("_id") + db_asset_ids = legacy_io.find({"type": "asset"}).distinct("_id") db_asset_ids = set(str(i) for i in db_asset_ids) # Get all asset IDs diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py index 276b6713f4..38407e4176 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py @@ -1,9 +1,8 @@ import pyblish.api import openpype.api -from avalon import io +from openpype.pipeline import legacy_io import openpype.hosts.maya.api.action - from openpype.hosts.maya.api import lib @@ -38,7 +37,7 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin): invalid = list() asset = instance.data['asset'] - asset_data = io.find_one( + asset_data = legacy_io.find_one( { "name": asset, "type": "asset" diff --git a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py b/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py index 4eb445ac68..e65150eb0f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py +++ 
b/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py @@ -1,7 +1,7 @@ import pyblish.api import openpype.hosts.maya.api.action -from avalon import io +from openpype.pipeline import legacy_io import openpype.api @@ -48,8 +48,8 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin): def validate_subset_registered(self, asset_name, subset_name): """Check if subset is registered in the database under the asset""" - asset = io.find_one({"type": "asset", "name": asset_name}) - is_valid = io.find_one({ + asset = legacy_io.find_one({"type": "asset", "name": asset_name}) + is_valid = legacy_io.find_one({ "type": "subset", "name": subset_name, "parent": asset["_id"] diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index e24e88cab7..023e27de17 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -69,14 +69,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): redshift_AOV_prefix = "/{aov_separator}" # noqa: E501 - # WARNING: There is bug? in renderman, translating token - # to something left behind mayas default image prefix. So instead - # `SceneName_v01` it translates to: - # `SceneName_v01//` that means - # for example: - # `SceneName_v01/Main/Main_`. Possible solution is to define - # custom token like to point to determined scene name. - RendermanDirPrefix = "/renders/maya//" + renderman_dir_prefix = "maya//" R_AOV_TOKEN = re.compile( r'%a||', re.IGNORECASE) @@ -116,15 +109,22 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): prefix = prefix.replace( "{aov_separator}", instance.data.get("aovSeparator", "_")) + + required_prefix = "maya/" + if not anim_override: invalid = True cls.log.error("Animation needs to be enabled. 
Use the same " "frame for start and end to render single frame") - if not prefix.lower().startswith("maya/"): + if renderer != "renderman" and not prefix.lower().startswith( + required_prefix): invalid = True - cls.log.error("Wrong image prefix [ {} ] - " - "doesn't start with: 'maya/'".format(prefix)) + cls.log.error( + ("Wrong image prefix [ {} ] " + " - doesn't start with: '{}'").format( + prefix, required_prefix) + ) if not re.search(cls.R_LAYER_TOKEN, prefix): invalid = True @@ -198,7 +198,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): invalid = True cls.log.error("Wrong image prefix [ {} ]".format(file_prefix)) - if dir_prefix.lower() != cls.RendermanDirPrefix.lower(): + if dir_prefix.lower() != cls.renderman_dir_prefix.lower(): invalid = True cls.log.error("Wrong directory prefix [ {} ]".format( dir_prefix)) @@ -304,7 +304,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): default_prefix, type="string") cmds.setAttr("rmanGlobals.imageOutputDir", - cls.RendermanDirPrefix, + cls.renderman_dir_prefix, type="string") if renderer == "vray": diff --git a/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py b/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py new file mode 100644 index 0000000000..54a86d27cf --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +import pyblish.api +import openpype.api +from openpype.pipeline import PublishXmlValidationError + +from maya import cmds + + +class ValidateSkeletalMeshHierarchy(pyblish.api.InstancePlugin): + """Validates that nodes has common root.""" + + order = openpype.api.ValidateContentsOrder + hosts = ["maya"] + families = ["skeletalMesh"] + label = "Skeletal Mesh Top Node" + + def process(self, instance): + geo = instance.data.get("geometry") + joints = instance.data.get("joints") + + joints_parents = cmds.ls(joints, long=True) + geo_parents = cmds.ls(geo, long=True) + + parents_set = { + parent.split("|")[1] for parent in (joints_parents + geo_parents) + } + + if len(set(parents_set)) != 1: + raise PublishXmlValidationError( + self, + "Multiple roots on geometry or joints." 
+ ) diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py b/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py index b2ef174374..c05121a1b0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py @@ -10,10 +10,11 @@ class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin): order = openpype.api.ValidateMeshOrder hosts = ["maya"] - families = ["unrealStaticMesh"] + families = ["staticMesh"] category = "geometry" label = "Mesh is Triangulated" actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + active = False @classmethod def get_invalid(cls, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py index 901a2ec75e..33788d1835 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- +"""Validator for correct naming of Static Meshes.""" +import re -from maya import cmds # noqa import pyblish.api import openpype.api import openpype.hosts.maya.api.action -from avalon.api import Session +from openpype.pipeline import legacy_io from openpype.api import get_project_settings -import re class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): @@ -52,8 +52,8 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): optional = True order = openpype.api.ValidateContentsOrder hosts = ["maya"] - families = ["unrealStaticMesh"] - label = "Unreal StaticMesh Name" + families = ["staticMesh"] + label = "Unreal Static Mesh Name" actions = [openpype.hosts.maya.api.action.SelectInvalidAction] regex_mesh = r"(?P.*))" regex_collision = r"(?P.*)" @@ -63,7 +63,9 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): invalid = [] - project_settings = get_project_settings(Session["AVALON_PROJECT"]) + project_settings = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) collision_prefixes = ( project_settings ["maya"] @@ -72,15 +74,13 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): ["collision_prefixes"] ) - combined_geometry_name = instance.data.get( - "staticMeshCombinedName", None) if cls.validate_mesh: # compile regex for testing names regex_mesh = "{}{}".format( ("_" + cls.static_mesh_prefix) or "", cls.regex_mesh ) sm_r = re.compile(regex_mesh) - if not sm_r.match(combined_geometry_name): + if not sm_r.match(instance.data.get("subset")): cls.log.error("Mesh doesn't comply with name validation.") return True @@ -91,7 +91,7 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): cls.log.warning("No collision objects to validate.") return False - regex_collision = "{}{}".format( + regex_collision = "{}{}_(\\d+)".format( "(?P({}))_".format( "|".join("{0}".format(p) for p in collision_prefixes) ) or "", cls.regex_collision @@ -99,6 +99,9 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): cl_r = re.compile(regex_collision) + mesh_name = "{}{}".format(instance.data["asset"], + instance.data.get("variant", [])) + for obj in collision_set: cl_m = cl_r.match(obj) if not cl_m: @@ -107,7 +110,7 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): else: expected_collision = "{}_{}".format( cl_m.group("prefix"), - combined_geometry_name + mesh_name ) if not 
obj.startswith(expected_collision): @@ -116,11 +119,11 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): "Collision object name doesn't match " "static mesh name" ) - cls.log.error("{}_{} != {}_{}".format( + cls.log.error("{}_{} != {}_{}*".format( cl_m.group("prefix"), cl_m.group("renderName"), cl_m.group("prefix"), - combined_geometry_name, + mesh_name, )) invalid.append(obj) diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py b/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py index 5a8c29c22d..5e1b04889f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py @@ -9,9 +9,10 @@ class ValidateUnrealUpAxis(pyblish.api.ContextPlugin): """Validate if Z is set as up axis in Maya""" optional = True + active = False order = openpype.api.ValidateContentsOrder hosts = ["maya"] - families = ["unrealStaticMesh"] + families = ["staticMesh"] label = "Unreal Up-Axis check" actions = [openpype.api.RepairAction] diff --git a/openpype/hosts/maya/startup/userSetup.py b/openpype/hosts/maya/startup/userSetup.py index b89244817a..a3ab483add 100644 --- a/openpype/hosts/maya/startup/userSetup.py +++ b/openpype/hosts/maya/startup/userSetup.py @@ -1,11 +1,10 @@ import os -import avalon.api from openpype.api import get_project_settings +from openpype.pipeline import install_host from openpype.hosts.maya import api -import openpype.hosts.maya.api.lib as mlib from maya import cmds -avalon.api.install(api) +install_host(api) print("starting OpenPype usersetup") diff --git a/openpype/hosts/nuke/__init__.py b/openpype/hosts/nuke/__init__.py index 60b37ce1dd..134a6621c4 100644 --- a/openpype/hosts/nuke/__init__.py +++ b/openpype/hosts/nuke/__init__.py @@ -10,7 +10,7 @@ def add_implementation_envs(env, _app): ] old_nuke_path = env.get("NUKE_PATH") or "" for path in old_nuke_path.split(os.pathsep): - if not path or not os.path.exists(path): + if not path: continue norm_path = os.path.normpath(path) diff --git a/openpype/hosts/nuke/api/command.py b/openpype/hosts/nuke/api/command.py index 212d4757c6..c756c48a12 100644 --- a/openpype/hosts/nuke/api/command.py +++ b/openpype/hosts/nuke/api/command.py @@ -1,9 +1,9 @@ import logging import contextlib import nuke +from bson.objectid import ObjectId -from avalon import api, io - +from openpype.pipeline import legacy_io log = logging.getLogger(__name__) @@ -14,11 +14,11 @@ def reset_frame_range(): displayed handles """ - fps = float(api.Session.get("AVALON_FPS", 25)) + fps = float(legacy_io.Session.get("AVALON_FPS", 25)) nuke.root()["fps"].setValue(fps) - name = api.Session["AVALON_ASSET"] - asset = io.find_one({"name": name, "type": "asset"}) + name = legacy_io.Session["AVALON_ASSET"] + asset = legacy_io.find_one({"name": name, "type": "asset"}) asset_data = asset["data"] handles = get_handles(asset) @@ -70,10 +70,10 @@ def get_handles(asset): if "visualParent" in data: vp = data["visualParent"] if vp is not None: - parent_asset = io.find_one({"_id": io.ObjectId(vp)}) + parent_asset = legacy_io.find_one({"_id": ObjectId(vp)}) if parent_asset is None: - parent_asset = io.find_one({"_id": io.ObjectId(asset["parent"])}) + parent_asset = legacy_io.find_one({"_id": ObjectId(asset["parent"])}) if parent_asset is not None: return get_handles(parent_asset) @@ -83,7 +83,7 @@ def get_handles(asset): def reset_resolution(): """Set resolution to project resolution.""" - project = io.find_one({"type": "project"}) + project = 
legacy_io.find_one({"type": "project"}) p_data = project["data"] width = p_data.get("resolution_width", diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index dba7ec1b85..4e38f811c9 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -6,11 +6,10 @@ import contextlib from collections import OrderedDict import clique +from bson.objectid import ObjectId import nuke -from avalon import api, io, lib - from openpype.api import ( Logger, Anatomy, @@ -20,12 +19,15 @@ from openpype.api import ( get_workdir_data, get_asset, get_current_project_settings, - ApplicationManager ) from openpype.tools.utils import host_tools from openpype.lib.path_tools import HostDirmap from openpype.settings import get_project_settings from openpype.modules import ModulesManager +from openpype.pipeline import ( + discover_legacy_creator_plugins, + legacy_io, +) from .workio import ( save_file, @@ -568,9 +570,9 @@ def check_inventory_versions(): avalon_knob_data = read(node) # get representation from io - representation = io.find_one({ + representation = legacy_io.find_one({ "type": "representation", - "_id": io.ObjectId(avalon_knob_data["representation"]) + "_id": ObjectId(avalon_knob_data["representation"]) }) # Failsafe for not finding the representation. @@ -582,13 +584,13 @@ def check_inventory_versions(): continue # Get start frame from version data - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') @@ -725,8 +727,8 @@ def format_anatomy(data): file = script_name() data["version"] = get_version_from_path(file) - project_doc = io.find_one({"type": "project"}) - asset_doc = io.find_one({ + project_doc = legacy_io.find_one({"type": "project"}) + asset_doc = legacy_io.find_one({ "type": "asset", "name": data["avalon"]["asset"] }) @@ -1047,17 +1049,36 @@ def add_review_knob(node): def add_deadline_tab(node): node.addKnob(nuke.Tab_Knob("Deadline")) - knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size") - knob.setValue(0) - node.addKnob(knob) - knob = nuke.Int_Knob("deadlinePriority", "Priority") knob.setValue(50) node.addKnob(knob) + knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size") + knob.setValue(0) + node.addKnob(knob) + + knob = nuke.Int_Knob("deadlineConcurrentTasks", "Concurrent tasks") + # zero as default will get value from Settings during collection + # instead of being an explicit user override, see precollect_write.py + knob.setValue(0) + node.addKnob(knob) + + knob = nuke.Text_Knob("divd", '') + knob.setValue('') + node.addKnob(knob) + + knob = nuke.Boolean_Knob("suspend_publish", "Suspend publish") + knob.setValue(False) + node.addKnob(knob) + def get_deadline_knob_names(): - return ["Deadline", "deadlineChunkSize", "deadlinePriority"] + return [ + "Deadline", + "deadlineChunkSize", + "deadlinePriority", + "deadlineConcurrentTasks" + ] def create_backdrop(label="", color=None, layer=0, @@ -1126,8 +1147,11 @@ class WorkfileSettings(object): nodes=None, **kwargs): Context._project_doc = kwargs.get( - "project") or io.find_one({"type": "project"}) - self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"] + "project") or legacy_io.find_one({"type": "project"}) + self._asset = ( + kwargs.get("asset_name") + or legacy_io.Session["AVALON_ASSET"] + ) self._asset_entity = get_asset(self._asset) self._root_node = root_node or 
nuke.root() self._nodes = self.get_nodes(nodes=nodes) @@ -1474,9 +1498,9 @@ class WorkfileSettings(object): def reset_resolution(self): """Set resolution to project resolution.""" log.info("Resetting resolution") - project = io.find_one({"type": "project"}) - asset = api.Session["AVALON_ASSET"] - asset = io.find_one({"name": asset, "type": "asset"}) + project = legacy_io.find_one({"type": "project"}) + asset = legacy_io.Session["AVALON_ASSET"] + asset = legacy_io.find_one({"name": asset, "type": "asset"}) asset_data = asset.get('data', {}) data = { @@ -1596,7 +1620,7 @@ def get_hierarchical_attr(entity, attr, default=None): ): parent_id = entity['data']['visualParent'] - parent = io.find_one({'_id': parent_id}) + parent = legacy_io.find_one({'_id': parent_id}) return get_hierarchical_attr(parent, attr) @@ -1902,7 +1926,7 @@ def recreate_instance(origin_node, avalon_data=None): # create new node # get appropriate plugin class creator_plugin = None - for Creator in api.discover(api.Creator): + for Creator in discover_legacy_creator_plugins(): if Creator.__name__ == data["creator"]: creator_plugin = Creator break diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py index fd2e16b8d3..0194acd196 100644 --- a/openpype/hosts/nuke/api/pipeline.py +++ b/openpype/hosts/nuke/api/pipeline.py @@ -5,8 +5,6 @@ from collections import OrderedDict import nuke import pyblish.api -import avalon.api -from avalon import pipeline import openpype from openpype.api import ( @@ -16,9 +14,13 @@ from openpype.api import ( ) from openpype.lib import register_event_callback from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, + register_inventory_action_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, + deregister_inventory_action_path, + AVALON_CONTAINER_ID, ) from openpype.tools.utils import host_tools @@ -36,7 +38,6 @@ from .lib import ( log = Logger.get_logger(__name__) -AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.nuke.__file__)) PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") @@ -77,11 +78,11 @@ def reload_config(): """ for module in ( - "{}.api".format(AVALON_CONFIG), - "{}.hosts.nuke.api.actions".format(AVALON_CONFIG), - "{}.hosts.nuke.api.menu".format(AVALON_CONFIG), - "{}.hosts.nuke.api.plugin".format(AVALON_CONFIG), - "{}.hosts.nuke.api.lib".format(AVALON_CONFIG), + "openpype.api", + "openpype.hosts.nuke.api.actions", + "openpype.hosts.nuke.api.menu", + "openpype.hosts.nuke.api.plugin", + "openpype.hosts.nuke.api.lib", ): log.info("Reloading module: {}...".format(module)) @@ -104,8 +105,8 @@ def install(): log.info("Registering Nuke plug-ins..") pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) + register_creator_plugin_path(CREATE_PATH) + register_inventory_action_path(INVENTORY_PATH) # Register Avalon event for workfiles loading. 
register_event_callback("workio.open_file", check_inventory_versions) @@ -130,7 +131,8 @@ def uninstall(): pyblish.deregister_host("nuke") pyblish.api.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) + deregister_inventory_action_path(INVENTORY_PATH) pyblish.api.deregister_callback( "instanceToggled", on_pyblish_instance_toggled) @@ -330,7 +332,7 @@ def containerise(node, data = OrderedDict( [ ("schema", "openpype:container-2.0"), - ("id", pipeline.AVALON_CONTAINER_ID), + ("id", AVALON_CONTAINER_ID), ("name", name), ("namespace", namespace), ("loader", str(loader)), diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index d0bb45a05d..eaf0ab6911 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -1,6 +1,8 @@ import os import random import string +from collections import OrderedDict +from abc import abstractmethod import nuke @@ -450,6 +452,7 @@ class ExporterReviewMov(ExporterReview): def generate_mov(self, farm=False, **kwargs): self.publish_on_farm = farm + read_raw = kwargs["read_raw"] reformat_node_add = kwargs["reformat_node_add"] reformat_node_config = kwargs["reformat_node_config"] bake_viewer_process = kwargs["bake_viewer_process"] @@ -484,6 +487,9 @@ class ExporterReviewMov(ExporterReview): r_node["origlast"].setValue(self.last_frame) r_node["colorspace"].setValue(self.write_colorspace) + if read_raw: + r_node["raw"].setValue(1) + # connect self._temp_nodes[subset].append(r_node) self.previous_node = r_node @@ -590,3 +596,139 @@ class ExporterReviewMov(ExporterReview): nuke.scriptSave() return self.data + + +class AbstractWriteRender(OpenPypeCreator): + """Abstract creator to gather similar implementation for Write creators""" + name = "" + label = "" + hosts = ["nuke"] + n_class = "Write" + family = "render" + icon = "sign-out" + defaults = ["Main", "Mask"] + + def __init__(self, *args, **kwargs): + super(AbstractWriteRender, self).__init__(*args, **kwargs) + + data = OrderedDict() + + data["family"] = self.family + data["families"] = self.n_class + + for k, v in self.data.items(): + if k not in data.keys(): + data.update({k: v}) + + self.data = data + self.nodes = nuke.selectedNodes() + self.log.debug("_ self.data: '{}'".format(self.data)) + + def process(self): + + inputs = [] + outputs = [] + instance = nuke.toNode(self.data["subset"]) + selected_node = None + + # use selection + if (self.options or {}).get("useSelection"): + nodes = self.nodes + + if not (len(nodes) < 2): + msg = ("Select only one node. " + "The node you want to connect to, " + "or tick off `Use selection`") + self.log.error(msg) + nuke.message(msg) + return + + if len(nodes) == 0: + msg = ( + "No nodes selected. 
Please select a single node to connect" + " to or tick off `Use selection`" + ) + self.log.error(msg) + nuke.message(msg) + return + + selected_node = nodes[0] + inputs = [selected_node] + outputs = selected_node.dependent() + + if instance: + if (instance.name() in selected_node.name()): + selected_node = instance.dependencies()[0] + + # if node already exist + if instance: + # collect input / outputs + inputs = instance.dependencies() + outputs = instance.dependent() + selected_node = inputs[0] + # remove old one + nuke.delete(instance) + + # recreate new + write_data = { + "nodeclass": self.n_class, + "families": [self.family], + "avalon": self.data + } + + # add creator data + creator_data = {"creator": self.__class__.__name__} + self.data.update(creator_data) + write_data.update(creator_data) + + if self.presets.get('fpath_template'): + self.log.info("Adding template path from preset") + write_data.update( + {"fpath_template": self.presets["fpath_template"]} + ) + else: + self.log.info("Adding template path from plugin") + write_data.update({ + "fpath_template": + ("{work}/" + self.family + "s/nuke/{subset}" + "/{subset}.{frame}.{ext}")}) + + write_node = self._create_write_node(selected_node, + inputs, outputs, + write_data) + + # relinking to collected connections + for i, input in enumerate(inputs): + write_node.setInput(i, input) + + write_node.autoplace() + + for output in outputs: + output.setInput(0, write_node) + + write_node = self._modify_write_node(write_node) + + return write_node + + @abstractmethod + def _create_write_node(self, selected_node, inputs, outputs, write_data): + """Family dependent implementation of Write node creation + + Args: + selected_node (nuke.Node) + inputs (list of nuke.Node) - input dependencies (what is connected) + outputs (list of nuke.Node) - output dependencies + write_data (dict) - values used to fill Knobs + Returns: + node (nuke.Node): group node with data as Knobs + """ + pass + + @abstractmethod + def _modify_write_node(self, write_node): + """Family dependent modification of created 'write_node' + + Returns: + node (nuke.Node): group node with data as Knobs + """ + pass diff --git a/openpype/hosts/nuke/api/workio.py b/openpype/hosts/nuke/api/workio.py index dbc24fdc9b..68fcb0927f 100644 --- a/openpype/hosts/nuke/api/workio.py +++ b/openpype/hosts/nuke/api/workio.py @@ -1,11 +1,12 @@ """Host API required Work Files tool""" import os import nuke -import avalon.api + +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS def file_extensions(): - return avalon.api.HOST_WORKFILE_EXTENSIONS["nuke"] + return HOST_WORKFILE_EXTENSIONS["nuke"] def has_unsaved_changes(): diff --git a/openpype/hosts/nuke/plugins/__init__.py b/openpype/hosts/nuke/plugins/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/plugins/create/__init__.py b/openpype/hosts/nuke/plugins/create/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/plugins/create/create_read.py b/openpype/hosts/nuke/plugins/create/create_read.py index bdc67add42..87a9dff0f8 100644 --- a/openpype/hosts/nuke/plugins/create/create_read.py +++ b/openpype/hosts/nuke/plugins/create/create_read.py @@ -2,8 +2,6 @@ from collections import OrderedDict import nuke -import avalon.api -from openpype import api as pype from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api.lib import ( set_avalon_knob_data diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py 
b/openpype/hosts/nuke/plugins/create/create_write_prerender.py index 761439fdb2..7297f74c13 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py +++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py @@ -1,12 +1,10 @@ -from collections import OrderedDict - import nuke from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api.lib import create_write_node -class CreateWritePrerender(plugin.OpenPypeCreator): +class CreateWritePrerender(plugin.AbstractWriteRender): # change this to template preset name = "WritePrerender" label = "Create Write Prerender" @@ -19,85 +17,7 @@ class CreateWritePrerender(plugin.OpenPypeCreator): def __init__(self, *args, **kwargs): super(CreateWritePrerender, self).__init__(*args, **kwargs) - data = OrderedDict() - - data["family"] = self.family - data["families"] = self.n_class - - for k, v in self.data.items(): - if k not in data.keys(): - data.update({k: v}) - - self.data = data - self.nodes = nuke.selectedNodes() - self.log.debug("_ self.data: '{}'".format(self.data)) - - def process(self): - inputs = [] - outputs = [] - instance = nuke.toNode(self.data["subset"]) - selected_node = None - - # use selection - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if not (len(nodes) < 2): - msg = ("Select only one node. The node " - "you want to connect to, " - "or tick off `Use selection`") - self.log.error(msg) - nuke.message(msg) - - if len(nodes) == 0: - msg = ( - "No nodes selected. Please select a single node to connect" - " to or tick off `Use selection`" - ) - self.log.error(msg) - nuke.message(msg) - - selected_node = nodes[0] - inputs = [selected_node] - outputs = selected_node.dependent() - - if instance: - if (instance.name() in selected_node.name()): - selected_node = instance.dependencies()[0] - - # if node already exist - if instance: - # collect input / outputs - inputs = instance.dependencies() - outputs = instance.dependent() - selected_node = inputs[0] - # remove old one - nuke.delete(instance) - - # recreate new - write_data = { - "nodeclass": self.n_class, - "families": [self.family], - "avalon": self.data - } - - # add creator data - creator_data = {"creator": self.__class__.__name__} - self.data.update(creator_data) - write_data.update(creator_data) - - if self.presets.get('fpath_template'): - self.log.info("Adding template path from preset") - write_data.update( - {"fpath_template": self.presets["fpath_template"]} - ) - else: - self.log.info("Adding template path from plugin") - write_data.update({ - "fpath_template": ("{work}/prerenders/nuke/{subset}" - "/{subset}.{frame}.{ext}")}) - - self.log.info("write_data: {}".format(write_data)) + def _create_write_node(self, selected_node, inputs, outputs, write_data): reviewable = self.presets.get("reviewable") write_node = create_write_node( self.data["subset"], @@ -107,15 +27,9 @@ class CreateWritePrerender(plugin.OpenPypeCreator): review=reviewable, linked_knobs=["channels", "___", "first", "last", "use_limit"]) - # relinking to collected connections - for i, input in enumerate(inputs): - write_node.setInput(i, input) - - write_node.autoplace() - - for output in outputs: - output.setInput(0, write_node) + return write_node + def _modify_write_node(self, write_node): # open group node write_node.begin() for n in nuke.allNodes(): diff --git a/openpype/hosts/nuke/plugins/create/create_write_render.py b/openpype/hosts/nuke/plugins/create/create_write_render.py index a9c4b5341e..18a101546f 100644 --- 
a/openpype/hosts/nuke/plugins/create/create_write_render.py +++ b/openpype/hosts/nuke/plugins/create/create_write_render.py @@ -1,12 +1,10 @@ -from collections import OrderedDict - import nuke from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api.lib import create_write_node -class CreateWriteRender(plugin.OpenPypeCreator): +class CreateWriteRender(plugin.AbstractWriteRender): # change this to template preset name = "WriteRender" label = "Create Write Render" @@ -19,87 +17,7 @@ class CreateWriteRender(plugin.OpenPypeCreator): def __init__(self, *args, **kwargs): super(CreateWriteRender, self).__init__(*args, **kwargs) - data = OrderedDict() - - data["family"] = self.family - data["families"] = self.n_class - - for k, v in self.data.items(): - if k not in data.keys(): - data.update({k: v}) - - self.data = data - self.nodes = nuke.selectedNodes() - self.log.debug("_ self.data: '{}'".format(self.data)) - - def process(self): - - inputs = [] - outputs = [] - instance = nuke.toNode(self.data["subset"]) - selected_node = None - - # use selection - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if not (len(nodes) < 2): - msg = ("Select only one node. " - "The node you want to connect to, " - "or tick off `Use selection`") - self.log.error(msg) - nuke.message(msg) - return - - if len(nodes) == 0: - msg = ( - "No nodes selected. Please select a single node to connect" - " to or tick off `Use selection`" - ) - self.log.error(msg) - nuke.message(msg) - return - - selected_node = nodes[0] - inputs = [selected_node] - outputs = selected_node.dependent() - - if instance: - if (instance.name() in selected_node.name()): - selected_node = instance.dependencies()[0] - - # if node already exist - if instance: - # collect input / outputs - inputs = instance.dependencies() - outputs = instance.dependent() - selected_node = inputs[0] - # remove old one - nuke.delete(instance) - - # recreate new - write_data = { - "nodeclass": self.n_class, - "families": [self.family], - "avalon": self.data - } - - # add creator data - creator_data = {"creator": self.__class__.__name__} - self.data.update(creator_data) - write_data.update(creator_data) - - if self.presets.get('fpath_template'): - self.log.info("Adding template path from preset") - write_data.update( - {"fpath_template": self.presets["fpath_template"]} - ) - else: - self.log.info("Adding template path from plugin") - write_data.update({ - "fpath_template": ("{work}/renders/nuke/{subset}" - "/{subset}.{frame}.{ext}")}) - + def _create_write_node(self, selected_node, inputs, outputs, write_data): # add reformat node to cut off all outside of format bounding box # get width and height try: @@ -126,13 +44,7 @@ class CreateWriteRender(plugin.OpenPypeCreator): input=selected_node, prenodes=_prenodes) - # relinking to collected connections - for i, input in enumerate(inputs): - write_node.setInput(i, input) - - write_node.autoplace() - - for output in outputs: - output.setInput(0, write_node) - + return write_node + + def _modify_write_node(self, write_node): return write_node diff --git a/openpype/hosts/nuke/plugins/create/create_write_still.py b/openpype/hosts/nuke/plugins/create/create_write_still.py index 0037b64ce3..d22b5eab3f 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_still.py +++ b/openpype/hosts/nuke/plugins/create/create_write_still.py @@ -1,12 +1,10 @@ -from collections import OrderedDict - import nuke from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api.lib import create_write_node 
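Note: the duplicated process() bodies deleted from CreateWriteRender above and from CreateWriteStill just below now live in the AbstractWriteRender base class added in openpype/hosts/nuke/api/plugin.py; each creator only fills in the two family-specific hooks. A minimal sketch of a creator on top of the new base class (the CreateWriteCustom name is hypothetical, and the create_write_node call assumes the keyword signature used by the real creators):

import nuke

from openpype.hosts.nuke.api import plugin
from openpype.hosts.nuke.api.lib import create_write_node


class CreateWriteCustom(plugin.AbstractWriteRender):
    """Hypothetical Write creator built on AbstractWriteRender."""

    name = "WriteCustom"
    label = "Create Write Custom"
    family = "render"

    def _create_write_node(self, selected_node, inputs, outputs, write_data):
        # Build only the family-specific group node here; the base class
        # process() already resolved the selection and will relink the
        # collected inputs/outputs and autoplace the node afterwards.
        return create_write_node(
            self.data["subset"], write_data, input=selected_node)

    def _modify_write_node(self, write_node):
        # Per-family post-creation tweaks (knob overrides etc.) go here.
        return write_node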
-class CreateWriteStill(plugin.OpenPypeCreator): +class CreateWriteStill(plugin.AbstractWriteRender): # change this to template preset name = "WriteStillFrame" label = "Create Write Still Image" @@ -23,77 +21,8 @@ class CreateWriteStill(plugin.OpenPypeCreator): def __init__(self, *args, **kwargs): super(CreateWriteStill, self).__init__(*args, **kwargs) - data = OrderedDict() - - data["family"] = self.family - data["families"] = self.n_class - - for k, v in self.data.items(): - if k not in data.keys(): - data.update({k: v}) - - self.data = data - self.nodes = nuke.selectedNodes() - self.log.debug("_ self.data: '{}'".format(self.data)) - - def process(self): - - inputs = [] - outputs = [] - instance = nuke.toNode(self.data["subset"]) - selected_node = None - - # use selection - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if not (len(nodes) < 2): - msg = ("Select only one node. " - "The node you want to connect to, " - "or tick off `Use selection`") - self.log.error(msg) - nuke.message(msg) - return - - if len(nodes) == 0: - msg = ( - "No nodes selected. Please select a single node to connect" - " to or tick off `Use selection`" - ) - self.log.error(msg) - nuke.message(msg) - return - - selected_node = nodes[0] - inputs = [selected_node] - outputs = selected_node.dependent() - - if instance: - if (instance.name() in selected_node.name()): - selected_node = instance.dependencies()[0] - - # if node already exist - if instance: - # collect input / outputs - inputs = instance.dependencies() - outputs = instance.dependent() - selected_node = inputs[0] - # remove old one - nuke.delete(instance) - - # recreate new - write_data = { - "nodeclass": self.n_class, - "families": [self.family], - "avalon": self.data - } - - # add creator data - creator_data = {"creator": self.__class__.__name__} - self.data.update(creator_data) - write_data.update(creator_data) - - self.log.info("Adding template path from plugin") + def _create_write_node(self, selected_node, inputs, outputs, write_data): + # explicitly reset template to 'renders', not same as other 2 writes write_data.update({ "fpath_template": ( "{work}/renders/nuke/{subset}/{subset}.{ext}")}) @@ -118,16 +47,9 @@ class CreateWriteStill(plugin.OpenPypeCreator): farm=False, linked_knobs=["channels", "___", "first", "last", "use_limit"]) - # relinking to collected connections - for i, input in enumerate(inputs): - write_node.setInput(i, input) + return write_node - write_node.autoplace() - - for output in outputs: - output.setInput(0, write_node) - - # link frame hold to group node + def _modify_write_node(self, write_node): write_node.begin() for n in nuke.allNodes(): # get write node diff --git a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py index 5f834be557..c04c939a8d 100644 --- a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py +++ b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py @@ -1,9 +1,9 @@ -from avalon import api from openpype.api import Logger +from openpype.pipeline import InventoryAction from openpype.hosts.nuke.api.lib import set_avalon_knob_data -class RepairOldLoaders(api.InventoryAction): +class RepairOldLoaders(InventoryAction): label = "Repair Old Loaders" icon = "gears" diff --git a/openpype/hosts/nuke/plugins/inventory/select_containers.py b/openpype/hosts/nuke/plugins/inventory/select_containers.py index 3f174b3562..4e7a20fb26 100644 --- a/openpype/hosts/nuke/plugins/inventory/select_containers.py +++ 
b/openpype/hosts/nuke/plugins/inventory/select_containers.py @@ -1,8 +1,8 @@ -from avalon import api -from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop +from openpype.pipeline import InventoryAction +from openpype.hosts.nuke.api.command import viewer_update_and_undo_stop -class SelectContainers(api.InventoryAction): +class SelectContainers(InventoryAction): label = "Select Containers" icon = "mouse-pointer" diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index 36cec6f4c5..143fdf1f30 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -1,8 +1,8 @@ -from avalon import io import nuke import nukescripts from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -14,7 +14,7 @@ from openpype.hosts.nuke.api.lib import ( get_avalon_knob_data, set_avalon_knob_data ) -from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop +from openpype.hosts.nuke.api.command import viewer_update_and_undo_stop from openpype.hosts.nuke.api import containerise, update_container @@ -188,7 +188,7 @@ class LoadBackdropNodes(load.LoaderPlugin): # get main variables # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -237,7 +237,7 @@ class LoadBackdropNodes(load.LoaderPlugin): GN["name"].setValue(object_name) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_camera_abc.py b/openpype/hosts/nuke/plugins/load/load_camera_abc.py index fb5f7f8ede..964ca5ec90 100644 --- a/openpype/hosts/nuke/plugins/load/load_camera_abc.py +++ b/openpype/hosts/nuke/plugins/load/load_camera_abc.py @@ -1,7 +1,7 @@ import nuke -from avalon import io from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -102,7 +102,7 @@ class AlembicCameraLoader(load.LoaderPlugin): None """ # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -175,7 +175,7 @@ class AlembicCameraLoader(load.LoaderPlugin): """ Coloring a node by correct color by actual version """ # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py index 9b0588feac..681561e303 100644 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ b/openpype/hosts/nuke/plugins/load/load_clip.py @@ -1,8 +1,10 @@ import nuke import qargparse -from avalon import io -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + legacy_io, + get_representation_path, +) from openpype.hosts.nuke.api.lib import ( get_imageio_input_colorspace, maintained_selection @@ -194,7 +196,7 @@ class LoadClip(plugin.NukeLoader): start_at_workfile = bool("start at" in read_node['frame_mode'].value()) - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -264,7 +266,7 @@ class LoadClip(plugin.NukeLoader): # change color of read_node # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git 
a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py index 68c3952942..6a30330ed0 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects.py +++ b/openpype/hosts/nuke/plugins/load/load_effects.py @@ -1,10 +1,10 @@ import json from collections import OrderedDict import nuke - -from avalon import io +import six from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -72,7 +72,7 @@ class LoadEffects(load.LoaderPlugin): # getting data from json file with unicode conversion with open(file, "r") as f: json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).iteritems()} + for key, value in json.load(f).items()} # get correct order of nodes by positions on track and subtrack nodes_order = self.reorder_nodes(json_f) @@ -148,7 +148,7 @@ class LoadEffects(load.LoaderPlugin): """ # get main variables # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -188,7 +188,7 @@ class LoadEffects(load.LoaderPlugin): # getting data from json file with unicode conversion with open(file, "r") as f: json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).iteritems()} + for key, value in json.load(f).items()} # get correct order of nodes by positions on track and subtrack nodes_order = self.reorder_nodes(json_f) @@ -244,7 +244,7 @@ class LoadEffects(load.LoaderPlugin): self.connect_read_node(GN, namespace, json_f["assignTo"]) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') @@ -330,11 +330,11 @@ class LoadEffects(load.LoaderPlugin): if isinstance(input, dict): return {self.byteify(key): self.byteify(value) - for key, value in input.iteritems()} + for key, value in input.items()} elif isinstance(input, list): return [self.byteify(element) for element in input] - elif isinstance(input, unicode): - return input.encode('utf-8') + elif isinstance(input, six.text_type): + return str(input) else: return input diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py index 9c4fd4c2c6..eaf151b3b8 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py @@ -1,11 +1,10 @@ import json from collections import OrderedDict - +import six import nuke -from avalon import io - from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -74,7 +73,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin): # getting data from json file with unicode conversion with open(file, "r") as f: json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).iteritems()} + for key, value in json.load(f).items()} # get correct order of nodes by positions on track and subtrack nodes_order = self.reorder_nodes(json_f) @@ -154,7 +153,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin): # get main variables # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -194,7 +193,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin): # getting data from json file with unicode conversion with open(file, "r") as f: json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).iteritems()} + for key, value in json.load(f).items()} # get correct order of nodes by positions on track and subtrack 
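Note: the iteritems()/unicode replacements in this loader and the ones that follow are what make the byteify helper importable under Python 3. Assembled from the hunks, the helper now reads roughly as below (shown standalone for clarity; in the plugins it is a method taking self):

import six


def byteify(input):
    """Recursively normalize text values loaded from JSON.

    six.text_type is `unicode` on Python 2 and `str` on Python 3,
    so the same isinstance branch covers both interpreters.
    """
    if isinstance(input, dict):
        return {byteify(key): byteify(value)
                for key, value in input.items()}
    elif isinstance(input, list):
        return [byteify(element) for element in input]
    elif isinstance(input, six.text_type):
        return str(input)
    return input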
nodes_order = self.reorder_nodes(json_f) @@ -252,7 +251,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin): # return # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') @@ -350,11 +349,11 @@ class LoadEffectsInputProcess(load.LoaderPlugin): if isinstance(input, dict): return {self.byteify(key): self.byteify(value) - for key, value in input.iteritems()} + for key, value in input.items()} elif isinstance(input, list): return [self.byteify(element) for element in input] - elif isinstance(input, unicode): - return input.encode('utf-8') + elif isinstance(input, six.text_type): + return str(input) else: return input diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo.py b/openpype/hosts/nuke/plugins/load/load_gizmo.py index 6f2b191be9..4ea9d64d7d 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo.py @@ -1,8 +1,7 @@ import nuke -from avalon import io - from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -102,7 +101,7 @@ class LoadGizmo(load.LoaderPlugin): # get main variables # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -150,7 +149,7 @@ class LoadGizmo(load.LoaderPlugin): GN["name"].setValue(object_name) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py index 87bebce15b..38dd70935e 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -1,8 +1,8 @@ import nuke - -from avalon import io +import six from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -108,7 +108,7 @@ class LoadGizmoInputProcess(load.LoaderPlugin): # get main variables # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -156,7 +156,7 @@ class LoadGizmoInputProcess(load.LoaderPlugin): GN["name"].setValue(object_name) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') @@ -240,11 +240,11 @@ class LoadGizmoInputProcess(load.LoaderPlugin): if isinstance(input, dict): return {self.byteify(key): self.byteify(value) - for key, value in input.iteritems()} + for key, value in input.items()} elif isinstance(input, list): return [self.byteify(element) for element in input] - elif isinstance(input, unicode): - return input.encode('utf-8') + elif isinstance(input, six.text_type): + return str(input) else: return input diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py index 9a175a0cba..6df286a4f7 100644 --- a/openpype/hosts/nuke/plugins/load/load_image.py +++ b/openpype/hosts/nuke/plugins/load/load_image.py @@ -1,9 +1,9 @@ import nuke import qargparse -from avalon import io from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -186,13 +186,13 @@ class LoadImage(load.LoaderPlugin): format(frame_number, "0{}".format(padding))) # Get start frame from version data - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) # get all versions in list - 
versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py index e445beca05..9788bb25d2 100644 --- a/openpype/hosts/nuke/plugins/load/load_model.py +++ b/openpype/hosts/nuke/plugins/load/load_model.py @@ -1,6 +1,7 @@ import nuke -from avalon import io + from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -99,7 +100,7 @@ class AlembicModelLoader(load.LoaderPlugin): None """ # Get version from io - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) @@ -172,7 +173,7 @@ class AlembicModelLoader(load.LoaderPlugin): """ Coloring a node by correct color by actual version """ # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py index 779f101682..bd351ad785 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py @@ -1,8 +1,7 @@ import nuke -from avalon import io - from openpype.pipeline import ( + legacy_io, load, get_representation_path, ) @@ -117,13 +116,13 @@ class LinkAsGroup(load.LoaderPlugin): root = get_representation_path(representation).replace("\\", "/") # Get start frame from version data - version = io.find_one({ + version = legacy_io.find_one({ "type": "version", "_id": representation["parent"] }) # get all versions in list - versions = io.find({ + versions = legacy_io.find({ "type": "version", "parent": version["parent"] }).distinct('name') diff --git a/openpype/hosts/nuke/plugins/publish/collect_reads.py b/openpype/hosts/nuke/plugins/publish/collect_reads.py index 45e9969eb9..4d6944f523 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_reads.py +++ b/openpype/hosts/nuke/plugins/publish/collect_reads.py @@ -2,7 +2,8 @@ import os import re import nuke import pyblish.api -from avalon import io, api + +from openpype.pipeline import legacy_io @pyblish.api.log @@ -15,8 +16,10 @@ class CollectNukeReads(pyblish.api.InstancePlugin): families = ["source"] def process(self, instance): - asset_data = io.find_one({"type": "asset", - "name": api.Session["AVALON_ASSET"]}) + asset_data = legacy_io.find_one({ + "type": "asset", + "name": legacy_io.Session["AVALON_ASSET"] + }) self.log.debug("asset_data: {}".format(asset_data["data"])) diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data.py b/openpype/hosts/nuke/plugins/publish/extract_review_data.py new file mode 100644 index 0000000000..38a8140cff --- /dev/null +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data.py @@ -0,0 +1,47 @@ +import os +import pyblish.api +import openpype +from pprint import pformat + + +class ExtractReviewData(openpype.api.Extractor): + """Extracts review tag into available representation + """ + + order = pyblish.api.ExtractorOrder + 0.01 + # order = pyblish.api.CollectorOrder + 0.499 + label = "Extract Review Data" + + families = ["review"] + hosts = ["nuke"] + + def process(self, instance): + fpath = instance.data["path"] + ext = os.path.splitext(fpath)[-1][1:] + + representations = instance.data.get("representations", []) + + # review can be removed since `ProcessSubmittedJobOnFarm` will create + # reviewable representation if needed + if (
"render.farm" in instance.data["families"] + and "review" in instance.data["families"] + ): + instance.data["families"].remove("review") + + # iterate representations and add `review` tag + for repre in representations: + if ext != repre["ext"]: + continue + + if not repre.get("tags"): + repre["tags"] = [] + + if "review" not in repre["tags"]: + repre["tags"].append("review") + + self.log.debug("Matching representation: {}".format( + pformat(repre) + )) + + instance.data["representations"] = representations diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py index 544b9e04da..2e8843d2e0 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py @@ -24,7 +24,11 @@ class ExtractReviewDataMov(openpype.api.Extractor): outputs = {} def process(self, instance): - families = instance.data["families"] + families = set(instance.data["families"]) + + # add main family to make sure all families are compared + families.add(instance.data["family"]) + task_type = instance.context.data["taskType"] subset = instance.data["subset"] self.log.info("Creating staging dir...") @@ -50,51 +54,31 @@ class ExtractReviewDataMov(openpype.api.Extractor): f_task_types = o_data["filter"]["task_types"] f_subsets = o_data["filter"]["sebsets"] + self.log.debug( + "f_families `{}` > families: {}".format( + f_families, families)) + + self.log.debug( + "f_task_types `{}` > task_type: {}".format( + f_task_types, task_type)) + + self.log.debug( + "f_subsets `{}` > subset: {}".format( + f_subsets, subset)) + # test if family found in context - test_families = any([ - # first if exact family set is matching - # make sure only interesetion of list is correct - bool(set(families).intersection(f_families)), - # and if famiies are set at all - # if not then return True because we want this preset - # to be active if nothig is set - bool(not f_families) - ]) + # using intersection to make sure all defined + # families are present in combination + if f_families and not families.intersection(f_families): + continue # test task types from filter - test_task_types = any([ - # check if actual task type is defined in task types - # set in preset's filter - bool(task_type in f_task_types), - # and if taskTypes are defined in preset filter - # if not then return True, because we want this filter - # to be active if no taskType is set - bool(not f_task_types) - ]) + if f_task_types and task_type not in f_task_types: + continue # test subsets from filter - test_subsets = any([ - # check if any of subset filter inputs - # converted to regex patern is not found in subset - # we keep strict case sensitivity - bool(next(( - s for s in f_subsets - if re.search(re.compile(s), subset) - ), None)), - # but if no subsets were set then make this acuntable too - bool(not f_subsets) - ]) - - # we need all filters to be positive for this - # preset to be activated - test_all = all([ - test_families, - test_task_types, - test_subsets - ]) - - # if it is not positive then skip this preset - if not test_all: + if f_subsets and not any( + re.search(s, subset) for s in f_subsets): continue self.log.info( @@ -139,6 +123,7 @@ class ExtractReviewDataMov(openpype.api.Extractor): if generated_repres: # assign to representations instance.data["representations"] += generated_repres + instance.data["useSequenceForReview"] = False else: instance.data["families"].remove("review") self.log.info(( diff 
--git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py index e917a28046..fb52fc18b4 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py +++ b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py @@ -1,6 +1,9 @@ import os import nuke +import copy + import pyblish.api + import openpype from openpype.hosts.nuke.api.lib import maintained_selection @@ -18,6 +21,13 @@ class ExtractSlateFrame(openpype.api.Extractor): families = ["slate"] hosts = ["nuke"] + # Settings values + # - can be extended by other attributes from node in the future + key_value_mapping = { + "f_submission_note": [True, "{comment}"], + "f_submitting_for": [True, "{intent[value]}"], + "f_vfx_scope_of_work": [False, ""] + } def process(self, instance): if hasattr(self, "viewer_lut_raw"): @@ -129,9 +139,7 @@ class ExtractSlateFrame(openpype.api.Extractor): for node in temporary_nodes: nuke.delete(node) - def get_view_process_node(self): - # Select only the target node if nuke.selectedNodes(): [n.setSelected(False) for n in nuke.selectedNodes()] @@ -162,13 +170,56 @@ class ExtractSlateFrame(openpype.api.Extractor): return comment = instance.context.data.get("comment") - intent_value = instance.context.data.get("intent") - if intent_value and isinstance(intent_value, dict): - intent_value = intent_value.get("value") + intent = instance.context.data.get("intent") + if not isinstance(intent, dict): + intent = { + "label": intent, + "value": intent + } - try: - node["f_submission_note"].setValue(comment) - node["f_submitting_for"].setValue(intent_value or "") - except NameError: - return - instance.data.pop("slateNode") + fill_data = copy.deepcopy(instance.data["anatomyData"]) + fill_data.update({ + "custom": copy.deepcopy( + instance.data.get("customData") or {} + ), + "comment": comment, + "intent": intent + }) + + for key, value in self.key_value_mapping.items(): + enabled, template = value + if not enabled: + self.log.debug("Key \"{}\" is disabled".format(key)) + continue + + try: + value = template.format(**fill_data) + + except ValueError: + self.log.warning( + "Couldn't fill template \"{}\" with data: {}".format( + template, fill_data + ), + exc_info=True + ) + continue + + except KeyError: + self.log.warning( + ( + "Template contains unknown key." 
+ " Template \"{}\" Data: {}" + ).format(template, fill_data), + exc_info=True + ) + continue + + try: + node[key].setValue(value) + self.log.info("Change key \"{}\" to value \"{}\"".format( + key, value + )) + except NameError: + self.log.warning(( + "Failed to set value \"{}\" on node attribute \"{}\"" + ).format(value, key)) diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py index 29c706f302..1a8fa3e6ad 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py @@ -1,6 +1,7 @@ import nuke import pyblish.api -from avalon import io, api + +from openpype.pipeline import legacy_io from openpype.hosts.nuke.api.lib import ( add_publish_knob, get_avalon_knob_data @@ -19,9 +20,9 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): sync_workfile_version_on_families = [] def process(self, context): - asset_data = io.find_one({ + asset_data = legacy_io.find_one({ "type": "asset", - "name": api.Session["AVALON_ASSET"] + "name": legacy_io.Session["AVALON_ASSET"] }) self.log.debug("asset_data: {}".format(asset_data["data"])) @@ -69,6 +70,11 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): instance = context.create_instance(subset) instance.append(node) + suspend_publish = False + if "suspend_publish" in node.knobs(): + suspend_publish = node["suspend_publish"].value() + instance.data["suspend_publish"] = suspend_publish + # get review knob value review = False if "review" in node.knobs(): diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py index 85e98db7ed..8669f4f485 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py @@ -3,9 +3,12 @@ import re from pprint import pformat import nuke import pyblish.api -from avalon import io + import openpype.api as pype -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + legacy_io, + get_representation_path, +) @pyblish.api.log @@ -128,13 +131,17 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): } group_node = [x for x in instance if x.Class() == "Group"][0] - deadlineChunkSize = 1 + dl_chunk_size = 1 if "deadlineChunkSize" in group_node.knobs(): - deadlineChunkSize = group_node["deadlineChunkSize"].value() + dl_chunk_size = group_node["deadlineChunkSize"].value() - deadlinePriority = 50 + dl_priority = 50 if "deadlinePriority" in group_node.knobs(): - deadlinePriority = group_node["deadlinePriority"].value() + dl_priority = group_node["deadlinePriority"].value() + + dl_concurrent_tasks = 0 + if "deadlineConcurrentTasks" in group_node.knobs(): + dl_concurrent_tasks = group_node["deadlineConcurrentTasks"].value() instance.data.update({ "versionData": version_data, @@ -144,8 +151,9 @@ "label": label, "outputType": output_type, "colorspace": colorspace, - "deadlineChunkSize": deadlineChunkSize, - "deadlinePriority": deadlinePriority + "deadlineChunkSize": dl_chunk_size, + "deadlinePriority": dl_priority, + "deadlineConcurrentTasks": dl_concurrent_tasks }) if self.is_prerender(_families_test): @@ -175,7 +183,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): repre_doc = None if version_doc: # Try to find it's representation (Expected there is only one) - repre_doc = io.find_one( + repre_doc = legacy_io.find_one( {"type":
"representation", "parent": version_doc["_id"]} ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_script.py b/openpype/hosts/nuke/plugins/publish/validate_script.py index c35d09dcde..10c9e93f8b 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_script.py +++ b/openpype/hosts/nuke/plugins/publish/validate_script.py @@ -1,6 +1,7 @@ import pyblish.api -from avalon import io + from openpype import lib +from openpype.pipeline import legacy_io @pyblish.api.log @@ -115,7 +116,7 @@ class ValidateScript(pyblish.api.InstancePlugin): def check_parent_hierarchical(self, entityId, attr): if entityId is None: return None - entity = io.find_one({"_id": entityId}) + entity = legacy_io.find_one({"_id": entityId}) if attr in entity['data']: self.log.info(attr) return entity['data'][attr] diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py b/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py index 5ee93403d0..907577a97d 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py @@ -25,7 +25,7 @@ class RepairNukeWriteDeadlineTab(pyblish.api.Action): # Remove existing knobs. knob_names = openpype.hosts.nuke.lib.get_deadline_knob_names() - for name, knob in group_node.knobs().iteritems(): + for name, knob in group_node.knobs().items(): if name in knob_names: group_node.removeKnob(knob) diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py index 08f09f8097..9fb57c1698 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py @@ -1,11 +1,10 @@ -import os import toml import nuke -from avalon import api import pyblish.api import openpype.api +from openpype.pipeline import discover_creator_plugins from openpype.hosts.nuke.api.lib import get_avalon_knob_data @@ -79,7 +78,7 @@ class ValidateWriteLegacy(pyblish.api.InstancePlugin): # get appropriate plugin class creator_plugin = None - for Creator in api.discover(api.Creator): + for Creator in discover_creator_plugins(): if Creator.__name__ != Create_name: continue diff --git a/openpype/hosts/nuke/startup/menu.py b/openpype/hosts/nuke/startup/menu.py index 2cac6d09e7..9ed43b2110 100644 --- a/openpype/hosts/nuke/startup/menu.py +++ b/openpype/hosts/nuke/startup/menu.py @@ -1,7 +1,7 @@ import nuke -import avalon.api from openpype.api import Logger +from openpype.pipeline import install_host from openpype.hosts.nuke import api from openpype.hosts.nuke.api.lib import ( on_script_load, @@ -13,7 +13,7 @@ from openpype.hosts.nuke.api.lib import ( log = Logger.get_logger(__name__) -avalon.api.install(api) +install_host(api) # fix ffmpeg settings on script nuke.addOnScriptLoad(on_script_load) diff --git a/openpype/hosts/photoshop/api/__init__.py b/openpype/hosts/photoshop/api/__init__.py index 17ea957066..94152b5706 100644 --- a/openpype/hosts/photoshop/api/__init__.py +++ b/openpype/hosts/photoshop/api/__init__.py @@ -12,7 +12,10 @@ from .pipeline import ( remove_instance, install, uninstall, - containerise + containerise, + get_context_data, + update_context_data, + get_context_title ) from .plugin import ( PhotoshopLoader, @@ -43,6 +46,9 @@ __all__ = [ "install", "uninstall", "containerise", + "get_context_data", + "update_context_data", + "get_context_title", # Plugin "PhotoshopLoader", diff --git a/openpype/hosts/photoshop/api/launch_logic.py 
b/openpype/hosts/photoshop/api/launch_logic.py index 0021905cb5..0bbb19523d 100644 --- a/openpype/hosts/photoshop/api/launch_logic.py +++ b/openpype/hosts/photoshop/api/launch_logic.py @@ -11,9 +11,8 @@ from wsrpc_aiohttp import ( from Qt import QtCore from openpype.api import Logger +from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools - -from avalon import api from openpype.tools.adobe_webserver.app import WebServerTool from .ws_stub import PhotoshopServerStub @@ -320,13 +319,13 @@ class PhotoshopRoute(WebSocketRoute): log.info("Setting context change") log.info("project {} asset {} ".format(project, asset)) if project: - api.Session["AVALON_PROJECT"] = project + legacy_io.Session["AVALON_PROJECT"] = project os.environ["AVALON_PROJECT"] = project if asset: - api.Session["AVALON_ASSET"] = asset + legacy_io.Session["AVALON_ASSET"] = asset os.environ["AVALON_ASSET"] = asset if task: - api.Session["AVALON_TASK"] = task + legacy_io.Session["AVALON_TASK"] = task os.environ["AVALON_TASK"] = task async def read(self): diff --git a/openpype/hosts/photoshop/api/lib.py b/openpype/hosts/photoshop/api/lib.py index 6d2a493a94..2f57d64464 100644 --- a/openpype/hosts/photoshop/api/lib.py +++ b/openpype/hosts/photoshop/api/lib.py @@ -5,9 +5,8 @@ import traceback from Qt import QtWidgets -import avalon.api - from openpype.api import Logger +from openpype.pipeline import install_host from openpype.tools.utils import host_tools from openpype.lib.remote_publish import headless_publish from openpype.lib import env_value_to_bool @@ -24,7 +23,7 @@ def safe_excepthook(*args): def main(*subprocess_args): from openpype.hosts.photoshop import api - avalon.api.install(api) + install_host(api) sys.excepthook = safe_excepthook # coloring in StdOutBroker diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py index e814e1ca4d..20a6e3169f 100644 --- a/openpype/hosts/photoshop/api/pipeline.py +++ b/openpype/hosts/photoshop/api/pipeline.py @@ -1,16 +1,19 @@ import os from Qt import QtWidgets +from bson.objectid import ObjectId import pyblish.api -import avalon.api -from avalon import pipeline, io from openpype.api import Logger from openpype.lib import register_event_callback from openpype.pipeline import ( - LegacyCreator, + legacy_io, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, + registered_host, ) import openpype.hosts.photoshop @@ -30,22 +33,11 @@ def check_inventory(): if not lib.any_outdated(): return - host = avalon.api.registered_host() - outdated_containers = [] - for container in host.ls(): - representation = container['representation'] - representation_doc = io.find_one( - { - "_id": io.ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} - ) - if representation_doc and not lib.is_latest(representation_doc): - outdated_containers.append(container) - # Warn about outdated containers. 
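Note: the QApplication handling rewritten just below matters because only one QApplication may exist per process; when the host integration has already spun one up, constructing another raises a RuntimeError. The guard, isolated from the added lines:

from Qt import QtWidgets

# Reuse the QApplication that is already running in this process,
# if any; only standalone runs need to create their own instance.
app = QtWidgets.QApplication.instance()
if not app:
    print("Starting new QApplication..")
    app = QtWidgets.QApplication([])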
- print("Starting new QApplication..") + _app = QtWidgets.QApplication.instance() + if not _app: + print("Starting new QApplication..") + _app = QtWidgets.QApplication([]) message_box = QtWidgets.QMessageBox() message_box.setIcon(QtWidgets.QMessageBox.Warning) @@ -73,7 +65,7 @@ def install(): pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) log.info(PUBLISH_PATH) pyblish.api.register_callback( @@ -86,7 +78,7 @@ def install(): def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) def ls(): @@ -146,13 +138,9 @@ def list_instances(): instances = [] layers_meta = stub.get_layers_metadata() if layers_meta: - for key, instance in layers_meta.items(): - schema = instance.get("schema") - if schema and "container" in schema: - continue - - instance['uuid'] = key - instances.append(instance) + for instance in layers_meta: + if instance.get("id") == "pyblish.avalon.instance": + instances.append(instance) return instances @@ -173,11 +161,18 @@ def remove_instance(instance): if not stub: return - stub.remove_instance(instance.get("uuid")) - layer = stub.get_layer(instance.get("uuid")) - if layer: - stub.rename_layer(instance.get("uuid"), - layer.name.replace(stub.PUBLISH_ICON, '')) + inst_id = instance.get("instance_id") or instance.get("uuid") # legacy + if not inst_id: + log.warning("No instance identifier for {}".format(instance)) + return + + stub.remove_instance(inst_id) + + if instance.get("members"): + item = stub.get_layer(instance["members"][0]) + if item: + stub.rename_layer(item.id, + item.name.replace(stub.PUBLISH_ICON, '')) def _get_stub(): @@ -221,7 +216,7 @@ def containerise( data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "name": name, "namespace": namespace, "loader": str(loader), @@ -229,6 +224,33 @@ def containerise( "members": [str(layer.id)] } stub = lib.stub() - stub.imprint(layer, data) + stub.imprint(layer.id, data) return layer + + +def get_context_data(): + """Get stored values for context (validation enable/disable etc)""" + meta = _get_stub().get_layers_metadata() + for item in meta: + if item.get("id") == "publish_context": + item.pop("id") + return item + + return {} + + +def update_context_data(data, changes): + """Store value needed for context""" + item = data + item["id"] = "publish_context" + _get_stub().imprint(item["id"], item) + + +def get_context_title(): + """Returns title for Creator window""" + + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + return "{}/{}/{}".format(project_name, asset_name, task_name) diff --git a/openpype/hosts/photoshop/api/workio.py b/openpype/hosts/photoshop/api/workio.py index 0bf3ed2bd9..951c5dbfff 100644 --- a/openpype/hosts/photoshop/api/workio.py +++ b/openpype/hosts/photoshop/api/workio.py @@ -1,8 +1,7 @@ """Host API required Work Files tool""" import os -import avalon.api - +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS from . 
import lib @@ -15,7 +14,7 @@ def _active_document(): def file_extensions(): - return avalon.api.HOST_WORKFILE_EXTENSIONS["photoshop"] + return HOST_WORKFILE_EXTENSIONS["photoshop"] def has_unsaved_changes(): diff --git a/openpype/hosts/photoshop/api/ws_stub.py b/openpype/hosts/photoshop/api/ws_stub.py index 64d89f5420..fa076ecc7e 100644 --- a/openpype/hosts/photoshop/api/ws_stub.py +++ b/openpype/hosts/photoshop/api/ws_stub.py @@ -27,6 +27,7 @@ class PSItem(object): members = attr.ib(factory=list) long_name = attr.ib(default=None) color_code = attr.ib(default=None) # color code of layer + instance_id = attr.ib(default=None) class PhotoshopServerStub: @@ -76,13 +77,31 @@ class PhotoshopServerStub: layer: (PSItem) layers_meta: full list from Headline (for performance in loops) Returns: + (dict) of layer metadata stored in PS file + + Example: + { + 'id': 'pyblish.avalon.container', + 'loader': 'ImageLoader', + 'members': ['64'], + 'name': 'imageMainMiddle', + 'namespace': 'Hero_imageMainMiddle_001', + 'representation': '6203dc91e80934d9f6ee7d96', + 'schema': 'openpype:container-2.0' + } """ if layers_meta is None: layers_meta = self.get_layers_metadata() - return layers_meta.get(str(layer.id)) + for layer_meta in layers_meta: + layer_id = layer_meta.get("uuid") # legacy + if layer_meta.get("members"): + layer_id = layer_meta["members"][0] + if str(layer.id) == str(layer_id): + return layer_meta + print("Unable to find layer metadata for {}".format(layer.id)) - def imprint(self, layer, data, all_layers=None, layers_meta=None): + def imprint(self, item_id, data, all_layers=None, items_meta=None): """Save layer metadata to Headline field of active document Stores metadata in format: @@ -108,28 +127,37 @@ class PhotoshopServerStub: }] - for loaded instances Args: - layer (PSItem): + item_id (str): data(string): json representation for single layer all_layers (list of PSItem): for performance, could be injected for usage in loop, if not, single call will be triggered - layers_meta(string): json representation from Headline + items_meta(string): json representation from Headline (for performance - provide only if imprint is in loop - value should be same) Returns: None """ - if not layers_meta: - layers_meta = self.get_layers_metadata() + if not items_meta: + items_meta = self.get_layers_metadata() # json.dumps writes integer values in a dictionary to string, so # anticipating it here. - if str(layer.id) in layers_meta and layers_meta[str(layer.id)]: - if data: - layers_meta[str(layer.id)].update(data) + item_id = str(item_id) + is_new = True + result_meta = [] + for item_meta in items_meta: + if ((item_meta.get('members') and + item_id == str(item_meta.get('members')[0])) or + item_meta.get("instance_id") == item_id): + is_new = False + if data: + item_meta.update(data) + result_meta.append(item_meta) else: - layers_meta.pop(str(layer.id)) - else: - layers_meta[str(layer.id)] = data + result_meta.append(item_meta) + + if is_new: + result_meta.append(data) # Ensure only valid ids are stored. 
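# A dependency-free sketch (illustrative only) of the merge step above; the
# cleanup against live layer ids continues right below. Existing items are
# matched by their first member layer id or by "instance_id"; a match is
# updated in place, or dropped when ``data`` is empty; no match appends a
# brand new item.
def merge_metadata(items_meta, item_id, data):
    item_id = str(item_id)
    result_meta = []
    is_new = True
    for item_meta in items_meta:
        members = item_meta.get("members") or []
        if ((members and item_id == str(members[0])) or
                item_meta.get("instance_id") == item_id):
            is_new = False
            if data:
                item_meta.update(data)
                result_meta.append(item_meta)
        else:
            result_meta.append(item_meta)

    if is_new:
        result_meta.append(data)
    return result_meta

# merge_metadata([{"members": ["64"], "subset": "imageBG"}], 64,
#                {"subset": "imageFG"})
# --> [{"members": ["64"], "subset": "imageFG"}]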
if not all_layers: @@ -137,12 +165,14 @@ class PhotoshopServerStub: layer_ids = [layer.id for layer in all_layers] cleaned_data = [] - for layer_id in layers_meta: - if int(layer_id) in layer_ids: - cleaned_data.append(layers_meta[layer_id]) + for item in result_meta: + if item.get("members"): + if int(item["members"][0]) not in layer_ids: + continue + + cleaned_data.append(item) payload = json.dumps(cleaned_data, indent=4) - self.websocketserver.call( self.client.call('Photoshop.imprint', payload=payload) ) @@ -370,38 +400,27 @@ class PhotoshopServerStub: (Headline accessible by File > File Info) Returns: - (string): - json documents + (list) example: {"8":{"active":true,"subset":"imageBG", "family":"image","id":"pyblish.avalon.instance", "asset":"Town"}} 8 is layer(group) id - used for deletion, update etc. """ - layers_data = {} res = self.websocketserver.call(self.client.call('Photoshop.read')) + layers_data = [] try: - layers_data = json.loads(res) + if res: + layers_data = json.loads(res) except json.decoder.JSONDecodeError: - pass + raise ValueError("{} cannot be parsed, recreate meta".format(res)) # format of metadata changed from {} to [] because of standardization # keep current implementation logic as its working - if not isinstance(layers_data, dict): - temp_layers_meta = {} - for layer_meta in layers_data: - layer_id = layer_meta.get("uuid") - if not layer_id: - layer_id = layer_meta.get("members")[0] - - temp_layers_meta[layer_id] = layer_meta - layers_data = temp_layers_meta - else: - # legacy version of metadata + if isinstance(layers_data, dict): for layer_id, layer_meta in layers_data.items(): if layer_meta.get("schema") != "openpype:container-2.0": - layer_meta["uuid"] = str(layer_id) - else: layer_meta["members"] = [str(layer_id)] - + layers_data = list(layers_data.values()) return layers_data def import_smart_object(self, path, layer_name, as_reference=False): @@ -472,11 +491,12 @@ class PhotoshopServerStub: ) def remove_instance(self, instance_id): - cleaned_data = {} + cleaned_data = [] - for key, instance in self.get_layers_metadata().items(): - if key != instance_id: - cleaned_data[key] = instance + for item in self.get_layers_metadata(): + inst_id = item.get("instance_id") or item.get("uuid") + if inst_id != instance_id: + cleaned_data.append(item) payload = json.dumps(cleaned_data, indent=4) @@ -528,6 +548,7 @@ class PhotoshopServerStub: d.get('type'), d.get('members'), d.get('long_name'), - d.get("color_code") + d.get("color_code"), + d.get("instance_id") )) return ret diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index a001b5f171..f15068b031 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -1,99 +1,145 @@ -from Qt import QtWidgets -from openpype.pipeline import create -from openpype.hosts.photoshop import api as photoshop +from openpype.hosts.photoshop import api +from openpype.lib import BoolDef +from openpype.pipeline import ( + Creator, + CreatedInstance, + legacy_io +) -class CreateImage(create.LegacyCreator): - """Image folder for publish.""" - - name = "imageDefault" +class ImageCreator(Creator): + """Creates image instance for publishing.""" + identifier = "image" label = "Image" family = "image" - defaults = ["Main"] + description = "Image creator" - def process(self): - groups = [] - layers = [] - create_group = False + def collect_instances(self): + for instance_data in 
api.list_instances(): + # legacy instances have family=='image' + creator_id = (instance_data.get("creator_identifier") or + instance_data.get("family")) - stub = photoshop.stub() - if (self.options or {}).get("useSelection"): - multiple_instances = False - selection = stub.get_selected_layers() - self.log.info("selection {}".format(selection)) - if len(selection) > 1: - # Ask user whether to create one image or image per selected - # item. - msg_box = QtWidgets.QMessageBox() - msg_box.setIcon(QtWidgets.QMessageBox.Warning) - msg_box.setText( - "Multiple layers selected." - "\nDo you want to make one image per layer?" + if creator_id == self.identifier: + instance_data = self._handle_legacy(instance_data) + layer = api.stub().get_layer(instance_data["members"][0]) + instance_data["layer"] = layer + instance = CreatedInstance.from_existing( + instance_data, self ) - msg_box.setStandardButtons( - QtWidgets.QMessageBox.Yes | - QtWidgets.QMessageBox.No | - QtWidgets.QMessageBox.Cancel - ) - ret = msg_box.exec_() - if ret == QtWidgets.QMessageBox.Yes: - multiple_instances = True - elif ret == QtWidgets.QMessageBox.Cancel: - return + self._add_instance_to_context(instance) - if multiple_instances: - for item in selection: - if item.group: - groups.append(item) - else: - layers.append(item) + def create(self, subset_name_from_ui, data, pre_create_data): + groups_to_create = [] + top_layers_to_wrap = [] + create_empty_group = False + + stub = api.stub() # only after PS is up + top_level_selected_items = stub.get_selected_layers() + if pre_create_data.get("use_selection"): + only_single_item_selected = len(top_level_selected_items) == 1 + for selected_item in top_level_selected_items: + if ( + only_single_item_selected or + pre_create_data.get("create_multiple")): + if selected_item.group: + groups_to_create.append(selected_item) + else: + top_layers_to_wrap.append(selected_item) else: - group = stub.group_selected_layers(self.name) - groups.append(group) + group = stub.group_selected_layers(subset_name_from_ui) + groups_to_create.append(group) - elif len(selection) == 1: - # One selected item. Use group if its a LayerSet (group), else - # create a new group. - if selection[0].group: - groups.append(selection[0]) - else: - layers.append(selection[0]) - elif len(selection) == 0: - # No selection creates an empty group. - create_group = True - else: - group = stub.create_group(self.name) - groups.append(group) + if not groups_to_create and not top_layers_to_wrap: + group = stub.create_group(subset_name_from_ui) + groups_to_create.append(group) - if create_group: - group = stub.create_group(self.name) - groups.append(group) - - for layer in layers: + # wrap each top level layer into separate new group + for layer in top_layers_to_wrap: stub.select_layers([layer]) group = stub.group_selected_layers(layer.name) - groups.append(group) + groups_to_create.append(group) - creator_subset_name = self.data["subset"] - for group in groups: - long_names = [] - group.name = group.name.replace(stub.PUBLISH_ICON, ''). 
\ - replace(stub.LOADED_ICON, '') + creating_multiple_groups = len(groups_to_create) > 1 + for group in groups_to_create: + subset_name = subset_name_from_ui # reset to name from creator UI + layer_names_in_hierarchy = [] + created_group_name = self._clean_highlights(stub, group.name) - subset_name = creator_subset_name - if len(groups) > 1: + if creating_multiple_groups: + # concatenate with layer name to differentiate subsets subset_name += group.name.title().replace(" ", "") if group.long_name: for directory in group.long_name[::-1]: - name = directory.replace(stub.PUBLISH_ICON, '').\ - replace(stub.LOADED_ICON, '') - long_names.append(name) + name = self._clean_highlights(stub, directory) + layer_names_in_hierarchy.append(name) - self.data.update({"subset": subset_name}) - self.data.update({"uuid": str(group.id)}) - self.data.update({"long_name": "_".join(long_names)}) - stub.imprint(group, self.data) + data.update({"subset": subset_name}) + data.update({"members": [str(group.id)]}) + data.update({"long_name": "_".join(layer_names_in_hierarchy)}) + + new_instance = CreatedInstance(self.family, subset_name, data, + self) + + stub.imprint(new_instance.get("instance_id"), + new_instance.data_to_store()) + self._add_instance_to_context(new_instance) # reusing existing group, need to rename afterwards - if not create_group: - stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name) + if not create_empty_group: + stub.rename_layer(group.id, + stub.PUBLISH_ICON + created_group_name) + + def update_instances(self, update_list): + self.log.debug("update_list:: {}".format(update_list)) + for created_inst, _changes in update_list: + if created_inst.get("layer"): + # not storing PSItem layer to metadata + created_inst.pop("layer") + api.stub().imprint(created_inst.get("instance_id"), + created_inst.data_to_store()) + + def remove_instances(self, instances): + for instance in instances: + api.remove_instance(instance) + self._remove_instance_from_context(instance) + + def get_default_variants(self): + return [ + "Main" + ] + + def get_pre_create_attr_defs(self): + output = [ + BoolDef("use_selection", default=True, + label="Create only for selected"), + BoolDef("create_multiple", + default=True, + label="Create separate instance for each selected") + ] + return output + + def get_detail_description(self): + return """Creator for Image instances""" + + def _handle_legacy(self, instance_data): + """Converts old instances to new format.""" + if not instance_data.get("members"): + instance_data["members"] = [instance_data.get("uuid")] + + if instance_data.get("uuid"): + # uuid not needed, replaced with unique instance_id + api.stub().remove_instance(instance_data.get("uuid")) + instance_data.pop("uuid") + + if not instance_data.get("task"): + instance_data["task"] = legacy_io.Session.get("AVALON_TASK") + + if not instance_data.get("variant"): + instance_data["variant"] = '' + + return instance_data + + def _clean_highlights(self, stub, item): + return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON, + '') diff --git a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py new file mode 100644 index 0000000000..9736471a26 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py @@ -0,0 +1,100 @@ +from Qt import QtWidgets +from openpype.pipeline import create +from openpype.hosts.photoshop import api as photoshop + + +class CreateImage(create.LegacyCreator): + """Image folder for publish.""" 
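# An illustrative sketch of the `_handle_legacy` migration above as a pure
# data transform (stub side effects omitted; ``task_name`` stands in for the
# AVALON_TASK session value):
def convert_legacy(instance_data, task_name):
    # old records identified layers by "uuid"; new ones use "members"
    if not instance_data.get("members"):
        instance_data["members"] = [instance_data.get("uuid")]
    instance_data.pop("uuid", None)  # superseded by unique "instance_id"

    if not instance_data.get("task"):
        instance_data["task"] = task_name
    if not instance_data.get("variant"):
        instance_data["variant"] = ""
    return instance_data

# convert_legacy({"uuid": "64", "subset": "imageMain", "family": "image"},
#                "compositing")
# --> {"members": ["64"], "subset": "imageMain", "family": "image",
#      "task": "compositing", "variant": ""}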
+ + name = "imageDefault" + label = "Image" + family = "image" + defaults = ["Main"] + + def process(self): + groups = [] + layers = [] + create_group = False + + stub = photoshop.stub() + if (self.options or {}).get("useSelection"): + multiple_instances = False + selection = stub.get_selected_layers() + self.log.info("selection {}".format(selection)) + if len(selection) > 1: + # Ask user whether to create one image or image per selected + # item. + msg_box = QtWidgets.QMessageBox() + msg_box.setIcon(QtWidgets.QMessageBox.Warning) + msg_box.setText( + "Multiple layers selected." + "\nDo you want to make one image per layer?" + ) + msg_box.setStandardButtons( + QtWidgets.QMessageBox.Yes | + QtWidgets.QMessageBox.No | + QtWidgets.QMessageBox.Cancel + ) + ret = msg_box.exec_() + if ret == QtWidgets.QMessageBox.Yes: + multiple_instances = True + elif ret == QtWidgets.QMessageBox.Cancel: + return + + if multiple_instances: + for item in selection: + if item.group: + groups.append(item) + else: + layers.append(item) + else: + group = stub.group_selected_layers(self.name) + groups.append(group) + + elif len(selection) == 1: + # One selected item. Use group if its a LayerSet (group), else + # create a new group. + if selection[0].group: + groups.append(selection[0]) + else: + layers.append(selection[0]) + elif len(selection) == 0: + # No selection creates an empty group. + create_group = True + else: + group = stub.create_group(self.name) + groups.append(group) + + if create_group: + group = stub.create_group(self.name) + groups.append(group) + + for layer in layers: + stub.select_layers([layer]) + group = stub.group_selected_layers(layer.name) + groups.append(group) + + creator_subset_name = self.data["subset"] + for group in groups: + long_names = [] + group.name = group.name.replace(stub.PUBLISH_ICON, ''). 
\ + replace(stub.LOADED_ICON, '') + + subset_name = creator_subset_name + if len(groups) > 1: + subset_name += group.name.title().replace(" ", "") + + if group.long_name: + for directory in group.long_name[::-1]: + name = directory.replace(stub.PUBLISH_ICON, '').\ + replace(stub.LOADED_ICON, '') + long_names.append(name) + + self.data.update({"subset": subset_name}) + self.data.update({"uuid": str(group.id)}) + self.data.update({"members": [str(group.id)]}) + self.data.update({"long_name": "_".join(long_names)}) + stub.imprint(group, self.data) + # reusing existing group, need to rename afterwards + if not create_group: + stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name) diff --git a/openpype/hosts/photoshop/plugins/create/workfile_creator.py b/openpype/hosts/photoshop/plugins/create/workfile_creator.py new file mode 100644 index 0000000000..875a9b8a94 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/create/workfile_creator.py @@ -0,0 +1,78 @@ +import openpype.hosts.photoshop.api as api +from openpype.pipeline import ( + AutoCreator, + CreatedInstance, + legacy_io +) + + +class PSWorkfileCreator(AutoCreator): + identifier = "workfile" + family = "workfile" + + def get_instance_attr_defs(self): + return [] + + def collect_instances(self): + for instance_data in api.list_instances(): + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + subset_name = instance_data["subset"] + instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + # nothing to change on workfiles + pass + + def create(self, options=None): + existing_instance = None + for instance in self.create_context.instances: + if instance.family == self.family: + existing_instance = instance + break + + variant = '' + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] + if existing_instance is None: + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": variant + } + data.update(self.get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name + )) + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(new_instance) + api.stub().imprint(new_instance.get("instance_id"), + new_instance.data_to_store()) + + elif ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name diff --git a/openpype/hosts/photoshop/plugins/load/load_image.py b/openpype/hosts/photoshop/plugins/load/load_image.py index 0a9421b8f2..91a9787781 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image.py +++ b/openpype/hosts/photoshop/plugins/load/load_image.py @@ -61,7 +61,7 @@ class ImageLoader(photoshop.PhotoshopLoader): ) stub.imprint( - layer, {"representation": str(representation["_id"])} + layer.id, {"representation": str(representation["_id"])} ) def remove(self, container): @@ -73,7 +73,7 @@ class 
ImageLoader(photoshop.PhotoshopLoader):
         stub = self.get_stub()
         layer = container.pop("layer")
 
-        stub.imprint(layer, {})
+        stub.imprint(layer.id, {})
         stub.delete_layer(layer.id)
 
     def switch(self, container, representation):
diff --git a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py
index 5f39121ae1..c25c5a8f2c 100644
--- a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py
+++ b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py
@@ -2,7 +2,6 @@ import os
 
 import qargparse
 
-from openpype.pipeline import get_representation_path_from_context
 from openpype.hosts.photoshop import api as photoshop
 from openpype.hosts.photoshop.api import get_unique_layer_name
 
@@ -63,7 +62,7 @@ class ImageFromSequenceLoader(photoshop.PhotoshopLoader):
         """
         files = []
         for context in repre_contexts:
-            fname = get_representation_path_from_context(context)
+            fname = cls.filepath_from_context(context)
             _, file_extension = os.path.splitext(fname)
 
             for file_name in os.listdir(os.path.dirname(fname)):
diff --git a/openpype/hosts/photoshop/plugins/load/load_reference.py b/openpype/hosts/photoshop/plugins/load/load_reference.py
index f5f0545d39..1f32a5d23c 100644
--- a/openpype/hosts/photoshop/plugins/load/load_reference.py
+++ b/openpype/hosts/photoshop/plugins/load/load_reference.py
@@ -61,7 +61,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader):
         )
 
         stub.imprint(
-            layer, {"representation": str(representation["_id"])}
+            layer.id, {"representation": str(representation["_id"])}
         )
 
     def remove(self, container):
@@ -72,7 +72,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader):
         """
         stub = self.get_stub()
         layer = container.pop("layer")
-        stub.imprint(layer, {})
+        stub.imprint(layer.id, {})
         stub.delete_layer(layer.id)
 
     def switch(self, container, representation):
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py
new file mode 100644
index 0000000000..448493d370
--- /dev/null
+++ b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py
@@ -0,0 +1,74 @@
+"""Parses batch context from json and continues in publish process.
+
+Provides:
+    context -> Loaded batch file.
+    - asset
+    - task (task name)
+    - taskType
+    - project_name
+    - variant
+
+Code is practically a copy of `openpype/hosts/webpublish/collect_batch_data`,
+as webpublisher should eventually be ejected as an addon, i.e. that plugin
+shouldn't be pushed into the general publish plugins.
+"""
+
+import os
+
+import pyblish.api
+
+from openpype.lib.plugin_tools import (
+    parse_json,
+    get_batch_asset_task_info
+)
+from openpype.pipeline import legacy_io
+
+
+class CollectBatchData(pyblish.api.ContextPlugin):
+    """Collect batch data from json stored in 'OPENPYPE_PUBLISH_DATA' env dir.
+
+    The directory must contain 'manifest.json' file where batch data should be
+    stored.
+    """
+    # must be really early, context values are only in json file
+    order = pyblish.api.CollectorOrder - 0.495
+    label = "Collect batch data"
+    hosts = ["photoshop"]
+    targets = ["remotepublish"]
+
+    def process(self, context):
+        self.log.info("CollectBatchData")
+        batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
+
+        assert batch_dir, (
+            "Missing `OPENPYPE_PUBLISH_DATA`")
+
+        assert os.path.exists(batch_dir), \
+            "Folder {} doesn't exist".format(batch_dir)
+
+        project_name = os.environ.get("AVALON_PROJECT")
+        if project_name is None:
+            raise AssertionError(
+                "Environment `AVALON_PROJECT` was not found. "
+                "Could not set project `root` which may cause issues."
+            )
+
+        batch_data = parse_json(os.path.join(batch_dir, "manifest.json"))
+
+        context.data["batchDir"] = batch_dir
+        context.data["batchData"] = batch_data
+
+        asset_name, task_name, task_type = get_batch_asset_task_info(
+            batch_data["context"]
+        )
+
+        os.environ["AVALON_ASSET"] = asset_name
+        os.environ["AVALON_TASK"] = task_name
+        legacy_io.Session["AVALON_ASSET"] = asset_name
+        legacy_io.Session["AVALON_TASK"] = task_name
+
+        context.data["asset"] = asset_name
+        context.data["task"] = task_name
+        context.data["taskType"] = task_type
+        context.data["project_name"] = project_name
+        context.data["variant"] = batch_data["variant"]
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
index 7d44d55a80..122428eea0 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
@@ -4,7 +4,6 @@ import re
 import pyblish.api
 
 from openpype.lib import prepare_template_data
-from openpype.lib.plugin_tools import parse_json, get_batch_asset_task_info
 from openpype.hosts.photoshop import api as photoshop
 
 
@@ -46,7 +45,10 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin):
 
         existing_subset_names = self._get_existing_subset_names(context)
 
-        asset_name, task_name, variant = self._parse_batch(batch_dir)
+        # from CollectBatchData
+        asset_name = context.data["asset"]
+        task_name = context.data["task"]
+        variant = context.data["variant"]
 
         stub = photoshop.stub()
         layers = stub.get_layers()
@@ -130,25 +132,6 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin):
 
         return existing_subset_names
 
-    def _parse_batch(self, batch_dir):
-        """Parses asset_name, task_name, variant from batch manifest."""
-        task_data = None
-        if batch_dir and os.path.exists(batch_dir):
-            task_data = parse_json(os.path.join(batch_dir,
-                                                "manifest.json"))
-        if not task_data:
-            raise ValueError(
-                "Cannot parse batch meta in {} folder".format(batch_dir))
-        variant = task_data["variant"]
-
-        asset, task_name, task_type = get_batch_asset_task_info(
-            task_data["context"])
-
-        if not task_name:
-            task_name = task_type
-
-        return asset, task_name, variant
-
     def _create_instance(self, context, layer, family, asset, subset,
                          task_name):
         instance = context.create_instance(layer.name)
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_instances.py
index c3e27e9646..b466ec8687 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_instances.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_instances.py
@@ -1,61 +1,116 @@
+import pprint
+
 import pyblish.api
 
+from openpype.settings import get_project_settings
 from openpype.hosts.photoshop import api as photoshop
+from openpype.lib import prepare_template_data
+from openpype.pipeline import legacy_io
 
 
 class CollectInstances(pyblish.api.ContextPlugin):
     """Gather instances by LayerSet and file metadata
 
-    This collector takes into account assets that are associated with
-    an LayerSet and marked with a unique identifier;
+    Collects publishable instances from file metadata, or enhances instances
+    already collected by a creator (family == "image").
+
+    If no image instances are explicitly created, it checks whether a value
+    is set in `flatten_subset_template` (configurable in Settings); in that
+    case it produces a flattened image with all visible layers.
 
     Identifier:
         id (str): "pyblish.avalon.instance"
     """
 
-    label = "Instances"
+    label = "Collect Instances"
     order = pyblish.api.CollectorOrder
     hosts = ["photoshop"]
     families_mapping = {
         "image": []
     }
+    # configurable in Settings
+    flatten_subset_template = ""
 
     def process(self, context):
+        instance_by_layer_id = {}
+        for instance in context:
+            if (
+                    instance.data["family"] == "image" and
+                    instance.data.get("members")):
+                layer_id = str(instance.data["members"][0])
+                instance_by_layer_id[layer_id] = instance
+
         stub = photoshop.stub()
-        layers = stub.get_layers()
+        layer_items = stub.get_layers()
 
         layers_meta = stub.get_layers_metadata()
         instance_names = []
-        for layer in layers:
-            layer_data = stub.read(layer, layers_meta)
+
+        all_layer_ids = []
+        for layer_item in layer_items:
+            layer_meta_data = stub.read(layer_item, layers_meta)
+            all_layer_ids.append(layer_item.id)
 
             # Skip layers without metadata.
-            if layer_data is None:
+            if layer_meta_data is None:
                 continue
 
             # Skip containers.
-            if "container" in layer_data["id"]:
+            if "container" in layer_meta_data["id"]:
                 continue
 
-            # child_layers = [*layer.Layers]
-            # self.log.debug("child_layers {}".format(child_layers))
-            # if not child_layers:
-            #     self.log.info("%s skipped, it was empty." % layer.Name)
-            #     continue
+            # active might not be in legacy meta
+            if not layer_meta_data.get("active", True):
+                continue
 
-            instance = context.create_instance(layer_data["subset"])
-            instance.data["layer"] = layer
-            instance.data.update(layer_data)
+            instance = instance_by_layer_id.get(str(layer_item.id))
+            if instance is None:
+                instance = context.create_instance(layer_meta_data["subset"])
+
+            instance.data["layer"] = layer_item
+            instance.data.update(layer_meta_data)
             instance.data["families"] = self.families_mapping[
-                layer_data["family"]
+                layer_meta_data["family"]
             ]
-            instance.data["publish"] = layer.visible
-            instance_names.append(layer_data["subset"])
+            instance.data["publish"] = layer_item.visible
+            instance_names.append(layer_meta_data["subset"])
 
             # Produce diagnostic message for any graphical
             # user interface interested in visualising it.
             self.log.info("Found: \"%s\" " % instance.data["name"])
-            self.log.info("instance: {} ".format(instance.data))
+            self.log.info("instance: {} ".format(
+                pprint.pformat(instance.data, indent=4)))
 
         if len(instance_names) != len(set(instance_names)):
            self.log.warning("Duplicate instances found. " +
                             "Remove unwanted via SubsetManager")
+
+        if len(instance_names) == 0 and self.flatten_subset_template:
+            project_name = context.data["projectEntity"]["name"]
+            variants = get_project_settings(project_name).get(
+                "photoshop", {}).get(
+                "create", {}).get(
+                "CreateImage", {}).get(
+                "defaults", [''])
+            family = "image"
+            task_name = legacy_io.Session["AVALON_TASK"]
+            asset_name = context.data["assetEntity"]["name"]
+
+            variant = context.data.get("variant") or variants[0]
+            fill_pairs = {
+                "variant": variant,
+                "family": family,
+                "task": task_name
+            }
+
+            subset = self.flatten_subset_template.format(
+                **prepare_template_data(fill_pairs))
+
+            instance = context.create_instance(subset)
+            instance.data["family"] = family
+            instance.data["asset"] = asset_name
+            instance.data["subset"] = subset
+            instance.data["ids"] = all_layer_ids
+            instance.data["families"] = self.families_mapping[family]
+            instance.data["publish"] = True
+
+            self.log.info("flatten instance: {} ".format(instance.data))
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_review.py b/openpype/hosts/photoshop/plugins/publish/collect_review.py
index 5ab48b76da..2ea5503f3f 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_review.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_review.py
@@ -1,30 +1,48 @@
+"""
+Requires:
+    None
+
+Provides:
+    instance -> family ("review")
+"""
+
 import os
 
 import pyblish.api
 
+from openpype.lib import get_subset_name_with_asset_doc
+
 
 class CollectReview(pyblish.api.ContextPlugin):
-    """Gather the active document as review instance."""
+    """Gather the active document as review instance.
+
+    Triggers once even if no 'image' instance is published, since by default
+    it creates a flattened image from the workfile.
+    """
+
+    label = "Collect Review"
-    label = "Review"
-    order = pyblish.api.CollectorOrder
     hosts = ["photoshop"]
+    order = pyblish.api.CollectorOrder + 0.1
 
     def process(self, context):
         family = "review"
-        task = os.getenv("AVALON_TASK", None)
-        subset = family + task.capitalize()
-
-        file_path = context.data["currentFile"]
-        base_name = os.path.basename(file_path)
+        subset = get_subset_name_with_asset_doc(
+            family,
+            context.data.get("variant", ''),
+            context.data["anatomyData"]["task"]["name"],
+            context.data["assetEntity"],
+            context.data["anatomyData"]["project"]["name"],
+            host_name=context.data["hostName"]
+        )
 
         instance = context.create_instance(subset)
         instance.data.update({
             "subset": subset,
-            "label": base_name,
-            "name": base_name,
+            "label": subset,
+            "name": subset,
             "family": family,
-            "families": ["ftrack"],
+            "families": [],
             "representations": [],
             "asset": os.environ["AVALON_ASSET"]
         })
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
index db1ede14d5..e4f0a07b34 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
@@ -1,6 +1,8 @@
 import os
 import pyblish.api
 
+from openpype.lib import get_subset_name_with_asset_doc
+
 
 class CollectWorkfile(pyblish.api.ContextPlugin):
     """Collect current script for publish."""
@@ -10,25 +12,41 @@
     hosts = ["photoshop"]
 
     def process(self, context):
+        existing_instance = None
+        for instance in context:
+            if instance.data["family"] == "workfile":
+                self.log.debug("Workfile instance found, won't create new")
+                existing_instance = instance
+                break
+
         family = "workfile"
-        task = os.getenv("AVALON_TASK", None)
-        subset = family + task.capitalize()
+        subset = get_subset_name_with_asset_doc(
+            family,
+            "",
+            context.data["anatomyData"]["task"]["name"],
+            context.data["assetEntity"],
+            context.data["anatomyData"]["project"]["name"],
+            host_name=context.data["hostName"]
+        )
 
         file_path = context.data["currentFile"]
         staging_dir = os.path.dirname(file_path)
         base_name = os.path.basename(file_path)
 
         # Create instance
-        instance = context.create_instance(subset)
-        instance.data.update({
-            "subset": subset,
-            "label": base_name,
-            "name": base_name,
-            "family": family,
-            "families": [],
-            "representations": [],
-            "asset": os.environ["AVALON_ASSET"]
-        })
+        if existing_instance is None:
+            instance = context.create_instance(subset)
+            instance.data.update({
+                "subset": subset,
+                "label": base_name,
+                "name": base_name,
+                "family": family,
+                "families": [],
+                "representations": [],
+                "asset": os.environ["AVALON_ASSET"]
+            })
+        else:
+            instance = existing_instance
 
         # creating representation
         _, ext = os.path.splitext(file_path)
diff --git a/openpype/hosts/photoshop/plugins/publish/extract_image.py b/openpype/hosts/photoshop/plugins/publish/extract_image.py
index 04ce77ee34..a133e33409 100644
--- a/openpype/hosts/photoshop/plugins/publish/extract_image.py
+++ b/openpype/hosts/photoshop/plugins/publish/extract_image.py
@@ -16,7 +16,6 @@ class ExtractImage(openpype.api.Extractor):
     formats = ["png", "jpg"]
 
     def process(self, instance):
-
         staging_dir = self.staging_dir(instance)
         self.log.info("Outputting image to {}".format(staging_dir))
 
@@ -26,8 +25,10 @@ class ExtractImage(openpype.api.Extractor):
         with photoshop.maintained_selection():
             self.log.info("Extracting %s" % str(list(instance)))
             with photoshop.maintained_visibility():
+                ids = set()
                 layer = instance.data.get("layer")
-                ids = set([layer.id])
+                if layer:
+                    ids.add(layer.id)
                 add_ids = instance.data.pop("ids", None)
                 if add_ids:
                     ids.update(set(add_ids))
diff --git a/openpype/hosts/photoshop/plugins/publish/extract_review.py b/openpype/hosts/photoshop/plugins/publish/extract_review.py
index b8f4470c7b..d076610ead 100644
--- a/openpype/hosts/photoshop/plugins/publish/extract_review.py
+++ b/openpype/hosts/photoshop/plugins/publish/extract_review.py
@@ -155,6 +155,9 @@ class ExtractReview(openpype.api.Extractor):
         for image_instance in instance.context:
             if image_instance.data["family"] != "image":
                 continue
+            if not image_instance.data.get("layer"):
+                # dummy instance for flatten image
+                continue
             layers.append(image_instance.data.get("layer"))
 
         return sorted(layers)
diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml b/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml
new file mode 100644
index 0000000000..5a1e266748
--- /dev/null
+++ b/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml
@@ -0,0 +1,21 @@
+
+
+
+Subset name
+
+## Invalid subset or layer name
+
+A subset or layer name cannot contain certain characters (spaces etc.) that
+could cause issues when the subset name is used in a published file name.
+{msg}
+
+### How to repair?
+
+You can fix this with the "Repair" button on the right.
+
+
+### __Detailed Info__ (optional)
+
+Not all characters are allowed in file names on every OS. The set of invalid
+characters can be configured in Settings.
+
+
+
\ No newline at end of file
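# The {msg} placeholder in the help file above is filled via formatting_data;
# a minimal sketch of the pairing, using a hypothetical plugin (the real
# wiring is in validate_naming.py below):
import re

import pyblish.api
from openpype.pipeline import PublishXmlValidationError


class ValidateNoInvalidChars(pyblish.api.InstancePlugin):
    """Hypothetical example validator, not part of the patch."""

    label = "Validate No Invalid Chars"
    hosts = ["photoshop"]
    invalid_chars = r"[ ]"  # assumption: only spaces, for the example

    def process(self, instance):
        name = instance.data["name"]
        if re.search(self.invalid_chars, name):
            msg = "Name \"{}\" is not allowed.".format(name)
            # keys must match the {placeholders} used in the XML help file
            raise PublishXmlValidationError(
                self, msg, formatting_data={"msg": msg})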
diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_unique_subsets.xml b/openpype/hosts/photoshop/plugins/publish/help/validate_unique_subsets.xml
new file mode 100644
index 0000000000..4b47973193
--- /dev/null
+++ b/openpype/hosts/photoshop/plugins/publish/help/validate_unique_subsets.xml
@@ -0,0 +1,14 @@
+
+
+
+Subset not unique
+
+## Non-unique subset name found
+
+Non-unique subset names: '{non_unique}'
+
+### How to repair?
+
+Remove the offending instance or rename it to a unique name. Perhaps the same
+layer name was used for multiple instances?
+
+
\ No newline at end of file
diff --git a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
index ebe9cc21ea..b65f9d259f 100644
--- a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
+++ b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
@@ -1,6 +1,7 @@
-from avalon import api
 import pyblish.api
+
 import openpype.api
+from openpype.pipeline import legacy_io
 from openpype.hosts.photoshop import api as photoshop
 
 
@@ -26,7 +27,7 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):
 
         for instance in instances:
             data = stub.read(instance[0])
-            data["asset"] = api.Session["AVALON_ASSET"]
+            data["asset"] = legacy_io.Session["AVALON_ASSET"]
             stub.imprint(instance[0], data)
 
 
@@ -48,7 +49,7 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin):
 
     def process(self, instance):
         instance_asset = instance.data["asset"]
-        current_asset = api.Session["AVALON_ASSET"]
+        current_asset = legacy_io.Session["AVALON_ASSET"]
         msg = (
             f"Instance asset {instance_asset} is not the same "
             f"as current context {current_asset}. PLEASE DO:\n"
diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/openpype/hosts/photoshop/plugins/publish/validate_naming.py
index b40e44d016..bcae24108c 100644
--- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py
+++ b/openpype/hosts/photoshop/plugins/publish/validate_naming.py
@@ -2,6 +2,7 @@ import re
 
 import pyblish.api
 import openpype.api
+from openpype.pipeline import PublishXmlValidationError
 from openpype.hosts.photoshop import api as photoshop
 
 
@@ -22,32 +23,34 @@ class ValidateNamingRepair(pyblish.api.Action):
                 failed.append(result["instance"])
 
         invalid_chars, replace_char = plugin.get_replace_chars()
-        self.log.info("{} --- {}".format(invalid_chars, replace_char))
+        self.log.debug("{} --- {}".format(invalid_chars, replace_char))
 
         # Apply pyblish.logic to get the instances for the plug-in
         instances = pyblish.api.instances_by_plugin(failed, plugin)
         stub = photoshop.stub()
         for instance in instances:
-            self.log.info("validate_naming instance {}".format(instance))
-            metadata = stub.read(instance[0])
-            self.log.info("metadata instance {}".format(metadata))
-            layer_name = None
-            if metadata.get("uuid"):
-                layer_data = stub.get_layer(metadata["uuid"])
-                self.log.info("layer_data {}".format(layer_data))
-                if layer_data:
-                    layer_name = re.sub(invalid_chars,
-                                        replace_char,
-                                        layer_data.name)
+            self.log.debug("validate_naming instance {}".format(instance))
+            current_layer_state = stub.get_layer(instance.data["layer"].id)
+            self.log.debug("current_layer{}".format(current_layer_state))
 
-                    stub.rename_layer(instance.data["uuid"], layer_name)
+            layer_meta = stub.read(current_layer_state)
+            instance_id = (layer_meta.get("instance_id") or
+                           layer_meta.get("uuid"))
+            if not instance_id:
+                self.log.warning("Unable to repair, cannot find layer")
+                continue
+
+
layer_name = re.sub(invalid_chars, + replace_char, + current_layer_state.name) + + stub.rename_layer(current_layer_state.id, layer_name) subset_name = re.sub(invalid_chars, replace_char, - instance.data["name"]) + instance.data["subset"]) - instance[0].Name = layer_name or subset_name - metadata["subset"] = subset_name - stub.imprint(instance[0], metadata) + layer_meta["subset"] = subset_name + stub.imprint(instance_id, layer_meta) return True @@ -72,11 +75,18 @@ class ValidateNaming(pyblish.api.InstancePlugin): help_msg = ' Use Repair action (A) in Pyblish to fix it.' msg = "Name \"{}\" is not allowed.{}".format(instance.data["name"], help_msg) - assert not re.search(self.invalid_chars, instance.data["name"]), msg + + formatting_data = {"msg": msg} + if re.search(self.invalid_chars, instance.data["name"]): + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) msg = "Subset \"{}\" is not allowed.{}".format(instance.data["subset"], help_msg) - assert not re.search(self.invalid_chars, instance.data["subset"]), msg + formatting_data = {"msg": msg} + if re.search(self.invalid_chars, instance.data["subset"]): + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) @classmethod def get_replace_chars(cls): diff --git a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py b/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py index 40abfb1bbd..01f2323157 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py @@ -1,6 +1,7 @@ import collections import pyblish.api import openpype.api +from openpype.pipeline import PublishXmlValidationError class ValidateSubsetUniqueness(pyblish.api.ContextPlugin): @@ -27,4 +28,10 @@ class ValidateSubsetUniqueness(pyblish.api.ContextPlugin): if count > 1] msg = ("Instance subset names {} are not unique. ".format(non_unique) + "Remove duplicates via SubsetManager.") - assert not non_unique, msg + formatting_data = { + "non_unique": ",".join(non_unique) + } + + if non_unique: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py index fa309e3503..4a7d1c5bea 100644 --- a/openpype/hosts/resolve/api/pipeline.py +++ b/openpype/hosts/resolve/api/pipeline.py @@ -4,15 +4,17 @@ Basic avalon integration import os import contextlib from collections import OrderedDict -from avalon import api as avalon -from avalon import schema -from avalon.pipeline import AVALON_CONTAINER_ID + from pyblish import api as pyblish + from openpype.api import Logger from openpype.pipeline import ( - LegacyCreator, + schema, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, ) from . import lib from . 
import PLUGINS_DIR
@@ -22,7 +24,6 @@ log = Logger().get_logger(__name__)
 PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
 LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
 CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
-INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
 
 AVALON_CONTAINERS = ":AVALON_CONTAINERS"
 
@@ -47,8 +48,7 @@ def install():
     log.info("Registering DaVinci Resolve plug-ins..")
 
     register_loader_plugin_path(LOAD_PATH)
-    avalon.register_plugin_path(LegacyCreator, CREATE_PATH)
-    avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
+    register_creator_plugin_path(CREATE_PATH)
 
     # register callback for switching publishable
     pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
@@ -72,8 +72,7 @@ def uninstall():
     log.info("Deregistering DaVinci Resolve plug-ins..")
 
     deregister_loader_plugin_path(LOAD_PATH)
-    avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH)
-    avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
+    deregister_creator_plugin_path(CREATE_PATH)
 
     # register callback for switching publishable
     pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py
index 71850d95f6..cf88b14e81 100644
--- a/openpype/hosts/resolve/plugins/load/load_clip.py
+++ b/openpype/hosts/resolve/plugins/load/load_clip.py
@@ -1,9 +1,11 @@
 from copy import deepcopy
 from importlib import reload
 
-from avalon import io
 from openpype.hosts import resolve
-from openpype.pipeline import get_representation_path
+from openpype.pipeline import (
+    get_representation_path,
+    legacy_io,
+)
 from openpype.hosts.resolve.api import lib, plugin
 reload(plugin)
 reload(lib)
@@ -94,7 +96,7 @@ class LoadClip(resolve.TimelineItemLoader):
         namespace = container['namespace']
         timeline_item_data = resolve.get_pype_timeline_item_by_name(namespace)
         timeline_item = timeline_item_data["clip"]["item"]
-        version = io.find_one({
+        version = legacy_io.find_one({
             "type": "version",
             "_id": representation["parent"]
         })
@@ -140,7 +142,7 @@ class LoadClip(resolve.TimelineItemLoader):
         # define version name
         version_name = version.get("name", None)
         # get all versions in list
-        versions = io.find({
+        versions = legacy_io.find({
             "type": "version",
             "parent": version["parent"]
         }).distinct('name')
diff --git a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py
index 1333516177..a58f288770 100644
--- a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py
+++ b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py
@@ -1,10 +1,9 @@
 import pyblish.api
-from openpype.hosts import resolve
-from avalon import api as avalon
 from pprint import pformat
-
-# dev
 from importlib import reload
+
+from openpype.hosts import resolve
+from openpype.pipeline import legacy_io
 from openpype.hosts.resolve.otio import davinci_export
 reload(davinci_export)
 
@@ -17,7 +16,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
 
     def process(self, context):
 
-        asset = avalon.Session["AVALON_ASSET"]
+        asset = legacy_io.Session["AVALON_ASSET"]
         subset = "workfile"
         project = resolve.get_current_project()
         fps = project.GetSetting("timelineFrameRate")
diff --git a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py b/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py
index ac66916b91..3a16b9c966 100644
--- 
a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py +++ b/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py @@ -1,13 +1,14 @@ #!/usr/bin/env python import os import sys -import openpype + +from openpype.pipeline import install_host def main(env): import openpype.hosts.resolve as bmdvr # Registers openpype's Global pyblish plugins - openpype.install() + install_host(bmdvr) bmdvr.setup(env) diff --git a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py index b0cef1838a..89ade9238b 100644 --- a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py +++ b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py @@ -1,8 +1,7 @@ import os import sys -import avalon.api as avalon -import openpype +from openpype.pipeline import install_host from openpype.api import Logger log = Logger().get_logger(__name__) @@ -10,13 +9,9 @@ log = Logger().get_logger(__name__) def main(env): import openpype.hosts.resolve as bmdvr - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) - - log.info(f"Avalon registered hosts: {avalon.registered_host()}") + install_host(bmdvr) bmdvr.launch_pype_menu() diff --git a/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py b/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py index 5430ad32df..8433bd9172 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py +++ b/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py @@ -1,9 +1,11 @@ #! python3 import os import sys -import avalon.api as avalon -import openpype + import opentimelineio as otio + +from openpype.pipeline import install_host + from openpype.hosts.resolve import TestGUI import openpype.hosts.resolve as bmdvr from openpype.hosts.resolve.otio import davinci_export as otio_export @@ -14,10 +16,8 @@ class ThisTestGUI(TestGUI): def __init__(self): super(ThisTestGUI, self).__init__() - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) def _open_dir_button_pressed(self, event): # selected_path = self.fu.RequestFile(os.path.expanduser("~")) diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py b/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py index afa311e0b8..477955d527 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py @@ -1,8 +1,8 @@ #! 
python3 import os import sys -import avalon.api as avalon -import openpype + +from openpype.pipeline import install_host from openpype.hosts.resolve import TestGUI import openpype.hosts.resolve as bmdvr import clique @@ -13,10 +13,8 @@ class ThisTestGUI(TestGUI): def __init__(self): super(ThisTestGUI, self).__init__() - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) def _open_dir_button_pressed(self, event): # selected_path = self.fu.RequestFile(os.path.expanduser("~")) diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py index cfdbe890e5..872d620162 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py @@ -1,6 +1,5 @@ #! python3 -import avalon.api as avalon -import openpype +from openpype.pipeline import install_host import openpype.hosts.resolve as bmdvr @@ -15,8 +14,7 @@ def file_processing(fpath): if __name__ == "__main__": path = "C:/CODE/__openpype_projects/jtest03dev/shots/sq01/mainsq01sh030/publish/plate/plateMain/v006/jt3d_mainsq01sh030_plateMain_v006.0996.exr" - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) - file_processing(path) \ No newline at end of file + file_processing(path) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py new file mode 100644 index 0000000000..857f3dca20 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py @@ -0,0 +1,13 @@ +import pyblish.api + + +class CollectSAAppName(pyblish.api.ContextPlugin): + """Collect app name and label.""" + + label = "Collect App Name/Label" + order = pyblish.api.CollectorOrder - 0.5 + hosts = ["standalonepublisher"] + + def process(self, context): + context.data["appName"] = "standalone publisher" + context.data["appLabel"] = "Standalone publisher" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py index 9f075d66cf..3e7fb19c00 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py @@ -2,8 +2,8 @@ import copy import json import pyblish.api -from avalon import io from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline import legacy_io class CollectBulkMovInstances(pyblish.api.InstancePlugin): @@ -26,7 +26,7 @@ class CollectBulkMovInstances(pyblish.api.InstancePlugin): context = instance.context asset_name = instance.data["asset"] - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -52,7 +52,7 @@ class CollectBulkMovInstances(pyblish.api.InstancePlugin): self.subset_name_variant, task_name, asset_doc, - io.Session["AVALON_PROJECT"] + legacy_io.Session["AVALON_PROJECT"] ) instance_name = f"{asset_name}_{subset_name}" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py index 6913e0836d..2bf3917e2f 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ 
b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -19,7 +19,8 @@ import copy from pprint import pformat import clique import pyblish.api -from avalon import io + +from openpype.pipeline import legacy_io class CollectContextDataSAPublish(pyblish.api.ContextPlugin): @@ -37,7 +38,7 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): def process(self, context): # get json paths from os and load them - io.install() + legacy_io.install() # get json file context input_json_path = os.environ.get("SAPUBLISH_INPATH") @@ -247,7 +248,8 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): self.log.debug("collecting sequence: {}".format(collections)) instance.data["frameStart"] = int(component["frameStart"]) instance.data["frameEnd"] = int(component["frameEnd"]) - instance.data["fps"] = int(component["fps"]) + if component.get("fps"): + instance.data["fps"] = int(component["fps"]) ext = component["ext"] if ext.startswith("."): diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py index b2735f3428..77163651c4 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py @@ -1,8 +1,10 @@ -import pyblish.api -import re import os -from avalon import io +import re from copy import deepcopy +import pyblish.api + +from openpype.pipeline import legacy_io + class CollectHierarchyInstance(pyblish.api.ContextPlugin): """Collecting hierarchy context from `parents` and `hierarchy` data @@ -63,7 +65,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): hierarchy = list() visual_hierarchy = [instance.context.data["assetEntity"]] while True: - visual_parent = io.find_one( + visual_parent = legacy_io.find_one( {"_id": visual_hierarchy[-1]["data"]["visualParent"]} ) if visual_parent: @@ -129,7 +131,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): if self.shot_add_tasks: tasks_to_add = dict() - project_tasks = io.find_one({"type": "project"})["config"]["tasks"] + project_doc = legacy_io.find_one({"type": "project"}) + project_tasks = project_doc["config"]["tasks"] for task_name, task_data in self.shot_add_tasks.items(): _task_data = deepcopy(task_data) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index 0d629b1b44..9d94bfdc91 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -2,9 +2,10 @@ import os import re import collections import pyblish.api -from avalon import io from pprint import pformat +from openpype.pipeline import legacy_io + class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): """ @@ -119,7 +120,7 @@ class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): def _asset_docs_by_parent_id(self, instance): # Query all assets for project and store them by parent's id to list asset_docs_by_parent_id = collections.defaultdict(list) - for asset_doc in io.find({"type": "asset"}): + for asset_doc in legacy_io.find({"type": "asset"}): parent_id = asset_doc["data"]["visualParent"] asset_docs_by_parent_id[parent_id].append(asset_doc) return asset_docs_by_parent_id diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_original_basename.py 
b/openpype/hosts/standalonepublisher/plugins/publish/collect_original_basename.py new file mode 100644 index 0000000000..b83a924d33 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_original_basename.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +"""Collect original base name for use in templates.""" +from pathlib import Path + +import pyblish.api + + +class CollectOriginalBasename(pyblish.api.InstancePlugin): + """Collect original file base name.""" + + order = pyblish.api.CollectorOrder + 0.498 + label = "Collect Base Name" + hosts = ["standalonepublisher"] + families = ["simpleUnrealTexture"] + + def process(self, instance): + file_name = Path(instance.data["representations"][0]["files"]) + instance.data["originalBasename"] = file_name.stem diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_for_compositing.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_for_compositing.py index f07499c15d..9621d70739 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_for_compositing.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_for_compositing.py @@ -1,8 +1,9 @@ import os import json import copy + import openpype.api -from avalon import io +from openpype.pipeline import legacy_io PSDImage = None @@ -221,7 +222,7 @@ class ExtractBGForComp(openpype.api.Extractor): self.log.debug("resourcesDir: \"{}\"".format(resources_folder)) def find_last_version(self, subset_name, asset_doc): - subset_doc = io.find_one({ + subset_doc = legacy_io.find_one({ "type": "subset", "name": subset_name, "parent": asset_doc["_id"] @@ -230,7 +231,7 @@ class ExtractBGForComp(openpype.api.Extractor): if subset_doc is None: self.log.debug("Subset entity does not exist yet.") else: - version_doc = io.find_one( + version_doc = legacy_io.find_one( { "type": "version", "parent": subset_doc["_id"] diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_main_groups.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_main_groups.py index 2c92366ae9..b45f04e574 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_main_groups.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_main_groups.py @@ -1,9 +1,11 @@ import os import copy import json -import openpype.api + import pyblish.api -from avalon import io + +import openpype.api +from openpype.pipeline import legacy_io PSDImage = None @@ -225,7 +227,7 @@ class ExtractBGMainGroups(openpype.api.Extractor): self.log.debug("resourcesDir: \"{}\"".format(resources_folder)) def find_last_version(self, subset_name, asset_doc): - subset_doc = io.find_one({ + subset_doc = legacy_io.find_one({ "type": "subset", "name": subset_name, "parent": asset_doc["_id"] @@ -234,7 +236,7 @@ class ExtractBGMainGroups(openpype.api.Extractor): if subset_doc is None: self.log.debug("Subset entity does not exist yet.") else: - version_doc = io.find_one( + version_doc = legacy_io.find_one( { "type": "version", "parent": subset_doc["_id"] diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_images_from_psd.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_images_from_psd.py index e3094b2e3f..8485fa0915 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_images_from_psd.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_images_from_psd.py @@ -1,8 +1,9 @@ import os import copy -import openpype.api import pyblish.api -from avalon import io + +import openpype.api 
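The `find_last_version` helper that reappears in these extractors resolves the subset document first and only then looks up its newest version. A condensed sketch of that two-step query; the descending sort on version name is an assumption, since the hunks above are truncated before any sort is visible:

from openpype.pipeline import legacy_io

def find_last_version(subset_name, asset_doc):
    subset_doc = legacy_io.find_one({
        "type": "subset",
        "name": subset_name,
        "parent": asset_doc["_id"],
    })
    if subset_doc is None:
        return None  # subset entity does not exist yet
    return legacy_io.find_one(
        {"type": "version", "parent": subset_doc["_id"]},
        sort=[("name", -1)],  # assumption: highest version number first
    )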
+from openpype.pipeline import legacy_io PSDImage = None @@ -149,7 +150,7 @@ class ExtractImagesFromPSD(openpype.api.Extractor): new_instance.data["representations"] = [new_repre] def find_last_version(self, subset_name, asset_doc): - subset_doc = io.find_one({ + subset_doc = legacy_io.find_one({ "type": "subset", "name": subset_name, "parent": asset_doc["_id"] @@ -158,7 +159,7 @@ if subset_doc is None: self.log.debug("Subset entity does not exist yet.") else: - version_doc = io.find_one( + version_doc = legacy_io.find_one( { "type": "version", "parent": subset_doc["_id"] diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml new file mode 100644 index 0000000000..b65d274fe5 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml @@ -0,0 +1,17 @@ + + + +Invalid texture name + +## Invalid file name + +Submitted file has an invalid name: +'{invalid_file}' + +### How to repair? + + Texture files must adhere to the Unreal naming convention: + T_{asset}_*.ext + + + \ No newline at end of file diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py new file mode 100644 index 0000000000..ef8da9f280 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +"""Validator for correct file naming.""" +import pyblish.api +import openpype.api +import re +from openpype.pipeline import PublishXmlValidationError + + +class ValidateSimpleUnrealTextureNaming(pyblish.api.InstancePlugin): + label = "Validate Unreal Texture Names" + hosts = ["standalonepublisher"] + families = ["simpleUnrealTexture"] + order = openpype.api.ValidateContentsOrder + regex = "^T_{asset}.*" + + def process(self, instance): + file_name = instance.data.get("originalBasename") + self.log.info(file_name) + pattern = self.regex.format(asset=instance.data.get("asset")) + if not re.match(pattern, file_name): + msg = f"Invalid file name {file_name}" + raise PublishXmlValidationError( + self, msg, formatting_data={ + "invalid_file": file_name, + "asset": instance.data.get("asset") + }) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py index 825092c81b..4c761c7a4c 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py @@ -1,7 +1,9 @@ import pyblish.api -from avalon import io -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + legacy_io, +) class ValidateTaskExistence(pyblish.api.ContextPlugin): @@ -18,7 +20,7 @@ def process(self, context): for instance in context: asset_names.add(instance.data["asset"]) - asset_docs = io.find( + asset_docs = legacy_io.find( { "type": "asset", "name": {"$in": list(asset_names)} diff --git a/openpype/hosts/testhost/api/__init__.py b/openpype/hosts/testhost/api/__init__.py index 7840b25892..a929a891aa 100644 --- a/openpype/hosts/testhost/api/__init__.py +++ b/openpype/hosts/testhost/api/__init__.py @@
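The texture-naming validator above and its XML help file are tied together by `PublishXmlValidationError`: keys passed through `formatting_data` fill the `{invalid_file}` and `{asset}` placeholders before the help text is shown in the publisher UI. A hedged sketch of the raise site with made-up values:

import pyblish.api
from openpype.pipeline import PublishXmlValidationError

class ValidateDemoTextureNaming(pyblish.api.InstancePlugin):
    """Hypothetical validator mirroring the one above."""
    label = "Validate Demo Texture Naming"
    order = pyblish.api.ValidatorOrder

    def process(self, instance):
        file_name = "Rock_diffuse.png"  # illustrative name failing T_{asset}_*
        raise PublishXmlValidationError(
            self,
            "Invalid file name {}".format(file_name),
            formatting_data={
                "invalid_file": file_name,  # fills {invalid_file} in the XML
                "asset": "Rock",            # fills {asset} in the XML
            },
        )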
-1,8 +1,8 @@ import os import logging import pyblish.api -import avalon.api -from openpype.pipeline import BaseCreator + +from openpype.pipeline import register_creator_plugin_path from .pipeline import ( ls, @@ -27,7 +27,7 @@ def install(): log.info("OpenPype - Installing TestHost integration") pyblish.api.register_host("testhost") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(BaseCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) __all__ = ( diff --git a/openpype/hosts/testhost/api/pipeline.py b/openpype/hosts/testhost/api/pipeline.py index 1f5d680705..285fe8f8d6 100644 --- a/openpype/hosts/testhost/api/pipeline.py +++ b/openpype/hosts/testhost/api/pipeline.py @@ -1,5 +1,6 @@ import os import json +from openpype.pipeline import legacy_io class HostContext: @@ -16,9 +17,7 @@ class HostContext: if not asset_name: return project_name - from avalon import io - - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": asset_name}, {"data.parents": 1} ) diff --git a/openpype/hosts/testhost/plugins/create/auto_creator.py b/openpype/hosts/testhost/plugins/create/auto_creator.py index 45c573e487..06b95375b1 100644 --- a/openpype/hosts/testhost/plugins/create/auto_creator.py +++ b/openpype/hosts/testhost/plugins/create/auto_creator.py @@ -1,10 +1,10 @@ +from openpype.lib import NumberDef from openpype.hosts.testhost.api import pipeline from openpype.pipeline import ( + legacy_io, AutoCreator, CreatedInstance, - lib ) -from avalon import io class MyAutoCreator(AutoCreator): @@ -13,7 +13,7 @@ class MyAutoCreator(AutoCreator): def get_instance_attr_defs(self): output = [ - lib.NumberDef("number_key", label="Number") + NumberDef("number_key", label="Number") ] return output @@ -30,7 +30,7 @@ class MyAutoCreator(AutoCreator): def update_instances(self, update_list): pipeline.update_instances(update_list) - def create(self, options=None): + def create(self): existing_instance = None for instance in self.create_context.instances: if instance.family == self.family: @@ -38,13 +38,16 @@ class MyAutoCreator(AutoCreator): break variant = "Main" - project_name = io.Session["AVALON_PROJECT"] - asset_name = io.Session["AVALON_ASSET"] - task_name = io.Session["AVALON_TASK"] - host_name = io.Session["AVALON_APP"] + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] if existing_instance is None: - asset_doc = io.find_one({"type": "asset", "name": asset_name}) + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) subset_name = self.get_subset_name( variant, task_name, asset_doc, project_name, host_name ) @@ -66,7 +69,10 @@ class MyAutoCreator(AutoCreator): existing_instance["asset"] != asset_name or existing_instance["task"] != task_name ): - asset_doc = io.find_one({"type": "asset", "name": asset_name}) + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) subset_name = self.get_subset_name( variant, task_name, asset_doc, project_name, host_name ) diff --git a/openpype/hosts/testhost/plugins/create/test_creator_1.py b/openpype/hosts/testhost/plugins/create/test_creator_1.py index 45c30e8a27..7664276fa2 100644 --- a/openpype/hosts/testhost/plugins/create/test_creator_1.py +++ b/openpype/hosts/testhost/plugins/create/test_creator_1.py @@ -1,10 +1,16 @@ import json from openpype import resources from openpype.hosts.testhost.api import pipeline +from 
openpype.lib import ( + UISeparatorDef, + UILabelDef, + BoolDef, + NumberDef, + FileDef, +) from openpype.pipeline import ( Creator, CreatedInstance, - lib ) @@ -54,17 +60,17 @@ class TestCreatorOne(Creator): def get_instance_attr_defs(self): output = [ - lib.NumberDef("number_key", label="Number"), + NumberDef("number_key", label="Number"), ] return output def get_pre_create_attr_defs(self): output = [ - lib.BoolDef("use_selection", label="Use selection"), - lib.UISeparatorDef(), - lib.UILabelDef("Testing label"), - lib.FileDef("filepath", folders=True, label="Filepath"), - lib.FileDef( + BoolDef("use_selection", label="Use selection"), + UISeparatorDef(), + UILabelDef("Testing label"), + FileDef("filepath", folders=True, label="Filepath"), + FileDef( "filepath_2", multipath=True, folders=True, label="Filepath 2" ) ] diff --git a/openpype/hosts/testhost/plugins/create/test_creator_2.py b/openpype/hosts/testhost/plugins/create/test_creator_2.py index e66304a038..f54adee8a2 100644 --- a/openpype/hosts/testhost/plugins/create/test_creator_2.py +++ b/openpype/hosts/testhost/plugins/create/test_creator_2.py @@ -1,8 +1,8 @@ +from openpype.lib import NumberDef, TextDef from openpype.hosts.testhost.api import pipeline from openpype.pipeline import ( Creator, CreatedInstance, - lib ) @@ -40,8 +40,8 @@ class TestCreatorTwo(Creator): def get_instance_attr_defs(self): output = [ - lib.NumberDef("number_key"), - lib.TextDef("text_key") + NumberDef("number_key"), + TextDef("text_key") ] return output diff --git a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py b/openpype/hosts/testhost/plugins/publish/collect_instance_1.py index 3c035eccb6..c7241a15a8 100644 --- a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py +++ b/openpype/hosts/testhost/plugins/publish/collect_instance_1.py @@ -1,10 +1,8 @@ import json import pyblish.api -from openpype.pipeline import ( - OpenPypePyblishPluginMixin, - attribute_definitions -) +from openpype.lib import attribute_definitions +from openpype.pipeline import OpenPypePyblishPluginMixin class CollectInstanceOneTestHost( diff --git a/openpype/hosts/testhost/run_publish.py b/openpype/hosts/testhost/run_publish.py index 44860a30e4..c7ad63aafd 100644 --- a/openpype/hosts/testhost/run_publish.py +++ b/openpype/hosts/testhost/run_publish.py @@ -22,13 +22,11 @@ openpype_dir = multi_dirname(current_file, 4) os.environ["OPENPYPE_MONGO"] = mongo_url os.environ["OPENPYPE_ROOT"] = openpype_dir -os.environ["AVALON_MONGO"] = mongo_url os.environ["AVALON_PROJECT"] = project_name os.environ["AVALON_ASSET"] = asset_name os.environ["AVALON_TASK"] = task_name os.environ["AVALON_APP"] = host_name os.environ["OPENPYPE_DATABASE_NAME"] = "openpype" -os.environ["AVALON_CONFIG"] = "openpype" os.environ["AVALON_TIMEOUT"] = "1000" os.environ["AVALON_DB"] = "avalon" os.environ["FTRACK_SERVER"] = ftrack_url @@ -48,8 +46,8 @@ from openpype.tools.publisher.window import PublisherWindow def main(): """Main function for testing purposes.""" - import avalon.api import pyblish.api + from openpype.pipeline import install_host from openpype.modules import ModulesManager from openpype.hosts.testhost import api as testhost @@ -57,7 +55,7 @@ def main(): for plugin_path in manager.collect_plugin_paths()["publish"]: pyblish.api.register_plugin_path(plugin_path) - avalon.api.install(testhost) + install_host(testhost) QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling) app = QtWidgets.QApplication([]) diff --git a/openpype/hosts/traypublisher/api/pipeline.py 
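The testhost and traypublisher changes show the install() shape this PR converges on for every host: pyblish paths are registered as before, while creator plugins go through the new `register_creator_plugin_path` instead of `avalon.api.register_plugin_path(BaseCreator, ...)`. A minimal sketch for a hypothetical host; the host name and directory layout are placeholders:

import os
import pyblish.api
from openpype.pipeline import (
    register_creator_plugin_path,
    register_loader_plugin_path,
)

PLUGINS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")

def install():
    # Register the pyblish host and publish plugins the classic way
    pyblish.api.register_host("myhost")
    pyblish.api.register_plugin_path(PUBLISH_PATH)
    # New-style registration, no plugin base class argument needed
    register_creator_plugin_path(CREATE_PATH)
    register_loader_plugin_path(LOAD_PATH)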
b/openpype/hosts/traypublisher/api/pipeline.py index a39e5641ae..954a0bae47 100644 --- a/openpype/hosts/traypublisher/api/pipeline.py +++ b/openpype/hosts/traypublisher/api/pipeline.py @@ -3,11 +3,12 @@ import json import tempfile import atexit -from avalon import io -import avalon.api import pyblish.api -from openpype.pipeline import BaseCreator +from openpype.pipeline import ( + register_creator_plugin_path, + legacy_io, +) ROOT_DIR = os.path.dirname(os.path.dirname( os.path.abspath(__file__) @@ -169,12 +170,12 @@ def install(): pyblish.api.register_host("traypublisher") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(BaseCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) def set_project_name(project_name): # TODO Deregister project specific plugins and register new project plugins os.environ["AVALON_PROJECT"] = project_name - avalon.api.Session["AVALON_PROJECT"] = project_name - io.install() + legacy_io.Session["AVALON_PROJECT"] = project_name + legacy_io.install() HostContext.set_project_name(project_name) diff --git a/openpype/hosts/traypublisher/plugins/create/create_workfile.py b/openpype/hosts/traypublisher/plugins/create/create_workfile.py index 2db4770bbc..5e0af350f0 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_workfile.py +++ b/openpype/hosts/traypublisher/plugins/create/create_workfile.py @@ -1,8 +1,8 @@ from openpype.hosts.traypublisher.api import pipeline +from openpype.lib import FileDef from openpype.pipeline import ( Creator, - CreatedInstance, - lib + CreatedInstance ) @@ -80,7 +80,7 @@ class WorkfileCreator(Creator): def get_instance_attr_defs(self): output = [ - lib.FileDef( + FileDef( "filepath", folders=False, extensions=self.extensions, diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_app_name.py b/openpype/hosts/traypublisher/plugins/publish/collect_app_name.py new file mode 100644 index 0000000000..e38d10e70f --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_app_name.py @@ -0,0 +1,13 @@ +import pyblish.api + + +class CollectTrayPublisherAppName(pyblish.api.ContextPlugin): + """Collect app name and label.""" + + label = "Collect App Name/Label" + order = pyblish.api.CollectorOrder - 0.5 + hosts = ["traypublisher"] + + def process(self, context): + context.data["appName"] = "tray publisher" + context.data["appLabel"] = "Tray publisher" diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py b/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py index 88339d2aac..7501051669 --- a/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py +++ b/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py @@ -6,7 +6,7 @@ from openpype.pipeline import PublishValidationError class ValidateWorkfilePath(pyblish.api.InstancePlugin): """Validate existence of workfile instance.""" - label = "Collect Workfile" + label = "Validate Workfile" order = pyblish.api.ValidatorOrder - 0.49 families = ["workfile"] hosts = ["traypublisher"] @@ -14,11 +14,22 @@ def process(self, instance): filepath = instance.data["sourceFilepath"] if not filepath: - raise PublishValidationError(( - "Filepath of 'workfile' instance \"{}\" is not set" - ).format(instance.data["name"])) + raise PublishValidationError( + ( + "Filepath of 'workfile' instance \"{}\" is not set" + ).format(instance.data["name"]), + "File not filled", + "## Missing file\nFill in the source file path before publishing."
+ ) if not os.path.exists(filepath): - raise PublishValidationError(( - "Filepath of 'workfile' instance \"{}\" does not exist: {}" - ).format(instance.data["name"], filepath)) + raise PublishValidationError( + ( + "Filepath of 'workfile' instance \"{}\" does not exist: {}" + ).format(instance.data["name"], filepath), + "File not found", + ( + "## File was not found\nFile \"{}\" was not found." + " Check if the path is still available." + ).format(filepath) + ) diff --git a/openpype/hosts/tvpaint/api/launch_script.py b/openpype/hosts/tvpaint/api/launch_script.py index e66bf61df6..0b25027fc6 100644 --- a/openpype/hosts/tvpaint/api/launch_script.py +++ b/openpype/hosts/tvpaint/api/launch_script.py @@ -8,8 +8,8 @@ import logging from Qt import QtWidgets, QtCore, QtGui -from avalon import api from openpype import style +from openpype.pipeline import install_host from openpype.hosts.tvpaint.api.communication_server import ( CommunicationWrapper ) @@ -31,7 +31,7 @@ def main(launch_args): qt_app = QtWidgets.QApplication([]) # Execute pipeline installation - api.install(tvpaint_host) + install_host(tvpaint_host) # Create Communicator object and trigger launch # - this must be done before anything is processed diff --git a/openpype/hosts/tvpaint/api/lib.py b/openpype/hosts/tvpaint/api/lib.py index 9e6404e72f..0c63dbe5be 100644 --- a/openpype/hosts/tvpaint/api/lib.py +++ b/openpype/hosts/tvpaint/api/lib.py @@ -2,8 +2,6 @@ import os import logging import tempfile -import avalon.io - from . import CommunicationWrapper log = logging.getLogger(__name__) diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index 46c9d3a1dd..f473f51457 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -7,18 +7,17 @@ import logging import requests import pyblish.api -import avalon.api - -from avalon import io -from avalon.pipeline import AVALON_CONTAINER_ID from openpype.hosts import tvpaint from openpype.api import get_current_project_settings from openpype.lib import register_event_callback from openpype.pipeline import ( - LegacyCreator, + legacy_io, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, ) from .lib import ( @@ -66,23 +65,20 @@ instances=2 def install(): - """Install Maya-specific functionality of avalon-core. + """Install TVPaint-specific functionality.""" - This function is called automatically on calling `api.install(maya)`. - - """ log.info("OpenPype - Installing TVPaint integration") - io.install() + legacy_io.install() # Create workdir folder if does not exist yet - workdir = io.Session["AVALON_WORKDIR"] + workdir = legacy_io.Session["AVALON_WORKDIR"] if not os.path.exists(workdir): os.makedirs(workdir) pyblish.api.register_host("tvpaint") pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) registered_callbacks = ( pyblish.api.registered_callbacks().get("instanceToggled") or [] @@ -95,16 +91,16 @@ def install(): def uninstall(): - """Uninstall TVPaint-specific functionality of avalon-core. - - This function is called automatically on calling `api.uninstall()`. + """Uninstall TVPaint-specific functionality. + This function is called automatically on calling `uninstall_host()`. 
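The workfile validator now raises `PublishValidationError` with three arguments: the log message, a short title, and a markdown body that the new publisher UI renders as a formatted error detail. A sketch of that call shape; the texts are illustrative, not the plugin's exact wording:

from openpype.pipeline import PublishValidationError

filepath = ""  # stand-in for instance.data["sourceFilepath"]
if not filepath:
    raise PublishValidationError(
        "Filepath of 'workfile' instance is not set",  # plain log message
        "File not filled",                             # short title for the UI
        "## Missing file\nFill in the source file path before publishing.",
    )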
""" + log.info("OpenPype - Uninstalling TVPaint integration") pyblish.api.deregister_host("tvpaint") pyblish.api.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) def containerise( @@ -448,12 +444,12 @@ def set_context_settings(asset_doc=None): """ if asset_doc is None: # Use current session asset if not passed - asset_doc = avalon.io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", - "name": avalon.io.Session["AVALON_ASSET"] + "name": legacy_io.Session["AVALON_ASSET"] }) - project_doc = avalon.io.find_one({"type": "project"}) + project_doc = legacy_io.find_one({"type": "project"}) framerate = asset_doc["data"].get("fps") if framerate is None: diff --git a/openpype/hosts/tvpaint/api/workio.py b/openpype/hosts/tvpaint/api/workio.py index c513bec6cf..1a5ad00ca8 100644 --- a/openpype/hosts/tvpaint/api/workio.py +++ b/openpype/hosts/tvpaint/api/workio.py @@ -3,7 +3,10 @@ has_unsaved_changes """ -from avalon import api +from openpype.pipeline import ( + HOST_WORKFILE_EXTENSIONS, + legacy_io, +) from .lib import ( execute_george, execute_george_through_file @@ -23,9 +26,9 @@ def save_file(filepath): """Save the open scene file.""" # Store context to workfile before save context = { - "project": api.Session["AVALON_PROJECT"], - "asset": api.Session["AVALON_ASSET"], - "task": api.Session["AVALON_TASK"] + "project": legacy_io.Session["AVALON_PROJECT"], + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] } save_current_workfile_context(context) @@ -47,7 +50,7 @@ def has_unsaved_changes(): def file_extensions(): """Return the supported file extensions for Blender scene files.""" - return api.HOST_WORKFILE_EXTENSIONS["tvpaint"] + return HOST_WORKFILE_EXTENSIONS["tvpaint"] def work_root(session): diff --git a/openpype/hosts/tvpaint/hooks/pre_launch_args.py b/openpype/hosts/tvpaint/hooks/pre_launch_args.py index 2a8f49d5b0..c31403437a 100644 --- a/openpype/hosts/tvpaint/hooks/pre_launch_args.py +++ b/openpype/hosts/tvpaint/hooks/pre_launch_args.py @@ -1,14 +1,8 @@ -import os -import shutil - -from openpype.hosts import tvpaint from openpype.lib import ( PreLaunchHook, get_openpype_execute_args ) -import avalon - class TvpaintPrelaunchHook(PreLaunchHook): """Launch arguments preparation. 
diff --git a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py index 5e4e3965d2..af1a4a9b6b 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py +++ b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py @@ -1,6 +1,6 @@ import collections import qargparse -from avalon.pipeline import get_representation_context +from openpype.pipeline import get_representation_context from openpype.hosts.tvpaint.api import lib, pipeline, plugin diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py index d224cfc390..0eab083c22 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_workfile.py +++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py @@ -1,12 +1,15 @@ import os -from avalon import api, io from openpype.lib import ( StringTemplate, get_workfile_template_key_from_context, get_workdir_data, get_last_workfile_with_version, ) +from openpype.pipeline import ( + registered_host, + legacy_io, +) from openpype.api import Anatomy from openpype.hosts.tvpaint.api import lib, pipeline, plugin @@ -22,7 +25,7 @@ class LoadWorkfile(plugin.Loader): def load(self, context, name, namespace, options): # Load context of current workfile as first thing # - which context and extension has - host = api.registered_host() + host = registered_host() current_file = host.current_file() context = pipeline.get_current_workfile_context() @@ -45,13 +48,13 @@ class LoadWorkfile(plugin.Loader): task_name = context.get("task") # Far cases when there is workfile without context if not asset_name: - asset_name = io.Session["AVALON_ASSET"] - task_name = io.Session["AVALON_TASK"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] - project_doc = io.find_one({ + project_doc = legacy_io.find_one({ "type": "project" }) - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -62,7 +65,7 @@ class LoadWorkfile(plugin.Loader): task_name, host_name, project_name=project_name, - dbcon=io + dbcon=legacy_io ) anatomy = Anatomy(project_name) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py index 9cbfb61550..188aa8c41a 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py @@ -1,10 +1,9 @@ -import os import json import copy import pyblish.api -from avalon import io from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline import legacy_io class CollectInstances(pyblish.api.ContextPlugin): @@ -20,21 +19,30 @@ class CollectInstances(pyblish.api.ContextPlugin): json.dumps(workfile_instances, indent=4) )) + filtered_instance_data = [] # Backwards compatibility for workfiles that already have review # instance in metadata. review_instance_exist = False for instance_data in workfile_instances: - if instance_data["family"] == "review": + family = instance_data["family"] + if family == "review": review_instance_exist = True - break + + elif family not in ("renderPass", "renderLayer"): + self.log.info("Unknown family \"{}\". 
Skipping {}".format( + family, json.dumps(instance_data, indent=4) + )) + continue + + filtered_instance_data.append(instance_data) # Fake review instance if review was not found in metadata families if not review_instance_exist: - workfile_instances.append( + filtered_instance_data.append( self._create_review_instance_data(context) ) - for instance_data in workfile_instances: + for instance_data in filtered_instance_data: instance_data["fps"] = context.data["sceneFps"] # Store workfile instance data to instance data @@ -42,8 +50,11 @@ class CollectInstances(pyblish.api.ContextPlugin): # Global instance data modifications # Fill families family = instance_data["family"] + families = [family] + if family != "review": + families.append("review") # Add `review` family for thumbnail integration - instance_data["families"] = [family, "review"] + instance_data["families"] = families # Instance name subset_name = instance_data["subset"] @@ -70,7 +81,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # - not sure if it's good idea to require asset id in # get_subset_name? asset_name = context.data["workfile_context"]["asset"] - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -78,10 +89,10 @@ class CollectInstances(pyblish.api.ContextPlugin): # Project name from workfile context project_name = context.data["workfile_context"]["project"] # Host name from environment variable - host_name = os.environ["AVALON_APP"] + host_name = context.data["hostName"] # Use empty variant value variant = "" - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] new_subset_name = get_subset_name_with_asset_doc( family, variant, @@ -106,12 +117,6 @@ class CollectInstances(pyblish.api.ContextPlugin): instance = self.create_render_pass_instance( context, instance_data ) - else: - raise AssertionError( - "Instance with unknown family \"{}\": {}".format( - family, instance_data - ) - ) if instance is None: continue @@ -151,7 +156,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # Change subset name # Final family of an instance will be `render` new_family = "render" - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] new_subset_name = "{}{}_{}_Beauty".format( new_family, task_name.capitalize(), name ) @@ -196,7 +201,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # Final family of an instance will be `render` new_family = "render" old_subset_name = instance_data["subset"] - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] new_subset_name = "{}{}_{}_{}".format( new_family, task_name.capitalize(), render_layer, pass_name ) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py new file mode 100644 index 0000000000..1c042a62fb --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py @@ -0,0 +1,110 @@ +import json +import copy +import pyblish.api + +from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline import legacy_io + + +class CollectRenderScene(pyblish.api.ContextPlugin): + """Collect instance which renders whole scene in PNG. + + Creates instance with family 'renderScene' which will have all layers + to render which will be composite into one result. The instance is not + collected from scene. + + Scene will be rendered with all visible layers similar way like review is. 
+ + The instance is disabled if there are any created instances of + 'renderLayer' or 'renderPass', because this instance is meant as a + lazy publish of the whole TVPaint file. + + The subset name is created the same way as for the 'renderLayer' family. + The template can use `renderPass` and `renderLayer` keys, set via settings, + and `variant` is filled with the `renderLayer` value. + """ + label = "Collect Render Scene" + order = pyblish.api.CollectorOrder - 0.39 + hosts = ["tvpaint"] + + # Value of 'render_pass' in subset name template + render_pass = "beauty" + + # Settings attributes + enabled = False + # Value of 'render_layer' and 'variant' in subset name template + render_layer = "Main" + + def process(self, context): + # Check if there are created instances of renderPass and renderLayer + # - that will define if renderScene instance is enabled after + # collection + any_created_instance = False + for instance in context: + family = instance.data["family"] + if family in ("renderPass", "renderLayer"): + any_created_instance = True + break + + # Global instance data modifications + # Fill families + family = "renderScene" + # Add `review` family for thumbnail integration + families = [family, "review"] + + # Collect asset doc to get asset id + # - not sure if it's good idea to require asset id in + # get_subset_name? + workfile_context = context.data["workfile_context"] + asset_name = workfile_context["asset"] + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": asset_name + }) + + # Project name from workfile context + project_name = context.data["workfile_context"]["project"] + # Host name from context data + host_name = context.data["hostName"] + # Variant is using the render layer name + variant = self.render_layer + dynamic_data = { + "render_layer": self.render_layer, + "render_pass": self.render_pass + } + + task_name = workfile_context["task"] + subset_name = get_subset_name_with_asset_doc( + "render", + variant, + task_name, + asset_doc, + project_name, + host_name, + dynamic_data=dynamic_data + ) + + instance_data = { + "family": family, + "families": families, + "fps": context.data["sceneFps"], + "subset": subset_name, + "name": subset_name, + "label": "{} [{}-{}]".format( + subset_name, + context.data["sceneMarkIn"] + 1, + context.data["sceneMarkOut"] + 1 + ), + "active": not any_created_instance, + "publish": not any_created_instance, + "representations": [], + "layers": copy.deepcopy(context.data["layersData"]), + "asset": asset_name, + "task": task_name + } + + instance = context.create_instance(**instance_data) + + self.log.debug("Created instance: {}\n{}".format( + instance, json.dumps(instance.data, indent=4) + )) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py index 89348037d3..70d92f82e9 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py @@ -1,9 +1,9 @@ import os import json import pyblish.api -from avalon import io from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline import legacy_io class CollectWorkfile(pyblish.api.ContextPlugin): @@ -28,7 +28,7 @@ # get_subset_name?
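Subset names in both new TVPaint collectors come from `get_subset_name_with_asset_doc`, which resolves the project's subset-name template; `dynamic_data` supplies the extra `render_layer`/`render_pass` template keys. A sketch of the call with placeholder context values:

from openpype.lib import get_subset_name_with_asset_doc
from openpype.pipeline import legacy_io

asset_doc = legacy_io.find_one({"type": "asset", "name": "sh010"})  # illustrative
subset_name = get_subset_name_with_asset_doc(
    "render",        # family fed into the template
    "Main",          # variant
    "compositing",   # task name (illustrative)
    asset_doc,
    "myproject",     # project name (illustrative)
    "tvpaint",       # host name
    dynamic_data={"render_layer": "Main", "render_pass": "beauty"},
)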
family = "workfile" asset_name = context.data["workfile_context"]["asset"] - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -39,7 +39,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): host_name = os.environ["AVALON_APP"] # Use empty variant value variant = "" - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] subset_name = get_subset_name_with_asset_doc( family, variant, diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py index f5c86c613b..c59ef82f85 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py @@ -3,7 +3,8 @@ import json import tempfile import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io from openpype.hosts.tvpaint.api import pipeline, lib @@ -49,9 +50,9 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect and store current context to have reference current_context = { - "project": avalon.api.Session["AVALON_PROJECT"], - "asset": avalon.api.Session["AVALON_ASSET"], - "task": avalon.api.Session["AVALON_TASK"] + "project": legacy_io.Session["AVALON_PROJECT"], + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] } context.data["previous_context"] = current_context self.log.debug("Current context is: {}".format(current_context)) @@ -69,7 +70,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): ("AVALON_TASK", "task") ) for env_key, key in key_map: - avalon.api.Session[env_key] = workfile_context[key] + legacy_io.Session[env_key] = workfile_context[key] os.environ[env_key] = workfile_context[key] self.log.info("Context changed to: {}".format(workfile_context)) diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py b/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py new file mode 100644 index 0000000000..ab5bbc5e2c --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py @@ -0,0 +1,99 @@ +"""Plugin converting png files from ExtractSequence into exrs. + +Requires: + ExtractSequence - source of PNG + ExtractReview - review was already created so we can convert to any exr +""" +import os +import json + +import pyblish.api +from openpype.lib import ( + get_oiio_tools_path, + run_subprocess, +) +from openpype.pipeline import KnownPublishError + + +class ExtractConvertToEXR(pyblish.api.InstancePlugin): + # Offset to get after ExtractSequence plugin. + order = pyblish.api.ExtractorOrder + 0.1 + label = "Extract Sequence EXR" + hosts = ["tvpaint"] + families = ["render"] + + enabled = False + + # Replace source PNG files or just add + replace_pngs = True + # EXR compression + exr_compression = "ZIP" + + def process(self, instance): + repres = instance.data.get("representations") + if not repres: + return + + oiio_path = get_oiio_tools_path() + # Raise an exception when oiiotool is not available + # - this can currently happen on MacOS machines + if not os.path.exists(oiio_path): + KnownPublishError( + "OpenImageIO tool is not available on this machine." 
+ ) + + new_repres = [] + for repre in repres: + if repre["name"] != "png": + continue + + self.log.info( + "Processing representation: {}".format( + json.dumps(repre, sort_keys=True, indent=4) + ) + ) + + src_filepaths = set() + new_filenames = [] + for src_filename in repre["files"]: + dst_filename = os.path.splitext(src_filename)[0] + ".exr" + new_filenames.append(dst_filename) + + src_filepath = os.path.join(repre["stagingDir"], src_filename) + dst_filepath = os.path.join(repre["stagingDir"], dst_filename) + + src_filepaths.add(src_filepath) + + args = [ + oiio_path, src_filepath, + "--compression", self.exr_compression, + # TODO how to define color conversion? + "--colorconvert", "sRGB", "linear", + "-o", dst_filepath + ] + run_subprocess(args) + + new_repres.append( + { + "name": "exr", + "ext": "exr", + "files": new_filenames, + "stagingDir": repre["stagingDir"], + "tags": list(repre["tags"]) + } + ) + + if self.replace_pngs: + instance.data["representations"].remove(repre) + + for filepath in src_filepaths: + instance.context.data["cleanupFullPaths"].append(filepath) + + instance.data["representations"].extend(new_repres) + self.log.info( + "Representations: {}".format( + json.dumps( + instance.data["representations"], sort_keys=True, indent=4 + ) + ) + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index 729c545545..d4fd1dff4b 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -12,14 +12,13 @@ from openpype.hosts.tvpaint.lib import ( fill_reference_frames, composite_rendered_layers, rename_filepaths_by_frame_start, - composite_images ) class ExtractSequence(pyblish.api.Extractor): label = "Extract Sequence" hosts = ["tvpaint"] - families = ["review", "renderPass", "renderLayer"] + families = ["review", "renderPass", "renderLayer", "renderScene"] # Modifiable with settings review_bg = [255, 255, 255, 255] @@ -160,7 +159,7 @@ class ExtractSequence(pyblish.api.Extractor): # Fill tags and new families tags = [] - if family_lowered in ("review", "renderlayer"): + if family_lowered in ("review", "renderlayer", "renderscene"): tags.append("review") # Sequence of one frame @@ -186,7 +185,7 @@ class ExtractSequence(pyblish.api.Extractor): instance.data["representations"].append(new_repre) - if family_lowered in ("renderpass", "renderlayer"): + if family_lowered in ("renderpass", "renderlayer", "renderscene"): # Change family to render instance.data["family"] = "render" diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py index 7ea0587b8f..d3a04cc69f 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py @@ -8,7 +8,7 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin): label = "Validate Layers Visibility" order = pyblish.api.ValidatorOrder - families = ["review", "renderPass", "renderLayer"] + families = ["review", "renderPass", "renderLayer", "renderScene"] def process(self, instance): layer_names = set() diff --git a/openpype/hosts/tvpaint/worker/init_file.tvpp b/openpype/hosts/tvpaint/worker/init_file.tvpp new file mode 100644 index 0000000000..572d278fdb Binary files /dev/null and b/openpype/hosts/tvpaint/worker/init_file.tvpp differ diff --git a/openpype/hosts/tvpaint/worker/worker.py 
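The EXR extractor shells out to `oiiotool` (located via `get_oiio_tools_path`) once per frame, folding the sRGB-to-linear conversion into the same command. A sketch of a single invocation, assuming the tool exists on disk; the file paths are placeholders:

from openpype.lib import get_oiio_tools_path, run_subprocess

oiio_path = get_oiio_tools_path()
args = [
    oiio_path, "/tmp/render.0001.png",   # source PNG (illustrative)
    "--compression", "ZIP",              # EXR compression, from plugin settings
    "--colorconvert", "sRGB", "linear",  # same conversion the plugin hardcodes
    "-o", "/tmp/render.0001.exr",        # destination EXR
]
run_subprocess(args)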
b/openpype/hosts/tvpaint/worker/worker.py index cfd40bc7ba..9295c8afb4 100644 --- a/openpype/hosts/tvpaint/worker/worker.py +++ b/openpype/hosts/tvpaint/worker/worker.py @@ -1,5 +1,8 @@ +import os import signal import time +import tempfile +import shutil import asyncio from openpype.hosts.tvpaint.api.communication_server import ( @@ -36,8 +39,28 @@ class TVPaintWorkerCommunicator(BaseCommunicator): super()._start_webserver() + def _open_init_file(self): + """Open init TVPaint file. + + Opening the file triggers a dialog about a missing audio file path, + which must be closed once and is then ignored for the rest of the process. + """ + current_dir = os.path.dirname(os.path.abspath(__file__)) + init_filepath = os.path.join(current_dir, "init_file.tvpp") + with tempfile.NamedTemporaryFile( + mode="w", prefix="a_tvp_", suffix=".tvpp" + ) as tmp_file: + tmp_filepath = tmp_file.name.replace("\\", "/") + + shutil.copy(init_filepath, tmp_filepath) + george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(tmp_filepath) + self.execute_george_through_file(george_script) + self.execute_george("tv_projectclose") + os.remove(tmp_filepath) + def _on_client_connect(self, *args, **kwargs): super()._on_client_connect(*args, **kwargs) + self._open_init_file() # Register as "ready to work" worker self._worker_connection.register_as_worker() diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py index 9ec11b942d..f2c264e5a4 100644 --- a/openpype/hosts/unreal/api/pipeline.py +++ b/openpype/hosts/unreal/api/pipeline.py @@ -4,13 +4,13 @@ import logging from typing import List import pyblish.api -from avalon.pipeline import AVALON_CONTAINER_ID -from avalon import api from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, ) from openpype.tools.utils import host_tools import openpype.hosts.unreal @@ -49,7 +49,7 @@ def install(): logger.info("installing OpenPype for Unreal") pyblish.api.register_plugin_path(str(PUBLISH_PATH)) register_loader_plugin_path(str(LOAD_PATH)) - api.register_plugin_path(LegacyCreator, str(CREATE_PATH)) + register_creator_plugin_path(str(CREATE_PATH)) _register_callbacks() _register_events() @@ -58,7 +58,7 @@ def uninstall(): """Uninstall Unreal configuration for Avalon.""" pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) deregister_loader_plugin_path(str(LOAD_PATH)) - api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH)) + deregister_creator_plugin_path(str(CREATE_PATH)) def _register_callbacks(): @@ -75,30 +75,6 @@ def _register_events(): pass -class Creator(LegacyCreator): - hosts = ["unreal"] - asset_types = [] - - def process(self): - nodes = list() - - with unreal.ScopedEditorTransaction("OpenPype Creating Instance"): - if (self.options or {}).get("useSelection"): - self.log.info("setting ...") - print("settings ...") - nodes = unreal.EditorUtilityLibrary.get_selected_assets() - - asset_paths = [a.get_path_name() for a in nodes] - self.name = move_assets_to_path( - "/Game", self.name, asset_paths - ) - - instance = create_publish_instance("/Game", self.name) - imprint(instance, self.data) - - return instance - - def ls(): """List all containers.
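The worker's init-file trick above uses `NamedTemporaryFile` only to reserve a unique path: the handle is closed (and the empty file deleted) when the context manager exits, the bundled `.tvpp` template is copied onto the reserved name, loaded once so TVPaint's missing-audio dialog is dismissed for the rest of the process, and finally removed. The reservation pattern in isolation:

import shutil
import tempfile

def copy_to_reserved_temp_path(src_path):
    # Reserve a unique name; the empty temp file is deleted on context exit
    with tempfile.NamedTemporaryFile(prefix="a_tvp_", suffix=".tvpp") as tmp:
        tmp_filepath = tmp.name.replace("\\", "/")
    # Recreate the file under the reserved name with the template's content
    shutil.copy(src_path, tmp_filepath)
    return tmp_filepath  # caller removes it with os.remove() when done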
diff --git a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py b/openpype/hosts/unreal/integration/Content/Python/init_unreal.py index 2ecd301c25..4bb03b07ed 100644 --- a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py +++ b/openpype/hosts/unreal/integration/Content/Python/init_unreal.py @@ -2,13 +2,7 @@ import unreal openpype_detected = True try: - from avalon import api -except ImportError as exc: - api = None - openpype_detected = False - unreal.log_error("Avalon: cannot load Avalon [ {} ]".format(exc)) - -try: + from openpype.pipeline import install_host from openpype.hosts.unreal import api as openpype_host except ImportError as exc: openpype_host = None @@ -16,7 +10,7 @@ except ImportError as exc: unreal.log_error("OpenPype: cannot load OpenPype [ {} ]".format(exc)) if openpype_detected: - api.install(openpype_host) + install_host(openpype_host) @unreal.uclass() diff --git a/openpype/hosts/unreal/plugins/create/create_camera.py b/openpype/hosts/unreal/plugins/create/create_camera.py index e9be7814f8..2842900834 100644 --- a/openpype/hosts/unreal/plugins/create/create_camera.py +++ b/openpype/hosts/unreal/plugins/create/create_camera.py @@ -2,13 +2,11 @@ import unreal from unreal import EditorAssetLibrary as eal from unreal import EditorLevelLibrary as ell -from openpype.hosts.unreal.api.plugin import Creator -from openpype.hosts.unreal.api.pipeline import ( - instantiate, -) +from openpype.hosts.unreal.api import plugin +from openpype.hosts.unreal.api.pipeline import instantiate -class CreateCamera(Creator): +class CreateCamera(plugin.Creator): """Layout output for character rigs""" name = "layoutMain" diff --git a/openpype/hosts/unreal/plugins/create/create_layout.py b/openpype/hosts/unreal/plugins/create/create_layout.py index e20ce9723b..5fef08ce2a 100644 --- a/openpype/hosts/unreal/plugins/create/create_layout.py +++ b/openpype/hosts/unreal/plugins/create/create_layout.py @@ -1,12 +1,11 @@ # -*- coding: utf-8 -*- -from unreal import EditorLevelLibrary as ell -from openpype.hosts.unreal.api.plugin import Creator -from openpype.hosts.unreal.api.pipeline import ( - instantiate, -) +from unreal import EditorLevelLibrary + +from openpype.hosts.unreal.api import plugin +from openpype.hosts.unreal.api.pipeline import instantiate -class CreateLayout(Creator): +class CreateLayout(plugin.Creator): """Layout output for character rigs.""" name = "layoutMain" @@ -30,13 +29,13 @@ class CreateLayout(Creator): # sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() # selection = [a.get_path_name() for a in sel_objects] - data["level"] = ell.get_editor_world().get_path_name() + data["level"] = EditorLevelLibrary.get_editor_world().get_path_name() data["members"] = [] if (self.options or {}).get("useSelection"): # Set as members the selected actors - for actor in ell.get_selected_level_actors(): + for actor in EditorLevelLibrary.get_selected_level_actors(): data["members"].append("{}.{}".format( actor.get_outer().get_name(), actor.get_name())) diff --git a/openpype/hosts/unreal/plugins/create/create_look.py b/openpype/hosts/unreal/plugins/create/create_look.py index 59c40d3e74..12f6b70ae6 100644 --- a/openpype/hosts/unreal/plugins/create/create_look.py +++ b/openpype/hosts/unreal/plugins/create/create_look.py @@ -1,11 +1,10 @@ # -*- coding: utf-8 -*- """Create look in Unreal.""" import unreal # noqa -from openpype.hosts.unreal.api.plugin import Creator -from openpype.hosts.unreal.api import pipeline +from openpype.hosts.unreal.api import pipeline, plugin 
-class CreateLook(Creator): +class CreateLook(plugin.Creator): """Shader connections defining shape look.""" name = "unrealLook" diff --git a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py index 700eac7366..601c2fae06 100644 --- a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- """Create Static Meshes as FBX geometry.""" import unreal # noqa -from openpype.hosts.unreal.api.plugin import Creator +from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api.pipeline import ( instantiate, ) -class CreateStaticMeshFBX(Creator): +class CreateStaticMeshFBX(plugin.Creator): """Static FBX geometry.""" name = "unrealStaticMeshMain" diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py index 3508fe5ed7..6ac3531b40 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py @@ -2,8 +2,10 @@ """Loader for published alembics.""" import os -from avalon import pipeline -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -117,7 +119,7 @@ class PointCacheAlembicLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py index 180942de51..b2c3889f68 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py @@ -2,8 +2,10 @@ """Load Skeletal Mesh alembics.""" import os -from avalon import pipeline -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -81,7 +83,7 @@ class SkeletalMeshAlembicLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py index 4e00af1d97..5a73c72c64 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py @@ -2,8 +2,10 @@ """Loader for Static Mesh alembics.""" import os -from avalon import pipeline -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -100,7 +102,7 @@ class StaticMeshAlembicLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, 
"asset": asset, "namespace": asset_dir, "container_name": container_name, diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py index 65a9de9353..c7581e0cdd 100644 --- a/openpype/hosts/unreal/plugins/load/load_animation.py +++ b/openpype/hosts/unreal/plugins/load/load_animation.py @@ -8,8 +8,10 @@ from unreal import EditorAssetLibrary from unreal import MovieSceneSkeletalAnimationTrack from unreal import MovieSceneSkeletalAnimationSection -from avalon import pipeline -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -203,7 +205,7 @@ class AnimationFBXLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, diff --git a/openpype/hosts/unreal/plugins/load/load_camera.py b/openpype/hosts/unreal/plugins/load/load_camera.py index 80a5fc3437..ea896f6c44 100644 --- a/openpype/hosts/unreal/plugins/load/load_camera.py +++ b/openpype/hosts/unreal/plugins/load/load_camera.py @@ -6,7 +6,10 @@ import unreal from unreal import EditorAssetLibrary from unreal import EditorLevelLibrary -from avalon import io, pipeline +from openpype.pipeline import ( + AVALON_CONTAINER_ID, + legacy_io, +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -21,7 +24,7 @@ class CameraLoader(plugin.Loader): color = "orange" def _get_data(self, asset_name): - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -157,7 +160,7 @@ class CameraLoader(plugin.Loader): factory=unreal.LevelSequenceFactoryNew() ) - asset_data = io.find_one({ + asset_data = legacy_io.find_one({ "type": "asset", "name": h.split('/')[-1] }) @@ -168,12 +171,12 @@ class CameraLoader(plugin.Loader): end_frames = [] elements = list( - io.find({"type": "asset", "data.visualParent": id})) + legacy_io.find({"type": "asset", "data.visualParent": id})) for e in elements: start_frames.append(e.get('data').get('clipIn')) end_frames.append(e.get('data').get('clipOut')) - elements.extend(io.find({ + elements.extend(legacy_io.find({ "type": "asset", "data.visualParent": e.get('_id') })) @@ -239,7 +242,7 @@ class CameraLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -297,8 +300,8 @@ class CameraLoader(plugin.Loader): factory=unreal.LevelSequenceFactoryNew() ) - io_asset = io.Session["AVALON_ASSET"] - asset_doc = io.find_one({ + io_asset = legacy_io.Session["AVALON_ASSET"] + asset_doc = legacy_io.find_one({ "type": "asset", "name": io_asset }) diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/openpype/hosts/unreal/plugins/load/load_layout.py index 86923ea3b4..e09255d88c 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout.py +++ b/openpype/hosts/unreal/plugins/load/load_layout.py @@ -12,13 +12,13 @@ from unreal import AssetToolsHelpers from unreal import FBXImportType from unreal import MathLibrary as umath -from avalon import io -from avalon.pipeline import AVALON_CONTAINER_ID from openpype.pipeline import ( discover_loader_plugins, loaders_from_representation, 
load_container, get_representation_path, + AVALON_CONTAINER_ID, + legacy_io, ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -88,7 +88,7 @@ class LayoutLoader(plugin.Loader): return None def _get_data(self, asset_name): - asset_doc = io.find_one({ + asset_doc = legacy_io.find_one({ "type": "asset", "name": asset_name }) @@ -552,7 +552,7 @@ class LayoutLoader(plugin.Loader): factory=unreal.LevelSequenceFactoryNew() ) - asset_data = io.find_one({ + asset_data = legacy_io.find_one({ "type": "asset", "name": h.split('/')[-1] }) @@ -563,12 +563,12 @@ class LayoutLoader(plugin.Loader): end_frames = [] elements = list( - io.find({"type": "asset", "data.visualParent": id})) + legacy_io.find({"type": "asset", "data.visualParent": id})) for e in elements: start_frames.append(e.get('data').get('clipIn')) end_frames.append(e.get('data').get('clipOut')) - elements.extend(io.find({ + elements.extend(legacy_io.find({ "type": "asset", "data.visualParent": e.get('_id') })) diff --git a/openpype/hosts/unreal/plugins/load/load_rig.py b/openpype/hosts/unreal/plugins/load/load_rig.py index 3d5616364c..ff844a5e94 100644 --- a/openpype/hosts/unreal/plugins/load/load_rig.py +++ b/openpype/hosts/unreal/plugins/load/load_rig.py @@ -2,8 +2,10 @@ """Load Skeletal Meshes form FBX.""" import os -from avalon import pipeline -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -101,7 +103,7 @@ class SkeletalMeshFBXLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, diff --git a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py index 587fc83a77..282d249947 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py @@ -2,8 +2,10 @@ """Load Static meshes form FBX.""" import os -from avalon import pipeline -from openpype.pipeline import get_representation_path +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -95,7 +97,7 @@ class StaticMeshFBXLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, diff --git a/openpype/hosts/unreal/plugins/publish/extract_layout.py b/openpype/hosts/unreal/plugins/publish/extract_layout.py index 2d09b0e7bd..87e6693a97 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_layout.py +++ b/openpype/hosts/unreal/plugins/publish/extract_layout.py @@ -3,12 +3,14 @@ import os import json import math +from bson.objectid import ObjectId + import unreal from unreal import EditorLevelLibrary as ell from unreal import EditorAssetLibrary as eal import openpype.api -from avalon import io +from openpype.pipeline import legacy_io class ExtractLayout(openpype.api.Extractor): @@ -59,10 +61,10 @@ class ExtractLayout(openpype.api.Extractor): family = eal.get_metadata_tag(asset_container, "family") 
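`avalon.io` re-exported `ObjectId`, which is why the old extractor could write `io.ObjectId(parent)`; `legacy_io` does not, so the layout extractor now imports it from `bson` directly. A sketch of the representation lookup with a placeholder id string:

from bson.objectid import ObjectId
from openpype.pipeline import legacy_io

parent = "5f3e9a0c2c8d4b1a9c000001"  # illustrative version id stored as str
blend_repre = legacy_io.find_one(
    {
        "type": "representation",
        "parent": ObjectId(parent),  # stored parent ids are ObjectId, not str
        "name": "blend",
    },
    projection={"_id": True},
)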
self.log.info("Parent: {}".format(parent)) - blend = io.find_one( + blend = legacy_io.find_one( { "type": "representation", - "parent": io.ObjectId(parent), + "parent": ObjectId(parent), "name": "blend" }, projection={"_id": True}) diff --git a/openpype/hosts/webpublisher/api/__init__.py b/openpype/hosts/webpublisher/api/__init__.py index dbeb628073..18e3a16cf5 100644 --- a/openpype/hosts/webpublisher/api/__init__.py +++ b/openpype/hosts/webpublisher/api/__init__.py @@ -1,10 +1,9 @@ import os import logging -from avalon import api as avalon -from avalon import io from pyblish import api as pyblish import openpype.hosts.webpublisher +from openpype.pipeline import legacy_io log = logging.getLogger("openpype.hosts.webpublisher") @@ -20,7 +19,7 @@ def install(): pyblish.register_plugin_path(PUBLISH_PATH) log.info(PUBLISH_PATH) - io.install() + legacy_io.install() def uninstall(): diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py index ca14538d7d..9ff779636a 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py @@ -1,18 +1,24 @@ -"""Loads batch context from json and continues in publish process. +"""Parses batch context from json and continues in publish process. Provides: context -> Loaded batch file. + - asset + - task (task name) + - taskType + - project_name + - variant """ import os import pyblish.api -from avalon import io + from openpype.lib.plugin_tools import ( parse_json, get_batch_asset_task_info ) from openpype.lib.remote_publish import get_webpublish_conn, IN_PROGRESS_STATUS +from openpype.pipeline import legacy_io class CollectBatchData(pyblish.api.ContextPlugin): @@ -24,7 +30,7 @@ class CollectBatchData(pyblish.api.ContextPlugin): # must be really early, context values are only in json file order = pyblish.api.CollectorOrder - 0.495 label = "Collect batch data" - host = ["webpublisher"] + hosts = ["webpublisher"] def process(self, context): batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") @@ -52,14 +58,15 @@ class CollectBatchData(pyblish.api.ContextPlugin): ) os.environ["AVALON_ASSET"] = asset_name - io.Session["AVALON_ASSET"] = asset_name + legacy_io.Session["AVALON_ASSET"] = asset_name os.environ["AVALON_TASK"] = task_name - io.Session["AVALON_TASK"] = task_name + legacy_io.Session["AVALON_TASK"] = task_name context.data["asset"] = asset_name context.data["task"] = task_name context.data["taskType"] = task_type context.data["project_name"] = project_name + context.data["variant"] = batch_data["variant"] self._set_ctx_path(batch_data) diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py index 65cef14703..bdd3caccfd 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py @@ -12,7 +12,6 @@ import clique import tempfile import math -from avalon import io import pyblish.api from openpype.lib import ( prepare_template_data, @@ -24,6 +23,7 @@ from openpype.lib.plugin_tools import ( parse_json, get_subset_name_with_asset_doc ) +from openpype.pipeline import legacy_io class CollectPublishedFiles(pyblish.api.ContextPlugin): @@ -40,7 +40,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): # must be really early, context values are only in json file order = 
pyblish.api.CollectorOrder - 0.490
     label = "Collect rendered frames"
-    host = ["webpublisher"]
+    hosts = ["webpublisher"]
     targets = ["filespublish"]
 
     # from Settings
@@ -61,6 +61,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
         task_name = context.data["task"]
         task_type = context.data["taskType"]
         project_name = context.data["project_name"]
+        variant = context.data["variant"]
 
         for task_dir in task_subfolders:
             task_data = parse_json(os.path.join(task_dir, "manifest.json"))
@@ -76,7 +77,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
                 extension.replace(".", ''))
 
             subset_name = get_subset_name_with_asset_doc(
-                family, task_data["variant"], task_name, asset_doc,
+                family, variant, task_name, asset_doc,
                 project_name=project_name, host_name="webpublisher"
             )
             version = self._get_last_version(asset_name, subset_name) + 1
@@ -108,15 +109,18 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
             instance.data["representations"] = self._get_single_repre(
                 task_dir, task_data["files"], tags
            )
-            file_url = os.path.join(task_dir, task_data["files"][0])
-            no_of_frames = self._get_number_of_frames(file_url)
-            if no_of_frames:
+            if family != 'workfile':
+                file_url = os.path.join(task_dir, task_data["files"][0])
                 try:
-                    frame_end = int(frame_start) + math.ceil(no_of_frames)
-                    instance.data["frameEnd"] = math.ceil(frame_end) - 1
-                    self.log.debug("frameEnd:: {}".format(
-                        instance.data["frameEnd"]))
-                except ValueError:
+                    no_of_frames = self._get_number_of_frames(file_url)
+                    if no_of_frames:
+                        frame_end = int(frame_start) + \
+                            math.ceil(no_of_frames)
+                        frame_end = math.ceil(frame_end) - 1
+                        instance.data["frameEnd"] = frame_end
+                        self.log.debug("frameEnd:: {}".format(
+                            instance.data["frameEnd"]))
-                    self.log.warning("Unable to count frames "
-                                     "duration {}".format(no_of_frames))
+                except Exception:
+                    self.log.warning("Unable to count frames "
+                                     "duration for {}".format(file_url))
 
@@ -209,7 +213,6 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
             msg = "No family found for combination of " +\
                   "task_type: {}, is_sequence:{}, extension: {}".format(
                       task_type, is_sequence, extension)
-            found_family = "render"
             assert found_family, msg
 
         return (found_family,
@@ -259,7 +262,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
                 }
             }
         ]
-        version = list(io.aggregate(query))
+        version = list(legacy_io.aggregate(query))
         if version:
             return version[0].get("version") or 0
diff --git a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py
index cb6ed8481c..a56521891b 100644
--- a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py
+++ b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py
@@ -8,7 +8,7 @@ from openpype.lib import (
     run_subprocess,
     get_transcode_temp_directory,
-    convert_for_ffmpeg,
+    convert_input_paths_for_ffmpeg,
     should_convert_for_ffmpeg
 )
 
@@ -59,11 +59,9 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
         if do_convert:
             convert_dir = get_transcode_temp_directory()
             filename = os.path.basename(full_input_path)
-            convert_for_ffmpeg(
-                full_input_path,
+            convert_input_paths_for_ffmpeg(
+                [full_input_path],
                 convert_dir,
-                None,
-                None,
                 self.log
             )
             full_input_path = os.path.join(convert_dir, filename)
diff --git a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py
index 1f9089aa27..e82ba7f2b8 100644
--- a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py
+++ b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py
@@ -7,18 +7,20 @@ import
collections from aiohttp.web_response import Response import subprocess -from avalon.api import AvalonMongoDB - -from openpype.lib import OpenPypeMongoConnection -from openpype_modules.avalon_apps.rest_api import _RestApiEndpoint -from openpype.settings import get_project_settings - -from openpype.lib import PypeLogger +from openpype.lib import ( + OpenPypeMongoConnection, + PypeLogger, +) from openpype.lib.remote_publish import ( get_task_data, ERROR_STATUS, REPROCESS_STATUS ) +from openpype.pipeline import AvalonMongoDB +from openpype_modules.avalon_apps.rest_api import _RestApiEndpoint +from openpype.settings import get_project_settings + + log = PypeLogger.get_logger("WebServer") diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index 1ebafbb2d2..29719b63bd 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -29,6 +29,21 @@ from .vendor_bin_utils import ( is_oiio_supported ) +from .attribute_definitions import ( + AbtractAttrDef, + + UIDef, + UISeparatorDef, + UILabelDef, + + UnknownDef, + NumberDef, + TextDef, + EnumDef, + BoolDef, + FileDef, +) + from .env_tools import ( env_value_to_bool, get_paths_from_environ, @@ -90,6 +105,7 @@ from .transcoding import ( get_transcode_temp_directory, should_convert_for_ffmpeg, convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, get_ffprobe_data, get_ffprobe_streams, get_ffmpeg_codec_args, @@ -206,6 +222,12 @@ from .openpype_version import ( is_current_version_higher_than_expected ) + +from .connections import ( + requests_get, + requests_post +) + terminal = Terminal __all__ = [ @@ -233,6 +255,19 @@ __all__ = [ "get_ffmpeg_tool_path", "is_oiio_supported", + "AbtractAttrDef", + + "UIDef", + "UISeparatorDef", + "UILabelDef", + + "UnknownDef", + "NumberDef", + "TextDef", + "EnumDef", + "BoolDef", + "FileDef", + "import_filepath", "modules_from_path", "recursive_bases_from_class", @@ -242,6 +277,7 @@ __all__ = [ "get_transcode_temp_directory", "should_convert_for_ffmpeg", "convert_for_ffmpeg", + "convert_input_paths_for_ffmpeg", "get_ffprobe_data", "get_ffprobe_streams", "get_ffmpeg_codec_args", @@ -362,4 +398,7 @@ __all__ = [ "is_running_from_build", "is_running_staging", "is_current_version_studio_latest", + + "requests_get", + "requests_post" ] diff --git a/openpype/lib/abstract_collect_render.py b/openpype/lib/abstract_collect_render.py index 7c768e280c..3d81f6d794 100644 --- a/openpype/lib/abstract_collect_render.py +++ b/openpype/lib/abstract_collect_render.py @@ -9,9 +9,10 @@ from abc import abstractmethod import attr import six -from avalon import api import pyblish.api +from openpype.pipeline import legacy_io + from .abstract_metaplugins import AbstractMetaContextPlugin @@ -30,6 +31,7 @@ class RenderInstance(object): source = attr.ib() # path to source scene file label = attr.ib() # label to show in GUI subset = attr.ib() # subset name + task = attr.ib() # task name asset = attr.ib() # asset name (AVALON_ASSET) attachTo = attr.ib() # subset name to attach render to setMembers = attr.ib() # list of nodes/members producing render output @@ -127,7 +129,7 @@ class AbstractCollectRender(pyblish.api.ContextPlugin): """Constructor.""" super(AbstractCollectRender, self).__init__(*args, **kwargs) self._file_path = None - self._asset = api.Session["AVALON_ASSET"] + self._asset = legacy_io.Session["AVALON_ASSET"] self._context = None def process(self, context): @@ -138,7 +140,9 @@ class AbstractCollectRender(pyblish.api.ContextPlugin): try: if "workfile" in instance.data["families"]: instance.data["publish"] = True 
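RenderInstance above is an attrs class, so the new task field is just another attr.ib() slot. A trimmed sketch of the pattern, with the field list heavily shortened:

import attr


@attr.s
class MiniRenderInstance(object):
    # Trimmed illustration of the attrs pattern RenderInstance uses;
    # the real class defines many more fields.
    source = attr.ib()   # path to source scene file
    label = attr.ib()    # label to show in GUI
    subset = attr.ib()   # subset name
    task = attr.ib()     # task name (the newly added field)
    asset = attr.ib()    # asset name (AVALON_ASSET)


instance = MiniRenderInstance(
    "scene.ma", "Render Main", "renderMain", "lighting", "sh010"
)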
- if "renderFarm" in instance.data["families"]: + # TODO merge renderFarm and render.farm + if ("renderFarm" in instance.data["families"] or + "render.farm" in instance.data["families"]): instance.data["remove"] = True except KeyError: # be tolerant if 'families' is missing. diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index 557c016d74..b52da52dc9 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -13,7 +13,8 @@ import six from openpype.settings import ( get_system_settings, - get_project_settings + get_project_settings, + get_local_settings ) from openpype.settings.constants import ( METADATA_KEYS, @@ -211,6 +212,7 @@ class ApplicationGroup: data (dict): Group defying data loaded from settings. manager (ApplicationManager): Manager that created the group. """ + def __init__(self, name, data, manager): self.name = name self.manager = manager @@ -374,6 +376,7 @@ class ApplicationManager: will always use these values. Gives ability to create manager using different settings. """ + def __init__(self, system_settings=None): self.log = PypeLogger.get_logger(self.__class__.__name__) @@ -530,13 +533,13 @@ class EnvironmentToolGroup: variants = data.get("variants") or {} label_by_key = variants.pop(M_DYNAMIC_KEY_LABEL, {}) variants_by_name = {} - for variant_name, variant_env in variants.items(): + for variant_name, variant_data in variants.items(): if variant_name in METADATA_KEYS: continue variant_label = label_by_key.get(variant_name) or variant_name tool = EnvironmentTool( - variant_name, variant_label, variant_env, self + variant_name, variant_label, variant_data, self ) variants_by_name[variant_name] = tool self.variants = variants_by_name @@ -560,15 +563,30 @@ class EnvironmentTool: Args: name (str): Name of the tool. - environment (dict): Variant environments. + variant_data (dict): Variant data with environments and + host and app variant filters. group (str): Name of group which wraps tool. """ - def __init__(self, name, label, environment, group): + def __init__(self, name, label, variant_data, group): + # Backwards compatibility 3.9.1 - 3.9.2 + # - 'variant_data' contained only environments but contain also host + # and application variant filters + host_names = variant_data.get("host_names", []) + app_variants = variant_data.get("app_variants", []) + + if "environment" in variant_data: + environment = variant_data["environment"] + else: + environment = variant_data + + self.host_names = host_names + self.app_variants = app_variants self.name = name self.variant_label = label self.label = " ".join((group.label, label)) self.group = group + self._environment = environment self.full_name = "/".join((group.name, name)) @@ -579,6 +597,19 @@ class EnvironmentTool: def environment(self): return copy.deepcopy(self._environment) + def is_valid_for_app(self, app): + """Is tool valid for application. + + Args: + app (Application): Application for which are prepared environments. 
+ """ + if self.app_variants and app.full_name not in self.app_variants: + return False + + if self.host_names and app.host_name not in self.host_names: + return False + return True + class ApplicationExecutable: """Representation of executable loaded from settings.""" @@ -1242,6 +1273,9 @@ class EnvironmentPrepData(dict): if data.get("env") is None: data["env"] = os.environ.copy() + if "system_settings" not in data: + data["system_settings"] = get_system_settings() + super(EnvironmentPrepData, self).__init__(data) @@ -1261,7 +1295,7 @@ def get_app_environments_for_context( Returns: dict: Environments for passed context and application. """ - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB # Avalon database connection dbcon = AvalonMongoDB() @@ -1319,6 +1353,41 @@ def _merge_env(env, current_env): return result +def _add_python_version_paths(app, env, logger): + """Add vendor packages specific for a Python version.""" + + # Skip adding if host name is not set + if not app.host_name: + return + + # Add Python 2/3 modules + openpype_root = os.getenv("OPENPYPE_REPOS_ROOT") + python_vendor_dir = os.path.join( + openpype_root, + "openpype", + "vendor", + "python" + ) + if app.use_python_2: + pythonpath = os.path.join(python_vendor_dir, "python_2") + else: + pythonpath = os.path.join(python_vendor_dir, "python_3") + + if not os.path.exists(pythonpath): + return + + logger.debug("Adding Python version specific paths to PYTHONPATH") + python_paths = [pythonpath] + + # Load PYTHONPATH from current launch context + python_path = env.get("PYTHONPATH") + if python_path: + python_paths.append(python_path) + + # Set new PYTHONPATH to launch context environments + env["PYTHONPATH"] = os.pathsep.join(python_paths) + + def prepare_app_environments(data, env_group=None, implementation_envs=True): """Modify launch environments based on launched app and context. 
@@ -1330,6 +1399,27 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): app = data["app"] log = data["log"] + source_env = data["env"].copy() + + _add_python_version_paths(app, source_env, log) + + # Use environments from local settings + filtered_local_envs = {} + system_settings = data["system_settings"] + whitelist_envs = system_settings["general"].get("local_env_white_list") + if whitelist_envs: + local_settings = get_local_settings() + local_envs = local_settings.get("environments") or {} + filtered_local_envs = { + key: value + for key, value in local_envs.items() + if key in whitelist_envs + } + + # Apply local environment variables for already existing values + for key, value in filtered_local_envs.items(): + if key in source_env: + source_env[key] = value # `added_env_keys` has debug purpose added_env_keys = {app.group.name, app.name} @@ -1347,7 +1437,7 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): # Make sure each tool group can be added only once for key in asset_doc["data"].get("tools_env") or []: tool = app.manager.tools.get(key) - if not tool: + if not tool or not tool.is_valid_for_app(app): continue groups_by_name[tool.group.name] = tool.group tool_by_group_name[tool.group.name][tool.name] = tool @@ -1374,10 +1464,19 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): # Choose right platform tool_env = parse_environments(_env_values, env_group) + + # Apply local environment variables + # - must happen between all values because they may be used during + # merge + for key, value in filtered_local_envs.items(): + if key in tool_env: + tool_env[key] = value + # Merge dictionaries env_values = _merge_env(tool_env, env_values) - merged_env = _merge_env(env_values, data["env"]) + merged_env = _merge_env(env_values, source_env) + loaded_env = acre.compute(merged_env, cleanup=False) final_env = None @@ -1397,7 +1496,7 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): if final_env is None: final_env = loaded_env - keys_to_remove = set(data["env"].keys()) - set(final_env.keys()) + keys_to_remove = set(source_env.keys()) - set(final_env.keys()) # Update env data["env"].update(final_env) @@ -1544,7 +1643,7 @@ def _prepare_last_workfile(data, workdir): result will be stored. workdir (str): Path to folder where workfiles should be stored. 
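The local-settings handling added above reduces to a whitelist filter applied to the launch environment, and later to each tool environment. Roughly, with apply_whitelisted_overrides as a hypothetical name:

def apply_whitelisted_overrides(env, local_envs, whitelist):
    # Only keys the studio whitelists may be overridden from local
    # settings, and only when the key already exists in 'env'.
    filtered = {
        key: value
        for key, value in local_envs.items()
        if key in whitelist
    }
    for key, value in filtered.items():
        if key in env:
            env[key] = value
    return filtered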
""" - import avalon.api + from openpype.pipeline import HOST_WORKFILE_EXTENSIONS log = data["log"] @@ -1593,7 +1692,7 @@ def _prepare_last_workfile(data, workdir): # Last workfile path last_workfile_path = data.get("last_workfile_path") or "" if not last_workfile_path: - extensions = avalon.api.HOST_WORKFILE_EXTENSIONS.get(app.host_name) + extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name) if extensions: anatomy = data["anatomy"] project_settings = data["project_settings"] diff --git a/openpype/pipeline/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py similarity index 100% rename from openpype/pipeline/lib/attribute_definitions.py rename to openpype/lib/attribute_definitions.py diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 8e9fff5f67..3fcddef745 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -9,6 +9,8 @@ import collections import functools import getpass +from bson.objectid import ObjectId + from openpype.settings import ( get_project_settings, get_system_settings @@ -18,9 +20,7 @@ from .profiles_filtering import filter_profiles from .events import emit_event from .path_templates import StringTemplate -# avalon module is not imported at the top -# - may not be in path at the time of pype.lib initialization -avalon = None +legacy_io = None log = logging.getLogger("AvalonContext") @@ -62,8 +62,8 @@ def create_project( """ from openpype.settings import ProjectSettings, SaveWarningExc - from avalon.api import AvalonMongoDB - from avalon.schema import validate + from openpype.pipeline import AvalonMongoDB + from openpype.pipeline.schema import validate if dbcon is None: dbcon = AvalonMongoDB() @@ -118,17 +118,17 @@ def create_project( return project_doc -def with_avalon(func): +def with_pipeline_io(func): @functools.wraps(func) - def wrap_avalon(*args, **kwargs): - global avalon - if avalon is None: - import avalon + def wrapped(*args, **kwargs): + global legacy_io + if legacy_io is None: + from openpype.pipeline import legacy_io return func(*args, **kwargs) - return wrap_avalon + return wrapped -@with_avalon +@with_pipeline_io def is_latest(representation): """Return whether the representation is from latest version @@ -140,12 +140,12 @@ def is_latest(representation): """ - version = avalon.io.find_one({"_id": representation['parent']}) + version = legacy_io.find_one({"_id": representation['parent']}) if version["type"] == "hero_version": return True # Get highest version under the parent - highest_version = avalon.io.find_one({ + highest_version = legacy_io.find_one({ "type": "version", "parent": version["parent"] }, sort=[("name", -1)], projection={"name": True}) @@ -156,20 +156,21 @@ def is_latest(representation): return False -@with_avalon +@with_pipeline_io def any_outdated(): """Return whether the current scene has any outdated content""" + from openpype.pipeline import registered_host checked = set() - host = avalon.api.registered_host() + host = registered_host() for container in host.ls(): representation = container['representation'] if representation in checked: continue - representation_doc = avalon.io.find_one( + representation_doc = legacy_io.find_one( { - "_id": avalon.io.ObjectId(representation), + "_id": ObjectId(representation), "type": "representation" }, projection={"parent": True} @@ -186,7 +187,7 @@ def any_outdated(): return False -@with_avalon +@with_pipeline_io def get_asset(asset_name=None): """ Returning asset document from database by its name. 
@@ -199,9 +200,9 @@ def get_asset(asset_name=None): (MongoDB document) """ if not asset_name: - asset_name = avalon.api.Session["AVALON_ASSET"] + asset_name = legacy_io.Session["AVALON_ASSET"] - asset_document = avalon.io.find_one({ + asset_document = legacy_io.find_one({ "name": asset_name, "type": "asset" }) @@ -212,7 +213,7 @@ def get_asset(asset_name=None): return asset_document -@with_avalon +@with_pipeline_io def get_hierarchy(asset_name=None): """ Obtain asset hierarchy path string from mongo db @@ -225,12 +226,12 @@ def get_hierarchy(asset_name=None): """ if not asset_name: - asset_name = avalon.io.Session.get( + asset_name = legacy_io.Session.get( "AVALON_ASSET", os.environ["AVALON_ASSET"] ) - asset_entity = avalon.io.find_one({ + asset_entity = legacy_io.find_one({ "type": 'asset', "name": asset_name }) @@ -249,13 +250,13 @@ def get_hierarchy(asset_name=None): parent_id = entity.get("data", {}).get("visualParent") if not parent_id: break - entity = avalon.io.find_one({"_id": parent_id}) + entity = legacy_io.find_one({"_id": parent_id}) hierarchy_items.append(entity["name"]) # Add parents to entity data for next query entity_data = asset_entity.get("data", {}) entity_data["parents"] = hierarchy_items - avalon.io.update_many( + legacy_io.update_many( {"_id": asset_entity["_id"]}, {"$set": {"data": entity_data}} ) @@ -302,7 +303,7 @@ def get_linked_asset_ids(asset_doc): return output -@with_avalon +@with_pipeline_io def get_linked_assets(asset_doc): """Return linked assets for `asset_doc` from DB @@ -316,10 +317,10 @@ def get_linked_assets(asset_doc): if not link_ids: return [] - return list(avalon.io.find({"_id": {"$in": link_ids}})) + return list(legacy_io.find({"_id": {"$in": link_ids}})) -@with_avalon +@with_pipeline_io def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): """Retrieve latest version from `asset_name`, and `subset_name`. @@ -330,8 +331,7 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): Args: asset_name (str): Name of asset. subset_name (str): Name of subset. - dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection - with Session. + dbcon (AvalonMongoDB, optional): Avalon Mongo connection with Session. project_name (str, optional): Find latest version in specific project. Returns: @@ -340,13 +340,13 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): """ if not dbcon: - log.debug("Using `avalon.io` for query.") - dbcon = avalon.io + log.debug("Using `legacy_io` for query.") + dbcon = legacy_io # Make sure is installed dbcon.install() if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"): - # `avalon.io` has only `_database` attribute + # `legacy_io` has only `_database` attribute # but `AvalonMongoDB` has `database` database = getattr(dbcon, "database", dbcon._database) collection = database[project_name] @@ -426,7 +426,7 @@ def get_workfile_template_key_from_context( "`get_workfile_template_key_from_context` requires to pass" " one of 'dbcon' or 'project_name' arguments." )) - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB dbcon = AvalonMongoDB() dbcon.Session["AVALON_PROJECT"] = project_name @@ -646,6 +646,7 @@ def get_workdir( ) +@with_pipeline_io def template_data_from_session(session=None): """ Return dictionary with template from session keys. @@ -655,15 +656,15 @@ def template_data_from_session(session=None): Returns: dict: All available data from session. 
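get_hierarchy climbs the data.visualParent chain one document at a time. Reduced to plain dictionaries (walk_visual_parents is illustrative, not repository code):

def walk_visual_parents(docs_by_id, asset_doc):
    # Collect ancestor names from the asset up towards the project root,
    # following "data.visualParent" the way get_hierarchy does.
    names = []
    entity = asset_doc
    while True:
        parent_id = entity.get("data", {}).get("visualParent")
        if not parent_id:
            break
        entity = docs_by_id[parent_id]
        names.append(entity["name"])
    return names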
""" - from avalon import io - import avalon.api if session is None: - session = avalon.api.Session + session = legacy_io.Session project_name = session["AVALON_PROJECT"] - project_doc = io._database[project_name].find_one({"type": "project"}) - asset_doc = io._database[project_name].find_one({ + project_doc = legacy_io.database[project_name].find_one({ + "type": "project" + }) + asset_doc = legacy_io.database[project_name].find_one({ "type": "asset", "name": session["AVALON_ASSET"] }) @@ -672,6 +673,7 @@ def template_data_from_session(session=None): return get_workdir_data(project_doc, asset_doc, task_name, host_name) +@with_pipeline_io def compute_session_changes( session, task=None, asset=None, app=None, template_key=None ): @@ -710,10 +712,8 @@ def compute_session_changes( asset = asset["name"] if not asset_document or not asset_tasks: - from avalon import io - # Assume asset name - asset_document = io.find_one( + asset_document = legacy_io.find_one( { "name": asset, "type": "asset" @@ -745,11 +745,10 @@ def compute_session_changes( return changes +@with_pipeline_io def get_workdir_from_session(session=None, template_key=None): - import avalon.api - if session is None: - session = avalon.api.Session + session = legacy_io.Session project_name = session["AVALON_PROJECT"] host_name = session["AVALON_APP"] anatomy = Anatomy(project_name) @@ -766,6 +765,7 @@ def get_workdir_from_session(session=None, template_key=None): return anatomy_filled[template_key]["folder"] +@with_pipeline_io def update_current_task(task=None, asset=None, app=None, template_key=None): """Update active Session to a new task work area. @@ -780,10 +780,8 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): dict: The changed key, values in the current Session. """ - import avalon.api - changes = compute_session_changes( - avalon.api.Session, + legacy_io.Session, task=task, asset=asset, app=app, @@ -793,7 +791,7 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): # Update the Session and environments. Pop from environments all keys with # value set to None. for key, value in changes.items(): - avalon.api.Session[key] = value + legacy_io.Session[key] = value if value is None: os.environ.pop(key, None) else: @@ -805,7 +803,7 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): return changes -@with_avalon +@with_pipeline_io def get_workfile_doc(asset_id, task_name, filename, dbcon=None): """Return workfile document for entered context. @@ -817,14 +815,14 @@ def get_workfile_doc(asset_id, task_name, filename, dbcon=None): task_name (str): Name of task under which the workfile belongs. filename (str): Name of a workfile. dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `avalon.io` is used if not entered. + `legacy_io` is used if not entered. Returns: dict: Workfile document or None. """ - # Use avalon.io if dbcon is not entered + # Use legacy_io if dbcon is not entered if not dbcon: - dbcon = avalon.io + dbcon = legacy_io return dbcon.find_one({ "type": "workfile", @@ -834,7 +832,7 @@ def get_workfile_doc(asset_id, task_name, filename, dbcon=None): }) -@with_avalon +@with_pipeline_io def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): """Creates or replace workfile document in mongo. @@ -847,11 +845,11 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): filename (str): Filename of workfile. workdir (str): Path to directory where `filename` is located. 
dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `avalon.io` is used if not entered. + `legacy_io` is used if not entered. """ - # Use avalon.io if dbcon is not entered + # Use legacy_io if dbcon is not entered if not dbcon: - dbcon = avalon.io + dbcon = legacy_io # Filter of workfile document doc_filter = { @@ -896,7 +894,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): ) -@with_avalon +@with_pipeline_io def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): if not workfile_doc: # TODO add log message @@ -905,9 +903,9 @@ def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): if not data: return - # Use avalon.io if dbcon is not entered + # Use legacy_io if dbcon is not entered if not dbcon: - dbcon = avalon.io + dbcon = legacy_io # Convert data to mongo modification keys/values # - this is naive implementation which does not expect nested @@ -957,7 +955,7 @@ class BuildWorkfile: return containers - @with_avalon + @with_pipeline_io def build_workfile(self): """Prepares and load containers into workfile. @@ -984,8 +982,8 @@ class BuildWorkfile: from openpype.pipeline import discover_loader_plugins # Get current asset name and entity - current_asset_name = avalon.io.Session["AVALON_ASSET"] - current_asset_entity = avalon.io.find_one({ + current_asset_name = legacy_io.Session["AVALON_ASSET"] + current_asset_entity = legacy_io.find_one({ "type": "asset", "name": current_asset_name }) @@ -1013,7 +1011,7 @@ class BuildWorkfile: return # Get current task name - current_task_name = avalon.io.Session["AVALON_TASK"] + current_task_name = legacy_io.Session["AVALON_TASK"] # Load workfile presets for task self.build_presets = self.get_build_presets( @@ -1101,7 +1099,7 @@ class BuildWorkfile: # Return list of loaded containers return loaded_containers - @with_avalon + @with_pipeline_io def get_build_presets(self, task_name, asset_doc): """ Returns presets to build workfile for task name. @@ -1117,7 +1115,7 @@ class BuildWorkfile: """ host_name = os.environ["AVALON_APP"] project_settings = get_project_settings( - avalon.io.Session["AVALON_PROJECT"] + legacy_io.Session["AVALON_PROJECT"] ) host_settings = project_settings.get(host_name) or {} @@ -1367,7 +1365,7 @@ class BuildWorkfile: "containers": containers } - @with_avalon + @with_pipeline_io def _load_containers( self, repres_by_subset_id, subsets_by_id, profiles_per_subset_id, loaders_by_name @@ -1493,7 +1491,7 @@ class BuildWorkfile: return loaded_containers - @with_avalon + @with_pipeline_io def _collect_last_version_repres(self, asset_entities): """Collect subsets, versions and representations for asset_entities. 
@@ -1532,13 +1530,13 @@ class BuildWorkfile: asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities} - subsets = list(avalon.io.find({ + subsets = list(legacy_io.find({ "type": "subset", "parent": {"$in": asset_entity_by_ids.keys()} })) subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} - sorted_versions = list(avalon.io.find({ + sorted_versions = list(legacy_io.find({ "type": "version", "parent": {"$in": subset_entity_by_ids.keys()} }).sort("name", -1)) @@ -1552,7 +1550,7 @@ class BuildWorkfile: subset_id_with_latest_version.append(subset_id) last_versions_by_id[version["_id"]] = version - repres = avalon.io.find({ + repres = legacy_io.find({ "type": "representation", "parent": {"$in": last_versions_by_id.keys()} }) @@ -1590,7 +1588,7 @@ class BuildWorkfile: return output -@with_avalon +@with_pipeline_io def get_creator_by_name(creator_name, case_sensitive=False): """Find creator plugin by name. @@ -1602,13 +1600,13 @@ def get_creator_by_name(creator_name, case_sensitive=False): Returns: Creator: Return first matching plugin or `None`. """ - from openpype.pipeline import LegacyCreator + from openpype.pipeline import discover_legacy_creator_plugins # Lower input creator name if is not case sensitive if not case_sensitive: creator_name = creator_name.lower() - for creator_plugin in avalon.api.discover(LegacyCreator): + for creator_plugin in discover_legacy_creator_plugins(): _creator_name = creator_plugin.__name__ # Lower creator plugin name if is not case sensitive @@ -1620,7 +1618,7 @@ def get_creator_by_name(creator_name, case_sensitive=False): return None -@with_avalon +@with_pipeline_io def change_timer_to_current_context(): """Called after context change to change timers. @@ -1639,9 +1637,9 @@ def change_timer_to_current_context(): log.warning("Couldn't start timer") return data = { - "project_name": avalon.io.Session["AVALON_PROJECT"], - "asset_name": avalon.io.Session["AVALON_ASSET"], - "task_name": avalon.io.Session["AVALON_TASK"] + "project_name": legacy_io.Session["AVALON_PROJECT"], + "asset_name": legacy_io.Session["AVALON_ASSET"], + "task_name": legacy_io.Session["AVALON_TASK"] } requests.post(rest_api_url, json=data) @@ -1703,7 +1701,7 @@ def _get_task_context_data_for_anatomy( "task": { "name": task_name, "type": task_type, - "short_name": project_task_type_data["short_name"] + "short": project_task_type_data["short_name"] } } @@ -1791,7 +1789,7 @@ def get_custom_workfile_template_by_string_context( """ if dbcon is None: - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB dbcon = AvalonMongoDB() @@ -1825,10 +1823,11 @@ def get_custom_workfile_template_by_string_context( ) +@with_pipeline_io def get_custom_workfile_template(template_profiles): """Filter and fill workfile template profiles by current context. - Current context is defined by `avalon.api.Session`. That's why this + Current context is defined by `legacy_io.Session`. That's why this function should be used only inside host where context is set and stable. Args: @@ -1838,15 +1837,13 @@ def get_custom_workfile_template(template_profiles): str: Path to template or None if none of profiles match current context. (Existence of formatted path is not validated.) 
""" - # Use `avalon.io` as Mongo connection - from avalon import io return get_custom_workfile_template_by_string_context( template_profiles, - io.Session["AVALON_PROJECT"], - io.Session["AVALON_ASSET"], - io.Session["AVALON_TASK"], - io + legacy_io.Session["AVALON_PROJECT"], + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"], + legacy_io ) @@ -1963,9 +1960,126 @@ def get_last_workfile( data.pop("comment", None) if not data.get("ext"): data["ext"] = extensions[0] + data["ext"] = data["ext"].replace('.', '') filename = StringTemplate.format_strict_template(file_template, data) if full_path: return os.path.normpath(os.path.join(workdir, filename)) return filename + + +@with_pipeline_io +def get_linked_ids_for_representations(project_name, repre_ids, dbcon=None, + link_type=None, max_depth=0): + """Returns list of linked ids of particular type (if provided). + + Goes from representations to version, back to representations + Args: + project_name (str) + repre_ids (list) or (ObjectId) + dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection + with Session. + link_type (str): ['reference', '..] + max_depth (int): limit how many levels of recursion + Returns: + (list) of ObjectId - linked representations + """ + # Create new dbcon if not passed and use passed project name + if not dbcon: + from openpype.pipeline import AvalonMongoDB + dbcon = AvalonMongoDB() + dbcon.Session["AVALON_PROJECT"] = project_name + # Validate that passed dbcon has same project + elif dbcon.Session["AVALON_PROJECT"] != project_name: + raise ValueError("Passed connection does not have right project") + + if not isinstance(repre_ids, list): + repre_ids = [repre_ids] + + version_ids = dbcon.distinct("parent", { + "_id": {"$in": repre_ids}, + "type": "representation" + }) + + match = { + "_id": {"$in": version_ids}, + "type": "version" + } + + graph_lookup = { + "from": project_name, + "startWith": "$data.inputLinks.id", + "connectFromField": "data.inputLinks.id", + "connectToField": "_id", + "as": "outputs_recursive", + "depthField": "depth" + } + if max_depth != 0: + # We offset by -1 since 0 basically means no recursion + # but the recursion only happens after the initial lookup + # for outputs. + graph_lookup["maxDepth"] = max_depth - 1 + + pipeline_ = [ + # Match + {"$match": match}, + # Recursive graph lookup for inputs + {"$graphLookup": graph_lookup} + ] + + result = dbcon.aggregate(pipeline_) + referenced_version_ids = _process_referenced_pipeline_result(result, + link_type) + + ref_ids = dbcon.distinct( + "_id", + filter={ + "parent": {"$in": list(referenced_version_ids)}, + "type": "representation" + } + ) + + return list(ref_ids) + + +def _process_referenced_pipeline_result(result, link_type): + """Filters result from pipeline for particular link_type. + + Pipeline cannot use link_type directly in a query. 
+    Returns:
+        (list)
+    """
+    referenced_version_ids = set()
+    correctly_linked_ids = set()
+    for item in result:
+        input_links = item["data"].get("inputLinks", [])
+        correctly_linked_ids = _filter_input_links(input_links,
+                                                   link_type,
+                                                   correctly_linked_ids)
+
+        # outputs_recursive in random order, sort by depth
+        outputs_recursive = sorted(item.get("outputs_recursive", []),
+                                   key=lambda d: d["depth"])
+
+        for output in outputs_recursive:
+            if output["_id"] not in correctly_linked_ids:  # leaf
+                continue
+
+            correctly_linked_ids = _filter_input_links(
+                output["data"].get("inputLinks", []),
+                link_type,
+                correctly_linked_ids)
+
+            referenced_version_ids.add(output["_id"])
+
+    return referenced_version_ids
+
+
+def _filter_input_links(input_links, link_type, correctly_linked_ids):
+    for input_link in input_links:
+        if not link_type or input_link["type"] == link_type:
+            correctly_linked_ids.add(input_link.get("id") or
                                     input_link.get("_id"))  # legacy
+
+    return correctly_linked_ids
diff --git a/openpype/lib/connections.py b/openpype/lib/connections.py
new file mode 100644
index 0000000000..91b745a4c1
--- /dev/null
+++ b/openpype/lib/connections.py
@@ -0,0 +1,38 @@
+import requests
+import os
+
+
+def requests_post(*args, **kwargs):
+    """Wrap requests' post method.
+
+    Disables SSL certificate validation if the ``OPENPYPE_DONT_VERIFY_SSL``
+    environment variable is found. This is useful when a Deadline or Muster
+    server is running with self-signed certificates and its certificate is
+    not added to trusted certificates on client machines.
+
+    Warning:
+        Disabling SSL certificate validation defeats one line
+        of defense SSL provides and it is not recommended.
+
+    """
+    if "verify" not in kwargs:
+        kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL")
+    return requests.post(*args, **kwargs)
+
+
+def requests_get(*args, **kwargs):
+    """Wrap requests' get method.
+
+    Disables SSL certificate validation if the ``OPENPYPE_DONT_VERIFY_SSL``
+    environment variable is found. This is useful when a Deadline or Muster
+    server is running with self-signed certificates and its certificate is
+    not added to trusted certificates on client machines.
+
+    Warning:
+        Disabling SSL certificate validation defeats one line
+        of defense SSL provides and it is not recommended.
+
+    """
+    if "verify" not in kwargs:
+        kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL")
+    return requests.get(*args, **kwargs)
diff --git a/openpype/lib/log.py b/openpype/lib/log.py
index 98a3bae8e6..2cdb7ec8e4 100644
--- a/openpype/lib/log.py
+++ b/openpype/lib/log.py
@@ -98,6 +98,10 @@ class PypeStreamHandler(logging.StreamHandler):
             self.flush()
         except (KeyboardInterrupt, SystemExit):
             raise
+
+        except OSError:
+            self.handleError(record)
+
         except Exception:
             print(repr(record))
             self.handleError(record)
@@ -212,8 +216,8 @@ class PypeLogger:
     # Collection name under database in Mongo
     log_collection_name = "logs"
 
-    # OPENPYPE_DEBUG
-    pype_debug = 0
+    # Logging level - OPENPYPE_LOG_LEVEL
+    log_level = None
 
     # Data same for all record documents
     process_data = None
@@ -227,10 +231,7 @@ class PypeLogger:
 
         logger = logging.getLogger(name or "__main__")
 
-        if cls.pype_debug > 0:
-            logger.setLevel(logging.DEBUG)
-        else:
-            logger.setLevel(logging.INFO)
+        logger.setLevel(cls.log_level)
 
         add_mongo_handler = cls.use_mongo_logging
         add_console_handler = True
@@ -329,6 +330,9 @@ class PypeLogger:
         # Define if should logging to mongo be used
         use_mongo_logging = bool(log4mongo is not None)
 
+        if use_mongo_logging:
+            use_mongo_logging = os.environ.get("OPENPYPE_LOG_TO_SERVER") == "1"
+
         # Set mongo id for process (ONLY ONCE)
         if use_mongo_logging and cls.mongo_process_id is None:
             try:
@@ -353,8 +357,16 @@ class PypeLogger:
         # Store result to class definition
         cls.use_mongo_logging = use_mongo_logging
 
-        # Define if is in OPENPYPE_DEBUG mode
-        cls.pype_debug = int(os.getenv("OPENPYPE_DEBUG") or "0")
+        # Resolve which logging level to use
+        log_level = os.getenv("OPENPYPE_LOG_LEVEL")
+        if not log_level:
+            # Check OPENPYPE_DEBUG for backwards compatibility
+            op_debug = os.getenv("OPENPYPE_DEBUG")
+            if op_debug and int(op_debug) > 0:
+                log_level = 10
+            else:
+                log_level = 20
+        cls.log_level = int(log_level)
 
         if not os.environ.get("OPENPYPE_MONGO"):
             cls.use_mongo_logging = False
diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py
index 14e5fe59f8..5c40aa4549 100644
--- a/openpype/lib/path_templates.py
+++ b/openpype/lib/path_templates.py
@@ -365,6 +365,7 @@ class TemplateResult(str):
         when value of key in data is dictionary but template expect string
         of number.
""" + used_values = None solved = None template = None @@ -383,6 +384,12 @@ class TemplateResult(str): new_obj.invalid_types = invalid_types return new_obj + def __copy__(self, *args, **kwargs): + return self.copy() + + def __deepcopy__(self, *args, **kwargs): + return self.copy() + def validate(self): if not self.solved: raise TemplateUnsolved( @@ -391,6 +398,17 @@ class TemplateResult(str): self.invalid_types ) + def copy(self): + cls = self.__class__ + return cls( + str(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + class TemplatesResultDict(dict): """Holds and wrap TemplateResults for easy bug report.""" diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index f11ba56865..bcbf06a0e8 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -72,9 +72,9 @@ def get_subset_name_with_asset_doc( family = family.rsplit(".", 1)[-1] if project_name is None: - import avalon.api + from openpype.pipeline import legacy_io - project_name = avalon.api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] asset_tasks = asset_doc.get("data", {}).get("tasks") or {} task_info = asset_tasks.get(task_name) or {} @@ -136,7 +136,7 @@ def get_subset_name( `get_subset_name_with_asset_doc` where asset document is expected. """ if dbcon is None: - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB dbcon = AvalonMongoDB() dbcon.Session["AVALON_PROJECT"] = project_name diff --git a/openpype/lib/profiles_filtering.py b/openpype/lib/profiles_filtering.py index 0bb901aff8..370703a68b 100644 --- a/openpype/lib/profiles_filtering.py +++ b/openpype/lib/profiles_filtering.py @@ -44,12 +44,6 @@ def _profile_exclusion(matching_profiles, logger): Returns: dict: Most matching profile. """ - - logger.info( - "Search for first most matching profile in match order:" - " Host name -> Task name -> Family." - ) - if not matching_profiles: return None @@ -168,6 +162,15 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): _keys_order.append(key) keys_order = tuple(_keys_order) + log_parts = " | ".join([ + "{}: \"{}\"".format(*item) + for item in key_values.items() + ]) + + logger.info( + "Looking for matching profile for: {}".format(log_parts) + ) + matching_profiles = None highest_profile_points = -1 # Each profile get 1 point for each matching filter. Profile with most @@ -205,11 +208,6 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): if profile_points == highest_profile_points: matching_profiles.append((profile, profile_scores)) - log_parts = " | ".join([ - "{}: \"{}\"".format(*item) - for item in key_values.items() - ]) - if not matching_profiles: logger.info( "None of profiles match your setup. {}".format(log_parts) @@ -221,4 +219,9 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): "More than one profile match your setup. 
{}".format(log_parts) ) - return _profile_exclusion(matching_profiles, logger) + profile = _profile_exclusion(matching_profiles, logger) + if profile: + logger.info( + "Profile selected: {}".format(profile) + ) + return profile diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py index 11fd0c0c3e..396479c725 100644 --- a/openpype/lib/project_backpack.py +++ b/openpype/lib/project_backpack.py @@ -25,7 +25,7 @@ from bson.json_util import ( CANONICAL_JSON_OPTIONS ) -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB DOCUMENTS_FILE_NAME = "database" METADATA_FILE_NAME = "metadata" diff --git a/openpype/lib/python_module_tools.py b/openpype/lib/python_module_tools.py index f62c848e4a..6fad3b547f 100644 --- a/openpype/lib/python_module_tools.py +++ b/openpype/lib/python_module_tools.py @@ -5,8 +5,9 @@ import importlib import inspect import logging +import six + log = logging.getLogger(__name__) -PY3 = sys.version_info[0] == 3 def import_filepath(filepath, module_name=None): @@ -28,7 +29,7 @@ def import_filepath(filepath, module_name=None): # Prepare module object where content of file will be parsed module = types.ModuleType(module_name) - if PY3: + if six.PY3: # Use loader so module has full specs module_loader = importlib.machinery.SourceFileLoader( module_name, filepath @@ -38,7 +39,7 @@ def import_filepath(filepath, module_name=None): # Execute module code and store content to module with open(filepath) as _stream: # Execute content and store it to module object - exec(_stream.read(), module.__dict__) + six.exec_(_stream.read(), module.__dict__) module.__file__ = filepath return module @@ -129,20 +130,12 @@ def classes_from_module(superclass, module): for name in dir(module): # It could be anything at this point obj = getattr(module, name) - if not inspect.isclass(obj): + if not inspect.isclass(obj) or obj is superclass: continue - # These are subclassed from nothing, not even `object` - if not len(obj.__bases__) > 0: - continue + if issubclass(obj, superclass): + classes.append(obj) - # Use string comparison rather than `issubclass` - # in order to support reloading of this module. - bases = recursive_bases_from_class(obj) - if not any(base.__name__ == superclass.__name__ for base in bases): - continue - - classes.append(obj) return classes @@ -228,7 +221,7 @@ def import_module_from_dirpath(dirpath, folder_name, dst_module_name=None): dst_module_name(str): Parent module name under which can be loaded module added. 
""" - if PY3: + if six.PY3: module = _import_module_from_dirpath_py3( dirpath, folder_name, dst_module_name ) diff --git a/openpype/lib/remote_publish.py b/openpype/lib/remote_publish.py index 9d97671a61..8a42daf4e9 100644 --- a/openpype/lib/remote_publish.py +++ b/openpype/lib/remote_publish.py @@ -1,13 +1,12 @@ import os from datetime import datetime -import sys -from bson.objectid import ObjectId import collections +from bson.objectid import ObjectId + import pyblish.util import pyblish.api -from openpype import uninstall from openpype.lib.mongo import OpenPypeMongoConnection from openpype.lib.plugin_tools import parse_json @@ -81,7 +80,6 @@ def publish(log, close_plugin_name=None): if result["error"]: log.error(error_format.format(**result)) - uninstall() if close_plugin: # close host app explicitly after error context = pyblish.api.Context() close_plugin().process(context) @@ -118,7 +116,6 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): if result["error"]: log.error(error_format.format(**result)) - uninstall() log_lines = [error_format.format(**result)] + log_lines dbcon.update_one( {"_id": _id}, diff --git a/openpype/lib/splash.txt b/openpype/lib/splash.txt deleted file mode 100644 index 833bcd4b9c..0000000000 --- a/openpype/lib/splash.txt +++ /dev/null @@ -1,413 +0,0 @@ - - - - * - - - - - - - .* - - - - - - * - .* - * - - - - . - * - .* - * - . - - . - * - .* - .* - .* - * - . - . - * - .* - .* - .* - * - . - _. - /** - \ * - \* - * - * - . - __. - ---* - \ \* - \ * - \* - * - . - \___. - /* * - \ \ * - \ \* - \ * - \* - . - |____. - /* * - \|\ * - \ \ * - \ \ * - \ \* - \/. - _/_____. - /* * - / \ * - \ \ * - \ \ * - \ \__* - \/__. - __________. - --*-- ___* - \ \ \/_* - \ \ __* - \ \ \_* - \ \____\* - \/____/. - \____________ . - /* ___ \* - \ \ \/_\ * - \ \ _____* - \ \ \___/* - \ \____\ * - \/____/ . - |___________ . - /* ___ \ * - \|\ \/_\ \ * - \ \ _____/ * - \ \ \___/ * - \ \____\ / * - \/____/ \. - _/__________ . - /* ___ \ * - / \ \/_\ \ * - \ \ _____/ * - \ \ \___/ ---* - \ \____\ / \__* - \/____/ \/__. - ____________ . - --*-- ___ \ * - \ \ \/_\ \ * - \ \ _____/ * - \ \ \___/ ---- * - \ \____\ / \____\* - \/____/ \/____/. - ____________ - /\ ___ \ . - \ \ \/_\ \ * - \ \ _____/ * - \ \ \___/ ---- * - \ \____\ / \____\ . - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ . - \ \ _____/ * - \ \ \___/ ---- * - \ \____\ / \____\ . - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ . - \ \ \___/ ---- * - \ \____\ / \____\ . - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ - \ \ \___/ ---- * - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ - \ \ \___/ ---- . 
- \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ _ - \ \ \___/ ---- - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ \ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ __\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ \__\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ \ - \ \____\ / \____\ \__\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ \ - \ \____\ / \____\ \__\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___. - \ \ \___/ ---- \ \\ - \ \____\ / \____\ \__\, - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ . - \ \ \___/ ---- \ \\ - \ \____\ / \____\ \__\\, - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ _. - \ \ \___/ ---- \ \\\ - \ \____\ / \____\ \__\\\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ __. - \ \ \___/ ---- \ \\ \ - \ \____\ / \____\ \__\\_/. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___. - \ \ \___/ ---- \ \\ \\ - \ \____\ / \____\ \__\\__\. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ . - \ \ \___/ ---- \ \\ \\ - \ \____\ / \____\ \__\\__\\. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ _. - \ \ \___/ ---- \ \\ \\\ - \ \____\ / \____\ \__\\__\\. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ __. - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\_. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ __. - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ * - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ O* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ ..oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . p.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . 
Py.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYp.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPe.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE c.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE C1.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE ClU.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE CluB.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . .. - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . .. - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . . - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . diff --git a/openpype/lib/terminal_splash.py b/openpype/lib/terminal_splash.py deleted file mode 100644 index 0ba2706a27..0000000000 --- a/openpype/lib/terminal_splash.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pype terminal animation.""" -import blessed -from pathlib import Path -from time import sleep - -NO_TERMINAL = False - -try: - term = blessed.Terminal() -except AttributeError: - # this happens when blessed cannot find proper terminal. - # If so, skip printing ascii art animation. 
-    NO_TERMINAL = True
-
-
-def play_animation():
-    """Play ASCII art Pype animation."""
-    if NO_TERMINAL:
-        return
-    print(term.home + term.clear)
-    frame_size = 7
-    splash_file = Path(__file__).parent / "splash.txt"
-    with splash_file.open("r") as sf:
-        animation = sf.readlines()
-
-    animation_length = int(len(animation) / frame_size)
-    current_frame = 0
-    for _ in range(animation_length):
-        frame = "".join(
-            scanline
-            for y, scanline in enumerate(
-                animation[current_frame: current_frame + frame_size]
-            )
-        )
-
-        with term.location(0, 0):
-            # term.aquamarine3_bold(frame)
-            print(f"{term.bold}{term.aquamarine3}{frame}{term.normal}")
-
-        sleep(0.02)
-        current_frame += frame_size
-    print(term.move_y(7))
diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py
index 6bab6a8160..f20bef3854 100644
--- a/openpype/lib/transcoding.py
+++ b/openpype/lib/transcoding.py
@@ -17,6 +17,9 @@ from .vendor_bin_utils import (
 # Max length of string that is supported by ffmpeg
 MAX_FFMPEG_STRING_LEN = 8196
 
+# Symbols not allowed in attributes passed to ffmpeg
+NOT_ALLOWED_FFMPEG_CHARS = ("\"", )
+
 # OIIO known xml tags
 STRING_TAGS = {
     "format"
@@ -367,14 +370,23 @@ def should_convert_for_ffmpeg(src_filepath):
         return None
 
     for attr_value in input_info["attribs"].values():
-        if (
-            isinstance(attr_value, str)
-            and len(attr_value) > MAX_FFMPEG_STRING_LEN
-        ):
+        if not isinstance(attr_value, str):
+            continue
+
+        if len(attr_value) > MAX_FFMPEG_STRING_LEN:
             return True
+
+        for char in NOT_ALLOWED_FFMPEG_CHARS:
+            if char in attr_value:
+                return True
     return False
 
 
+# Deprecated since 2022-04-20
+# - Reason - Doesn't convert sequences the right way: can't handle gaps,
+#       reuses the first frame for all frames and changes filenames when
+#       the input is a sequence.
+#   - use 'convert_input_paths_for_ffmpeg' instead
 def convert_for_ffmpeg(
     first_input_path,
     output_dir,
@@ -402,6 +414,12 @@ def convert_for_ffmpeg(
     if logger is None:
         logger = logging.getLogger(__name__)
 
+    logger.warning((
+        "DEPRECATED: 'openpype.lib.transcoding.convert_for_ffmpeg' is"
+        " a deprecated conversion function for FFmpeg. Please replace usage"
+        " with 'openpype.lib.transcoding.convert_input_paths_for_ffmpeg'"
+    ))
+
     ext = os.path.splitext(first_input_path)[1].lower()
     if ext != ".exr":
         raise ValueError((
@@ -422,7 +440,12 @@ def convert_for_ffmpeg(
         compression = "none"
 
     # Prepare subprocess arguments
-    oiio_cmd = [get_oiio_tools_path()]
+    oiio_cmd = [
+        get_oiio_tools_path(),
+
+        # Don't add any additional attributes
+        "--nosoftwareattrib",
+    ]
     # Add input compression if available
     if compression:
         oiio_cmd.extend(["--compression", compression])
@@ -458,28 +481,45 @@ def convert_for_ffmpeg(
         "--frames", "{}-{}".format(input_frame_start, input_frame_end)
     ])
 
-    ignore_attr_changes_added = False
     for attr_name, attr_value in input_info["attribs"].items():
         if not isinstance(attr_value, str):
             continue
 
         # Remove attributes that have string value longer than allowed length
-        # for ffmpeg
+        # for ffmpeg or when they contain disallowed symbols
+        erase_reason = "Missing reason"
+        erase_attribute = False
         if len(attr_value) > MAX_FFMPEG_STRING_LEN:
-            if not ignore_attr_changes_added:
-                # Attrite changes won't be added to attributes itself
-                ignore_attr_changes_added = True
-                oiio_cmd.append("--sansattrib")
+            erase_attribute = True
+            erase_reason = "has a value that is too long ({} chars)".format(
+                len(attr_value)
+            )
+
+        if not erase_attribute:
+            for char in NOT_ALLOWED_FFMPEG_CHARS:
+                if char in attr_value:
+                    erase_attribute = True
+                    erase_reason = (
+                        "contains the unsupported character \"{}\""
+# Deprecated since 2022 4 20 +# - Reason - Doesn't convert sequences the right way: it can't handle gaps, +# reuses the first frame for all frames and changes filenames +# when the input is a sequence. +# - use 'convert_input_paths_for_ffmpeg' instead def convert_for_ffmpeg( first_input_path, output_dir, @@ -402,6 +414,12 @@ if logger is None: logger = logging.getLogger(__name__) + logger.warning(( + "DEPRECATED: 'openpype.lib.transcoding.convert_for_ffmpeg' is" + " a deprecated conversion function for FFmpeg. Please replace usage" + " with 'openpype.lib.transcoding.convert_input_paths_for_ffmpeg'" + )) + ext = os.path.splitext(first_input_path)[1].lower() if ext != ".exr": raise ValueError(( "Function 'convert_for_ffmpeg' currently support only" " \".exr\" extension. Got \"{}\"." ).format(ext)) @@ -422,7 +440,12 @@ compression = "none" # Prepare subprocess arguments - oiio_cmd = [get_oiio_tools_path()] + oiio_cmd = [ + get_oiio_tools_path(), + + # Don't add any additional attributes + "--nosoftwareattrib", + ] # Add input compression if available if compression: oiio_cmd.extend(["--compression", compression]) @@ -458,28 +481,44 @@ "--frames", "{}-{}".format(input_frame_start, input_frame_end) ]) - ignore_attr_changes_added = False for attr_name, attr_value in input_info["attribs"].items(): if not isinstance(attr_value, str): continue # Remove attributes that have string value longer than allowed length - # for ffmpeg + # for ffmpeg or when they contain disallowed symbols + erase_reason = "Missing reason" + erase_attribute = False if len(attr_value) > MAX_FFMPEG_STRING_LEN: - if not ignore_attr_changes_added: - # Attribute changes won't be added to attributes itself - ignore_attr_changes_added = True - oiio_cmd.append("--sansattrib") + erase_reason = "has too long value ({} chars).".format( + len(attr_value) + ) + erase_attribute = True + + if not erase_attribute: + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + erase_attribute = True + erase_reason = ( + "contains unsupported character \"{}\"." + ).format(char) + break + + if erase_attribute: # Set attribute to empty string logger.info(( - "Removed attribute \"{}\" from metadata" - " because has too long value ({} chars)." - ).format(attr_name, len(attr_value))) + "Removed attribute \"{}\" from metadata because it {}" + ).format(attr_name, erase_reason)) oiio_cmd.extend(["--eraseattrib", attr_name]) # Add last argument - path to output - base_file_name = os.path.basename(first_input_path) - output_path = os.path.join(output_dir, base_file_name) + if is_sequence: + ext = os.path.splitext(first_input_path)[1] + base_filename = "tmp.%{:0>2}d{}".format( + len(str(input_frame_end)), ext + ) + else: + base_filename = os.path.basename(first_input_path) + output_path = os.path.join(output_dir, base_filename) oiio_cmd.extend([ "-o", output_path ]) @@ -488,6 +527,130 @@ run_subprocess(oiio_cmd, logger=logger)
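The erase decision used by both conversion functions reduces to two independent checks on every string attribute. The same logic as a standalone sketch (the helper name and sample values are illustrative, not part of the patch):

```python
MAX_FFMPEG_STRING_LEN = 8196
NOT_ALLOWED_FFMPEG_CHARS = ("\"", )


def attribute_erase_reason(attr_value):
    """Return a reason to erase the attribute for ffmpeg, or None to keep it."""
    if not isinstance(attr_value, str):
        return None

    if len(attr_value) > MAX_FFMPEG_STRING_LEN:
        return "has too long value ({} chars)".format(len(attr_value))

    for char in NOT_ALLOWED_FFMPEG_CHARS:
        if char in attr_value:
            return "contains unsupported character \"{}\"".format(char)
    return None


# A quote in the value or an over-long value triggers '--eraseattrib'
assert attribute_erase_reason('say "hi"') is not None
assert attribute_erase_reason("x" * 9000) is not None
assert attribute_erase_reason("short and clean") is None
```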
+def convert_input_paths_for_ffmpeg( + input_paths, + output_dir, + logger=None +): + """Convert source files to a format supported in ffmpeg. + + Currently can convert only EXRs. The input filepaths should be files + of the same type. Information about input is loaded only from first found + file. + + Filenames of input files are kept so make sure that the output directory + is not the same directory as the input files'. + - This way it can handle gaps and can keep input filenames without + handling the frame template + + Args: + input_paths (list): Paths that should be converted. It is expected to + contain a single file or an image sequence of the same type. + output_dir (str): Path to directory where output will be rendered. + Must not be the same as the input's directory. + logger (logging.Logger): Logger used for logging. + + Raises: + ValueError: If input filepath has an extension not supported by the + function. Currently only the ".exr" extension is supported. + """ + if logger is None: + logger = logging.getLogger(__name__) + + first_input_path = input_paths[0] + ext = os.path.splitext(first_input_path)[1].lower() + if ext != ".exr": + raise ValueError(( + "Function 'convert_input_paths_for_ffmpeg' currently supports only" + " \".exr\" extension. Got \"{}\"." + ).format(ext)) + + input_info = get_oiio_info_for_input(first_input_path) + + # Change compression only if source compression is "dwaa" or "dwab" + # - they're not supported in ffmpeg + compression = input_info["attribs"].get("compression") + if compression in ("dwaa", "dwab"): + compression = "none" + + # Collect channels to export + channel_names = input_info["channelnames"] + review_channels = get_convert_rgb_channels(channel_names) + if review_channels is None: + raise ValueError( + "Couldn't find channels that can be used for conversion." + ) + + red, green, blue, alpha = review_channels + input_channels = [red, green, blue] + channels_arg = "R={},G={},B={}".format(red, green, blue) + if alpha is not None: + channels_arg += ",A={}".format(alpha) + input_channels.append(alpha) + input_channels_str = ",".join(input_channels) + + for input_path in input_paths: + # Prepare subprocess arguments + oiio_cmd = [ + get_oiio_tools_path(), + + # Don't add any additional attributes + "--nosoftwareattrib", + ] + # Add input compression if available + if compression: + oiio_cmd.extend(["--compression", compression]) + + oiio_cmd.extend([ + # Tell oiiotool which channels should be loaded + # - other channels are not loaded to memory so this helps to + # avoid memory leak issues + "-i:ch={}".format(input_channels_str), input_path, + # Tell oiiotool which channels should be put to top stack + # (and output) + "--ch", channels_arg + ]) + + for attr_name, attr_value in input_info["attribs"].items(): + if not isinstance(attr_value, str): + continue + + # Remove attributes that have string value longer than allowed + # length for ffmpeg or when they contain disallowed symbols + erase_reason = "Missing reason" + erase_attribute = False + if len(attr_value) > MAX_FFMPEG_STRING_LEN: + erase_reason = "has too long value ({} chars).".format( + len(attr_value) + ) + erase_attribute = True + + if not erase_attribute: + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + erase_attribute = True + erase_reason = ( + "contains unsupported character \"{}\"." + ).format(char) + break + + if erase_attribute: + # Set attribute to empty string + logger.info(( + "Removed attribute \"{}\" from metadata because it {}" + ).format(attr_name, erase_reason)) + oiio_cmd.extend(["--eraseattrib", attr_name]) + + # Add last argument - path to output + base_filename = os.path.basename(input_path) + output_path = os.path.join(output_dir, base_filename) + oiio_cmd.extend([ + "-o", output_path + ]) + + logger.debug("Conversion command: {}".format(" ".join(oiio_cmd))) + run_subprocess(oiio_cmd, logger=logger) + + # FFMPEG functions def get_ffprobe_data(path_to_file, logger=None): """Load data about entered filepath via ffprobe.
@@ -564,9 +727,9 @@ def get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd=None): def _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd): input_format = ffprobe_data["format"] format_tags = input_format.get("tags") or {} - product_name = format_tags.get("product_name") or "" + operational_pattern_ul = format_tags.get("operational_pattern_ul") or "" output = [] - if "opatom" in product_name.lower(): + if operational_pattern_ul == "060e2b34.04010102.0d010201.10030000": output.extend(["-f", "mxf_opatom"]) return output diff --git a/openpype/lib/usdlib.py b/openpype/lib/usdlib.py index 3ae7430c7b..86de19b4be 100644 --- a/openpype/lib/usdlib.py +++ b/openpype/lib/usdlib.py @@ -8,7 +8,10 @@ except ImportError: # Allow to fall back on Multiverse 6.3.0+ pxr usd library from mvpxr import Usd, UsdGeom, Sdf, Kind -from avalon import io, api +from openpype.pipeline import ( + registered_root, + legacy_io, +) log = logging.getLogger(__name__) @@ -125,7 +128,7 @@ def create_model(filename, asset, variant_subsets): """ - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -175,7 +178,7 @@ def create_shade(filename, asset, variant_subsets): """ - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -210,7 +213,7 @@ def create_shade_variation(filename, asset, model_variant, shade_variants): """ - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -310,22 +313,21 @@ def get_usd_master_path(asset, subset, representation): """ - project = io.find_one( + project = legacy_io.find_one( {"type": "project"}, projection={"config.template.publish": True} ) template = project["config"]["template"]["publish"] - if isinstance(asset, dict) and "silo" in asset and "name" in asset: + if isinstance(asset, dict) and "name" in asset: # Allow explicitly passing asset document asset_doc = asset else: - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = legacy_io.find_one({"name": asset, "type": "asset"}) path = template.format( **{ - "root": api.registered_root(), - "project": api.Session["AVALON_PROJECT"], - "silo": asset_doc["silo"], + "root": registered_root(), + "project": legacy_io.Session["AVALON_PROJECT"], "asset": asset_doc["name"], "subset": subset, "representation": representation, diff --git a/openpype/modules/avalon_apps/avalon_app.py b/openpype/modules/avalon_apps/avalon_app.py index 51a22323f1..1d21de129b 100644 --- a/openpype/modules/avalon_apps/avalon_app.py +++ b/openpype/modules/avalon_apps/avalon_app.py @@ -1,5 +1,5 @@ import os -import openpype + from openpype.modules import OpenPypeModule from openpype_interfaces import ITrayModule @@ -26,7 +26,8 @@ class AvalonModule(OpenPypeModule, ITrayModule): self.avalon_mongo_timeout = avalon_mongo_timeout # Tray attributes - self.libraryloader = None + self._library_loader_imported = None + self._library_loader_window = None self.rest_api_obj = None def get_global_environments(self): @@ -41,21 +42,11 @@ class AvalonModule(OpenPypeModule, ITrayModule): def tray_init(self): # Add library tool + self._library_loader_imported = False try: - from Qt import QtCore from openpype.tools.libraryloader import LibraryLoaderWindow - 
libraryloader = LibraryLoaderWindow( - show_projects=True, - show_libraries=True - ) - # Remove always on top flag for tray - window_flags = libraryloader.windowFlags() - if window_flags | QtCore.Qt.WindowStaysOnTopHint: - window_flags ^= QtCore.Qt.WindowStaysOnTopHint - libraryloader.setWindowFlags(window_flags) - self.libraryloader = libraryloader - + self._library_loader_imported = True except Exception: self.log.warning( "Couldn't load Library loader tool for tray.", @@ -64,7 +55,7 @@ class AvalonModule(OpenPypeModule, ITrayModule): # Definition of Tray menu def tray_menu(self, tray_menu): - if self.libraryloader is None: + if not self._library_loader_imported: return from Qt import QtWidgets @@ -84,17 +75,31 @@ class AvalonModule(OpenPypeModule, ITrayModule): return def show_library_loader(self): - if self.libraryloader is None: - return + if self._library_loader_window is None: + from Qt import QtCore + from openpype.tools.libraryloader import LibraryLoaderWindow + from openpype.pipeline import install_openpype_plugins - self.libraryloader.show() + libraryloader = LibraryLoaderWindow( + show_projects=True, + show_libraries=True + ) + # Remove always on top flag for tray + window_flags = libraryloader.windowFlags() + if window_flags | QtCore.Qt.WindowStaysOnTopHint: + window_flags ^= QtCore.Qt.WindowStaysOnTopHint + libraryloader.setWindowFlags(window_flags) + self._library_loader_window = libraryloader + + install_openpype_plugins() + + self._library_loader_window.show() # Raise and activate the window # for MacOS - self.libraryloader.raise_() + self._library_loader_window.raise_() # for Windows - self.libraryloader.activateWindow() - self.libraryloader.refresh() + self._library_loader_window.activateWindow() # Webserver module implementation def webserver_initialization(self, server_manager): diff --git a/openpype/modules/avalon_apps/rest_api.py b/openpype/modules/avalon_apps/rest_api.py index 533050fc0c..b35f5bf357 100644 --- a/openpype/modules/avalon_apps/rest_api.py +++ b/openpype/modules/avalon_apps/rest_api.py @@ -1,4 +1,3 @@ -import os import json import datetime @@ -6,7 +5,7 @@ from bson.objectid import ObjectId from aiohttp.web_response import Response -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from openpype_modules.webserver.base_routes import RestApiEndpoint diff --git a/openpype/modules/base.py b/openpype/modules/base.py index 175957ae39..23c908299f 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -28,26 +28,17 @@ from openpype.settings.lib import ( ) from openpype.lib import PypeLogger - -DEFAULT_OPENPYPE_MODULES = ( - "avalon_apps", - "clockify", - "log_viewer", - "deadline", - "muster", - "royalrender", - "python_console_interpreter", - "ftrack", - "slack", - "webserver", - "launcher_action", - "project_manager_action", - "settings_action", - "standalonepublish_action", - "traypublish_action", - "job_queue", - "timers_manager", - "sync_server", +# Files that will be always ignored on modules import +IGNORED_FILENAMES = ( + "__pycache__", +) +# Files ignored on modules import from "./openpype/modules" +IGNORED_DEFAULT_FILENAMES = ( + "__init__.py", + "base.py", + "interfaces.py", + "example_addons", + "default_modules", ) @@ -146,9 +137,16 @@ class _LoadCache: def get_default_modules_dir(): """Path to default OpenPype modules.""" + current_dir = os.path.abspath(os.path.dirname(__file__)) - return os.path.join(current_dir, "default_modules") + output = [] + for folder_name in ("default_modules", ): + path = 
os.path.join(current_dir, folder_name) + if os.path.exists(path) and os.path.isdir(path): + output.append(path) + + return output def get_dynamic_modules_dirs(): @@ -186,7 +184,7 @@ def get_module_dirs(): """List of paths where OpenPype modules can be found.""" _dirpaths = [] - _dirpaths.append(get_default_modules_dir()) + _dirpaths.extend(get_default_modules_dir()) _dirpaths.extend(get_dynamic_modules_dirs()) dirpaths = [] @@ -292,25 +290,54 @@ def _load_modules(): log = PypeLogger.get_logger("ModulesLoader") + current_dir = os.path.abspath(os.path.dirname(__file__)) + processed_paths = set() + processed_paths.add(current_dir) # Import default modules imported from 'openpype.modules' - for default_module_name in DEFAULT_OPENPYPE_MODULES: + for filename in os.listdir(current_dir): + # Ignore filenames + if ( + filename in IGNORED_FILENAMES + or filename in IGNORED_DEFAULT_FILENAMES + ): + continue + + fullpath = os.path.join(current_dir, filename) + basename, ext = os.path.splitext(filename) + + if os.path.isdir(fullpath): + # Check existence of init file + init_path = os.path.join(fullpath, "__init__.py") + if not os.path.exists(init_path): + log.debug(( + "Module directory does not contain __init__.py file {}" + ).format(fullpath)) + continue + + elif ext not in (".py", ): + continue + try: - import_str = "openpype.modules.{}".format(default_module_name) - new_import_str = "{}.{}".format(modules_key, default_module_name) + import_str = "openpype.modules.{}".format(basename) + new_import_str = "{}.{}".format(modules_key, basename) default_module = __import__(import_str, fromlist=("", )) sys.modules[new_import_str] = default_module - setattr(openpype_modules, default_module_name, default_module) + setattr(openpype_modules, basename, default_module) except Exception: msg = ( "Failed to import default module '{}'." - ).format(default_module_name) + ).format(basename) log.error(msg, exc_info=True)
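The loop above relies on `__import__` plus a `sys.modules` alias so every discovered module is reachable under the dynamic `openpype_modules` namespace as well. A reduced, runnable sketch of that aliasing pattern (stdlib `json` stands in for a module folder):

```python
import sys
import types

# Parent namespace module (OpenPype creates a similar dynamic package).
parent = types.ModuleType("openpype_modules")
sys.modules["openpype_modules"] = parent

# Import a real package by its dotted path...
module = __import__("json", fromlist=("", ))

# ...then register the same module object under the alias namespace and
# attach it as an attribute, mirroring the setattr() in the loop above.
sys.modules["openpype_modules.json"] = module
setattr(parent, "json", module)

import openpype_modules.json  # resolved straight from sys.modules
assert openpype_modules.json is module
```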
# Look for OpenPype modules in paths defined with `get_module_dirs` # - dynamically imported OpenPype modules and addons - dirpaths = get_module_dirs() - for dirpath in dirpaths: + for dirpath in get_module_dirs(): + # Skip already processed paths + if dirpath in processed_paths: + continue + processed_paths.add(dirpath) + if not os.path.exists(dirpath): log.warning(( "Could not find path when loading OpenPype modules \"{}\"" @@ -319,12 +346,24 @@ for filename in os.listdir(dirpath): # Ignore filenames - if filename in ("__pycache__", ): + if filename in IGNORED_FILENAMES: continue fullpath = os.path.join(dirpath, filename) basename, ext = os.path.splitext(filename) + if os.path.isdir(fullpath): + # Check existence of init file + init_path = os.path.join(fullpath, "__init__.py") + if not os.path.exists(init_path): + log.debug(( + "Module directory does not contain __init__.py file {}" + ).format(fullpath)) + continue + + elif ext not in (".py", ): + continue + # TODO add more logic how to define if folder is module or not # - check manifest and content of manifest try: diff --git a/openpype/modules/clockify/launcher_actions/ClockifyStart.py b/openpype/modules/clockify/launcher_actions/ClockifyStart.py index db51964eb7..4669f98b01 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifyStart.py +++ b/openpype/modules/clockify/launcher_actions/ClockifyStart.py @@ -1,12 +1,15 @@ -from avalon import api, io from openpype.api import Logger +from openpype.pipeline import ( + legacy_io, + LauncherAction, +) from openpype_modules.clockify.clockify_api import ClockifyAPI -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) -class ClockifyStart(api.Action): +class ClockifyStart(LauncherAction): name = "clockify_start_timer" label = "Clockify - Start Timer" @@ -26,7 +29,7 @@ task_name = session['AVALON_TASK'] description = asset_name - asset = io.find_one({ + asset = legacy_io.find_one({ 'type': 'asset', 'name': asset_name }) diff --git a/openpype/modules/clockify/launcher_actions/ClockifySync.py b/openpype/modules/clockify/launcher_actions/ClockifySync.py index 02982d373a..356bbd0306 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifySync.py +++ b/openpype/modules/clockify/launcher_actions/ClockifySync.py @@ -1,10 +1,14 @@ -from avalon import api, io from openpype_modules.clockify.clockify_api import ClockifyAPI from openpype.api import Logger -log = Logger().get_logger(__name__) +from openpype.pipeline import ( + legacy_io, + LauncherAction, +) + +log = Logger.get_logger(__name__) -class ClockifySync(api.Action): +class ClockifySync(LauncherAction): name = "sync_to_clockify" label = "Sync to Clockify" @@ -22,10 +26,10 @@ projects_to_sync = [] if project_name.strip() == '' or project_name is None: - for project in io.projects(): + for project in legacy_io.projects(): projects_to_sync.append(project) else: - project = io.find_one({'type': 'project'}) + project = legacy_io.find_one({'type': 'project'}) projects_to_sync.append(project) projects_info = {}
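The recurring migration in these hunks swaps `avalon.api`/`avalon.io` for `openpype.pipeline.legacy_io`, which exposes the same Mongo-backed interface. A minimal sketch of the replacement pattern (asset name illustrative; assumes a configured OpenPype Mongo session):

```python
from openpype.pipeline import legacy_io

# Session keys keep their AVALON_* names through the migration.
project_name = legacy_io.Session["AVALON_PROJECT"]

# Queries mirror the old 'avalon.io' call signatures one to one.
asset_doc = legacy_io.find_one({"type": "asset", "name": "sh010"})
```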
diff --git a/openpype/modules/deadline/deadline_module.py b/openpype/modules/deadline/deadline_module.py index 1a179e9aaf..c30db75188 100644 --- a/openpype/modules/deadline/deadline_module.py +++ b/openpype/modules/deadline/deadline_module.py @@ -1,8 +1,19 @@ import os +import requests +import six +import sys + +from openpype.lib import requests_get, PypeLogger from openpype.modules import OpenPypeModule from openpype_interfaces import IPluginPaths +class DeadlineWebserviceError(Exception): + """ + Exception to throw when connection to Deadline server fails. + """ + + class DeadlineModule(OpenPypeModule, IPluginPaths): name = "deadline" @@ -32,3 +43,35 @@ return { "publish": [os.path.join(current_dir, "plugins", "publish")] } + + @staticmethod + def get_deadline_pools(webservice, log=None): + # type: (str, PypeLogger) -> list + """Get pools from Deadline. + Args: + webservice (str): Server url. + log (Logger): Logger to log messages to (optional). + Returns: + list: Pools. + Raises: + DeadlineWebserviceError: If deadline webservice is unreachable. + + """ + if not log: + log = PypeLogger.get_logger(__name__) + + argument = "{}/api/pools?NamesOnly=true".format(webservice) + try: + response = requests_get(argument) + except requests.exceptions.ConnectionError as exc: + msg = 'Cannot connect to DL web service {}'.format(webservice) + log.error(msg) + six.reraise( + DeadlineWebserviceError, + DeadlineWebserviceError('{} - {}'.format(msg, exc)), + sys.exc_info()[2]) + if not response.ok: + log.warning("No pools retrieved") + return [] + + return response.json()
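`get_deadline_pools` added above is essentially a thin wrapper over one Deadline webservice endpoint. A hedged usage sketch (server URL and pool name are illustrative; a running webservice is assumed):

```python
from openpype.modules.deadline.deadline_module import DeadlineModule

webservice = "http://localhost:8082"  # illustrative default URL
pools = DeadlineModule.get_deadline_pools(webservice)

# ValidateDeadlinePools later reduces to a membership test like this:
if "maya" not in pools:
    print("Configured primary 'maya' not present on Deadline")
```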
diff --git a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py index 1bc4eaa067..a7035cd99f 100644 --- a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py +++ b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py @@ -11,7 +11,7 @@ import pyblish.api class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): """Collect Deadline Webservice URL from instance.""" - order = pyblish.api.CollectorOrder + 0.02 + order = pyblish.api.CollectorOrder + 0.415 label = "Deadline Webservice from the Instance" families = ["rendering"] diff --git a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py index fc056342a8..e6ad6a9aa1 100644 --- a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py +++ b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py @@ -6,7 +6,7 @@ import pyblish.api class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin): """Collect default Deadline Webservice URL.""" - order = pyblish.api.CollectorOrder + 0.01 + order = pyblish.api.CollectorOrder + 0.410 label = "Default Deadline Webservice" pass_mongo_url = False diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py b/openpype/modules/deadline/plugins/publish/collect_pools.py new file mode 100644 index 0000000000..48130848d5 --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/collect_pools.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +"""Collect Deadline pools. Choose default one from Settings + +""" +import pyblish.api + + +class CollectDeadlinePools(pyblish.api.InstancePlugin): + """Collect pools from instance if present, from Settings otherwise.""" + + order = pyblish.api.CollectorOrder + 0.420 + label = "Collect Deadline Pools" + families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + + primary_pool = None + secondary_pool = None + + def process(self, instance): + if not instance.data.get("primaryPool"): + instance.data["primaryPool"] = self.primary_pool or "none" + + if not instance.data.get("secondaryPool"): + instance.data["secondaryPool"] = self.secondary_pool or "none" diff --git a/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml b/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml new file mode 100644 index 0000000000..0e7d72910e --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml @@ -0,0 +1,31 @@ +<?xml version="1.0" encoding="UTF-8"?> +<root> +<error id="main"> +<title>Scene setting</title> +<description> +## Invalid Deadline pools found + +Configured pools don't match what is set in Deadline. + +{invalid_value_str} + +### How to repair? + +If your instance had deadline pools set on creation, remove or +change them. + +In other cases, ask an admin to change them in Settings. + +Available deadline pools: {pools_str}. +</description> +<detail> +### __Detailed Info__ + +This error is shown when a deadline pool is not on Deadline anymore. It +can happen when republishing an old workfile which was created with +previous deadline pools, +or when someone changed pools on the Deadline side but didn't modify +OpenPype Settings. +</detail> +</error> +</root> \ No newline at end of file diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index c499c14d40..b6584f239e 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -3,10 +3,9 @@ import attr import getpass import pyblish.api -from avalon import api - from openpype.lib import env_value_to_bool from openpype.lib.delivery import collect_frames +from openpype.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo @@ -37,8 +36,6 @@ priority = 50 chunk_size = 1000000 - primary_pool = None - secondary_pool = None group = None department = None multiprocess = True @@ -62,8 +59,8 @@ dln_job_info.Frames = frame_range dln_job_info.Priority = self.priority - dln_job_info.Pool = self.primary_pool - dln_job_info.SecondaryPool = self.secondary_pool + dln_job_info.Pool = self._instance.data.get("primaryPool") + dln_job_info.SecondaryPool = self._instance.data.get("secondaryPool") dln_job_info.Group = self.group dln_job_info.Department = self.department dln_job_info.ChunkSize = self.chunk_size @@ -89,7 +86,7 @@ keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) for key in keys: val = environment.get(key) if val: diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py index 918efb6630..912f0f4026 100644 --- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py +++
b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py @@ -8,8 +8,8 @@ import re import attr import pyblish.api -from avalon import api +from openpype.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo @@ -241,8 +241,6 @@ class HarmonySubmitDeadline( optional = True use_published = False - primary_pool = "" - secondary_pool = "" priority = 50 chunk_size = 1000000 group = "none" @@ -259,8 +257,8 @@ class HarmonySubmitDeadline( # for now, get those from presets. Later on it should be # configurable in Harmony UI directly. job_info.Priority = self.priority - job_info.Pool = self.primary_pool - job_info.SecondaryPool = self.secondary_pool + job_info.Pool = self._instance.data.get("primaryPool") + job_info.SecondaryPool = self._instance.data.get("secondaryPool") job_info.ChunkSize = self.chunk_size job_info.BatchName = os.path.basename(self._instance.data["source"]) job_info.Department = self.department @@ -282,7 +280,7 @@ class HarmonySubmitDeadline( keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) for key in keys: val = environment.get(key) if val: diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py index c683eb68a8..f834ae7e92 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -4,10 +4,10 @@ import json import requests import hou -from avalon import api, io - import pyblish.api +from openpype.pipeline import legacy_io + class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): """Submit Houdini scene to perform a local publish in Deadline. @@ -35,7 +35,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): ), "Errors found, aborting integration.." # Deadline connection - AVALON_DEADLINE = api.Session.get( + AVALON_DEADLINE = legacy_io.Session.get( "AVALON_DEADLINE", "http://localhost:8082" ) assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" @@ -55,7 +55,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): scenename = os.path.basename(scene) # Get project code - project = io.find_one({"type": "project"}) + project = legacy_io.find_one({"type": "project"}) code = project["data"].get("code", project["name"]) job_name = "{scene} [PUBLISH]".format(scene=scenename) @@ -137,7 +137,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): environment = dict( {key: os.environ[key] for key in keys if key in os.environ}, - **api.Session + **legacy_io.Session ) environment["PYBLISH_ACTIVE_INSTANCES"] = ",".join(instances) diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index 59aeb68b79..aca88c7440 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -3,11 +3,11 @@ import json import getpass import requests -from avalon import api - import pyblish.api -import hou +# import hou ??? 
+ +from openpype.pipeline import legacy_io class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): @@ -71,7 +71,8 @@ "UserName": deadline_user, "Plugin": "Houdini", - "Pool": "houdini_redshift", # todo: remove hardcoded pool + "Pool": instance.data.get("primaryPool"), + "SecondaryPool": instance.data.get("secondaryPool"), "Frames": frames, "ChunkSize": instance.data.get("chunkSize", 10), @@ -106,7 +107,7 @@ keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) payload["JobInfo"].update({ "EnvironmentKeyValue%d" % index: "{key}={value}".format( @@ -140,7 +141,7 @@ def submit(self, instance, payload): - AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE", + AVALON_DEADLINE = legacy_io.Session.get("AVALON_DEADLINE", "http://localhost:8082") assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"
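Across the submitter plugins in these hunks, hardcoded pool class attributes are replaced by per-instance data filled earlier by `CollectDeadlinePools`. The shared pattern, reduced to a sketch (dictionary keys follow Deadline's JobInfo naming; sample values are illustrative):

```python
def job_info_pools(instance_data):
    """Build the pool part of a Deadline JobInfo payload.

    CollectDeadlinePools has already set 'primaryPool'/'secondaryPool'
    on the instance, falling back to Settings and then to "none".
    """
    return {
        "Pool": instance_data.get("primaryPool"),
        "SecondaryPool": instance_data.get("secondaryPool"),
    }


# Example instance data as the collector would leave it
print(job_info_pools({"primaryPool": "maya", "secondaryPool": "none"}))
```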
diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 15a6f8d828..8f776a3371 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -32,10 +32,11 @@ import requests from maya import cmds -from avalon import api import pyblish.api +from openpype.lib import requests_post from openpype.hosts.maya.api import lib +from openpype.pipeline import legacy_io # Documentation for keys available at: # https://docs.thinkboxsoftware.com @@ -187,6 +188,10 @@ filename_0 = re.sub('_<RenderPass>', '_beauty', filename_0, flags=re.IGNORECASE) prefix_attr = "defaultRenderGlobals.imageFilePrefix" + + scene = cmds.file(query=True, sceneName=True) + scene, _ = os.path.splitext(os.path.basename(scene)) + if renderer == "vray": renderlayer = renderlayer.split("_")[-1] # Maya's renderSettings function does not return V-Ray file extension @@ -206,8 +211,7 @@ filename_prefix = cmds.getAttr(prefix_attr) # we need to determine path for vray as maya `renderSettings` query # does not work for vray. - scene = cmds.file(query=True, sceneName=True) - scene, _ = os.path.splitext(os.path.basename(scene)) + filename_0 = re.sub('<Scene>', scene, filename_prefix, flags=re.IGNORECASE) # noqa: E501 filename_0 = re.sub('<Layer>', renderlayer, filename_0, flags=re.IGNORECASE) # noqa: E501 filename_0 = "{}.{}.{}".format( @@ -215,6 +219,39 @@ filename_0 = os.path.normpath(os.path.join(root, filename_0)) elif renderer == "renderman": prefix_attr = "rmanGlobals.imageFileFormat" + # NOTE: This is guessing extensions from renderman display types. + # Some of them are just framebuffers, d_texture format can be + # set in display setting. We set those now to None, but it + # should be handled more gracefully. + display_types = { + "d_deepexr": "exr", + "d_it": None, + "d_null": None, + "d_openexr": "exr", + "d_png": "png", + "d_pointcloud": "ptc", + "d_targa": "tga", + "d_texture": None, + "d_tiff": "tif" + } + + extension = display_types.get( + cmds.listConnections("rmanDefaultDisplay.displayType")[0], + "exr" + ) or "exr" + + filename_prefix = "{}/{}".format( + cmds.getAttr("rmanGlobals.imageOutputDir"), + cmds.getAttr("rmanGlobals.imageFileFormat") + ) + + renderlayer = renderlayer.split("_")[-1] + + filename_0 = re.sub('<scene>', scene, filename_prefix, flags=re.IGNORECASE) # noqa: E501 + filename_0 = re.sub('<layer>', renderlayer, filename_0, flags=re.IGNORECASE) # noqa: E501 + filename_0 = re.sub('<f4>', "#" * int(padding), filename_0, flags=re.IGNORECASE) # noqa: E501 + filename_0 = re.sub('<ext>', extension, filename_0, flags=re.IGNORECASE) # noqa: E501 + filename_0 = os.path.normpath(os.path.join(root, filename_0)) elif renderer == "redshift": # mapping redshift extension dropdown values to strings ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"] @@ -254,7 +291,11 @@ use_published = True tile_assembler_plugin = "OpenPypeTileAssembler" asset_dependencies = False + priority = 50 + tile_priority = 50 limit_groups = [] + jobInfo = {} + pluginInfo = {} group = "none" def process(self, instance): @@ -272,37 +313,12 @@ self.deadline_url = instance.data.get("deadlineUrl") assert self.deadline_url, "Requires Deadline Webservice URL" - self._job_info = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "jobInfo", {}) - ) + # just using existing names from Settings + self._job_info = self.jobInfo - self._plugin_info = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "pluginInfo", {}) - ) + self._plugin_info = self.pluginInfo - self.limit_groups = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "limit", []) - ) - - self.group = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "group", "none") - ) + self.limit_groups = self.limit context = instance.context workspace = context.data["workspaceDir"] @@ -424,6 +440,8 @@ output_filename_0 = filename_0 + dirname = os.path.dirname(output_filename_0) + # Create render folder ---------------------------------------------- try: # Ensure render folder exists @@ -465,7 +483,7 @@ self.payload_skeleton["JobInfo"]["UserName"] = deadline_user # Set job priority self.payload_skeleton["JobInfo"]["Priority"] = \ - self._instance.data.get("priority", 50) + self._instance.data.get("priority", self.priority) if self.group != "none" and self.group: self.payload_skeleton["JobInfo"]["Group"] = self.group @@ -509,7 +527,7 @@ keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) environment["OPENPYPE_LOG_NO_COLORS"] = "1" environment["OPENPYPE_MAYA_VERSION"] = cmds.about(v=True) # to recognize job from PYPE for turning Event On/Off @@ -635,7 +653,7 @@ }
assembly_payload["JobInfo"].update(output_filenames) assembly_payload["JobInfo"]["Priority"] = self._instance.data.get( - "priority", 50) + "tile_priority", self.tile_priority) assembly_payload["JobInfo"]["UserName"] = deadline_user frame_payloads = [] @@ -721,7 +739,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): tiles_count = instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501 for tile_job in frame_payloads: - response = self._requests_post(url, json=tile_job) + response = requests_post(url, json=tile_job) if not response.ok: raise Exception(response.text) @@ -784,7 +802,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): job_idx, len(assembly_payloads) )) self.log.debug(json.dumps(ass_job, indent=4, sort_keys=True)) - response = self._requests_post(url, json=ass_job) + response = requests_post(url, json=ass_job) if not response.ok: raise Exception(response.text) @@ -802,7 +820,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # E.g. http://192.168.0.1:8082/api/jobs url = "{}/api/jobs".format(self.deadline_url) - response = self._requests_post(url, json=payload) + response = requests_post(url, json=payload) if not response.ok: raise Exception(response.text) instance.data["deadlineSubmissionJob"] = response.json() @@ -819,6 +837,23 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "AssetDependency0": data["filepath"], } + renderer = self._instance.data["renderer"] + + # This hack is here because of how Deadline handles Renderman version. + # it considers everything with `renderman` set as version older than + # Renderman 22, and so if we are using renderman > 21 we need to set + # renderer string on the job to `renderman22`. We will have to change + # this when Deadline releases new version handling this. + if self._instance.data["renderer"] == "renderman": + try: + from rfm2.config import cfg # noqa + except ImportError: + raise Exception("Cannot determine renderman version") + + rman_version = cfg().build_info.version() # type: str + if int(rman_version.split(".")[0]) > 22: + renderer = "renderman22" + plugin_info = { "SceneFile": data["filepath"], # Output directory and filename @@ -832,7 +867,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "RenderLayer": data["renderlayer"], # Determine which renderer to use from the file itself - "Renderer": self._instance.data["renderer"], + "Renderer": renderer, # Resolve relative references "ProjectPath": data["workspace"], @@ -1010,7 +1045,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): self.log.info("Submitting ass export job.") url = "{}/api/jobs".format(self.deadline_url) - response = self._requests_post(url, json=payload) + response = requests_post(url, json=payload) if not response.ok: self.log.error("Submition failed!") self.log.error(response.status_code) @@ -1034,44 +1069,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): % (value, int(value)) ) - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. 
- - """ - if 'verify' not in kwargs: - kwargs['verify'] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - # add 10sec timeout before bailing out - kwargs['timeout'] = 10 - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """Wrap request get method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if 'verify' not in kwargs: - kwargs['verify'] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - # add 10sec timeout before bailing out - kwargs['timeout'] = 10 - return requests.get(*args, **kwargs) - def format_vray_output_filename(self, filename, template, dir=False): """Format the expected output file of the Export job. diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index d6bd11620d..94c703d66d 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -4,10 +4,10 @@ import json import getpass import requests - -from avalon import api import pyblish.api + import nuke +from openpype.pipeline import legacy_io class NukeSubmitDeadline(pyblish.api.InstancePlugin): @@ -27,8 +27,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): # presets priority = 50 chunk_size = 1 - primary_pool = "" - secondary_pool = "" + concurrent_tasks = 1 group = "" department = "" limit_groups = {} @@ -149,11 +148,16 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): pass # define chunk and priority - chunk_size = instance.data.get("deadlineChunkSize") + chunk_size = instance.data["deadlineChunkSize"] if chunk_size == 0 and self.chunk_size: chunk_size = self.chunk_size - priority = instance.data.get("deadlinePriority") + # define chunk and priority + concurrent_tasks = instance.data["deadlineConcurrentTasks"] + if concurrent_tasks == 0 and self.concurrent_tasks: + concurrent_tasks = self.concurrent_tasks + + priority = instance.data["deadlinePriority"] if not priority: priority = self.priority @@ -177,10 +181,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "Priority": priority, "ChunkSize": chunk_size, + "ConcurrentTasks": concurrent_tasks, + "Department": self.department, - "Pool": self.primary_pool, - "SecondaryPool": self.secondary_pool, + "Pool": instance.data.get("primaryPool"), + "SecondaryPool": instance.data.get("secondaryPool"), "Group": self.group, "Plugin": "Nuke", @@ -236,7 +242,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): keys = [ "PYTHONPATH", "PATH", - "AVALON_SCHEMA", "AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK", @@ -258,7 +263,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): keys += self.env_allowed_keys environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) for _path in os.environ: if _path.lower().startswith('openpype_'): diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index fad4d14ea0..306237c78c 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ 
b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -7,13 +7,15 @@ import re from copy import copy, deepcopy import requests import clique -import openpype.api - -from avalon import api, io import pyblish.api -from openpype.pipeline import get_representation_path +import openpype.api +from openpype.pipeline import ( + get_representation_path, + legacy_io, +) +from openpype.pipeline.farm.patterning import match_aov_pattern def get_resources(version, extension=None): @@ -22,7 +24,7 @@ def get_resources(version, extension=None): if extension: query["name"] = extension - representation = io.find_one(query) + representation = legacy_io.find_one(query) assert representation, "This is a bug" directory = get_representation_path(representation) @@ -107,7 +109,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): families = ["render.farm", "prerender.farm", "renderlayer", "imagesequence", "vrayscene"] - aov_filter = {"maya": [r".*(?:[\._-])*([Bb]eauty)(?:[\.|_])*.*"], + aov_filter = {"maya": [r".*([Bb]eauty).*"], "aftereffects": [r".*"], # for everything from AE "harmony": [r".*"], # for everything from AE "celaction": [r".*"]} @@ -129,7 +131,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "OPENPYPE_PUBLISH_JOB" ] - # custom deadline atributes + # custom deadline attributes deadline_department = "" deadline_pool = "" deadline_pool_secondary = "" @@ -221,9 +223,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self._create_metadata_path(instance) environment = job["Props"].get("Env", {}) - environment["AVALON_PROJECT"] = io.Session["AVALON_PROJECT"] - environment["AVALON_ASSET"] = io.Session["AVALON_ASSET"] - environment["AVALON_TASK"] = io.Session["AVALON_TASK"] + environment["AVALON_PROJECT"] = legacy_io.Session["AVALON_PROJECT"] + environment["AVALON_ASSET"] = legacy_io.Session["AVALON_ASSET"] + environment["AVALON_TASK"] = legacy_io.Session["AVALON_TASK"] environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME") environment["OPENPYPE_LOG_NO_COLORS"] = "1" environment["OPENPYPE_USERNAME"] = instance.context.data["user"] @@ -235,6 +237,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if mongo_url: environment["OPENPYPE_MONGO"] = mongo_url + priority = self.deadline_priority or instance.data.get("priority", 50) + args = [ "--headless", 'publish', @@ -254,11 +258,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Department": self.deadline_department, "ChunkSize": self.deadline_chunk_size, - "Priority": job["Props"]["Pri"], + "Priority": priority, "Group": self.deadline_group, - "Pool": self.deadline_pool, - "SecondaryPool": self.deadline_pool_secondary, + "Pool": instance.data.get("primaryPool"), + "SecondaryPool": instance.data.get("secondaryPool"), "OutputDirectory0": output_dir }, @@ -281,6 +285,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): else: payload["JobInfo"]["JobDependency0"] = job["_id"] + if instance.data.get("suspend_publish"): + payload["JobInfo"]["InitialStatus"] = "Suspended" + index = 0 for key in environment: if key.upper() in self.enviro_filter: @@ -447,12 +454,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): app = os.environ.get("AVALON_APP", "") preview = False - if app in self.aov_filter.keys(): - for aov_pattern in self.aov_filter[app]: - if re.match(aov_pattern, aov): - preview = True - break + if isinstance(col, list): + render_file_name = os.path.basename(col[0]) + else: + render_file_name = os.path.basename(col) + aov_patterns = 
self.aov_filter + preview = match_aov_pattern(app, aov_patterns, render_file_name) + + # toggle preview on if multipart is on if instance_data.get("multipartExr"): preview = True @@ -509,8 +519,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): most cases, but if not - we create representation from each of them. Arguments: - instance (pyblish.plugin.Instance): instance for which we are - setting representations + instance (dict): instance data for which we are + setting representations exp_files (list): list of expected files Returns: @@ -518,27 +528,29 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ representations = [] + host_name = os.environ.get("AVALON_APP", "") collections, remainders = clique.assemble(exp_files) # create representation for every collected sequento ce for collection in collections: ext = collection.tail.lstrip(".") preview = False - # if filtered aov name is found in filename, toggle it for - # preview video rendering - for app in self.aov_filter.keys(): - if os.environ.get("AVALON_APP", "") == app: - for aov in self.aov_filter[app]: - if re.match( - aov, - list(collection)[0] - ): - preview = True - break - - # toggle preview on if multipart is on - if instance.get("multipartExr", False): - preview = True + # TODO 'useSequenceForReview' is temporary solution which does + # not work for 100% of cases. We must be able to tell what + # expected files contains more explicitly and from what + # should be review made. + # - "review" tag is never added when is set to 'False' + if instance["useSequenceForReview"]: + # toggle preview on if multipart is on + if instance.get("multipartExr", False): + preview = True + else: + render_file_name = list(collection)[0] + # if filtered aov name is found in filename, toggle it for + # preview video rendering + preview = match_aov_pattern( + host_name, self.aov_filter, render_file_name + ) staging = os.path.dirname(list(collection)[0]) success, rootless_staging_dir = ( @@ -602,12 +614,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "files": os.path.basename(remainder), "stagingDir": os.path.dirname(remainder), } - if "render" in instance.get("families"): + + preview = match_aov_pattern( + host_name, self.aov_filter, remainder + ) + if preview: rep.update({ "fps": instance.get("fps"), "tags": ["review"] }) - self._solve_families(instance, True) + self._solve_families(instance, preview) already_there = False for repre in instance.get("representations", []): @@ -654,7 +670,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if hasattr(instance, "_log"): data['_log'] = instance._log - asset = data.get("asset") or api.Session["AVALON_ASSET"] + asset = data.get("asset") or legacy_io.Session["AVALON_ASSET"] subset = data.get("subset") start = instance.data.get("frameStart") @@ -724,7 +740,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "resolutionWidth": data.get("resolutionWidth", 1920), "resolutionHeight": data.get("resolutionHeight", 1080), "multipartExr": data.get("multipartExr", False), - "jobBatchName": data.get("jobBatchName", "") + "jobBatchName": data.get("jobBatchName", ""), + "useSequenceForReview": data.get("useSequenceForReview", True) } if "prerender" in instance.data["families"]: @@ -916,12 +933,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # User is deadline user render_job["Props"]["User"] = context.data.get( "deadlineUser", getpass.getuser()) - # Priority is now not handled at all - - if self.deadline_priority: - 
render_job["Props"]["Pri"] = self.deadline_priority - else: - render_job["Props"]["Pri"] = instance.data.get("priority") render_job["Props"]["Env"] = { "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"), @@ -951,7 +962,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "intent": context.data.get("intent"), "comment": context.data.get("comment"), "job": render_job or None, - "session": api.Session.copy(), + "session": legacy_io.Session.copy(), "instances": instances } @@ -1059,7 +1070,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): else: # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py new file mode 100644 index 0000000000..78eed17c98 --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py @@ -0,0 +1,48 @@ +import pyblish.api + +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) +from openpype.modules.deadline.deadline_module import DeadlineModule + + +class ValidateDeadlinePools(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validate primaryPool and secondaryPool on instance. + + Values are on instance based on value insertion when Creating instance or + by Settings in CollectDeadlinePools. + """ + + label = "Validate Deadline Pools" + order = pyblish.api.ValidatorOrder + families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + optional = True + + def process(self, instance): + # get default deadline webservice url from deadline module + deadline_url = instance.context.data["defaultDeadline"] + self.log.info("deadline_url::{}".format(deadline_url)) + pools = DeadlineModule.get_deadline_pools(deadline_url, log=self.log) + self.log.info("pools::{}".format(pools)) + + formatting_data = { + "pools_str": ",".join(pools) + } + + primary_pool = instance.data.get("primaryPool") + if primary_pool and primary_pool not in pools: + msg = "Configured primary '{}' not present on Deadline".format( + instance.data["primaryPool"]) + formatting_data["invalid_value_str"] = msg + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + secondary_pool = instance.data.get("secondaryPool") + if secondary_pool and secondary_pool not in pools: + msg = "Configured secondary '{}' not present on Deadline".format( + instance.data["secondaryPool"]) + formatting_data["invalid_value_str"] = msg + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py index 2e55be2743..975e49cb28 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py @@ -1,8 +1,8 @@ import json -from avalon.api import AvalonMongoDB from openpype.api import ProjectSettings from openpype.lib import create_project +from openpype.pipeline import AvalonMongoDB from openpype.settings import SaveWarningExc from openpype_modules.ftrack.lib import ( diff --git 
a/openpype/modules/ftrack/event_handlers_server/event_sync_links.py b/openpype/modules/ftrack/event_handlers_server/event_sync_links.py index 9610e7f5de..ae70c6756f 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_links.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_links.py @@ -1,7 +1,7 @@ from pymongo import UpdateOne from bson.objectid import ObjectId -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import ( CUST_ATTR_ID_KEY, diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 46c333c4c4..b5f199b3e4 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -12,8 +12,7 @@ from pymongo import UpdateOne import arrow import ftrack_api -from avalon import schema -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB, schema from openpype_modules.ftrack.lib import ( get_openpype_attr, diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py index 96243c8c36..593fc5e596 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py +++ b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py @@ -1,10 +1,9 @@ -import os import re import subprocess from openpype_modules.ftrack.lib import BaseEvent from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from bson.objectid import ObjectId diff --git a/openpype/modules/ftrack/event_handlers_user/action_applications.py b/openpype/modules/ftrack/event_handlers_user/action_applications.py index 48a0dea006..b25bc1b5cb 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_applications.py +++ b/openpype/modules/ftrack/event_handlers_user/action_applications.py @@ -1,5 +1,4 @@ import os -from uuid import uuid4 from openpype_modules.ftrack.lib import BaseAction from openpype.lib.applications import ( @@ -8,7 +7,7 @@ from openpype.lib.applications import ( ApplictionExecutableNotFound, CUSTOM_LAUNCH_APP_GROUPS ) -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB class AppplicationsAction(BaseAction): diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py index d15a865124..81f38e0c39 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py @@ -1,11 +1,8 @@ import os +import collections +import copy +from openpype.api import Anatomy from openpype_modules.ftrack.lib import BaseAction, statics_icon -from avalon import lib as avalonlib -from openpype.api import ( - Anatomy, - get_project_settings -) -from openpype.lib import ApplicationManager class CreateFolders(BaseAction): @@ -14,55 +11,59 @@ class CreateFolders(BaseAction): icon = statics_icon("ftrack", "action_icons", "CreateFolders.svg") def discover(self, session, entities, event): - if len(entities) != 1: - return False - - not_allowed = ["assetversion", "project"] - if entities[0].entity_type.lower() in not_allowed: - return False - - return True + for entity_item in event["data"]["selection"]: + if 
entity_item.get("entityType").lower() in ("task", "show"): + return True + return False def interface(self, session, entities, event): if event["data"].get("values", {}): return - entity = entities[0] - without_interface = True - for child in entity["children"]: - if child["object_type"]["name"].lower() != "task": - without_interface = False + + with_interface = False + for entity in entities: + if entity.entity_type.lower() != "task": + with_interface = True break - self.without_interface = without_interface - if without_interface: + + if "values" not in event["data"]: + event["data"]["values"] = {} + + event["data"]["values"]["with_interface"] = with_interface + if not with_interface: return + title = "Create folders" entity_name = entity["name"] msg = ( "

<h2>Do you want create folders also" - " for all children of \"{}\"?</h2>
" + " for all children of your selection?" ) if entity.entity_type.lower() == "project": entity_name = entity["full_name"] msg = msg.replace(" also", "") msg += "

<h3>(Project root won't be created if not checked)</h3>
" - items = [] - item_msg = { - "type": "label", - "value": msg.format(entity_name) - } - item_label = { - "type": "label", - "value": "With all chilren entities" - } - item = { - "name": "children_included", - "type": "boolean", - "value": False - } - items.append(item_msg) - items.append(item_label) - items.append(item) + items = [ + { + "type": "label", + "value": msg.format(entity_name) + }, + { + "type": "label", + "value": "With all chilren entities" + }, + { + "name": "children_included", + "type": "boolean", + "value": False + }, + { + "type": "hidden", + "name": "with_interface", + "value": with_interface + } + ] return { "items": items, @@ -71,30 +72,47 @@ class CreateFolders(BaseAction): def launch(self, session, entities, event): '''Callback method for custom action.''' + + if "values" not in event["data"]: + return + + with_interface = event["data"]["values"]["with_interface"] with_childrens = True - if self.without_interface is False: - if "values" not in event["data"]: - return + if with_interface: with_childrens = event["data"]["values"]["children_included"] - entity = entities[0] - if entity.entity_type.lower() == "project": - proj = entity - else: - proj = entity["project"] - project_name = proj["full_name"] - project_code = proj["name"] + filtered_entities = [] + for entity in entities: + low_context_type = entity["context_type"].lower() + if low_context_type in ("task", "show"): + if not with_childrens and low_context_type == "show": + continue + filtered_entities.append(entity) - if entity.entity_type.lower() == 'project' and with_childrens is False: + if not filtered_entities: return { - 'success': True, - 'message': 'Nothing was created' + "success": True, + "message": 'Nothing was created' } - all_entities = [] - all_entities.append(entity) - if with_childrens: - all_entities = self.get_notask_children(entity) + project_entity = self.get_project_from_entity(filtered_entities[0]) + + project_name = project_entity["full_name"] + project_code = project_entity["name"] + + task_entities = [] + other_entities = [] + self.get_all_entities( + session, entities, task_entities, other_entities + ) + hierarchy = self.get_entities_hierarchy( + session, task_entities, other_entities + ) + task_types = session.query("select id, name from Type").all() + task_type_names_by_id = { + task_type["id"]: task_type["name"] + for task_type in task_types + } anatomy = Anatomy(project_name) @@ -102,77 +120,67 @@ class CreateFolders(BaseAction): work_template = anatomy.templates for key in work_keys: work_template = work_template[key] - work_has_apps = "{app" in work_template publish_keys = ["publish", "folder"] publish_template = anatomy.templates for key in publish_keys: publish_template = publish_template[key] - publish_has_apps = "{app" in publish_template + + project_data = { + "project": { + "name": project_name, + "code": project_code + } + } collected_paths = [] - for entity in all_entities: - if entity.entity_type.lower() == "project": - continue - ent_data = { - "project": { - "name": project_name, - "code": project_code - } - } + for item in hierarchy: + parent_entity, task_entities = item - ent_data["asset"] = entity["name"] + parent_data = copy.deepcopy(project_data) - parents = entity["link"][1:-1] + parents = parent_entity["link"][1:-1] hierarchy_names = [p["name"] for p in parents] - hierarchy = "" + hierarchy = "/".join(hierarchy_names) + if hierarchy_names: - hierarchy = os.path.sep.join(hierarchy_names) - ent_data["hierarchy"] = hierarchy + parent_name = hierarchy_names[-1] 
+ else: + parent_name = project_name - tasks_created = False - for child in entity["children"]: - if child["object_type"]["name"].lower() != "task": - continue - tasks_created = True - task_data = ent_data.copy() - task_data["task"] = child["name"] + parent_data.update({ + "asset": parent_entity["name"], + "hierarchy": hierarchy, + "parent": parent_name + }) - apps = [] - - # Template wok - if work_has_apps: - app_data = task_data.copy() - for app in apps: - app_data["app"] = app - collected_paths.append(self.compute_template( - anatomy, app_data, work_keys - )) - else: - collected_paths.append(self.compute_template( - anatomy, task_data, work_keys - )) - - # Template publish - if publish_has_apps: - app_data = task_data.copy() - for app in apps: - app_data["app"] = app - collected_paths.append(self.compute_template( - anatomy, app_data, publish_keys - )) - else: - collected_paths.append(self.compute_template( - anatomy, task_data, publish_keys - )) - - if not tasks_created: + if not task_entities: # create path for entity collected_paths.append(self.compute_template( - anatomy, ent_data, work_keys + anatomy, parent_data, work_keys )) collected_paths.append(self.compute_template( - anatomy, ent_data, publish_keys + anatomy, parent_data, publish_keys + )) + continue + + for task_entity in task_entities: + task_type_id = task_entity["type_id"] + task_type_name = task_type_names_by_id[task_type_id] + task_data = copy.deepcopy(parent_data) + task_data["task"] = { + "name": task_entity["name"], + "type": task_type_name + } + + # Template wok + collected_paths.append(self.compute_template( + anatomy, task_data, work_keys + )) + + # Template publish + collected_paths.append(self.compute_template( + anatomy, task_data, publish_keys )) if len(collected_paths) == 0: @@ -193,14 +201,65 @@ class CreateFolders(BaseAction): "message": "Successfully created project folders." 
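Each task gets its own format-data dict built with `copy.deepcopy`, as in the loop above; a shallow copy would let every task share (and mutate) the same nested `project` dict. A small sketch with hypothetical values:

```python
import copy

parent_data = {
    "project": {"name": "TestProject", "code": "tp"},  # hypothetical values
    "asset": "sh010",
    "hierarchy": "shots/sq01",
}

task_entities = [
    {"name": "modeling", "type": "Modeling"},  # hypothetical task rows
    {"name": "rigging", "type": "Rigging"},
]

fill_data_per_task = []
for task in task_entities:
    # deepcopy so each task gets its own nested dicts; a shallow copy
    # would share the inner "project" dict between all tasks
    task_data = copy.deepcopy(parent_data)
    task_data["task"] = {"name": task["name"], "type": task["type"]}
    fill_data_per_task.append(task_data)

print(fill_data_per_task[0]["task"])  # {'name': 'modeling', 'type': 'Modeling'}
```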
} - def get_notask_children(self, entity): + def get_all_entities( + self, session, entities, task_entities, other_entities + ): + if not entities: + return + + no_task_entities = [] + for entity in entities: + if entity.entity_type.lower() == "task": + task_entities.append(entity) + else: + no_task_entities.append(entity) + + if not no_task_entities: + return task_entities + + other_entities.extend(no_task_entities) + + no_task_entity_ids = [entity["id"] for entity in no_task_entities] + next_entities = session.query(( + "select id, parent_id" + " from TypedContext where parent_id in ({})" + ).format(self.join_query_keys(no_task_entity_ids))).all() + + self.get_all_entities( + session, next_entities, task_entities, other_entities + ) + + def get_entities_hierarchy(self, session, task_entities, other_entities): + task_entity_ids = [entity["id"] for entity in task_entities] + full_task_entities = session.query(( + "select id, name, type_id, parent_id" + " from TypedContext where id in ({})" + ).format(self.join_query_keys(task_entity_ids))) + task_entities_by_parent_id = collections.defaultdict(list) + for entity in full_task_entities: + parent_id = entity["parent_id"] + task_entities_by_parent_id[parent_id].append(entity) + output = [] - if entity.entity_type.lower() == "task": + if not task_entities_by_parent_id: return output - output.append(entity) - for child in entity["children"]: - output.extend(self.get_notask_children(child)) + other_ids = set() + for entity in other_entities: + other_ids.add(entity["id"]) + other_ids |= set(task_entities_by_parent_id.keys()) + + parent_entities = session.query(( + "select id, name from TypedContext where id in ({})" + ).format(self.join_query_keys(other_ids))).all() + + for parent_entity in parent_entities: + parent_id = parent_entity["id"] + output.append(( + parent_entity, + task_entities_by_parent_id[parent_id] + )) + return output def compute_template(self, anatomy, data, anatomy_keys): diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py index 94f359c317..ebea8872f9 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py @@ -1,6 +1,4 @@ -import os import re -import json from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype.api import get_project_basic_paths, create_project_folders diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py index 94385a36c5..ee5c3d0d97 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py @@ -3,7 +3,8 @@ import uuid from datetime import datetime from bson.objectid import ObjectId -from avalon.api import AvalonMongoDB + +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py index 1b694e25f1..f5addde8ae 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py @@ -5,10 +5,10 @@ 
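`get_all_entities` above walks the entity tree level by level, querying children of the current batch with a single `parent_id in (...)` filter and recursing on the non-task rows. The same walk, sketched iteratively over in-memory rows instead of ftrack queries:

```python
# In-memory stand-in for TypedContext rows; in the action the same walk
# is done with chunked queries:
#   "select id, parent_id from TypedContext where parent_id in (...)"
ROWS = [
    {"id": "sq01", "parent_id": "show", "is_task": False},
    {"id": "sh010", "parent_id": "sq01", "is_task": False},
    {"id": "modeling", "parent_id": "sh010", "is_task": True},
    {"id": "rigging", "parent_id": "sh010", "is_task": True},
]


def children_of(parent_ids):
    return [row for row in ROWS if row["parent_id"] in parent_ids]


def collect_descendants(root_ids):
    """Breadth-first split of descendants into task and non-task rows."""
    task_rows, other_rows = [], []
    current_ids = set(root_ids)
    while current_ids:
        next_ids = set()
        for row in children_of(current_ids):
            if row["is_task"]:
                task_rows.append(row)
            else:
                # Only non-task rows can have further children
                other_rows.append(row)
                next_ids.add(row["id"])
        current_ids = next_ids
    return task_rows, other_rows


tasks, others = collect_descendants({"show"})
print([t["id"] for t in tasks])  # ['modeling', 'rigging']
```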
import uuid import clique from pymongo import UpdateOne -from avalon.api import AvalonMongoDB from openpype.api import Anatomy from openpype.lib import StringTemplate, TemplateUnsolved +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import BaseAction, statics_icon @@ -492,7 +492,8 @@ class DeleteOldVersions(BaseAction): os.remove(file_path) self.log.debug("Removed file: {}".format(file_path)) - remainders.remove(file_path_base) + if file_path_base in remainders: + remainders.remove(file_path_base) continue seq_path_base = os.path.split(seq_path)[1] diff --git a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py index 1f28b18900..9ef2a1668e 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delivery.py @@ -15,7 +15,7 @@ from openpype.lib.delivery import ( process_single_file, process_sequence ) -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB class Delivery(BaseAction): diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py index 3888379e04..c7237a1150 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -7,7 +7,6 @@ import datetime import ftrack_api -from avalon.api import AvalonMongoDB from openpype.api import get_project_settings from openpype.lib import ( get_workfile_template_key, @@ -15,6 +14,7 @@ from openpype.lib import ( Anatomy, StringTemplate, ) +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py index 3759bc81ac..0b14e7aa2b 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py @@ -1,8 +1,8 @@ import json -from avalon.api import AvalonMongoDB from openpype.api import ProjectSettings from openpype.lib import create_project +from openpype.pipeline import AvalonMongoDB from openpype.settings import SaveWarningExc from openpype_modules.ftrack.lib import ( diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py index bdb0eaf250..040ca75582 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_rv.py +++ b/openpype/modules/ftrack/event_handlers_user/action_rv.py @@ -4,8 +4,11 @@ import traceback import json import ftrack_api -from avalon import io, api -from openpype.pipeline import get_representation_path + +from openpype.pipeline import ( + get_representation_path, + legacy_io, +) from openpype_modules.ftrack.lib import BaseAction, statics_icon @@ -253,8 +256,8 @@ class RVAction(BaseAction): )["version"]["asset"]["parent"]["link"][0] project = session.get(link["type"], link["id"]) os.environ["AVALON_PROJECT"] = project["name"] - api.Session["AVALON_PROJECT"] = project["name"] - io.install() + legacy_io.Session["AVALON_PROJECT"] = project["name"] + legacy_io.install() location = ftrack_api.Session().pick_location() @@ -278,22 +281,22 @@ class RVAction(BaseAction): if online_source: continue - 
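The `remainders` hunk above guards `list.remove()` with a membership test, since removing an absent item raises `ValueError`. A tiny illustration, including the `set.discard()` variant that avoids both the exception and the linear scan:

```python
remainders = ["a.exr", "b.exr"]

# list.remove() raises ValueError when the item is absent, so guard
# with a membership check first (the fix in the hunk above)
file_path_base = "c.exr"
if file_path_base in remainders:
    remainders.remove(file_path_base)

# On large collections a set with discard() does the same job without
# the exception and without scanning the whole list
leftover = set(remainders)
leftover.discard("c.exr")  # no-op instead of an exception
```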
asset = io.find_one({"type": "asset", "name": parent_name}) - subset = io.find_one( + asset = legacy_io.find_one({"type": "asset", "name": parent_name}) + subset = legacy_io.find_one( { "type": "subset", "name": component["version"]["asset"]["name"], "parent": asset["_id"] } ) - version = io.find_one( + version = legacy_io.find_one( { "type": "version", "name": component["version"]["version"], "parent": subset["_id"] } ) - representation = io.find_one( + representation = legacy_io.find_one( { "type": "representation", "parent": version["_id"], @@ -301,7 +304,7 @@ class RVAction(BaseAction): } ) if representation is None: - representation = io.find_one( + representation = legacy_io.find_one( { "type": "representation", "parent": version["_id"], diff --git a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py index 4820925844..62fdfa2bdd 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py @@ -4,9 +4,10 @@ import json import requests from bson.objectid import ObjectId + from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype.api import Anatomy -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py index f8319b67d4..5c6d6352d2 100644 --- a/openpype/modules/ftrack/ftrack_server/lib.py +++ b/openpype/modules/ftrack/ftrack_server/lib.py @@ -31,10 +31,13 @@ TOPIC_STATUS_SERVER = "openpype.event.server.status" TOPIC_STATUS_SERVER_RESULT = "openpype.event.server.status.result" -def check_ftrack_url(url, log_errors=True): +def check_ftrack_url(url, log_errors=True, logger=None): """Checks if Ftrack server is responding""" + if logger is None: + logger = Logger.get_logger(__name__) + if not url: - print('ERROR: Ftrack URL is not set!') + logger.error("Ftrack URL is not set!") return None url = url.strip('/ ') @@ -48,15 +51,15 @@ def check_ftrack_url(url, log_errors=True): result = requests.get(url, allow_redirects=False) except requests.exceptions.RequestException: if log_errors: - print('ERROR: Entered Ftrack URL is not accesible!') + logger.error("Entered Ftrack URL is not accesible!") return False if (result.status_code != 200 or 'FTRACK_VERSION' not in result.headers): if log_errors: - print('ERROR: Entered Ftrack URL is not accesible!') + logger.error("Entered Ftrack URL is not accesible!") return False - print('DEBUG: Ftrack server {} is accessible.'.format(url)) + logger.debug("Ftrack server {} is accessible.".format(url)) return url @@ -133,7 +136,7 @@ class ProcessEventHub(SocketBaseEventHub): hearbeat_msg = b"processor" is_collection_created = False - pypelog = Logger().get_logger("Session Processor") + pypelog = Logger.get_logger("Session Processor") def __init__(self, *args, **kwargs): self.mongo_url = None @@ -192,7 +195,7 @@ class ProcessEventHub(SocketBaseEventHub): except pymongo.errors.AutoReconnect: self.pypelog.error(( "Mongo server \"{}\" is not responding, exiting." - ).format(os.environ["AVALON_MONGO"])) + ).format(os.environ["OPENPYPE_MONGO"])) sys.exit(0) # Additional special processing of events. 
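`check_ftrack_url` now accepts an injected logger and only falls back to `Logger.get_logger` when none is given, so services can route its messages into their own named loggers. A sketch of the same pattern using stdlib `logging` in place of OpenPype's `Logger`:

```python
import logging


def check_url(url, log_errors=True, logger=None):
    """Validate a URL-ish value; callers may inject their own logger."""
    # Resolve the logger lazily so importing this module does not
    # configure logging; mirrors Logger.get_logger(__name__) above
    if logger is None:
        logger = logging.getLogger(__name__)

    if not url:
        if log_errors:
            logger.error("URL is not set!")
        return None
    return url.strip("/ ")


# A service can pass its own named logger; a script relies on the default
service_logger = logging.getLogger("event.server")
check_url("https://example.ftrackapp.com/", logger=service_logger)
```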
if event['topic'] == 'ftrack.meta.disconnected': diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py index 5301ec568e..124787e467 100644 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/ftrack/lib/avalon_sync.py @@ -6,16 +6,12 @@ import numbers import six -from avalon.api import AvalonMongoDB - -import avalon - from openpype.api import ( Logger, - Anatomy, get_anatomy_settings ) from openpype.lib import ApplicationManager +from openpype.pipeline import AvalonMongoDB, schema from .constants import CUST_ATTR_ID_KEY, FPS_KEYS from .custom_attributes import get_openpype_attr, query_custom_attributes @@ -175,7 +171,7 @@ def check_regex(name, entity_type, in_schema=None, schema_patterns=None): if not name_pattern: default_pattern = "^[a-zA-Z0-9_.]*$" - schema_obj = avalon.schema._cache.get(schema_name + ".json") + schema_obj = schema._cache.get(schema_name + ".json") if not schema_obj: name_pattern = default_pattern else: @@ -286,21 +282,6 @@ def from_dict_to_set(data, is_project): return result -def get_avalon_project_template(project_name): - """Get avalon template - Args: - project_name: (string) - Returns: - dictionary with templates - """ - templates = Anatomy(project_name).templates - return { - "workfile": templates["avalon"]["workfile"], - "work": templates["avalon"]["work"], - "publish": templates["avalon"]["publish"] - } - - def get_project_apps(in_app_list): """ Application definitions for app name. diff --git a/openpype/modules/ftrack/lib/custom_attributes.py b/openpype/modules/ftrack/lib/custom_attributes.py index 29c6b5e7f8..2f53815368 100644 --- a/openpype/modules/ftrack/lib/custom_attributes.py +++ b/openpype/modules/ftrack/lib/custom_attributes.py @@ -135,7 +135,7 @@ def query_custom_attributes( output.extend( session.query( ( - "select value, entity_id from {}" + "select value, entity_id, configuration_id from {}" " where entity_id in ({}) and configuration_id in ({})" ).format( table_name, diff --git a/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py b/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py new file mode 100644 index 0000000000..43fa3bc3f8 --- /dev/null +++ b/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py @@ -0,0 +1,148 @@ +""" +Requires: + context > ftrackSession + context > ftrackEntity + instance > ftrackEntity + +Provides: + instance > customData > ftrack +""" +import copy + +import pyblish.api + + +class CollectFtrackCustomAttributeData(pyblish.api.ContextPlugin): + """Collect custom attribute values and store them to customData. + + Data are stored into each instance in context under + instance.data["customData"]["ftrack"]. + + Hierarchical attributes are not looked up properly for that functionality + custom attribute values lookup must be extended. + """ + + order = pyblish.api.CollectorOrder + 0.4992 + label = "Collect Ftrack Custom Attribute Data" + + # Name of custom attributes for which will be look for + custom_attribute_keys = [] + + def process(self, context): + if not self.custom_attribute_keys: + self.log.info("Custom attribute keys are not set. 
Skipping") + return + + ftrack_entities_by_id = {} + default_entity_id = None + + context_entity = context.data.get("ftrackEntity") + if context_entity: + entity_id = context_entity["id"] + default_entity_id = entity_id + ftrack_entities_by_id[entity_id] = context_entity + + instances_by_entity_id = { + default_entity_id: [] + } + for instance in context: + entity = instance.data.get("ftrackEntity") + if not entity: + instances_by_entity_id[default_entity_id].append(instance) + continue + + entity_id = entity["id"] + ftrack_entities_by_id[entity_id] = entity + if entity_id not in instances_by_entity_id: + instances_by_entity_id[entity_id] = [] + instances_by_entity_id[entity_id].append(instance) + + if not ftrack_entities_by_id: + self.log.info("Ftrack entities are not set. Skipping") + return + + session = context.data["ftrackSession"] + custom_attr_key_by_id = self.query_attr_confs(session) + if not custom_attr_key_by_id: + self.log.info(( + "Didn't find any of defined custom attributes {}" + ).format(", ".join(self.custom_attribute_keys))) + return + + entity_ids = list(instances_by_entity_id.keys()) + values_by_entity_id = self.query_attr_values( + session, entity_ids, custom_attr_key_by_id + ) + + for entity_id, instances in instances_by_entity_id.items(): + if entity_id not in values_by_entity_id: + # Use defaut empty values + entity_id = None + + for instance in instances: + value = copy.deepcopy(values_by_entity_id[entity_id]) + if "customData" not in instance.data: + instance.data["customData"] = {} + instance.data["customData"]["ftrack"] = value + instance_label = ( + instance.data.get("label") or instance.data["name"] + ) + self.log.debug(( + "Added ftrack custom data to instance \"{}\": {}" + ).format(instance_label, value)) + + def query_attr_values(self, session, entity_ids, custom_attr_key_by_id): + # Prepare values for query + entity_ids_joined = ",".join([ + '"{}"'.format(entity_id) + for entity_id in entity_ids + ]) + conf_ids_joined = ",".join([ + '"{}"'.format(conf_id) + for conf_id in custom_attr_key_by_id.keys() + ]) + # Query custom attribute values + value_items = session.query( + ( + "select value, entity_id, configuration_id" + " from CustomAttributeValue" + " where entity_id in ({}) and configuration_id in ({})" + ).format( + entity_ids_joined, + conf_ids_joined + ) + ).all() + + # Prepare default value output per entity id + values_by_key = { + key: None for key in self.custom_attribute_keys + } + # Prepare all entity ids that were queried + values_by_entity_id = { + entity_id: copy.deepcopy(values_by_key) + for entity_id in entity_ids + } + # Add none entity id which is used as default value + values_by_entity_id[None] = copy.deepcopy(values_by_key) + # Go through queried data and store them + for item in value_items: + conf_id = item["configuration_id"] + conf_key = custom_attr_key_by_id[conf_id] + entity_id = item["entity_id"] + values_by_entity_id[entity_id][conf_key] = item["value"] + return values_by_entity_id + + def query_attr_confs(self, session): + custom_attributes = set(self.custom_attribute_keys) + cust_attrs_query = ( + "select id, key from CustomAttributeConfiguration" + " where key in ({})" + ).format(", ".join( + ["\"{}\"".format(attr_name) for attr_name in custom_attributes] + )) + + custom_attr_confs = session.query(cust_attrs_query).all() + return { + conf["id"]: conf["key"] + for conf in custom_attr_confs + } diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py 
b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py index 07af217fb6..14da188150 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py @@ -1,12 +1,13 @@ import logging import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io class CollectFtrackApi(pyblish.api.ContextPlugin): """ Collects an ftrack session and the current task id. """ - order = pyblish.api.CollectorOrder + 0.4999 + order = pyblish.api.CollectorOrder + 0.4991 label = "Collect Ftrack Api" def process(self, context): @@ -23,9 +24,9 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): self.log.debug("Ftrack user: \"{0}\"".format(session.api_user)) # Collect task - project_name = avalon.api.Session["AVALON_PROJECT"] - asset_name = avalon.api.Session["AVALON_ASSET"] - task_name = avalon.api.Session["AVALON_TASK"] + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] # Find project entity project_query = 'Project where full_name is "{0}"'.format(project_name) diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py index 70030acad9..5758068f86 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py @@ -6,8 +6,8 @@ Provides: instance -> families ([]) """ import pyblish.api -import avalon.api +from openpype.pipeline import legacy_io from openpype.lib.plugin_tools import filter_profiles @@ -25,7 +25,7 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): based on 'families' (editorial drives it by presence of 'review') """ label = "Collect Ftrack Family" - order = pyblish.api.CollectorOrder + 0.4998 + order = pyblish.api.CollectorOrder + 0.4990 profiles = None @@ -34,9 +34,10 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): self.log.warning("No profiles present for adding Ftrack family") return + add_ftrack_family = False task_name = instance.data.get("task", - avalon.api.Session["AVALON_TASK"]) - host_name = avalon.api.Session["AVALON_APP"] + legacy_io.Session["AVALON_TASK"]) + host_name = legacy_io.Session["AVALON_APP"] family = instance.data["family"] filtering_criteria = { @@ -53,6 +54,8 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): additional_filters = profile.get("advanced_filtering") if additional_filters: + self.log.info("'{}' families used for additional filtering". 
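The order offsets shuffled here sequence the collectors: pyblish sorts plugins by their float `order`, so the family collector (+0.4990) now runs before the ftrack API collector (+0.4991), which in turn runs before the custom-attribute collector (+0.4992). A minimal illustration of that ordering contract:

```python
import pyblish.api


# Pyblish runs plugins in ascending float "order"; nudging the offsets
# is how the collectors above are sequenced relative to each other
class CollectFirst(pyblish.api.ContextPlugin):
    order = pyblish.api.CollectorOrder + 0.4990
    label = "Runs first"

    def process(self, context):
        context.data["prepared"] = True


class CollectSecond(pyblish.api.ContextPlugin):
    order = pyblish.api.CollectorOrder + 0.4991
    label = "Runs second"

    def process(self, context):
        # Safe to rely on data collected by the lower-order plugin
        assert context.data.get("prepared")
```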
+ format(families)) add_ftrack_family = self._get_add_ftrack_f_from_addit_filters( additional_filters, families, @@ -69,6 +72,13 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): else: instance.data["families"] = ["ftrack"] + result_str = "Adding" + if not add_ftrack_family: + result_str = "Not adding" + self.log.info("{} 'ftrack' family for instance with '{}'".format( + result_str, family + )) + def _get_add_ftrack_f_from_addit_filters(self, additional_filters, families, diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py index 6c25b9191e..650c59fae8 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py @@ -1,3 +1,15 @@ +"""Integrate components into ftrack + +Requires: + context -> ftrackSession - connected ftrack.Session + instance -> ftrackComponentsList - list of components to integrate + +Provides: + instance -> ftrackIntegratedAssetVersionsData + # legacy + instance -> ftrackIntegratedAssetVersions +""" + import os import sys import six @@ -54,6 +66,114 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): self.log.debug(query) return query + def process(self, instance): + session = instance.context.data["ftrackSession"] + context = instance.context + component_list = instance.data.get("ftrackComponentsList") + if not component_list: + self.log.info( + "Instance don't have components to integrate to Ftrack." + " Skipping." + ) + return + + session = instance.context.data["ftrackSession"] + context = instance.context + + parent_entity = None + default_asset_name = None + # If instance has set "ftrackEntity" or "ftrackTask" then use them from + # instance. Even if they are set to None. If they are set to None it + # has a reason. (like has different context) + if "ftrackEntity" in instance.data or "ftrackTask" in instance.data: + task_entity = instance.data.get("ftrackTask") + parent_entity = instance.data.get("ftrackEntity") + + elif "ftrackEntity" in context.data or "ftrackTask" in context.data: + task_entity = context.data.get("ftrackTask") + parent_entity = context.data.get("ftrackEntity") + + if task_entity: + default_asset_name = task_entity["name"] + parent_entity = task_entity["parent"] + + if parent_entity is None: + self.log.info(( + "Skipping ftrack integration. Instance \"{}\" does not" + " have specified ftrack entities." 
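The `"ftrackEntity" in instance.data` checks above deliberately test key presence rather than using `dict.get`, so an instance that explicitly sets the key to `None` opts out of integration instead of falling back to the context values. Sketched with hypothetical data:

```python
instance_data = {"ftrackTask": None}   # explicitly unset on the instance
context_data = {"ftrackTask": {"name": "compositing"}}

# "key in dict" (not dict.get) is what lets an explicit None on the
# instance win over the context value - the instance opted out
if "ftrackEntity" in instance_data or "ftrackTask" in instance_data:
    task_entity = instance_data.get("ftrackTask")
else:
    task_entity = context_data.get("ftrackTask")

print(task_entity)  # None -> integration is skipped for this instance
```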
+ ).format(str(instance))) + return + + if not default_asset_name: + default_asset_name = parent_entity["name"] + + # Change status on task + self._set_task_status(instance, task_entity, session) + + # Prepare AssetTypes + asset_types_by_short = self._ensure_asset_types_exists( + session, component_list + ) + + asset_versions_data_by_id = {} + used_asset_versions = [] + # Iterate over components and publish + for data in component_list: + self.log.debug("data: {}".format(data)) + + # AssetType + asset_type_short = data["assettype_data"]["short"] + asset_type_entity = asset_types_by_short[asset_type_short] + + # Asset + asset_data = data.get("asset_data") or {} + if "name" not in asset_data: + asset_data["name"] = default_asset_name + asset_entity = self._ensure_asset_exists( + session, + asset_data, + asset_type_entity["id"], + parent_entity["id"] + ) + + # Asset Version + asset_version_data = data.get("assetversion_data") or {} + asset_version_entity = self._ensure_asset_version_exists( + session, asset_version_data, asset_entity["id"], task_entity + ) + + # Component + self.create_component(session, asset_version_entity, data) + + # Store asset version and components items that were + version_id = asset_version_entity["id"] + if version_id not in asset_versions_data_by_id: + asset_versions_data_by_id[version_id] = { + "asset_version": asset_version_entity, + "component_items": [] + } + + asset_versions_data_by_id[version_id]["component_items"].append( + data + ) + + # Backwards compatibility + if asset_version_entity not in used_asset_versions: + used_asset_versions.append(asset_version_entity) + + instance.data["ftrackIntegratedAssetVersionsData"] = ( + asset_versions_data_by_id + ) + + # Backwards compatibility + asset_versions_key = "ftrackIntegratedAssetVersions" + if asset_versions_key not in instance.data: + instance.data[asset_versions_key] = [] + + for asset_version in used_asset_versions: + if asset_version not in instance.data[asset_versions_key]: + instance.data[asset_versions_key].append(asset_version) + def _set_task_status(self, instance, task_entity, session): project_entity = instance.context.data.get("ftrackProject") if not project_entity: @@ -100,190 +220,224 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): session._configure_locations() six.reraise(tp, value, tb) - def process(self, instance): - session = instance.context.data["ftrackSession"] - context = instance.context + def _ensure_asset_types_exists(self, session, component_list): + """Make sure that all AssetType entities exists for integration. - name = None - # If instance has set "ftrackEntity" or "ftrackTask" then use them from - # instance. Even if they are set to None. If they are set to None it - # has a reason. (like has different context) - if "ftrackEntity" in instance.data or "ftrackTask" in instance.data: - task = instance.data.get("ftrackTask") - parent = instance.data.get("ftrackEntity") + Returns: + dict: All asset types by short name. 
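Components integrated into the same AssetVersion are grouped under one id in `asset_versions_data_by_id`; `dict.setdefault` is a compact way to express the same create-bucket-on-first-hit logic:

```python
asset_versions_data_by_id = {}

# Hypothetical integration results: (version id, component item) pairs
integrated = [
    ("av-1", {"component_data": {"name": "main"}}),
    ("av-1", {"component_data": {"name": "thumbnail"}}),
    ("av-2", {"component_data": {"name": "main"}}),
]

for version_id, item in integrated:
    # setdefault collapses the "create bucket on first hit" branch
    bucket = asset_versions_data_by_id.setdefault(
        version_id, {"asset_version": version_id, "component_items": []}
    )
    bucket["component_items"].append(item)

print(len(asset_versions_data_by_id["av-1"]["component_items"]))  # 2
```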
+ """ + # Query existing asset types + asset_types = session.query("select id, short from AssetType").all() + # Stpore all existing short names + asset_type_shorts = {asset_type["short"] for asset_type in asset_types} + # Check which asset types are missing and store them + asset_type_names_by_missing_shorts = {} + default_short_name = "upload" + for data in component_list: + asset_type_data = data.get("assettype_data") or {} + asset_type_short = asset_type_data.get("short") + if not asset_type_short: + # Use default asset type name if not set and change the + # input data + asset_type_short = default_short_name + asset_type_data["short"] = asset_type_short + data["assettype_data"] = asset_type_data - elif "ftrackEntity" in context.data or "ftrackTask" in context.data: - task = context.data.get("ftrackTask") - parent = context.data.get("ftrackEntity") + if ( + # Skip if short name exists + asset_type_short in asset_type_shorts + # Skip if short name was already added to missing types + # and asset type name is filled + # - if asset type name is missing then try use name from other + # data + or asset_type_names_by_missing_shorts.get(asset_type_short) + ): + continue - if task: - parent = task["parent"] - name = task - elif parent: - name = parent["name"] + asset_type_names_by_missing_shorts[asset_type_short] = ( + asset_type_data.get("name") + ) - if not name: - self.log.info(( - "Skipping ftrack integration. Instance \"{}\" does not" - " have specified ftrack entities." - ).format(str(instance))) - return + # Create missing asset types if there are any + if asset_type_names_by_missing_shorts: + self.log.info("Creating asset types with short names: {}".format( + ", ".join(asset_type_names_by_missing_shorts.keys()) + )) + for missing_short, type_name in ( + asset_type_names_by_missing_shorts.items() + ): + # Use short for name if name is not defined + if not type_name: + type_name = missing_short + # Use short name also for name + # - there is not other source for 'name' + session.create( + "AssetType", + { + "short": missing_short, + "name": type_name + } + ) - info_msg = ( - "Created new {entity_type} with data: {data}" - ", metadata: {metadata}." 
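`_ensure_asset_types_exists` and `_ensure_asset_exists` both follow a query-or-create shape: look the entity up, create and commit if missing, then re-query so the caller always holds a server-side row. A generic sketch of that shape against an `ftrack_api.Session`-style object (`ensure_entity` is a hypothetical helper, not part of the plugin):

```python
def ensure_entity(session, entity_type, filters, creation_data):
    """Query-or-create: return an existing row or create and re-query.

    Mirrors the _ensure_asset_exists flow above - the entity is
    re-queried after commit so the returned row always carries the
    server-side id.
    """
    where = " and ".join(
        '{} is "{}"'.format(key, value) for key, value in filters.items()
    )
    query = "select id from {} where {}".format(entity_type, where)

    entity = session.query(query).first()
    if entity is not None:
        return entity

    session.create(entity_type, creation_data)
    session.commit()
    return session.query(query).first()
```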
+ # Commit creation + session.commit() + # Requery asset types + asset_types = session.query( + "select id, short from AssetType" + ).all() + + return {asset_type["short"]: asset_type for asset_type in asset_types} + + def _ensure_asset_exists( + self, session, asset_data, asset_type_id, parent_id + ): + asset_name = asset_data["name"] + asset_entity = self._query_asset( + session, asset_name, asset_type_id, parent_id + ) + if asset_entity is not None: + return asset_entity + + asset_data = { + "name": asset_name, + "type_id": asset_type_id, + "context_id": parent_id + } + self.log.info("Created new Asset with data: {}.".format(asset_data)) + session.create("Asset", asset_data) + session.commit() + return self._query_asset(session, asset_name, asset_type_id, parent_id) + + def _query_asset(self, session, asset_name, asset_type_id, parent_id): + return session.query( + ( + "select id from Asset" + " where name is \"{}\"" + " and type_id is \"{}\"" + " and context_id is \"{}\"" + ).format(asset_name, asset_type_id, parent_id) + ).first() + + def _ensure_asset_version_exists( + self, session, asset_version_data, asset_id, task_entity + ): + task_id = None + if task_entity: + task_id = task_entity["id"] + + # Try query asset version by criteria (asset id and version) + version = asset_version_data.get("version") or 0 + asset_version_entity = self._query_asset_version( + session, version, asset_id ) - used_asset_versions = [] + # Prepare comment value + comment = asset_version_data.get("comment") or "" + if asset_version_entity is not None: + changed = False + if comment != asset_version_entity["comment"]: + asset_version_entity["comment"] = comment + changed = True - self._set_task_status(instance, task, session) + if task_id != asset_version_entity["task_id"]: + asset_version_entity["task_id"] = task_id + changed = True - # Iterate over components and publish - for data in instance.data.get("ftrackComponentsList", []): - # AssetType - # Get existing entity. - assettype_data = {"short": "upload"} - assettype_data.update(data.get("assettype_data", {})) - self.log.debug("data: {}".format(data)) + if changed: + session.commit() - assettype_entity = session.query( - self.query("AssetType", assettype_data) - ).first() - - # Create a new entity if none exits. - if not assettype_entity: - assettype_entity = session.create("AssetType", assettype_data) - self.log.debug("Created new AssetType with data: {}".format( - assettype_data - )) - - # Asset - # Get existing entity. - asset_data = { - "name": name, - "type": assettype_entity, - "parent": parent, + else: + new_asset_version_data = { + "version": version, + "asset_id": asset_id } - asset_data.update(data.get("asset_data", {})) + if task_id: + new_asset_version_data["task_id"] = task_id - asset_entity = session.query( - self.query("Asset", asset_data) - ).first() + if comment: + new_asset_version_data["comment"] = comment - self.log.info("asset entity: {}".format(asset_entity)) - - # Extracting metadata, and adding after entity creation. This is - # due to a ftrack_api bug where you can't add metadata on creation. - asset_metadata = asset_data.pop("metadata", {}) - - # Create a new entity if none exits. 
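`_ensure_asset_version_exists` above only commits when a value actually differs, tracked with a `changed` flag. The same idea as a small hypothetical helper:

```python
def update_if_changed(entity, updates, session):
    """Write only differing values and commit at most once, as above."""
    changed = False
    for key, value in updates.items():
        if entity[key] != value:
            entity[key] = value
            changed = True

    # Skipping commit() when nothing changed avoids an empty server
    # round-trip for every already-up-to-date AssetVersion
    if changed:
        session.commit()
    return changed
```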
- if not asset_entity: - asset_entity = session.create("Asset", asset_data) - self.log.debug( - info_msg.format( - entity_type="Asset", - data=asset_data, - metadata=asset_metadata - ) - ) - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) - - # Adding metadata - existing_asset_metadata = asset_entity["metadata"] - existing_asset_metadata.update(asset_metadata) - asset_entity["metadata"] = existing_asset_metadata - - # AssetVersion - # Get existing entity. - assetversion_data = { - "version": 0, - "asset": asset_entity, - } - _assetversion_data = data.get("assetversion_data", {}) - assetversion_cust_attrs = _assetversion_data.pop( - "custom_attributes", {} + self.log.info("Created new AssetVersion with data {}".format( + new_asset_version_data + )) + session.create("AssetVersion", new_asset_version_data) + session.commit() + asset_version_entity = self._query_asset_version( + session, version, asset_id ) - asset_version_comment = _assetversion_data.pop( - "comment", None - ) - assetversion_data.update(_assetversion_data) - assetversion_entity = session.query( - self.query("AssetVersion", assetversion_data) - ).first() - - # Extracting metadata, and adding after entity creation. This is - # due to a ftrack_api bug where you can't add metadata on creation. - assetversion_metadata = assetversion_data.pop("metadata", {}) - - if task: - assetversion_data['task'] = task - - # Create a new entity if none exits. - if not assetversion_entity: - assetversion_entity = session.create( - "AssetVersion", assetversion_data - ) - self.log.debug( - info_msg.format( - entity_type="AssetVersion", - data=assetversion_data, - metadata=assetversion_metadata + # Set custom attributes if there were any set + custom_attrs = asset_version_data.get("custom_attributes") or {} + for attr_key, attr_value in custom_attrs.items(): + if attr_key in asset_version_entity["custom_attributes"]: + try: + asset_version_entity["custom_attributes"][attr_key] = ( + attr_value ) + session.commit() + continue + except Exception: + session.rollback() + session._configure_locations() + + self.log.warning( + ( + "Custom Attrubute \"{0}\" is not available for" + " AssetVersion <{1}>. Can't set it's value to: \"{2}\"" + ).format( + attr_key, asset_version_entity["id"], str(attr_value) ) - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) + ) - # Adding metadata - existing_assetversion_metadata = assetversion_entity["metadata"] - existing_assetversion_metadata.update(assetversion_metadata) - assetversion_entity["metadata"] = existing_assetversion_metadata + return asset_version_entity - # Add comment - if asset_version_comment: - assetversion_entity["comment"] = asset_version_comment - try: - session.commit() - except Exception: - session.rollback() - session._configure_locations() - self.log.warning(( - "Comment was not possible to set for AssetVersion" - "\"{0}\". 
Can't set it's value to: \"{1}\"" - ).format( - assetversion_entity["id"], str(asset_version_comment) - )) + def _query_asset_version(self, session, version, asset_id): + return session.query( + ( + "select id, task_id, comment from AssetVersion" + " where version is \"{}\" and asset_id is \"{}\"" + ).format(version, asset_id) + ).first() - # Adding Custom Attributes - for attr, val in assetversion_cust_attrs.items(): - if attr in assetversion_entity["custom_attributes"]: - try: - assetversion_entity["custom_attributes"][attr] = val - session.commit() - continue - except Exception: - session.rollback() - session._configure_locations() + def create_component(self, session, asset_version_entity, data): + component_data = data.get("component_data") or {} - self.log.warning(( - "Custom Attrubute \"{0}\"" - " is not available for AssetVersion <{1}>." - " Can't set it's value to: \"{2}\"" - ).format(attr, assetversion_entity["id"], str(val))) + if not component_data.get("name"): + component_data["name"] = "main" + + version_id = asset_version_entity["id"] + component_data["version_id"] = version_id + component_entity = session.query( + ( + "select id, name from Component where name is \"{}\"" + " and version_id is \"{}\"" + ).format(component_data["name"], version_id) + ).first() + + component_overwrite = data.get("component_overwrite", False) + location = data.get("component_location", session.pick_location()) + + # Overwrite existing component data if requested. + if component_entity and component_overwrite: + origin_location = session.query( + "Location where name is \"ftrack.origin\"" + ).one() + + # Removing existing members from location + components = list(component_entity.get("members", [])) + components += [component_entity] + for component in components: + for loc in component["component_locations"]: + if location["id"] == loc["location_id"]: + location.remove_component( + component, recursive=False + ) + + # Deleting existing members on component entity + for member in component_entity.get("members", []): + session.delete(member) + del(member) - # Have to commit the version and asset, because location can't - # determine the final location without. try: session.commit() except Exception: @@ -292,175 +446,124 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): session._configure_locations() six.reraise(tp, value, tb) - # Component - # Get existing entity. - component_data = { - "name": "main", - "version": assetversion_entity - } - component_data.update(data.get("component_data", {})) + # Reset members in memory + if "members" in component_entity.keys(): + component_entity["members"] = [] - component_entity = session.query( - self.query("Component", component_data) - ).first() + # Add components to origin location + try: + collection = clique.parse(data["component_path"]) + except ValueError: + # Assume its a single file + # Changing file type + name, ext = os.path.splitext(data["component_path"]) + component_entity["file_type"] = ext - component_overwrite = data.get("component_overwrite", False) - location = data.get("component_location", session.pick_location()) - - # Overwrite existing component data if requested. 
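`create_component` decides between sequence and single file by letting `clique.parse` fail: a `ValueError` means no frame pattern was found in the path. Sketched as a standalone function:

```python
import os

import clique


def component_file_type(component_path):
    """Classify a component path the same way create_component does."""
    try:
        collection = clique.parse(component_path)
    except ValueError:
        # clique found no sequence pattern: single file, file_type is
        # the plain extension
        return os.path.splitext(component_path)[1]
    # Sequence: file_type is the collection tail, e.g. ".exr"
    return collection.format("{tail}")


print(component_file_type("render.mov"))                   # .mov
print(component_file_type("render.%04d.exr [1001-1002]"))  # .exr
```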
- if component_entity and component_overwrite: - - origin_location = session.query( - "Location where name is \"ftrack.origin\"" - ).one() - - # Removing existing members from location - components = list(component_entity.get("members", [])) - components += [component_entity] - for component in components: - for loc in component["component_locations"]: - if location["id"] == loc["location_id"]: - location.remove_component( - component, recursive=False - ) - - # Deleting existing members on component entity - for member in component_entity.get("members", []): - session.delete(member) - del(member) - - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) - - # Reset members in memory - if "members" in component_entity.keys(): - component_entity["members"] = [] - - # Add components to origin location - try: - collection = clique.parse(data["component_path"]) - except ValueError: - # Assume its a single file - # Changing file type - name, ext = os.path.splitext(data["component_path"]) - component_entity["file_type"] = ext - - origin_location.add_component( - component_entity, data["component_path"] - ) - else: - # Changing file type - component_entity["file_type"] = collection.format("{tail}") - - # Create member components for sequence. - for member_path in collection: - - size = 0 - try: - size = os.path.getsize(member_path) - except OSError: - pass - - name = collection.match(member_path).group("index") - - member_data = { - "name": name, - "container": component_entity, - "size": size, - "file_type": os.path.splitext(member_path)[-1] - } - - component = session.create( - "FileComponent", member_data - ) - origin_location.add_component( - component, member_path, recursive=False - ) - component_entity["members"].append(component) - - # Add components to location. - location.add_component( - component_entity, origin_location, recursive=True - ) - - data["component"] = component_entity - msg = "Overwriting Component with path: {0}, data: {1}, " - msg += "location: {2}" - self.log.info( - msg.format( - data["component_path"], - component_data, - location - ) - ) - - # Extracting metadata, and adding after entity creation. This is - # due to a ftrack_api bug where you can't add metadata on creation. - component_metadata = component_data.pop("metadata", {}) - - # Create new component if none exists. - new_component = False - if not component_entity: - component_entity = assetversion_entity.create_component( - data["component_path"], - data=component_data, - location=location - ) - data["component"] = component_entity - msg = "Created new Component with path: {0}, data: {1}" - msg += ", metadata: {2}, location: {3}" - self.log.info( - msg.format( - data["component_path"], - component_data, - component_metadata, - location - ) - ) - new_component = True - - # Adding metadata - existing_component_metadata = component_entity["metadata"] - existing_component_metadata.update(component_metadata) - component_entity["metadata"] = existing_component_metadata - - # if component_data['name'] = 'ftrackreview-mp4-mp4': - # assetversion_entity["thumbnail_id"] - - # Setting assetversion thumbnail - if data.get("thumbnail", False): - assetversion_entity["thumbnail_id"] = component_entity["id"] - - # Inform user about no changes to the database. 
- if (component_entity and not component_overwrite and - not new_component): - data["component"] = component_entity - self.log.info( - "Found existing component, and no request to overwrite. " - "Nothing has been changed." + origin_location.add_component( + component_entity, data["component_path"] ) else: - # Commit changes. - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) + # Changing file type + component_entity["file_type"] = collection.format("{tail}") - if assetversion_entity not in used_asset_versions: - used_asset_versions.append(assetversion_entity) + # Create member components for sequence. + for member_path in collection: - asset_versions_key = "ftrackIntegratedAssetVersions" - if asset_versions_key not in instance.data: - instance.data[asset_versions_key] = [] + size = 0 + try: + size = os.path.getsize(member_path) + except OSError: + pass - for asset_version in used_asset_versions: - if asset_version not in instance.data[asset_versions_key]: - instance.data[asset_versions_key].append(asset_version) + name = collection.match(member_path).group("index") + + member_data = { + "name": name, + "container": component_entity, + "size": size, + "file_type": os.path.splitext(member_path)[-1] + } + + component = session.create( + "FileComponent", member_data + ) + origin_location.add_component( + component, member_path, recursive=False + ) + component_entity["members"].append(component) + + # Add components to location. + location.add_component( + component_entity, origin_location, recursive=True + ) + + data["component"] = component_entity + self.log.info( + ( + "Overwriting Component with path: {0}, data: {1}," + " location: {2}" + ).format( + data["component_path"], + component_data, + location + ) + ) + + # Extracting metadata, and adding after entity creation. This is + # due to a ftrack_api bug where you can't add metadata on creation. + component_metadata = component_data.pop("metadata", {}) + + # Create new component if none exists. + new_component = False + if not component_entity: + component_entity = asset_version_entity.create_component( + data["component_path"], + data=component_data, + location=location + ) + data["component"] = component_entity + self.log.info( + ( + "Created new Component with path: {0}, data: {1}," + " metadata: {2}, location: {3}" + ).format( + data["component_path"], + component_data, + component_metadata, + location + ) + ) + new_component = True + + # Adding metadata + existing_component_metadata = component_entity["metadata"] + existing_component_metadata.update(component_metadata) + component_entity["metadata"] = existing_component_metadata + + # if component_data['name'] = 'ftrackreview-mp4-mp4': + # assetversion_entity["thumbnail_id"] + + # Setting assetversion thumbnail + if data.get("thumbnail"): + asset_version_entity["thumbnail_id"] = component_entity["id"] + + # Inform user about no changes to the database. + if ( + component_entity + and not component_overwrite + and not new_component + ): + data["component"] = component_entity + self.log.info( + "Found existing component, and no request to overwrite. " + "Nothing has been changed." + ) + else: + # Commit changes. 
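The commit/rollback/reraise block recurs throughout this plugin; extracted as a sketch it reads as below (`safe_commit` is a hypothetical helper, the plugin inlines this):

```python
import sys

import six


def safe_commit(session):
    """Commit with the rollback/reraise dance used throughout above."""
    try:
        session.commit()
    except Exception:
        tp, value, tb = sys.exc_info()
        # Roll back so the session stays usable, reset locations, then
        # re-raise the original exception with its traceback intact
        # (six.reraise keeps this Python 2/3 compatible)
        session.rollback()
        session._configure_locations()
        six.reraise(tp, value, tb)
```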
+ try: + session.commit() + except Exception: + tp, value, tb = sys.exc_info() + session.rollback() + session._configure_locations() + six.reraise(tp, value, tb) diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py new file mode 100644 index 0000000000..c6a3d47f66 --- /dev/null +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py @@ -0,0 +1,84 @@ +""" +Requires: + context > comment + context > ftrackSession + instance > ftrackIntegratedAssetVersionsData +""" + +import sys + +import six +import pyblish.api + + +class IntegrateFtrackDescription(pyblish.api.InstancePlugin): + """Add description to AssetVersions in Ftrack.""" + + # Must be after integrate asset new + order = pyblish.api.IntegratorOrder + 0.4999 + label = "Integrate Ftrack description" + families = ["ftrack"] + optional = True + + # Can be set in settings: + # - Allows `intent` and `comment` keys + description_template = "{comment}" + + def process(self, instance): + # Check if there are any integrated AssetVersion entities + asset_versions_key = "ftrackIntegratedAssetVersionsData" + asset_versions_data_by_id = instance.data.get(asset_versions_key) + if not asset_versions_data_by_id: + self.log.info("There are any integrated AssetVersions") + return + + comment = (instance.context.data.get("comment") or "").strip() + if not comment: + self.log.info("Comment is not set.") + else: + self.log.debug("Comment is set to `{}`".format(comment)) + + session = instance.context.data["ftrackSession"] + + intent = instance.context.data.get("intent") + intent_label = None + if intent and isinstance(intent, dict): + intent_val = intent.get("value") + intent_label = intent.get("label") + else: + intent_val = intent + + if not intent_label: + intent_label = intent_val or "" + + # if intent label is set then format comment + # - it is possible that intent_label is equal to "" (empty string) + if intent_label: + self.log.debug( + "Intent label is set to `{}`.".format(intent_label) + ) + + else: + self.log.debug("Intent is not set.") + + for asset_version_data in asset_versions_data_by_id.values(): + asset_version = asset_version_data["asset_version"] + + # Backwards compatibility for older settings using + # attribute 'note_with_intent_template' + comment = self.description_template.format(**{ + "intent": intent_label, + "comment": comment + }) + asset_version["comment"] = comment + + try: + session.commit() + self.log.debug("Comment added to AssetVersion \"{}\"".format( + str(asset_version) + )) + except Exception: + tp, value, tb = sys.exc_info() + session.rollback() + session._configure_locations() + six.reraise(tp, value, tb) diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py index cff7cd32cb..5ea0469bce 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py @@ -35,10 +35,18 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): "image": "img", "reference": "reference" } + keep_first_subset_name_for_review = True def process(self, instance): self.log.debug("instance {}".format(instance)) + instance_repres = instance.data.get("representations") + if not instance_repres: + self.log.info(( + "Skipping instance. 
Does not have any representations {}" + ).format(str(instance))) + return + instance_version = instance.data.get("version") if instance_version is None: raise ValueError("Instance version not set") @@ -52,8 +60,12 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): if not asset_type and family_low in self.family_mapping: asset_type = self.family_mapping[family_low] - self.log.debug(self.family_mapping) - self.log.debug(family_low) + if not asset_type: + asset_type = "upload" + + self.log.debug( + "Family: {}\nMapping: {}".format(family_low, self.family_mapping) + ) # Ignore this instance if neither "ftrackFamily" or a family mapping is # found. @@ -63,13 +75,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ).format(family)) return - instance_repres = instance.data.get("representations") - if not instance_repres: - self.log.info(( - "Skipping instance. Does not have any representations {}" - ).format(str(instance))) - return - # Prepare FPS instance_fps = instance.data.get("fps") if instance_fps is None: @@ -168,7 +173,47 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Change asset name of each new component for review is_first_review_repre = True not_first_components = [] + extended_asset_name = "" + multiple_reviewable = len(review_representations) > 1 for repre in review_representations: + # Create copy of base comp item and append it + review_item = copy.deepcopy(base_component_item) + + # get asset name and define extended name variant + asset_name = review_item["asset_data"]["name"] + extended_asset_name = "_".join( + (asset_name, repre["name"]) + ) + + # reset extended if no need for extended asset name + if ( + self.keep_first_subset_name_for_review + and is_first_review_repre + ): + extended_asset_name = "" + else: + # only rename if multiple reviewable + if multiple_reviewable: + review_item["asset_data"]["name"] = extended_asset_name + else: + extended_asset_name = "" + + # rename all already created components + # only if first repre and extended name available + if is_first_review_repre and extended_asset_name: + # and rename all already created components + for _ci in component_list: + _ci["asset_data"]["name"] = extended_asset_name + + # and rename all already created src components + for _sci in src_components_to_add: + _sci["asset_data"]["name"] = extended_asset_name + + # rename also first thumbnail component if any + if first_thumbnail_component is not None: + first_thumbnail_component[ + "asset_data"]["name"] = extended_asset_name + frame_start = repre.get("frameStartFtrack") frame_end = repre.get("frameEndFtrack") if frame_start is None or frame_end is None: @@ -184,8 +229,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): if fps is None: fps = instance_fps - # Create copy of base comp item and append it - review_item = copy.deepcopy(base_component_item) # Change location review_item["component_path"] = repre["published_path"] # Change component data @@ -200,18 +243,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): }) } } - # Create copy of item before setting location or changing asset - src_components_to_add.append(copy.deepcopy(review_item)) + if is_first_review_repre: is_first_review_repre = False else: - # Add representation name to asset name of "not first" review - asset_name = review_item["asset_data"]["name"] - review_item["asset_data"]["name"] = "_".join( - (asset_name, repre["name"]) - ) + # later detection for thumbnail duplication not_first_components.append(review_item) + # Create copy of 
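The extended-name rules above can be summarized: the first reviewable keeps the plain subset name when `keep_first_subset_name_for_review` is set, and the representation suffix is only appended when there is more than one reviewable. A condensed sketch of that decision (not the plugin's exact code, which also renames already-created components):

```python
def review_asset_name(base_name, repre_name, is_first, multiple,
                      keep_first_name=True):
    """Mirror the extended-name rules for review components above."""
    if keep_first_name and is_first:
        # First reviewable keeps the plain subset name
        return base_name
    if multiple:
        # Suffix only when there is more than one reviewable repre
        return "_".join((base_name, repre_name))
    return base_name


print(review_asset_name("renderSh010", "h264", True, True))   # renderSh010
print(review_asset_name("renderSh010", "exr", False, True))   # renderSh010_exr
```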
item before setting location + src_components_to_add.append(copy.deepcopy(review_item)) + # Set location review_item["component_location"] = ftrack_server_location # Add item to component list @@ -249,6 +290,14 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): continue # Create copy of base comp item and append it other_item = copy.deepcopy(base_component_item) + + # add extended name if any + if ( + not self.keep_first_subset_name_for_review + and extended_asset_name + ): + other_item["asset_data"]["name"] = extended_asset_name + other_item["component_data"] = { "name": repre["name"] } diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py index acd295854d..952b21546d 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py @@ -1,7 +1,17 @@ +""" +Requires: + context > hostName + context > appName + context > appLabel + context > comment + context > ftrackSession + instance > ftrackIntegratedAssetVersionsData +""" + import sys -import json -import pyblish.api + import six +import pyblish.api class IntegrateFtrackNote(pyblish.api.InstancePlugin): @@ -15,100 +25,52 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): # Can be set in presets: # - Allows only `intent` and `comment` keys + note_template = None + # Backwards compatibility note_with_intent_template = "{intent}: {comment}" # - note label must exist in Ftrack note_labels = [] - def get_intent_label(self, session, intent_value): - if not intent_value: - return - - intent_configurations = session.query( - "CustomAttributeConfiguration where key is intent" - ).all() - if not intent_configurations: - return - - intent_configuration = intent_configurations[0] - if len(intent_configuration) > 1: - self.log.warning(( - "Found more than one `intent` custom attribute." - " Using first found." 
- )) - - config = intent_configuration.get("config") - if not config: - return - - configuration = json.loads(config) - items = configuration.get("data") - if not items: - return - - if sys.version_info[0] < 3: - string_type = basestring - else: - string_type = str - - if isinstance(items, string_type): - items = json.loads(items) - - intent_label = None - for item in items: - if item["value"] == intent_value: - intent_label = item["menu"] - break - - return intent_label - def process(self, instance): - comment = (instance.context.data.get("comment") or "").strip() + # Check if there are any integrated AssetVersion entities + asset_versions_key = "ftrackIntegratedAssetVersionsData" + asset_versions_data_by_id = instance.data.get(asset_versions_key) + if not asset_versions_data_by_id: + self.log.info("There are any integrated AssetVersions") + return + + context = instance.context + host_name = context.data["hostName"] + app_name = context.data["appName"] + app_label = context.data["appLabel"] + comment = (context.data.get("comment") or "").strip() if not comment: self.log.info("Comment is not set.") - return + else: + self.log.debug("Comment is set to `{}`".format(comment)) - self.log.debug("Comment is set to `{}`".format(comment)) - - session = instance.context.data["ftrackSession"] + session = context.data["ftrackSession"] intent = instance.context.data.get("intent") + intent_label = None if intent and isinstance(intent, dict): intent_val = intent.get("value") intent_label = intent.get("label") else: - intent_val = intent_label = intent + intent_val = intent - final_label = None - if intent_val: - final_label = self.get_intent_label(session, intent_val) - if final_label is None: - final_label = intent_label + if not intent_label: + intent_label = intent_val or "" # if intent label is set then format comment # - it is possible that intent_label is equal to "" (empty string) - if final_label: - msg = "Intent label is set to `{}`.".format(final_label) - comment = self.note_with_intent_template.format(**{ - "intent": final_label, - "comment": comment - }) - - elif intent_val: - msg = ( - "Intent is set to `{}` and was not added" - " to comment because label is set to `{}`." - ).format(intent_val, final_label) + if intent_label: + self.log.debug( + "Intent label is set to `{}`.".format(intent_label) + ) else: - msg = "Intent is not set." 
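The rewritten note plugin formats one template against a flat `format_data` dict and skips posting when the result is empty. A sketch with hypothetical values, using a template that exercises several of the available keys:

```python
note_template = "{intent}: {comment}\nPublished from {app_label}"
format_data = {
    "intent": "Final",                 # hypothetical resolved intent label
    "comment": "Fixed the flicker",
    "host_name": "nuke",
    "app_name": "nuke/13-2",
    "app_label": "Nuke 13.2",
    "published_paths": "/projects/tp/sh010/publish/render_v003.exr",
}

note_text = note_template.format(**format_data)
# An empty result (e.g. template "{comment}" with no comment set) is
# the case the plugin detects and skips instead of posting a note
print(note_text)
```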
-
-        self.log.debug(msg)
-
-        asset_versions_key = "ftrackIntegratedAssetVersions"
-        asset_versions = instance.data.get(asset_versions_key)
-        if not asset_versions:
-            self.log.info("There are any integrated AssetVersions")
-            return
+            self.log.debug("Intent is not set.")
 
         user = session.query(
             "User where username is \"{}\"".format(session.api_user)
@@ -122,7 +84,7 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
 
         labels = []
         if self.note_labels:
-            all_labels = session.query("NoteLabel").all()
+            all_labels = session.query("select id, name from NoteLabel").all()
             labels_by_low_name = {lab["name"].lower(): lab for lab in all_labels}
             for _label in self.note_labels:
                 label = labels_by_low_name.get(_label.lower())
@@ -134,7 +96,34 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
 
             labels.append(label)
 
-        for asset_version in asset_versions:
+        for asset_version_data in asset_versions_data_by_id.values():
+            asset_version = asset_version_data["asset_version"]
+            component_items = asset_version_data["component_items"]
+
+            published_paths = set()
+            for component_item in component_items:
+                published_paths.add(component_item["component_path"])
+
+            # Backwards compatibility for older settings using
+            #   attribute 'note_with_intent_template'
+            template = self.note_template
+            if template is None:
+                template = self.note_with_intent_template
+            format_data = {
+                "intent": intent_label,
+                "comment": comment,
+                "host_name": host_name,
+                "app_name": app_name,
+                "app_label": app_label,
+                "published_paths": "<br>".join(sorted(published_paths)),
+            }
+            comment = template.format(**format_data)
+            if not comment:
+                self.log.info((
+                    "Note for AssetVersion {} would be empty. Skipping."
+                    "\nTemplate: {}\nData: {}"
+                ).format(asset_version["id"], template, format_data))
+                continue
 
             asset_version.create_note(comment, author=user, labels=labels)
 
             try:
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
index 61892240d7..cf90c11b65 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
+++ b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py
@@ -2,7 +2,8 @@ import sys
 import collections
 import six
 import pyblish.api
-from avalon import io
+
+from openpype.pipeline import legacy_io
 
 # Copy of constant `openpype_modules.ftrack.lib.avalon_sync.CUST_ATTR_AUTO_SYNC`
 CUST_ATTR_AUTO_SYNC = "avalon_auto_sync"
@@ -80,8 +81,8 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
         auto_sync_state = project[
             "custom_attributes"][CUST_ATTR_AUTO_SYNC]
 
-        if not io.Session:
-            io.install()
+        if not legacy_io.Session:
+            legacy_io.install()
 
         self.ft_project = None
 
@@ -271,7 +272,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
 
         # Create new links.
         for input in entity_data.get("inputs", []):
-            input_id = io.find_one({"_id": input})["data"]["ftrackId"]
+            input_id = legacy_io.find_one({"_id": input})["data"]["ftrackId"]
             assetbuild = self.session.get("AssetBuild", input_id)
             self.log.debug(
                 "Creating link from {0} to {1}".format(
diff --git a/openpype/modules/ftrack/scripts/sub_event_storer.py b/openpype/modules/ftrack/scripts/sub_event_storer.py
index 5543ed74e2..946ecbff79 100644
--- a/openpype/modules/ftrack/scripts/sub_event_storer.py
+++ b/openpype/modules/ftrack/scripts/sub_event_storer.py
@@ -67,7 +67,7 @@ def launch(event):
 
     except pymongo.errors.AutoReconnect:
         log.error("Mongo server \"{}\" is not responding, exiting.".format(
-            os.environ["AVALON_MONGO"]
+            os.environ["OPENPYPE_MONGO"]
         ))
         sys.exit(0)
 
diff --git a/openpype/modules/log_viewer/tray/app.py b/openpype/modules/log_viewer/tray/app.py
index 1e8d6483cd..def319e0e3 100644
--- a/openpype/modules/log_viewer/tray/app.py
+++ b/openpype/modules/log_viewer/tray/app.py
@@ -26,3 +26,12 @@ class LogsWindow(QtWidgets.QWidget):
         self.log_detail = log_detail
 
         self.setStyleSheet(style.load_stylesheet())
+
+        self._first_show = True
+
+    def showEvent(self, event):
+        super(LogsWindow, self).showEvent(event)
+
+        if self._first_show:
+            self._first_show = False
+            self.logs_widget.refresh()
diff --git a/openpype/modules/log_viewer/tray/widgets.py b/openpype/modules/log_viewer/tray/widgets.py
index ff77405de5..ed08e62109 100644
--- a/openpype/modules/log_viewer/tray/widgets.py
+++ b/openpype/modules/log_viewer/tray/widgets.py
@@ -155,6 +155,11 @@ class LogsWidget(QtWidgets.QWidget):
             QtCore.Qt.DescendingOrder
         )
 
+        refresh_triggered_timer = QtCore.QTimer()
+        refresh_triggered_timer.setSingleShot(True)
+        refresh_triggered_timer.setInterval(200)
+
+        refresh_triggered_timer.timeout.connect(self._on_refresh_timeout)
         view.selectionModel().selectionChanged.connect(self._on_index_change)
         refresh_btn.clicked.connect(self._on_refresh_clicked)
 
@@ -169,10 +174,12 @@ class LogsWidget(QtWidgets.QWidget):
         self.detail_widget = detail_widget
         self.refresh_btn = refresh_btn
 
-        # prepare
-        self.refresh()
+        self._refresh_triggered_timer = refresh_triggered_timer
 
     def refresh(self):
+        self._refresh_triggered_timer.start()
+
+    def 
_on_refresh_timeout(self): self.model.refresh() self.detail_widget.refresh() diff --git a/openpype/modules/python_console_interpreter/window/widgets.py b/openpype/modules/python_console_interpreter/window/widgets.py index ecf41eaf3e..36ce1b61a2 100644 --- a/openpype/modules/python_console_interpreter/window/widgets.py +++ b/openpype/modules/python_console_interpreter/window/widgets.py @@ -389,7 +389,8 @@ class PythonInterpreterWidget(QtWidgets.QWidget): self._append_lines([openpype_art]) - self.setStyleSheet(load_stylesheet()) + self._first_show = True + self._splitter_size_ratio = None self._init_from_registry() @@ -416,9 +417,9 @@ class PythonInterpreterWidget(QtWidgets.QWidget): self.resize(width, height) try: - sizes = setting_registry.get_item("splitter_sizes") - if len(sizes) == len(self._widgets_splitter.sizes()): - self._widgets_splitter.setSizes(sizes) + self._splitter_size_ratio = ( + setting_registry.get_item("splitter_sizes") + ) except ValueError: pass @@ -627,8 +628,29 @@ class PythonInterpreterWidget(QtWidgets.QWidget): def showEvent(self, event): self._line_check_timer.start() super(PythonInterpreterWidget, self).showEvent(event) + # First show setup + if self._first_show: + self._first_show = False + self._on_first_show() + self._output_widget.scroll_to_bottom() + def _on_first_show(self): + # Change stylesheet + self.setStyleSheet(load_stylesheet()) + # Check if splitter size ratio is set + # - first store value to local variable and then unset it + splitter_size_ratio = self._splitter_size_ratio + self._splitter_size_ratio = None + # Skip if is not set + if not splitter_size_ratio: + return + + # Skip if number of size items does not match to splitter + splitters_count = len(self._widgets_splitter.sizes()) + if len(splitter_size_ratio) == splitters_count: + self._widgets_splitter.setSizes(splitter_size_ratio) + def closeEvent(self, event): self.save_registry() super(PythonInterpreterWidget, self).closeEvent(event) diff --git a/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py b/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py index 4d216c1c0a..65af90e8a6 100644 --- a/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py +++ b/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py @@ -7,7 +7,8 @@ import json from pprint import pformat import pyblish.api -from avalon import api + +from openpype.pipeline import legacy_io def collect(root, @@ -127,7 +128,7 @@ class CollectSequencesFromJob(pyblish.api.ContextPlugin): session = metadata.get("session") if session: self.log.info("setting session using metadata") - api.Session.update(session) + legacy_io.Session.update(session) os.environ.update(session) else: @@ -187,7 +188,9 @@ class CollectSequencesFromJob(pyblish.api.ContextPlugin): "family": families[0], # backwards compatibility / pyblish "families": list(families), "subset": subset, - "asset": data.get("asset", api.Session["AVALON_ASSET"]), + "asset": data.get( + "asset", legacy_io.Session["AVALON_ASSET"] + ), "stagingDir": root, "frameStart": start, "frameEnd": end, diff --git a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py b/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py index 82a79daf3b..cdc37588cd 100644 --- a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py +++ 
b/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py @@ -119,7 +119,7 @@ class OpenPypeContextSelector: # app names and versions, but since app_name is not used # currently down the line (but it is required by OP publish command # right now). - self.context["app_name"] = "maya/2020" + # self.context["app_name"] = "maya/2022" return True @staticmethod @@ -139,7 +139,8 @@ class OpenPypeContextSelector: env = {"AVALON_PROJECT": str(self.context.get("project")), "AVALON_ASSET": str(self.context.get("asset")), "AVALON_TASK": str(self.context.get("task")), - "AVALON_APP_NAME": str(self.context.get("app_name"))} + # "AVALON_APP_NAME": str(self.context.get("app_name")) + } print(">>> setting environment:") for k, v in env.items(): @@ -184,7 +185,7 @@ selector = OpenPypeContextSelector() selector.context["project"] = os.getenv("AVALON_PROJECT") selector.context["asset"] = os.getenv("AVALON_ASSET") selector.context["task"] = os.getenv("AVALON_TASK") -selector.context["app_name"] = os.getenv("AVALON_APP_NAME") +# selector.context["app_name"] = os.getenv("AVALON_APP_NAME") # if anything inside is None, scratch the whole thing and # ask user for context. diff --git a/openpype/modules/slack/plugins/publish/collect_slack_family.py b/openpype/modules/slack/plugins/publish/collect_slack_family.py index 6c965b04cd..39b05937dc 100644 --- a/openpype/modules/slack/plugins/publish/collect_slack_family.py +++ b/openpype/modules/slack/plugins/publish/collect_slack_family.py @@ -1,7 +1,7 @@ -from avalon import io import pyblish.api from openpype.lib.profiles_filtering import filter_profiles +from openpype.pipeline import legacy_io class CollectSlackFamilies(pyblish.api.InstancePlugin): @@ -18,7 +18,7 @@ class CollectSlackFamilies(pyblish.api.InstancePlugin): profiles = None def process(self, instance): - task_name = io.Session.get("AVALON_TASK") + task_name = legacy_io.Session.get("AVALON_TASK") family = self.main_family_from_instance(instance) key_values = { "families": family, @@ -35,20 +35,25 @@ class CollectSlackFamilies(pyblish.api.InstancePlugin): return # make slack publishable - if profile: - self.log.info("Found profile: {}".format(profile)) - if instance.data.get('families'): - instance.data['families'].append('slack') - else: - instance.data['families'] = ['slack'] + if not profile: + return - instance.data["slack_channel_message_profiles"] = \ - profile["channel_messages"] + self.log.info("Found profile: {}".format(profile)) + if instance.data.get('families'): + instance.data['families'].append('slack') + else: + instance.data['families'] = ['slack'] - slack_token = (instance.context.data["project_settings"] - ["slack"] - ["token"]) - instance.data["slack_token"] = slack_token + selected_profiles = profile["channel_messages"] + for prof in selected_profiles: + prof["review_upload_limit"] = profile.get("review_upload_limit", + 50) + instance.data["slack_channel_message_profiles"] = selected_profiles + + slack_token = (instance.context.data["project_settings"] + ["slack"] + ["token"]) + instance.data["slack_token"] = slack_token def main_family_from_instance(self, instance): # TODO yank from integrate """Returns main family of entered instance.""" diff --git a/openpype/modules/slack/plugins/publish/integrate_slack_api.py b/openpype/modules/slack/plugins/publish/integrate_slack_api.py index 018a7594bb..10bde7d4c0 100644 --- a/openpype/modules/slack/plugins/publish/integrate_slack_api.py +++ b/openpype/modules/slack/plugins/publish/integrate_slack_api.py @@ 
-35,7 +35,7 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
             message = self._get_filled_message(message_profile["message"],
                                                instance,
                                                review_path)
-            self.log.info("message:: {}".format(message))
+            self.log.debug("message:: {}".format(message))
             if not message:
                 return
 
@@ -43,7 +43,8 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
                 publish_files.add(thumbnail_path)
 
             if message_profile["upload_review"] and review_path:
-                publish_files.add(review_path)
+                message, publish_files = self._handle_review_upload(
+                    message, message_profile, publish_files, review_path)
 
             project = instance.context.data["anatomyData"]["project"]["code"]
             for channel in message_profile["channels"]:
@@ -75,6 +76,19 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
             dbcon = mongo_client[database_name]["notification_messages"]
             dbcon.insert_one(msg)
 
+    def _handle_review_upload(self, message, message_profile, publish_files,
+                              review_path):
+        """Check if uploaded file is not too large."""
+        review_file_size_MB = os.path.getsize(review_path) / 1024 / 1024
+        file_limit = message_profile.get("review_upload_limit", 50)
+        if review_file_size_MB > file_limit:
+            message += "\nReview upload omitted because of file size."
+            if review_path not in message:
+                message += "\nFile located at: {}".format(review_path)
+        else:
+            publish_files.add(review_path)
+        return message, publish_files
+
     def _get_filled_message(self, message_templ, instance, review_path=None):
         """Use message_templ and data from instance to get message content.
 
@@ -210,6 +224,9 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
             # You will get a SlackApiError if "ok" is False
             error_str = self._enrich_error(str(e.response["error"]), channel)
             self.log.warning("Error happened {}".format(error_str))
+        except Exception as e:
+            error_str = self._enrich_error(str(e), channel)
+            self.log.warning("Not a SlackAPI error: {}".format(error_str),
+                             exc_info=True)
 
         return None, []
 
diff --git a/openpype/modules/sync_server/providers/dropbox.py b/openpype/modules/sync_server/providers/dropbox.py
index f5910299e5..dfc42fed75 100644
--- a/openpype/modules/sync_server/providers/dropbox.py
+++ b/openpype/modules/sync_server/providers/dropbox.py
@@ -17,6 +17,7 @@ class DropboxHandler(AbstractProvider):
         self.active = False
         self.site_name = site_name
         self.presets = presets
+        self.dbx = None
 
         if not self.presets:
             log.info(
@@ -24,6 +25,11 @@ class DropboxHandler(AbstractProvider):
             )
             return
 
+        if not self.presets["enabled"]:
+            log.debug("Sync Server: Site {} not enabled for {}.".
+                      format(site_name, project_name))
+            return
+
         token = self.presets.get("token", "")
         if not token:
             msg = "Sync Server: No access token for dropbox provider"
@@ -44,16 +50,13 @@ class DropboxHandler(AbstractProvider):
             log.info(msg)
             return
 
-        self.dbx = None
-
-        if self.presets["enabled"]:
-            try:
-                self.dbx = self._get_service(
-                    token, acting_as_member, team_folder_name
-                )
-            except Exception as e:
-                log.info("Could not establish dropbox object: {}".format(e))
-                return
+        try:
+            self.dbx = self._get_service(
+                token, acting_as_member, team_folder_name
+            )
+        except Exception as e:
+            log.info("Could not establish dropbox object: {}".format(e))
+            return
 
         super(AbstractProvider, self).__init__()
 
diff --git a/openpype/modules/sync_server/providers/gdrive.py b/openpype/modules/sync_server/providers/gdrive.py
index 0b586613b5..aa7329b104 100644
--- a/openpype/modules/sync_server/providers/gdrive.py
+++ b/openpype/modules/sync_server/providers/gdrive.py
@@ -73,8 +73,28 @@ class GDriveHandler(AbstractProvider):
                 format(site_name))
             return
 
-        cred_path = self.presets.get("credentials_url", {}).\
-            get(platform.system().lower()) or ''
+        if not self.presets["enabled"]:
+            log.debug("Sync Server: Site {} not enabled for {}.".
+                      format(site_name, project_name))
+            return
+
+        current_platform = platform.system().lower()
+        cred_path = self.presets.get("credentials_url", {}). \
+            get(current_platform) or ''
+
+        if not cred_path:
+            msg = "Sync Server: Please fill in the credentials for the "\
+                  "gdrive provider for platform '{}'!".format(current_platform)
+            log.info(msg)
+            return
+
+        try:
+            cred_path = cred_path.format(**os.environ)
+        except KeyError as e:
+            log.info("Sync Server: The key(s) {} do not exist in the "
+                     "environment variables".format(" ".join(e.args)))
+            return
+
         if not os.path.exists(cred_path):
             msg = "Sync Server: No credentials for gdrive provider " + \
                   "for '{}' on path '{}'!".format(site_name, cred_path)
@@ -82,11 +102,10 @@ class GDriveHandler(AbstractProvider):
             return
 
         self.service = None
-        if self.presets["enabled"]:
-            self.service = self._get_gd_service(cred_path)
+        self.service = self._get_gd_service(cred_path)
 
-        self._tree = tree
-        self.active = True
+        self._tree = tree
+        self.active = True
 
     def is_active(self):
         """
diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py
index caf58503f1..45ff8bc4d1 100644
--- a/openpype/modules/sync_server/sync_server_module.py
+++ b/openpype/modules/sync_server/sync_server_module.py
@@ -4,9 +4,8 @@ from datetime import datetime
 import threading
 import platform
 import copy
-from collections import deque
+from collections import deque, defaultdict
 
-from avalon.api import AvalonMongoDB
 from openpype.modules import OpenPypeModule
 from openpype_interfaces import ITrayModule
 
@@ -14,16 +13,19 @@ from openpype.api import (
     Anatomy,
     get_project_settings,
     get_system_settings,
-    get_local_site_id)
+    get_local_site_id
+)
 from openpype.lib import PypeLogger
+from openpype.pipeline import AvalonMongoDB
 from openpype.settings.lib import (
     get_default_anatomy_settings,
-    get_anatomy_settings)
+    get_anatomy_settings
+)
 
 from .providers.local_drive import LocalDriveHandler
 from .providers import lib
-from .utils import time_function, SyncStatus
+from .utils import time_function, SyncStatus, SiteAlreadyPresentError
 
 log = PypeLogger().get_logger("SyncServer")
 
@@ -131,21 +133,25 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
     def add_site(self, collection, representation_id, site_name=None,
                  force=False):
         """
-        Adds
 new site to representation to be synced.
+        Adds new site to representation to be synced.
 
-        'collection' must have synchronization enabled (globally or
-        project only)
+        'collection' must have synchronization enabled (globally or
+        project only)
 
-        Used as a API endpoint from outside applications (Loader etc)
+        Used as an API endpoint from outside applications (Loader etc).
 
-        Args:
-            collection (string): project name (must match DB)
-            representation_id (string): MongoDB _id value
-            site_name (string): name of configured and active site
-            force (bool): reset site if exists
+        Use 'force' to reset an existing site.
 
-        Returns:
-            throws ValueError if any issue
+        Args:
+            collection (string): project name (must match DB)
+            representation_id (string): MongoDB _id value
+            site_name (string): name of configured and active site
+            force (bool): reset site if exists
+
+        Throws:
+            SiteAlreadyPresentError - if adding already existing site and
+                not 'force'
+            ValueError - other errors (repre not found, misconfiguration)
         """
         if not self.get_sync_project_setting(collection):
            raise ValueError("Project not configured")
@@ -155,9 +161,9 @@
 
         self.reset_site_on_representation(collection,
                                           representation_id,
-                                          site_name=site_name, force=force)
+                                          site_name=site_name,
+                                          force=force)
 
-    # public facing API
     def remove_site(self, collection, representation_id, site_name,
                     remove_local_files=False):
         """
@@ -184,6 +190,151 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
 
         if remove_local_files:
             self._remove_local_file(collection, representation_id, site_name)
 
+    def compute_resource_sync_sites(self, project_name):
+        """Get available resource sync sites state for publish process.
+
+        Returns dict with prepared state of sync sites for 'project_name'.
+        It checks if Site Sync is enabled and handles alternative sites.
+        The publish process stores this dictionary as a part of the
+        representation document in DB.
+
+        Example:
+        [
+            {
+                'name': '42abbc09-d62a-44a4-815c-a12cd679d2d7',
+                'created_dt': datetime.datetime(2022, 3, 30, 12, 16, 9, 778637)
+            },
+            {'name': 'studio'},
+            {'name': 'SFTP'}
+        ] -- the representation is published locally; the artist or Settings
+        set the remote site to 'studio'. 'SFTP' is an alternative site to
+        'studio', e.g. whenever a file is on 'studio', it is also on 'SFTP'.
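
The Example above can be condensed into a runnable sketch of the same metadata assembly; `create_metadata` mirrors the helper defined in this method, and the site names are illustrative only:

from datetime import datetime

def create_metadata(name, created=True):
    # Site skeleton stored with the representation document
    metadata = {"name": name}
    if created:
        metadata["created_dt"] = datetime.now()
    return metadata

local_site = "42abbc09-d62a-44a4-815c-a12cd679d2d7"
remote_site = "studio"

attached_sites = {local_site: create_metadata(local_site)}
if remote_site and remote_site not in attached_sites:
    # The remote site gets no 'created_dt' yet - the files are not there
    attached_sites[remote_site] = create_metadata(remote_site, created=False)

print(list(attached_sites.values()))
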
+        """
+
+        def create_metadata(name, created=True):
+            """Create sync site metadata for site with `name`"""
+            metadata = {"name": name}
+            if created:
+                metadata["created_dt"] = datetime.now()
+            return metadata
+
+        if (
+            not self.sync_system_settings["enabled"] or
+            not self.sync_project_settings[project_name]["enabled"]):
+            return [create_metadata(self.DEFAULT_SITE)]
+
+        local_site = self.get_active_site(project_name)
+        remote_site = self.get_remote_site(project_name)
+
+        # Attached sites metadata by site name
+        # That is the local site, the remote site, the always accessible
+        # sites and their alternative sites (aliases of sites with a
+        # different protocol)
+        attached_sites = dict()
+        attached_sites[local_site] = create_metadata(local_site)
+
+        if remote_site and remote_site not in attached_sites:
+            attached_sites[remote_site] = create_metadata(remote_site,
+                                                          created=False)
+
+        attached_sites = self._add_alternative_sites(attached_sites)
+        # Add skeleton for sites that should always be synced to.
+        # Usually that is a backup site which is handled by a separate
+        # background process.
+        for site in self._get_always_accessible_sites(project_name):
+            if site not in attached_sites:
+                attached_sites[site] = create_metadata(site, created=False)
+
+        return list(attached_sites.values())
+
+    def _get_always_accessible_sites(self, project_name):
+        """Sites that are synced to as part of a background process.
+
+        An artist machine doesn't handle those; an explicit Tray with that
+        site name as its local id must be running.
+        An example is a dropbox site serving as a backup solution.
+        """
+        always_accessible_sites = (
+            self.get_sync_project_setting(project_name)["config"].
+            get("always_accessible_on", [])
+        )
+        return [site.strip() for site in always_accessible_sites]
+
+    def _add_alternative_sites(self, attached_sites):
+        """Add skeleton documents for alternative sites.
+
+        Each new site configured in System Settings can serve as an
+        alternative site, which is a kind of alias. It means that files on
+        'a site' are physically accessible also on 'an alternative' site.
+        An example is an sftp site serving studio files via the sftp
+        protocol; physically the file is only in the studio, and the sftp
+        server has this location mounted.
+        """
+        additional_sites = self.sync_system_settings.get("sites", {})
+
+        alt_site_pairs = self._get_alt_site_pairs(additional_sites)
+
+        for site_name in additional_sites.keys():
+            # Get alternative sites (stripped names) for this site name
+            alt_sites = alt_site_pairs.get(site_name)
+            alt_sites = [site.strip() for site in alt_sites]
+            alt_sites = set(alt_sites)
+
+            # If there are no alternative sites there is nothing to add
+            if not alt_sites:
+                continue
+
+            # Take a copy of data of the first alternative site that is
+            # already defined as an attached site to match the same state.
+            match_meta = next((attached_sites[site] for site in alt_sites
+                               if site in attached_sites), None)
+            if not match_meta:
+                continue
+
+            alt_site_meta = copy.deepcopy(match_meta)
+            alt_site_meta["name"] = site_name
+
+            # Note: We change the mutable `attached_sites` dict in-place
+            attached_sites[site_name] = alt_site_meta
+
+        return attached_sites
+
+    def _get_alt_site_pairs(self, conf_sites):
+        """Returns dict of site and its alternative sites.
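
A standalone sketch of the symmetric pairing `_get_alt_site_pairs` builds from the configuration, assuming an invented two-site setup:

from collections import defaultdict

conf_sites = {
    "studio": {},
    "SFTP": {"alternative_sites": ["studio"]},
}

alt_site_pairs = defaultdict(set)
for site_name, site_info in conf_sites.items():
    alt_sites = set(site_info.get("alternative_sites", []))
    alt_site_pairs[site_name].update(alt_sites)
    # The relation is symmetric, so mark 'site_name' on each alternative too
    for alt_site in alt_sites:
        alt_site_pairs[alt_site].add(site_name)

print(dict(alt_site_pairs))  # {'studio': {'SFTP'}, 'SFTP': {'studio'}}
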
+
+        If `site` has an alternative site, it means that the alternative
+        site has `site` as its alternative site as well.
+
+        Args:
+            conf_sites (dict)
+        Returns:
+            (dict): {'site': [alternative sites]...}
+        """
+        alt_site_pairs = defaultdict(set)
+        for site_name, site_info in conf_sites.items():
+            alt_sites = set(site_info.get("alternative_sites", []))
+            alt_site_pairs[site_name].update(alt_sites)
+
+            for alt_site in alt_sites:
+                alt_site_pairs[alt_site].add(site_name)
+
+        for site_name, alt_sites in alt_site_pairs.items():
+            sites_queue = deque(alt_sites)
+            while sites_queue:
+                alt_site = sites_queue.popleft()
+
+                # safety against wrong config
+                # {"SFTP": {"alternative_site": "SFTP"}}
+                if alt_site == site_name or alt_site not in alt_site_pairs:
+                    continue
+
+                for alt_alt_site in alt_site_pairs[alt_site]:
+                    if (
+                        alt_alt_site != site_name
+                        and alt_alt_site not in alt_sites
+                    ):
+                        alt_sites.add(alt_alt_site)
+                        sites_queue.append(alt_alt_site)
+
+        return alt_site_pairs
+
     def clear_project(self, collection, site_name):
         """
             Clear 'collection' of 'site_name' and its local files
@@ -207,36 +358,38 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
     def create_validate_project_task(self, collection, site_name):
         """Adds metadata about project files validation on a queue.
 
-        This process will loop through all representation and check if
-        their files actually exist on an active site.
+        This process will loop through all representations and check if
+        their files actually exist on an active site.
 
-        This might be useful for edge cases when artists is switching
-        between sites, remote site is actually physically mounted and
-        active site has same file urls etc.
+        It also checks if a site is set in DB but the file is physically
+        not present.
 
-        Task will run on a asyncio loop, shouldn't be blocking.
+        This might be useful for edge cases when an artist is switching
+        between sites, a remote site is actually physically mounted and
+        the active site has the same file urls etc.
+
+        Task will run on an asyncio loop and shouldn't be blocking.
         """
         task = {
             "type": "validate",
             "project_name": collection,
-            "func": lambda: self.validate_project(collection, site_name)
+            "func": lambda: self.validate_project(collection, site_name,
+                                                  reset_missing=True)
         }
         self.projects_processed.add(collection)
         self.long_running_tasks.append(task)
 
-    def validate_project(self, collection, site_name, remove_missing=False):
-        """
-            Validate 'collection' of 'site_name' and its local files
+    def validate_project(self, collection, site_name, reset_missing=False):
+        """Validate 'collection' of 'site_name' and its local files
 
-            If file present and not marked with a 'site_name' in DB, DB is
-            updated with site name and file modified date.
+        If a file is present and not marked with a 'site_name' in DB, DB is
+        updated with the site name and file modified date.
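
The queue-based expansion above makes the alternative-site relation transitive, so alternatives of alternatives are included as well. A runnable sketch with an invented three-site chain:

from collections import defaultdict, deque

alt_site_pairs = defaultdict(set, {
    "studio": {"SFTP"},
    "SFTP": {"studio", "dropbox"},
    "dropbox": {"SFTP"},
})

for site_name, alt_sites in alt_site_pairs.items():
    sites_queue = deque(alt_sites)
    while sites_queue:
        alt_site = sites_queue.popleft()
        if alt_site == site_name or alt_site not in alt_site_pairs:
            continue
        for alt_alt_site in alt_site_pairs[alt_site]:
            if alt_alt_site != site_name and alt_alt_site not in alt_sites:
                alt_sites.add(alt_alt_site)
                sites_queue.append(alt_alt_site)

print(alt_site_pairs["studio"])  # {'SFTP', 'dropbox'} (set order may vary)
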
- Args: - module (SyncServerModule) - collection (string): project name - site_name (string): active site name - remove_missing (bool): if True remove sites in DB if missing - physically + Args: + collection (string): project name + site_name (string): active site name + reset_missing (bool): if True reset site in DB if missing + physically """ self.log.debug("Validation of {} for {} started".format(collection, site_name)) @@ -251,29 +404,32 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return sites_added = 0 - sites_removed = 0 + sites_reset = 0 for repre in representations: repre_id = repre["_id"] for repre_file in repre.get("files", []): try: - has_site = site_name in [site["name"] - for site in repre_file["sites"]] - except TypeError: + is_on_site = site_name in [site["name"] + for site in repre_file["sites"] + if (site.get("created_dt") and + not site.get("error"))] + except (TypeError, AttributeError): self.log.debug("Structure error in {}".format(repre_id)) continue - if has_site and not remove_missing: - continue - file_path = repre_file.get("path", "") local_file_path = self.get_local_file_path(collection, site_name, file_path) - if local_file_path and os.path.exists(local_file_path): - self.log.debug("Adding site {} for {}".format(site_name, - repre_id)) - if not has_site: + file_exists = (local_file_path and + os.path.exists(local_file_path)) + if not is_on_site: + if file_exists: + self.log.debug( + "Adding site {} for {}".format(site_name, + repre_id)) + query = { "_id": repre_id } @@ -281,27 +437,27 @@ class SyncServerModule(OpenPypeModule, ITrayModule): os.path.getmtime(local_file_path)) elem = {"name": site_name, "created_dt": created_dt} - self._add_site(collection, query, [repre], elem, + self._add_site(collection, query, repre, elem, site_name=site_name, - file_id=repre_file["_id"]) + file_id=repre_file["_id"], + force=True) sites_added += 1 else: - if has_site and remove_missing: - self.log.debug("Removing site {} for {}". + if not file_exists and reset_missing: + self.log.debug("Resetting site {} for {}". 
format(site_name, repre_id))
-                    self.reset_provider_for_file(collection,
-                                                 repre_id,
-                                                 file_id=repre_file["_id"],
-                                                 remove=True)
-                    sites_removed += 1
+                        self.reset_site_on_representation(
+                            collection, repre_id, site_name=site_name,
+                            file_id=repre_file["_id"])
+                        sites_reset += 1
 
             if sites_added % 100 == 0:
                 self.log.debug("Sites added {}".format(sites_added))
 
         self.log.debug("Validation of {} for {} ended".format(collection,
                                                               site_name))
-        self.log.info("Sites added {}, sites removed {}".format(sites_added,
-                                                                sites_removed))
+        self.log.info("Sites added {}, sites reset {}".format(sites_added,
+                                                              sites_reset))
 
     def pause_representation(self, collection, representation_id, site_name):
         """
@@ -819,7 +975,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
             self.log.debug("Adding alternate {} to {}".format(
                 alt_site, representation["_id"]))
             self._add_site(collection, query,
-                           [representation], elem,
+                           representation, elem,
                            alt_site, file_id=file_id, force=True)
 
     """ End of Public API """
@@ -848,6 +1004,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
         if self.enabled and sync_settings.get('enabled'):
             sites.append(self.LOCAL_SITE)
 
+        active_site = sync_settings["config"]["active_site"]
+        # for Tray running background process
+        if active_site not in sites and active_site == get_local_site_id():
+            sites.append(active_site)
+
         return sites
 
     def tray_init(self):
@@ -1418,14 +1579,16 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
             pause (bool or None): if True - pause, False - unpause
             force (bool): hard reset - currently only for add_site
 
-        Returns:
-            throws ValueError
+        Raises:
+            SiteAlreadyPresentError - if adding already existing site and
+                not 'force'
+            ValueError - other errors (repre not found, misconfiguration)
         """
         query = {
             "_id": ObjectId(representation_id)
         }
 
-        representation = list(self.connection.database[collection].find(query))
+        representation = self.connection.database[collection].find_one(query)
         if not representation:
             raise ValueError("Representation {} not found in {}".
format(representation_id, collection)) @@ -1456,7 +1619,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): representation, site_name, pause) else: # add new site to all files for representation self._add_site(collection, query, representation, elem, site_name, - force) + force=force) def _update_site(self, collection, query, update, arr_filter): """ @@ -1511,7 +1674,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Throws ValueError if 'site_name' not found on 'representation' """ found = False - for repre_file in representation.pop().get("files"): + for repre_file in representation.get("files"): for site in repre_file.get("sites"): if site.get("name") == site_name: found = True @@ -1537,7 +1700,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ found = False site = None - for repre_file in representation.pop().get("files"): + for repre_file in representation.get("files"): for site in repre_file.get("sites"): if site["name"] == site_name: found = True @@ -1569,29 +1732,34 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Adds 'site_name' to 'representation' on 'collection' Args: - representation (list of 1 dict) + representation (dict) file_id (ObjectId) Use 'force' to remove existing or raises ValueError """ - reseted_existing = False - for repre_file in representation.pop().get("files"): + reset_existing = False + files = representation.get("files", []) + if not files: + log.debug("No files for {}".format(representation["_id"])) + return + + for repre_file in files: if file_id and file_id != repre_file["_id"]: continue for site in repre_file.get("sites"): if site["name"] == site_name: - if force: + if force or site.get("error"): self._reset_site_for_file(collection, query, elem, repre_file["_id"], site_name) - reseted_existing = True + reset_existing = True else: msg = "Site {} already present".format(site_name) log.info(msg) - raise ValueError(msg) + raise SiteAlreadyPresentError(msg) - if reseted_existing: + if reset_existing: return if not file_id: @@ -1755,7 +1923,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): (int) - number of failed attempts """ _, rec = self._get_site_rec(file.get("sites", []), provider) - return rec.get("tries", 0) + return self._get_tries_count_from_rec(rec) def _get_progress_dict(self, progress): """ diff --git a/openpype/modules/sync_server/utils.py b/openpype/modules/sync_server/utils.py index 85e4e03f77..03f362202f 100644 --- a/openpype/modules/sync_server/utils.py +++ b/openpype/modules/sync_server/utils.py @@ -8,6 +8,11 @@ class ResumableError(Exception): pass +class SiteAlreadyPresentError(Exception): + """Representation has already site skeleton present.""" + pass + + class SyncStatus: DO_NOTHING = 0 DO_UPLOAD = 1 diff --git a/openpype/modules/timers_manager/timers_manager.py b/openpype/modules/timers_manager/timers_manager.py index 47d020104b..3f77a2b7dc 100644 --- a/openpype/modules/timers_manager/timers_manager.py +++ b/openpype/modules/timers_manager/timers_manager.py @@ -1,13 +1,14 @@ import os import platform -from avalon.api import AvalonMongoDB from openpype.modules import OpenPypeModule from openpype_interfaces import ( ITrayService, ILaunchHookPaths ) +from openpype.pipeline import AvalonMongoDB + from .exceptions import InvalidContextError diff --git a/openpype/pipeline/__init__.py b/openpype/pipeline/__init__.py index 26970e4edc..2e441fbf27 100644 --- a/openpype/pipeline/__init__.py +++ b/openpype/pipeline/__init__.py @@ -1,15 +1,28 @@ -from .lib import attribute_definitions +from .constants import ( 
+ AVALON_CONTAINER_ID, + HOST_WORKFILE_EXTENSIONS, +) + +from .mongodb import ( + AvalonMongoDB, +) from .create import ( BaseCreator, Creator, AutoCreator, CreatedInstance, - CreatorError, LegacyCreator, legacy_create, + + discover_creator_plugins, + discover_legacy_creator_plugins, + register_creator_plugin, + deregister_creator_plugin, + register_creator_plugin_path, + deregister_creator_plugin_path, ) from .load import ( @@ -31,6 +44,7 @@ from .load import ( loaders_from_representation, get_representation_path, + get_representation_context, get_repres_contexts, ) @@ -38,18 +52,56 @@ from .publish import ( PublishValidationError, PublishXmlValidationError, KnownPublishError, - OpenPypePyblishPluginMixin + OpenPypePyblishPluginMixin, + OptionalPyblishPluginMixin, ) +from .actions import ( + LauncherAction, + + InventoryAction, + + discover_launcher_actions, + register_launcher_action, + register_launcher_action_path, + + discover_inventory_actions, + register_inventory_action, + register_inventory_action_path, + deregister_inventory_action, + deregister_inventory_action_path, +) + +from .context_tools import ( + install_openpype_plugins, + install_host, + uninstall_host, + is_installed, + + register_root, + registered_root, + + register_host, + registered_host, + deregister_host, +) +install = install_host +uninstall = uninstall_host + __all__ = ( - "attribute_definitions", + "AVALON_CONTAINER_ID", + "HOST_WORKFILE_EXTENSIONS", + + # --- MongoDB --- + "AvalonMongoDB", # --- Create --- "BaseCreator", "Creator", "AutoCreator", "CreatedInstance", + "CreatorError", "CreatorError", @@ -57,6 +109,13 @@ __all__ = ( "LegacyCreator", "legacy_create", + "discover_creator_plugins", + "discover_legacy_creator_plugins", + "register_creator_plugin", + "deregister_creator_plugin", + "register_creator_plugin_path", + "deregister_creator_plugin_path", + # --- Load --- "HeroVersionType", "IncompatibleLoaderError", @@ -76,11 +135,44 @@ __all__ = ( "loaders_from_representation", "get_representation_path", + "get_representation_context", "get_repres_contexts", # --- Publish --- "PublishValidationError", "PublishXmlValidationError", "KnownPublishError", - "OpenPypePyblishPluginMixin" + "OpenPypePyblishPluginMixin", + "OptionalPyblishPluginMixin", + + # --- Actions --- + "LauncherAction", + "InventoryAction", + + "discover_launcher_actions", + "register_launcher_action", + "register_launcher_action_path", + + "discover_inventory_actions", + "register_inventory_action", + "register_inventory_action_path", + "deregister_inventory_action", + "deregister_inventory_action_path", + + # --- Process context --- + "install_openpype_plugins", + "install_host", + "uninstall_host", + "is_installed", + + "register_root", + "registered_root", + + "register_host", + "registered_host", + "deregister_host", + + # Backwards compatible function names + "install", + "uninstall", ) diff --git a/openpype/pipeline/actions.py b/openpype/pipeline/actions.py new file mode 100644 index 0000000000..b488fe3e1f --- /dev/null +++ b/openpype/pipeline/actions.py @@ -0,0 +1,135 @@ +import logging +from openpype.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) + + +class LauncherAction(object): + """A custom action available""" + name = None + label = None + icon = None + color = None + order = 0 + + log = logging.getLogger("LauncherAction") + log.propagate = True + + def is_compatible(self, session): + """Return whether the class is compatible with the 
Session.""" + return True + + def process(self, session, **kwargs): + pass + + +class InventoryAction(object): + """A custom action for the scene inventory tool + + If registered the action will be visible in the Right Mouse Button menu + under the submenu "Actions". + + """ + + label = None + icon = None + color = None + order = 0 + + log = logging.getLogger("InventoryAction") + log.propagate = True + + @staticmethod + def is_compatible(container): + """Override function in a custom class + + This method is specifically used to ensure the action can operate on + the container. + + Args: + container(dict): the data of a loaded asset, see host.ls() + + Returns: + bool + """ + return bool(container.get("objectName")) + + def process(self, containers): + """Override function in a custom class + + This method will receive all containers even those which are + incompatible. It is advised to create a small filter along the lines + of this example: + + valid_containers = filter(self.is_compatible(c) for c in containers) + + The return value will need to be a True-ish value to trigger + the data_changed signal in order to refresh the view. + + You can return a list of container names to trigger GUI to select + treeview items. + + You can return a dict to carry extra GUI options. For example: + { + "objectNames": [container names...], + "options": {"mode": "toggle", + "clear": False} + } + Currently workable GUI options are: + - clear (bool): Clear current selection before selecting by action. + Default `True`. + - mode (str): selection mode, use one of these: + "select", "deselect", "toggle". Default is "select". + + Args: + containers (list): list of dictionaries + + Return: + bool, list or dict + + """ + return True + + +# Launcher action +def discover_launcher_actions(): + return discover(LauncherAction) + + +def register_launcher_action(plugin): + return register_plugin(LauncherAction, plugin) + + +def register_launcher_action_path(path): + return register_plugin_path(LauncherAction, path) + + +# Inventory action +def discover_inventory_actions(): + actions = discover(InventoryAction) + filtered_actions = [] + for action in actions: + if action is not InventoryAction: + filtered_actions.append(action) + + return filtered_actions + + +def register_inventory_action(plugin): + return register_plugin(InventoryAction, plugin) + + +def deregister_inventory_action(plugin): + deregister_plugin(InventoryAction, plugin) + + +def register_inventory_action_path(path): + return register_plugin_path(InventoryAction, path) + + +def deregister_inventory_action_path(path): + return deregister_plugin_path(InventoryAction, path) diff --git a/openpype/pipeline/constants.py b/openpype/pipeline/constants.py new file mode 100644 index 0000000000..e6496cbf95 --- /dev/null +++ b/openpype/pipeline/constants.py @@ -0,0 +1,19 @@ +# Metadata ID of loaded container into scene +AVALON_CONTAINER_ID = "pyblish.avalon.container" + +# TODO get extensions from host implementations +HOST_WORKFILE_EXTENSIONS = { + "blender": [".blend"], + "celaction": [".scn"], + "tvpaint": [".tvpp"], + "fusion": [".comp"], + "harmony": [".zip"], + "houdini": [".hip", ".hiplc", ".hipnc"], + "maya": [".ma", ".mb"], + "nuke": [".nk"], + "hiero": [".hrox"], + "photoshop": [".psd", ".psb"], + "premiere": [".prproj"], + "resolve": [".drp"], + "aftereffects": [".aep"] +} diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py new file mode 100644 index 0000000000..06bd639776 --- /dev/null +++ 
b/openpype/pipeline/context_tools.py
@@ -0,0 +1,334 @@
+"""Core pipeline functionality"""
+
+import os
+import sys
+import json
+import types
+import logging
+import inspect
+import platform
+
+import pyblish.api
+from pyblish.lib import MessageHandler
+
+import openpype
+from openpype.modules import load_modules
+from openpype.settings import get_project_settings
+from openpype.lib import (
+    Anatomy,
+    register_event_callback,
+    filter_pyblish_plugins,
+    change_timer_to_current_context,
+)
+
+from . import (
+    legacy_io,
+    register_loader_plugin_path,
+    register_inventory_action_path,
+    register_creator_plugin_path,
+    deregister_loader_plugin_path,
+)
+
+
+_is_installed = False
+_registered_root = {"_": ""}
+_registered_host = {"_": None}
+
+log = logging.getLogger(__name__)
+
+PACKAGE_DIR = os.path.dirname(os.path.abspath(openpype.__file__))
+PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
+
+# Global plugin paths
+PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
+LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
+
+
+def register_root(path):
+    """Register currently active root"""
+    log.info("Registering root: %s" % path)
+    _registered_root["_"] = path
+
+
+def registered_root():
+    """Return currently registered root"""
+    root = _registered_root["_"]
+    if root:
+        return root
+
+    root = legacy_io.Session.get("AVALON_PROJECTS")
+    if root:
+        return os.path.normpath(root)
+    return ""
+
+
+def install_host(host):
+    """Install `host` into the running Python session.
+
+    Args:
+        host (module): A Python module containing the Avalon
+            host interface.
+    """
+    global _is_installed
+
+    _is_installed = True
+
+    legacy_io.install()
+
+    missing = list()
+    for key in ("AVALON_PROJECT", "AVALON_ASSET"):
+        if key not in legacy_io.Session:
+            missing.append(key)
+
+    assert not missing, (
+        "%s missing from environment, %s" % (
+            ", ".join(missing),
+            json.dumps(legacy_io.Session, indent=4, sort_keys=True)
+        ))
+
+    project_name = legacy_io.Session["AVALON_PROJECT"]
+    log.info("Activating %s.."
 % project_name)
+
+    # Optional host install function
+    if hasattr(host, "install"):
+        host.install()
+
+    register_host(host)
+
+    register_event_callback("taskChanged", _on_task_change)
+
+    def modified_emit(obj, record):
+        """Method replacing `emit` in Pyblish's MessageHandler."""
+        record.msg = record.getMessage()
+        obj.records.append(record)
+
+    MessageHandler.emit = modified_emit
+
+    install_openpype_plugins()
+
+
+def install_openpype_plugins(project_name=None):
+    # Make sure modules are loaded
+    load_modules()
+
+    log.info("Registering global plug-ins..")
+    pyblish.api.register_plugin_path(PUBLISH_PATH)
+    pyblish.api.register_discovery_filter(filter_pyblish_plugins)
+    register_loader_plugin_path(LOAD_PATH)
+
+    if project_name is None:
+        project_name = os.environ.get("AVALON_PROJECT")
+
+    # Register studio specific plugins
+    if project_name:
+        anatomy = Anatomy(project_name)
+        anatomy.set_root_environments()
+        register_root(anatomy.roots)
+
+        project_settings = get_project_settings(project_name)
+        platform_name = platform.system().lower()
+        project_plugins = (
+            project_settings
+            .get("global", {})
+            .get("project_plugins", {})
+            .get(platform_name)
+        ) or []
+        for path in project_plugins:
+            try:
+                path = str(path.format(**os.environ))
+            except KeyError:
+                pass
+
+            if not path or not os.path.exists(path):
+                continue
+
+            pyblish.api.register_plugin_path(path)
+            register_loader_plugin_path(path)
+            register_creator_plugin_path(path)
+            register_inventory_action_path(path)
+
+
+def _on_task_change():
+    change_timer_to_current_context()
+
+
+def uninstall_host():
+    """Undo all of what `install()` did"""
+    host = registered_host()
+
+    try:
+        host.uninstall()
+    except AttributeError:
+        pass
+
+    log.info("Deregistering global plug-ins..")
+    pyblish.api.deregister_plugin_path(PUBLISH_PATH)
+    pyblish.api.deregister_discovery_filter(filter_pyblish_plugins)
+    deregister_loader_plugin_path(LOAD_PATH)
+    log.info("Global plug-ins unregistered")
+
+    deregister_host()
+
+    legacy_io.uninstall()
+
+    log.info("Successfully uninstalled Avalon!")
+
+
+def is_installed():
+    """Return state of installation
+
+    Returns:
+        True if installed, False otherwise
+
+    """
+
+    return _is_installed
+
+
+def register_host(host):
+    """Register a new host for the current process
+
+    Arguments:
+        host (ModuleType): A module implementing the
+            Host API interface. See the Host API
+            documentation for details on what is
+            required, or browse the source code.
+ + """ + signatures = { + "ls": [] + } + + _validate_signature(host, signatures) + _registered_host["_"] = host + + +def _validate_signature(module, signatures): + # Required signatures for each member + + missing = list() + invalid = list() + success = True + + for member in signatures: + if not hasattr(module, member): + missing.append(member) + success = False + + else: + attr = getattr(module, member) + if sys.version_info.major >= 3: + signature = inspect.getfullargspec(attr)[0] + else: + signature = inspect.getargspec(attr)[0] + required_signature = signatures[member] + + assert isinstance(signature, list) + assert isinstance(required_signature, list) + + if not all(member in signature + for member in required_signature): + invalid.append({ + "member": member, + "signature": ", ".join(signature), + "required": ", ".join(required_signature) + }) + success = False + + if not success: + report = list() + + if missing: + report.append( + "Incomplete interface for module: '%s'\n" + "Missing: %s" % (module, ", ".join( + "'%s'" % member for member in missing)) + ) + + if invalid: + report.append( + "'%s': One or more members were found, but didn't " + "have the right argument signature." % module.__name__ + ) + + for member in invalid: + report.append( + " Found: {member}({signature})".format(**member) + ) + report.append( + " Expected: {member}({required})".format(**member) + ) + + raise ValueError("\n".join(report)) + + +def registered_host(): + """Return currently registered host""" + return _registered_host["_"] + + +def deregister_host(): + _registered_host["_"] = default_host() + + +def default_host(): + """A default host, in place of anything better + + This may be considered as reference for the + interface a host must implement. It also ensures + that the system runs, even when nothing is there + to support it. 
+ + """ + + host = types.ModuleType("defaultHost") + + def ls(): + return list() + + host.__dict__.update({ + "ls": ls + }) + + return host + + +def debug_host(): + """A debug host, useful to debugging features that depend on a host""" + + host = types.ModuleType("debugHost") + + def ls(): + containers = [ + { + "representation": "ee-ft-a-uuid1", + "schema": "openpype:container-1.0", + "name": "Bruce01", + "objectName": "Bruce01_node", + "namespace": "_bruce01_", + "version": 3, + }, + { + "representation": "aa-bc-s-uuid2", + "schema": "openpype:container-1.0", + "name": "Bruce02", + "objectName": "Bruce01_node", + "namespace": "_bruce02_", + "version": 2, + } + ] + + for container in containers: + yield container + + host.__dict__.update({ + "ls": ls, + "open_file": lambda fname: None, + "save_file": lambda fname: None, + "current_file": lambda: os.path.expanduser("~/temp.txt"), + "has_unsaved_changes": lambda: False, + "work_root": lambda: os.path.expanduser("~/temp"), + "file_extensions": lambda: ["txt"], + }) + + return host diff --git a/openpype/pipeline/create/__init__.py b/openpype/pipeline/create/__init__.py index 9571f56b8f..1beeb4267b 100644 --- a/openpype/pipeline/create/__init__.py +++ b/openpype/pipeline/create/__init__.py @@ -6,7 +6,14 @@ from .creator_plugins import ( BaseCreator, Creator, - AutoCreator + AutoCreator, + + discover_creator_plugins, + discover_legacy_creator_plugins, + register_creator_plugin, + deregister_creator_plugin, + register_creator_plugin_path, + deregister_creator_plugin_path, ) from .context import ( @@ -29,6 +36,13 @@ __all__ = ( "Creator", "AutoCreator", + "discover_creator_plugins", + "discover_legacy_creator_plugins", + "register_creator_plugin", + "deregister_creator_plugin", + "register_creator_plugin_path", + "deregister_creator_plugin_path", + "CreatedInstance", "CreateContext", diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index c2757a4502..6f862e0588 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -6,11 +6,16 @@ import inspect from uuid import uuid4 from contextlib import contextmanager -from ..lib import UnknownDef +from openpype.pipeline import legacy_io +from openpype.pipeline.mongodb import ( + AvalonMongoDB, + session_data_from_environment, +) + from .creator_plugins import ( - BaseCreator, Creator, - AutoCreator + AutoCreator, + discover_creator_plugins, ) from openpype.api import ( @@ -18,6 +23,8 @@ from openpype.api import ( get_project_settings ) +UpdateData = collections.namedtuple("UpdateData", ["instance", "changes"]) + class ImmutableKeyError(TypeError): """Accessed key is immutable so does not allow changes or removements.""" @@ -87,6 +94,8 @@ class AttributeValues: origin_data(dict): Values loaded from host before conversion. """ def __init__(self, attr_defs, values, origin_data=None): + from openpype.lib.attribute_definitions import UnknownDef + if origin_data is None: origin_data = copy.deepcopy(values) self._origin_data = origin_data @@ -352,7 +361,7 @@ class CreatedInstance: already existing instance. creator(BaseCreator): Creator responsible for instance. host(ModuleType): Host implementation loaded with - `avalon.api.registered_host`. + `openpype.pipeline.registered_host`. new(bool): Is instance new. 
""" # Keys that can't be changed or removed from data after loading using @@ -655,10 +664,8 @@ class CreateContext: ): # Create conncetion if is not passed if dbcon is None: - import avalon.api - - session = avalon.api.session_data_from_environment(True) - dbcon = avalon.api.AvalonMongoDB(session) + session = session_data_from_environment(True) + dbcon = AvalonMongoDB(session) dbcon.install() self.dbcon = dbcon @@ -766,12 +773,11 @@ class CreateContext: """Give ability to reset avalon context. Reset is based on optional host implementation of `get_current_context` - function or using `avalon.api.Session`. + function or using `legacy_io.Session`. Some hosts have ability to change context file without using workfiles tool but that change is not propagated to """ - import avalon.api project_name = asset_name = task_name = None if hasattr(self.host, "get_current_context"): @@ -782,11 +788,11 @@ class CreateContext: task_name = host_context.get("task_name") if not project_name: - project_name = avalon.api.Session.get("AVALON_PROJECT") + project_name = legacy_io.Session.get("AVALON_PROJECT") if not asset_name: - asset_name = avalon.api.Session.get("AVALON_ASSET") + asset_name = legacy_io.Session.get("AVALON_ASSET") if not task_name: - task_name = avalon.api.Session.get("AVALON_TASK") + task_name = legacy_io.Session.get("AVALON_TASK") if project_name: self.dbcon.Session["AVALON_PROJECT"] = project_name @@ -801,7 +807,6 @@ class CreateContext: Reloads creators from preregistered paths and can load publish plugins if it's enabled on context. """ - import avalon.api import pyblish.logic from openpype.pipeline import OpenPypePyblishPluginMixin @@ -842,7 +847,7 @@ class CreateContext: creators = {} autocreators = {} manual_creators = {} - for creator_class in avalon.api.discover(BaseCreator): + for creator_class in discover_creator_plugins(): if inspect.isabstract(creator_class): self.log.info( "Skipping abstract Creator {}".format(str(creator_class)) @@ -1080,7 +1085,7 @@ class CreateContext: for instance in cretor_instances: instance_changes = instance.changes() if instance_changes: - update_list.append((instance, instance_changes)) + update_list.append(UpdateData(instance, instance_changes)) creator = self.creators[identifier] if update_list: diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index 1ac2c420a2..cbe19da064 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -8,7 +8,19 @@ from abc import ( ) import six -from openpype.lib import get_subset_name_with_asset_doc +from openpype.lib import ( + get_subset_name_with_asset_doc, + set_plugin_attributes_from_settings, +) +from openpype.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) + +from .legacy_create import LegacyCreator class CreatorError(Exception): @@ -46,6 +58,11 @@ class BaseCreator: # - may not be used if `get_icon` is reimplemented icon = None + # Instance attribute definitions that can be changed per instance + # - returns list of attribute definitions from + # `openpype.pipeline.attribute_definitions` + instance_attr_defs = [] + def __init__( self, create_context, system_settings, project_settings, headless=False ): @@ -56,10 +73,13 @@ class BaseCreator: # - we may use UI inside processing this attribute should be checked self.headless = headless - @abstractproperty + @property def identifier(self): - """Identifier of creator (must be 
unique).""" - pass + """Identifier of creator (must be unique). + + Default implementation returns plugin's family. + """ + return self.family @abstractproperty def family(self): @@ -69,7 +89,9 @@ class BaseCreator: @property def log(self): if self._log is None: - self._log = logging.getLogger(self.__class__.__name__) + from openpype.api import Logger + + self._log = Logger.get_logger(self.__class__.__name__) return self._log def _add_instance_to_context(self, instance): @@ -90,11 +112,39 @@ class BaseCreator: pass @abstractmethod - def collect_instances(self, attr_plugins=None): + def collect_instances(self): + """Collect existing instances related to this creator plugin. + + The implementation differs on host abilities. The creator has to + collect metadata about instance and create 'CreatedInstance' object + which should be added to 'CreateContext'. + + Example: + ```python + def collect_instances(self): + # Getting existing instances is different per host implementation + for instance_data in pipeline.list_instances(): + # Process only instances that were created by this creator + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + # Create instance object from existing data + instance = CreatedInstance.from_existing( + instance_data, self + ) + # Add instance to create context + self._add_instance_to_context(instance) + ``` + """ pass @abstractmethod def update_instances(self, update_list): + """Store changes of existing instances so they can be recollected. + + Args: + update_list(list): Gets list of tuples. Each item + contain changed instance and it's changes. + """ pass @abstractmethod @@ -178,7 +228,7 @@ class BaseCreator: list: Attribute definitions that can be tweaked for created instance. """ - return [] + return self.instance_attr_defs class Creator(BaseCreator): @@ -191,6 +241,9 @@ class Creator(BaseCreator): # - default_variants may not be used if `get_default_variants` is overriden default_variants = [] + # Default variant used in 'get_default_variant' + default_variant = None + # Short description of family # - may not be used if `get_description` is overriden description = None @@ -204,6 +257,10 @@ class Creator(BaseCreator): # e.g. for buld creators create_allow_context_change = True + # Precreate attribute definitions showed before creation + # - similar to instance attribute definitions + pre_create_attr_defs = [] + @abstractmethod def create(self, subset_name, instance_data, pre_create_data): """Create new instance and store it. @@ -263,7 +320,7 @@ class Creator(BaseCreator): `get_default_variants` should be used. """ - return None + return self.default_variant def get_pre_create_attr_defs(self): """Plugin attribute definitions needed for creation. @@ -276,7 +333,7 @@ class Creator(BaseCreator): list: Attribute definitions that can be tweaked for created instance. """ - return [] + return self.pre_create_attr_defs class AutoCreator(BaseCreator): @@ -284,6 +341,43 @@ class AutoCreator(BaseCreator): Can be used e.g. for `workfile`. 
""" + def remove_instances(self, instances): """Skip removement.""" pass + + +def discover_creator_plugins(): + return discover(BaseCreator) + + +def discover_legacy_creator_plugins(): + plugins = discover(LegacyCreator) + set_plugin_attributes_from_settings(plugins, LegacyCreator) + return plugins + + +def register_creator_plugin(plugin): + if issubclass(plugin, BaseCreator): + register_plugin(BaseCreator, plugin) + + elif issubclass(plugin, LegacyCreator): + register_plugin(LegacyCreator, plugin) + + +def deregister_creator_plugin(plugin): + if issubclass(plugin, BaseCreator): + deregister_plugin(BaseCreator, plugin) + + elif issubclass(plugin, LegacyCreator): + deregister_plugin(LegacyCreator, plugin) + + +def register_creator_plugin_path(path): + register_plugin_path(BaseCreator, path) + register_plugin_path(LegacyCreator, path) + + +def deregister_creator_plugin_path(path): + deregister_plugin_path(BaseCreator, path) + deregister_plugin_path(LegacyCreator, path) diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py index cf6629047e..46e0e3d663 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/openpype/pipeline/create/legacy_create.py @@ -142,7 +142,8 @@ def legacy_create(Creator, name, asset, options=None, data=None): Name of instance """ - from avalon.api import registered_host + from openpype.pipeline import registered_host + host = registered_host() plugin = Creator(name, asset, options, data) diff --git a/openpype/pipeline/farm/__init__.py b/openpype/pipeline/farm/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/pipeline/farm/patterning.py b/openpype/pipeline/farm/patterning.py new file mode 100644 index 0000000000..1e4b5bf37d --- /dev/null +++ b/openpype/pipeline/farm/patterning.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +import re + + +def match_aov_pattern(host_name, aov_patterns, render_file_name): + """Matching against a `AOV` pattern in the render files. + + In order to match the AOV name we must compare + against the render filename string that we are + grabbing the render filename string from the collection + that we have grabbed from `exp_files`. + + Args: + app (str): Host name. + aov_patterns (dict): AOV patterns from AOV filters. + render_file_name (str): Incoming file name to match against. + + Returns: + bool: Review state for rendered file (render_file_name). + """ + aov_pattern = aov_patterns.get(host_name, []) + if not aov_pattern: + return False + return any(re.match(p, render_file_name) for p in aov_pattern) diff --git a/openpype/pipeline/legacy_io.py b/openpype/pipeline/legacy_io.py new file mode 100644 index 0000000000..c41406b208 --- /dev/null +++ b/openpype/pipeline/legacy_io.py @@ -0,0 +1,146 @@ +"""Wrapper around interactions with the database""" + +import sys +import logging +import functools + +from . 
import schema +from .mongodb import AvalonMongoDB, session_data_from_environment + +module = sys.modules[__name__] + +Session = {} +_is_installed = False +_connection_object = AvalonMongoDB(Session) +_mongo_client = None +_database = database = None + +log = logging.getLogger(__name__) + + +def install(): + """Establish a persistent connection to the database""" + if module._is_installed: + return + + session = session_data_from_environment(context_keys=True) + + session["schema"] = "openpype:session-2.0" + try: + schema.validate(session) + except schema.ValidationError as e: + # TODO(marcus): Make this mandatory + log.warning(e) + + _connection_object.Session.update(session) + _connection_object.install() + + module._mongo_client = _connection_object.mongo_client + module._database = module.database = _connection_object.database + + module._is_installed = True + + +def uninstall(): + """Close any connection to the database""" + module._mongo_client = None + module._database = module.database = None + module._is_installed = False + try: + module._connection_object.uninstall() + except AttributeError: + pass + + +def requires_install(func): + @functools.wraps(func) + def decorated(*args, **kwargs): + if not module._is_installed: + install() + return func(*args, **kwargs) + return decorated + + +@requires_install +def projects(*args, **kwargs): + return _connection_object.projects(*args, **kwargs) + + +@requires_install +def insert_one(doc, *args, **kwargs): + return _connection_object.insert_one(doc, *args, **kwargs) + + +@requires_install +def insert_many(docs, *args, **kwargs): + return _connection_object.insert_many(docs, *args, **kwargs) + + +@requires_install +def update_one(*args, **kwargs): + return _connection_object.update_one(*args, **kwargs) + + +@requires_install +def update_many(*args, **kwargs): + return _connection_object.update_many(*args, **kwargs) + + +@requires_install +def replace_one(*args, **kwargs): + return _connection_object.replace_one(*args, **kwargs) + + +@requires_install +def replace_many(*args, **kwargs): + return _connection_object.replace_many(*args, **kwargs) + + +@requires_install +def delete_one(*args, **kwargs): + return _connection_object.delete_one(*args, **kwargs) + + +@requires_install +def delete_many(*args, **kwargs): + return _connection_object.delete_many(*args, **kwargs) + + +@requires_install +def find(*args, **kwargs): + return _connection_object.find(*args, **kwargs) + + +@requires_install +def find_one(*args, **kwargs): + return _connection_object.find_one(*args, **kwargs) + + +@requires_install +def distinct(*args, **kwargs): + return _connection_object.distinct(*args, **kwargs) + + +@requires_install +def aggregate(*args, **kwargs): + return _connection_object.aggregate(*args, **kwargs) + + +@requires_install +def save(*args, **kwargs): + return _connection_object.save(*args, **kwargs) + + +@requires_install +def drop(*args, **kwargs): + return _connection_object.drop(*args, **kwargs) + + +@requires_install +def parenthood(*args, **kwargs): + return _connection_object.parenthood(*args, **kwargs) + + +@requires_install +def bulk_write(*args, **kwargs): + return _connection_object.bulk_write(*args, **kwargs) diff --git a/openpype/pipeline/lib/__init__.py b/openpype/pipeline/lib/__init__.py deleted file mode 100644 index f762c4205d..0000000000 --- a/openpype/pipeline/lib/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -from .attribute_definitions import ( - AbtractAttrDef, - - UIDef, - UISeparatorDef, - UILabelDef, - - UnknownDef, - NumberDef, - 
TextDef, - EnumDef, - BoolDef, - FileDef, -) - - -__all__ = ( - "AbtractAttrDef", - - "UIDef", - "UISeparatorDef", - "UILabelDef", - - "UnknownDef", - "NumberDef", - "TextDef", - "EnumDef", - "BoolDef", - "FileDef", -) diff --git a/openpype/pipeline/load/plugins.py b/openpype/pipeline/load/plugins.py index 601ad3b258..a30a2188a4 100644 --- a/openpype/pipeline/load/plugins.py +++ b/openpype/pipeline/load/plugins.py @@ -1,5 +1,13 @@ import logging +from openpype.lib import set_plugin_attributes_from_settings +from openpype.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) from .utils import get_representation_path_from_context @@ -33,7 +41,8 @@ class LoaderPlugin(list): def get_representations(cls): return cls.representations - def filepath_from_context(self, context): + @classmethod + def filepath_from_context(cls, context): return get_representation_path_from_context(context) def load(self, context, name=None, namespace=None, options=None): @@ -102,29 +111,22 @@ class SubsetLoaderPlugin(LoaderPlugin): def discover_loader_plugins(): - import avalon.api - - return avalon.api.discover(LoaderPlugin) + plugins = discover(LoaderPlugin) + set_plugin_attributes_from_settings(plugins, LoaderPlugin) + return plugins def register_loader_plugin(plugin): - import avalon.api - - return avalon.api.register_plugin(LoaderPlugin, plugin) - - -def deregister_loader_plugin_path(path): - import avalon.api - - avalon.api.deregister_plugin_path(LoaderPlugin, path) - - -def register_loader_plugin_path(path): - import avalon.api - - return avalon.api.register_plugin_path(LoaderPlugin, path) + return register_plugin(LoaderPlugin, plugin) def deregister_loader_plugin(plugin): - import avalon.api - avalon.api.deregister_plugin(LoaderPlugin, plugin) + deregister_plugin(LoaderPlugin, plugin) + + +def deregister_loader_plugin_path(path): + deregister_plugin_path(LoaderPlugin, path) + + +def register_loader_plugin_path(path): + return register_plugin_path(LoaderPlugin, path) diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py index 6d32c11cd7..99e5d11f82 100644 --- a/openpype/pipeline/load/utils.py +++ b/openpype/pipeline/load/utils.py @@ -7,11 +7,13 @@ import inspect import numbers import six - -from avalon import io, schema -from avalon.api import Session, registered_root +from bson.objectid import ObjectId from openpype.lib import Anatomy +from openpype.pipeline import ( + schema, + legacy_io, +) log = logging.getLogger(__name__) @@ -58,7 +60,7 @@ def get_repres_contexts(representation_ids, dbcon=None): """ if not dbcon: - dbcon = io + dbcon = legacy_io contexts = {} if not representation_ids: @@ -67,7 +69,7 @@ def get_repres_contexts(representation_ids, dbcon=None): _representation_ids = [] for repre_id in representation_ids: if isinstance(repre_id, six.string_types): - repre_id = io.ObjectId(repre_id) + repre_id = ObjectId(repre_id) _representation_ids.append(repre_id) repre_docs = dbcon.find({ @@ -165,7 +167,7 @@ def get_subset_contexts(subset_ids, dbcon=None): dict: The full representation context by representation id. 
""" if not dbcon: - dbcon = io + dbcon = legacy_io contexts = {} if not subset_ids: @@ -174,7 +176,7 @@ def get_subset_contexts(subset_ids, dbcon=None): _subset_ids = set() for subset_id in subset_ids: if isinstance(subset_id, six.string_types): - subset_id = io.ObjectId(subset_id) + subset_id = ObjectId(subset_id) _subset_ids.add(subset_id) subset_docs = dbcon.find({ @@ -217,7 +219,7 @@ def get_representation_context(representation): """Return parenthood context for representation. Args: - representation (str or io.ObjectId or dict): The representation id + representation (str or ObjectId or dict): The representation id or full representation as returned by the database. Returns: @@ -227,11 +229,11 @@ def get_representation_context(representation): assert representation is not None, "This is a bug" - if isinstance(representation, (six.string_types, io.ObjectId)): - representation = io.find_one( - {"_id": io.ObjectId(str(representation))}) + if isinstance(representation, (six.string_types, ObjectId)): + representation = legacy_io.find_one( + {"_id": ObjectId(str(representation))}) - version, subset, asset, project = io.parenthood(representation) + version, subset, asset, project = legacy_io.parenthood(representation) assert all([representation, version, subset, asset, project]), ( "This is a bug" @@ -340,7 +342,7 @@ def load_container( Args: Loader (Loader): The loader class to trigger. - representation (str or io.ObjectId or dict): The representation id + representation (str or ObjectId or dict): The representation id or full representation as returned by the database. namespace (str, Optional): The namespace to assign. Defaults to None. name (str, Optional): The name to assign. Defaults to subset name. @@ -403,17 +405,17 @@ def update_container(container, version=-1): """Update a container""" # Compute the different version from 'representation' - current_representation = io.find_one({ - "_id": io.ObjectId(container["representation"]) + current_representation = legacy_io.find_one({ + "_id": ObjectId(container["representation"]) }) assert current_representation is not None, "This is a bug" - current_version, subset, asset, project = io.parenthood( + current_version, subset, asset, project = legacy_io.parenthood( current_representation) if version == -1: - new_version = io.find_one({ + new_version = legacy_io.find_one({ "type": "version", "parent": subset["_id"] }, sort=[("name", -1)]) @@ -429,11 +431,11 @@ def update_container(container, version=-1): "type": "version", "name": version } - new_version = io.find_one(version_query) + new_version = legacy_io.find_one(version_query) assert new_version is not None, "This is a bug" - new_representation = io.find_one({ + new_representation = legacy_io.find_one({ "type": "representation", "parent": new_version["_id"], "name": current_representation["name"] @@ -480,7 +482,7 @@ def switch_container(container, representation, loader_plugin=None): )) # Get the new representation to switch to - new_representation = io.find_one({ + new_representation = legacy_io.find_one({ "type": "representation", "_id": representation["_id"], }) @@ -499,7 +501,7 @@ def get_representation_path_from_context(context): representation = context['representation'] project_doc = context.get("project") root = None - session_project = Session.get("AVALON_PROJECT") + session_project = legacy_io.Session.get("AVALON_PROJECT") if project_doc and project_doc["name"] != session_project: anatomy = Anatomy(project_doc["name"]) root = anatomy.roots @@ -528,9 +530,11 @@ def 
get_representation_path(representation, root=None, dbcon=None): from openpype.lib import StringTemplate, TemplateUnsolved if dbcon is None: - dbcon = io + dbcon = legacy_io if root is None: + from openpype.pipeline import registered_root + root = registered_root() def path_from_represenation(): @@ -593,7 +597,6 @@ def get_representation_path(representation, root=None, dbcon=None): "code": project.get("data", {}).get("code") }, "asset": asset["name"], - "silo": asset.get("silo"), "hierarchy": hierarchy, "subset": subset["name"], "version": version_["name"], diff --git a/openpype/pipeline/mongodb.py b/openpype/pipeline/mongodb.py new file mode 100644 index 0000000000..565e26b966 --- /dev/null +++ b/openpype/pipeline/mongodb.py @@ -0,0 +1,272 @@ +import os +import time +import functools +import logging +import pymongo +from uuid import uuid4 + +from . import schema + + +def requires_install(func): + func_obj = getattr(func, "__self__", None) + + @functools.wraps(func) + def decorated(*args, **kwargs): + if func_obj is not None: + _obj = func_obj + else: + _obj = args[0] + if not _obj.is_installed(): + if _obj.auto_install: + _obj.install() + else: + raise IOError( + "'{}.{}()' requires to run install() first".format( + _obj.__class__.__name__, func.__name__ + ) + ) + return func(*args, **kwargs) + return decorated + + +def auto_reconnect(func): + """Handling auto reconnect in 3 retry times""" + retry_times = 3 + reconnect_msg = "Reconnecting..." + func_obj = getattr(func, "__self__", None) + + @functools.wraps(func) + def decorated(*args, **kwargs): + if func_obj is not None: + _obj = func_obj + else: + _obj = args[0] + + for retry in range(1, retry_times + 1): + try: + return func(*args, **kwargs) + except pymongo.errors.AutoReconnect: + if hasattr(_obj, "log"): + _obj.log.warning(reconnect_msg) + else: + print(reconnect_msg) + + if retry >= retry_times: + raise + time.sleep(0.1) + return decorated + + +SESSION_CONTEXT_KEYS = ( + # Root directory of projects on disk + "AVALON_PROJECTS", + # Name of current Project + "AVALON_PROJECT", + # Name of current Asset + "AVALON_ASSET", + # Name of current task + "AVALON_TASK", + # Name of current app + "AVALON_APP", + # Path to working directory + "AVALON_WORKDIR", + # Optional path to scenes directory (see Work Files API) + "AVALON_SCENEDIR" +) + + +def session_data_from_environment(context_keys=False): + session_data = {} + if context_keys: + for key in SESSION_CONTEXT_KEYS: + value = os.environ.get(key) + session_data[key] = value or "" + else: + for key in SESSION_CONTEXT_KEYS: + session_data[key] = None + + for key, default_value in ( + # Name of Avalon in graphical user interfaces + # Use this to customise the visual appearance of Avalon + # to better integrate with your surrounding pipeline + ("AVALON_LABEL", "Avalon"), + + # Used during any connections to the outside world + ("AVALON_TIMEOUT", "1000"), + + # Name of database used in MongoDB + ("AVALON_DB", "avalon"), + ): + value = os.environ.get(key) or default_value + if value is not None: + session_data[key] = value + + return session_data + + +class AvalonMongoDB: + def __init__(self, session=None, auto_install=True): + self._id = uuid4() + self._database = None + self.auto_install = auto_install + self._installed = False + + if session is None: + session = session_data_from_environment(context_keys=False) + + self.Session = session + + self.log = logging.getLogger(self.__class__.__name__) + + def __getattr__(self, attr_name): + attr = None + if not self.is_installed() and 
self.auto_install: + self.install() + + if not self.is_installed(): + raise IOError( + "'{}.{}()' requires to run install() first".format( + self.__class__.__name__, attr_name + ) + ) + + project_name = self.active_project() + if project_name is None: + raise ValueError( + "Value of 'Session[\"AVALON_PROJECT\"]' is not set." + ) + + collection = self._database[project_name] + not_set = object() + attr = getattr(collection, attr_name, not_set) + + if attr is not_set: + # Raise attribute error + raise AttributeError( + "{} has no attribute '{}'.".format( + collection.__class__.__name__, attr_name + ) + ) + + # Decorate function + if callable(attr): + attr = auto_reconnect(attr) + return attr + + @property + def mongo_client(self): + from openpype.lib import OpenPypeMongoConnection + + return OpenPypeMongoConnection.get_mongo_client() + + @property + def id(self): + return self._id + + @property + def database(self): + if not self.is_installed() and self.auto_install: + self.install() + + if self.is_installed(): + return self._database + + raise IOError( + "'{}.database' requires to run install() first".format( + self.__class__.__name__ + ) + ) + + def is_installed(self): + return self._installed + + def install(self): + """Establish a persistent connection to the database""" + if self.is_installed(): + return + + self._installed = True + self._database = self.mongo_client[str(os.environ["AVALON_DB"])] + + def uninstall(self): + """Close any connection to the database""" + self._installed = False + self._database = None + + @requires_install + def active_project(self): + """Return the name of the active project""" + return self.Session["AVALON_PROJECT"] + + @requires_install + @auto_reconnect + def projects(self, projection=None, only_active=True): + """Iter project documents + + Args: + projection (optional): MongoDB query projection operation + only_active (optional): Skip inactive projects, default True. 
+ + Returns: + Project documents iterator + + """ + query_filter = {"type": "project"} + if only_active: + query_filter.update({ + "$or": [ + {"data.active": {"$exists": 0}}, + {"data.active": True}, + ] + }) + + for project_name in self._database.collection_names(): + if project_name in ("system.indexes",): + continue + + # Each collection will have exactly one project document + + doc = self._database[project_name].find_one( + query_filter, projection=projection + ) + if doc is not None: + yield doc + + @auto_reconnect + def insert_one(self, item, *args, **kwargs): + assert isinstance(item, dict), "item must be of type <dict>" + schema.validate(item) + return self._database[self.active_project()].insert_one( + item, *args, **kwargs + ) + + @auto_reconnect + def insert_many(self, items, *args, **kwargs): + # check if all items are valid + assert isinstance(items, list), "`items` must be of type <list>" + for item in items: + assert isinstance(item, dict), "`item` must be of type <dict>" + schema.validate(item) + + return self._database[self.active_project()].insert_many( + items, *args, **kwargs + ) + + def parenthood(self, document): + assert document is not None, "This is a bug" + + parents = list() + + while document.get("parent") is not None: + document = self.find_one({"_id": document["parent"]}) + if document is None: + break + + if document.get("type") == "hero_version": + _document = self.find_one({"_id": document["version_id"]}) + document["data"] = _document["data"] + + parents.append(document) + + return parents diff --git a/openpype/pipeline/plugin_discover.py b/openpype/pipeline/plugin_discover.py new file mode 100644 index 0000000000..fb860fe5f2 --- /dev/null +++ b/openpype/pipeline/plugin_discover.py @@ -0,0 +1,298 @@ +import os +import inspect +import traceback + +from openpype.api import Logger +from openpype.lib.python_module_tools import ( + modules_from_path, + classes_from_module, +) + +log = Logger.get_logger(__name__) + + +class DiscoverResult: + """Result of plug-in discovery for a single superclass type. + + Stores discovered, duplicated, ignored and abstract plugins and file + paths whose modules crashed on import.
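As a usage sketch, the module-level `legacy_io` wrappers can be called like plain functions; the connection is installed on first use through `requires_install`. This assumes a configured session with `AVALON_PROJECT` set, and the ObjectId value is a placeholder:

```python
from bson.objectid import ObjectId

from openpype.pipeline import legacy_io

# Find a representation document in the active project (placeholder id)
repre_doc = legacy_io.find_one({"_id": ObjectId("0" * 24)})

# Walk up the parent chain: version -> subset -> asset -> project
version, subset, asset, project = legacy_io.parenthood(repre_doc)
```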
+ """ + + def __init__(self, superclass): + self.superclass = superclass + self.plugins = [] + self.crashed_file_paths = {} + self.duplicated_plugins = [] + self.abstract_plugins = [] + self.ignored_plugins = set() + # Store loaded modules to keep them in memory + self._modules = set() + + def __iter__(self): + for plugin in self.plugins: + yield plugin + + def __getitem__(self, item): + return self.plugins[item] + + def __setitem__(self, item, value): + self.plugins[item] = value + + def add_module(self, module): + """Add dynamically loaded python module to keep it in memory.""" + self._modules.add(module) + + def get_report(self, only_errors=True, exc_info=True, full_report=False): + lines = [] + if not only_errors: + # Successfully discovered plugins + if self.plugins or full_report: + lines.append( + "*** Discovered {} plugins".format(len(self.plugins)) + ) + for cls in self.plugins: + lines.append("- {}".format(cls.__class__.__name__)) + + # Plugin that were defined to be ignored + if self.ignored_plugins or full_report: + lines.append("*** Ignored plugins {}".format(len( + self.ignored_plugins + ))) + for cls in self.ignored_plugins: + lines.append("- {}".format(cls.__class__.__name__)) + + # Abstract classes + if self.abstract_plugins or full_report: + lines.append("*** Discovered {} abstract plugins".format(len( + self.abstract_plugins + ))) + for cls in self.abstract_plugins: + lines.append("- {}".format(cls.__class__.__name__)) + + # Abstract classes + if self.duplicated_plugins or full_report: + lines.append("*** There were {} duplicated plugins".format(len( + self.duplicated_plugins + ))) + for cls in self.duplicated_plugins: + lines.append("- {}".format(cls.__class__.__name__)) + + if self.crashed_file_paths or full_report: + lines.append("*** Failed to load {} files".format(len( + self.crashed_file_paths + ))) + for path, exc_info_args in self.crashed_file_paths.items(): + lines.append("- {}".format(path)) + if exc_info: + lines.append(10 * "*") + lines.extend(traceback.format_exception(*exc_info_args)) + lines.append(10 * "*") + + return "\n".join(lines) + + def log_report(self, only_errors=True, exc_info=True): + report = self.get_report(only_errors, exc_info) + if report: + log.info(report) + + +class PluginDiscoverContext(object): + """Store and discover registered types nad registered paths to types. + + Keeps in memory all registered types and their paths. Paths are dynamically + loaded on discover so different discover calls won't return the same + class objects even if were loaded from same file. + """ + + def __init__(self): + self._registered_plugins = {} + self._registered_plugin_paths = {} + self._last_discovered_plugins = {} + # Store the last result to memory + self._last_discovered_results = {} + + def get_last_discovered_plugins(self, superclass): + """Access last discovered plugin by a subperclass. + + Returns: + None: When superclass was not discovered yet. + list: Lastly discovered plugins of the superclass. + """ + + return self._last_discovered_plugins.get(superclass) + + def discover( + self, + superclass, + allow_duplicates=True, + ignore_classes=None, + return_report=False + ): + """Find and return subclasses of `superclass` + + Args: + superclass (type): Class which determines discovered subclasses. + allow_duplicates (bool): Validate class name duplications. + ignore_classes (list): List of classes that will be ignored + and not added to result. 
+ + Returns: + DiscoverResult: Object holding successfully discovered plugins, + ignored plugins, plugins with missing abstract implementation + and duplicated plugins. + """ + + if not ignore_classes: + ignore_classes = [] + + result = DiscoverResult(superclass) + plugin_names = set() + registered_classes = self._registered_plugins.get(superclass) or [] + registered_paths = self._registered_plugin_paths.get(superclass) or [] + for cls in registered_classes: + if cls is superclass or cls in ignore_classes: + result.ignored_plugins.add(cls) + continue + + if inspect.isabstract(cls): + result.abstract_plugins.append(cls) + continue + + class_name = cls.__name__ + if class_name in plugin_names: + result.duplicated_plugins.append(cls) + continue + plugin_names.add(class_name) + result.plugins.append(cls) + + # Include plug-ins from registered paths + for path in registered_paths: + modules, crashed = modules_from_path(path) + for item in crashed: + filepath, exc_info = item + result.crashed_file_paths[filepath] = exc_info + + for item in modules: + filepath, module = item + result.add_module(module) + for cls in classes_from_module(superclass, module): + if cls is superclass or cls in ignore_classes: + result.ignored_plugins.add(cls) + continue + + if inspect.isabstract(cls): + result.abstract_plugins.append(cls) + continue + + if not allow_duplicates: + class_name = cls.__name__ + if class_name in plugin_names: + result.duplicated_plugins.append(cls) + continue + plugin_names.add(class_name) + + result.plugins.append(cls) + + # Store last result in memory to keep loaded modules alive + self._last_discovered_results[superclass] = result + self._last_discovered_plugins[superclass] = list( + result.plugins + ) + result.log_report() + if return_report: + return result + return result.plugins + + def register_plugin(self, superclass, cls): + """Register a plug-in class of type `superclass` + + Arguments: + superclass (type): Superclass of plug-in + cls (object): Subclass of `superclass` + """ + + if superclass not in self._registered_plugins: + self._registered_plugins[superclass] = list() + + if cls not in self._registered_plugins[superclass]: + self._registered_plugins[superclass].append(cls) + + def register_plugin_path(self, superclass, path): + """Register a directory of one or more plug-ins + + Arguments: + superclass (type): Superclass of plug-ins to look for during + discovery + path (str): Absolute path to directory in which to discover + plug-ins + """ + + if superclass not in self._registered_plugin_paths: + self._registered_plugin_paths[superclass] = list() + + path = os.path.normpath(path) + if path not in self._registered_plugin_paths[superclass]: + self._registered_plugin_paths[superclass].append(path) + + def registered_plugin_paths(self): + """Return all currently registered plug-in paths""" + # Return shallow copy so the original data can't be changed + return { + superclass: paths[:] + for superclass, paths in self._registered_plugin_paths.items() + } + + def deregister_plugin(self, superclass, plugin): + """Opposite of `register_plugin()`""" + if superclass in self._registered_plugins: + self._registered_plugins[superclass].remove(plugin) + + def deregister_plugin_path(self, superclass, path): + """Opposite of `register_plugin_path()`""" + self._registered_plugin_paths[superclass].remove(path) + + +class _GlobalDiscover: + """Access to the global PluginDiscoverContext object.
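A round-trip sketch using the module-level helpers defined just below; the superclass, plugin class and directory path are all hypothetical:

```python
from openpype.pipeline.plugin_discover import (
    discover,
    register_plugin,
    register_plugin_path,
)


class BaseExamplePlugin:  # hypothetical superclass
    pass


class ExamplePlugin(BaseExamplePlugin):  # hypothetical plugin
    pass


# Register a class directly and a directory containing plugin modules
register_plugin(BaseExamplePlugin, ExamplePlugin)
register_plugin_path(BaseExamplePlugin, "/studio/plugins")  # hypothetical

# Discovery combines directly registered classes with classes freshly
# loaded from the registered paths
plugins = discover(BaseExamplePlugin)
```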
+ + Using singleton object to register/deregister plugins and plugin paths + and then discover them by superclass. + """ + + _context = None + + @classmethod + def get_context(cls): + if cls._context is None: + cls._context = PluginDiscoverContext() + return cls._context + + +def discover(superclass, allow_duplicates=True): + context = _GlobalDiscover.get_context() + return context.discover(superclass, allow_duplicates) + + +def get_last_discovered_plugins(superclass): + context = _GlobalDiscover.get_context() + return context.get_last_discovered_plugins(superclass) + + +def register_plugin(superclass, cls): + context = _GlobalDiscover.get_context() + context.register_plugin(superclass, cls) + + +def register_plugin_path(superclass, path): + context = _GlobalDiscover.get_context() + context.register_plugin_path(superclass, path) + + +def deregister_plugin(superclass, cls): + context = _GlobalDiscover.get_context() + context.deregister_plugin(superclass, cls) + + +def deregister_plugin_path(superclass, path): + context = _GlobalDiscover.get_context() + context.deregister_plugin_path(superclass, path) diff --git a/openpype/pipeline/publish/__init__.py b/openpype/pipeline/publish/__init__.py index c2729a46ce..af5d7c4a91 100644 --- a/openpype/pipeline/publish/__init__.py +++ b/openpype/pipeline/publish/__init__.py @@ -3,6 +3,7 @@ from .publish_plugins import ( PublishXmlValidationError, KnownPublishError, OpenPypePyblishPluginMixin, + OptionalPyblishPluginMixin, ) from .lib import ( @@ -18,6 +19,7 @@ __all__ = ( "PublishXmlValidationError", "KnownPublishError", "OpenPypePyblishPluginMixin", + "OptionalPyblishPluginMixin", "DiscoverResult", "publish_plugins_discover", diff --git a/openpype/pipeline/publish/publish_plugins.py b/openpype/pipeline/publish/publish_plugins.py index bce64ec709..2402a005c2 100644 --- a/openpype/pipeline/publish/publish_plugins.py +++ b/openpype/pipeline/publish/publish_plugins.py @@ -1,3 +1,4 @@ +from openpype.lib import BoolDef from .lib import load_help_content_from_plugin @@ -108,3 +109,64 @@ class OpenPypePyblishPluginMixin: plugin_values[key] ) return attribute_values + + def get_attr_values_from_data(self, data): + """Get attribute values for attribute definitions from data. + + Args: + data(dict): Data from instance or context. + """ + return ( + data + .get("publish_attributes", {}) + .get(self.__class__.__name__, {}) + ) + + +class OptionalPyblishPluginMixin(OpenPypePyblishPluginMixin): + """Mixin for optional plugins. + + Defines an 'active' attribute definition used during publishing and + provides a method which checks if the plugin is active or not. + + ``` + class ValidateScene( + pyblish.api.InstancePlugin, OptionalPyblishPluginMixin + ): + def process(self, instance): + # Skip the instance if it is not active by data on the instance + if not self.is_active(instance.data): + return + ``` + """ + + @classmethod + def get_attribute_defs(cls): + """Attribute definitions based on plugin's optional attribute.""" + + # Empty list if plugin is not optional + if not getattr(cls, "optional", None): + return [] + + # Get active value from class as default value + active = getattr(cls, "active", True) + # Return boolean stored under 'active' key with label of the class name + label = cls.label or cls.__name__ + return [ + BoolDef("active", default=active, label=label) + ] + + def is_active(self, data): + """Check if the plugin is active for an instance/context based on its data. + + Args: + data(dict): Data from instance or context.
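To make the data flow concrete: the publisher UI stores attribute values under 'publish_attributes', keyed by plugin class name, and `is_active` reads them back. A sketch with a hypothetical validator:

```python
import pyblish.api

from openpype.pipeline.publish import OptionalPyblishPluginMixin


class ValidateNames(pyblish.api.InstancePlugin, OptionalPyblishPluginMixin):
    """Hypothetical optional validator."""

    label = "Validate Names"
    optional = True
    active = True

    def process(self, instance):
        # The value checked here is stored by the publisher UI as
        # instance.data["publish_attributes"]["ValidateNames"]["active"]
        if not self.is_active(instance.data):
            return
```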
+ """ + # Skip if is not optional and return True + if not getattr(self, "optional", None): + return True + attr_values = self.get_attr_values_from_data(data) + active = attr_values.get("active") + if active is None: + active = getattr(self, "active", True) + return active diff --git a/openpype/pipeline/schema.py b/openpype/pipeline/schema.py new file mode 100644 index 0000000000..7e96bfe1b1 --- /dev/null +++ b/openpype/pipeline/schema.py @@ -0,0 +1,137 @@ +"""Wrapper around :mod:`jsonschema` + +Schemas are implicitly loaded from the /schema directory of this project. + +Attributes: + _cache: Cache of previously loaded schemas + +Resources: + http://json-schema.org/ + http://json-schema.org/latest/json-schema-core.html + http://spacetelescope.github.io/understanding-json-schema/index.html + +""" + +import os +import re +import json +import logging + +import jsonschema +import six + +log_ = logging.getLogger(__name__) + +ValidationError = jsonschema.ValidationError +SchemaError = jsonschema.SchemaError + +_CACHED = False + + +def get_schema_version(schema_name): + """Extract version form schema name. + + It is expected that schema name contain only major and minor version. + + Expected name should match to: + "{name}:{type}-{major version}.{minor version}" + - `name` - must not contain colon + - `type` - must not contain dash + - major and minor versions must be numbers separated by dot + + Args: + schema_name(str): Name of schema that should be parsed. + + Returns: + tuple: Contain two values major version as first and minor version as + second. When schema does not match parsing regex then `(0, 0)` is + returned. + """ + schema_regex = re.compile(r"[^:]+:[^-]+-(\d.\d)") + groups = schema_regex.findall(schema_name) + if not groups: + return 0, 0 + + maj_version, min_version = groups[0].split(".") + return int(maj_version), int(min_version) + + +def validate(data, schema=None): + """Validate `data` with `schema` + + Arguments: + data (dict): JSON-compatible data + schema (str): DEPRECATED Name of schema. Now included in the data. + + Raises: + ValidationError on invalid schema + + """ + if not _CACHED: + _precache() + + root, schema = data["schema"].rsplit(":", 1) + # assert root in ( + # "mindbender-core", # Backwards compatiblity + # "avalon-core", + # "pype" + # ) + + if isinstance(schema, six.string_types): + schema = _cache[schema + ".json"] + + resolver = jsonschema.RefResolver( + "", + None, + store=_cache, + cache_remote=True + ) + + jsonschema.validate(data, + schema, + types={"array": (list, tuple)}, + resolver=resolver) + + +_cache = { + # A mock schema for docstring tests + "_doctest.json": { + "$schema": "http://json-schema.org/schema#", + + "title": "_doctest", + "description": "A test schema", + + "type": "object", + + "additionalProperties": False, + + "required": ["key"], + + "properties": { + "key": { + "description": "A test key", + "type": "string" + } + } + } +} + + +def _precache(): + """Store available schemas in-memory for reduced disk access""" + global _CACHED + + repos_root = os.environ["OPENPYPE_REPOS_ROOT"] + schema_dir = os.path.join(repos_root, "schema") + + for schema in os.listdir(schema_dir): + if schema.startswith(("_", ".")): + continue + if not schema.endswith(".json"): + continue + if not os.path.isfile(os.path.join(schema_dir, schema)): + continue + with open(os.path.join(schema_dir, schema)) as f: + log_.debug("Installing schema '%s'.." 
% schema) + _cache[schema] = json.load(f) + _CACHED = True diff --git a/openpype/pipeline/thumbnail.py b/openpype/pipeline/thumbnail.py new file mode 100644 index 0000000000..ec97b36954 --- /dev/null +++ b/openpype/pipeline/thumbnail.py @@ -0,0 +1,146 @@ +import os +import copy +import logging + +from . import legacy_io +from .plugin_discover import ( + discover, + register_plugin, + register_plugin_path, +) +log = logging.getLogger(__name__) + + +def get_thumbnail_binary(thumbnail_entity, thumbnail_type, dbcon=None): + if not thumbnail_entity: + return + + resolvers = discover_thumbnail_resolvers() + resolvers = sorted(resolvers, key=lambda cls: cls.priority) + if dbcon is None: + dbcon = legacy_io + + for Resolver in resolvers: + available_types = Resolver.thumbnail_types + if ( + thumbnail_type not in available_types + and "*" not in available_types + and ( + isinstance(available_types, (list, tuple)) + and len(available_types) == 0 + ) + ): + continue + try: + instance = Resolver(dbcon) + result = instance.process(thumbnail_entity, thumbnail_type) + if result: + return result + + except Exception: + log.warning("Resolver {0} failed during process.".format( + Resolver.__name__ + ), exc_info=True) + + +class ThumbnailResolver(object): + """Determine how to get data from thumbnail entity. + + "priority" - determines the order of processing in `get_thumbnail_binary`, + lower number is processed earlier. + "thumbnail_types" - it is expected that thumbnails will be used + on more than one level; there is only the ["thumbnail"] type at the + moment of writing this docstring but "ico" and "full" are expected + to be added in the future. + """ + + priority = 100 + thumbnail_types = ["*"] + + def __init__(self, dbcon): + self._log = None + self.dbcon = dbcon + + @property + def log(self): + if self._log is None: + self._log = logging.getLogger(self.__class__.__name__) + return self._log + + def process(self, thumbnail_entity, thumbnail_type): + pass + + +class TemplateResolver(ThumbnailResolver): + + priority = 90 + + def process(self, thumbnail_entity, thumbnail_type): + + if not os.environ.get("AVALON_THUMBNAIL_ROOT"): + return + + template = thumbnail_entity["data"].get("template") + if not template: + self.log.debug("Thumbnail entity does not have a template set") + return + + project = self.dbcon.find_one( + {"type": "project"}, + { + "name": True, + "data.code": True + } + ) + + template_data = copy.deepcopy( + thumbnail_entity["data"].get("template_data") or {} + ) + template_data.update({ + "_id": str(thumbnail_entity["_id"]), + "thumbnail_type": thumbnail_type, + "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"), + "project": { + "name": project["name"], + "code": project["data"].get("code") + } + }) + + try: + filepath = os.path.normpath(template.format(**template_data)) + except KeyError: + self.log.warning(( + "Missing template data keys for template <{0}> || Data: {1}" + ).format(template, str(template_data))) + return + + if not os.path.exists(filepath): + self.log.warning("File does not exist \"{0}\"".format(filepath)) + return + + with open(filepath, "rb") as _file: + content = _file.read() + + return content + + +class BinaryThumbnail(ThumbnailResolver): + def process(self, thumbnail_entity, thumbnail_type): + return thumbnail_entity["data"].get("binary_data") + + +# Thumbnail resolvers +def discover_thumbnail_resolvers(): + return discover(ThumbnailResolver) + + +def register_thumbnail_resolver(plugin): + register_plugin(ThumbnailResolver, plugin) + + +def
register_thumbnail_resolver_path(path): + register_plugin_path(ThumbnailResolver, path) + + +register_thumbnail_resolver(TemplateResolver) +register_thumbnail_resolver(BinaryThumbnail) diff --git a/openpype/plugin.py b/openpype/plugin.py index 3569936dac..bb9bc2ff85 100644 --- a/openpype/plugin.py +++ b/openpype/plugin.py @@ -1,7 +1,6 @@ import tempfile import os import pyblish.api -import avalon.api ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05 ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1 diff --git a/openpype/plugins/load/add_site.py b/openpype/plugins/load/add_site.py index 95001691e2..55fda55d17 100644 --- a/openpype/plugins/load/add_site.py +++ b/openpype/plugins/load/add_site.py @@ -1,9 +1,19 @@ from openpype.modules import ModulesManager from openpype.pipeline import load +from openpype.lib.avalon_context import get_linked_ids_for_representations +from openpype.modules.sync_server.utils import SiteAlreadyPresentError class AddSyncSite(load.LoaderPlugin): - """Add sync site to representation""" + """Add sync site to representation + + If the family of the synced representation is 'workfile', it also looks + for all representations which are referenced (loaded) in the workfile, + using the content of 'inputLinks'. + It doesn't do any checks for the site; the most common use case is an + artist downloading a workfile to their local site, but it might also be + helpful when an artist is re-uploading a broken representation to a + remote site. + """ representations = ["*"] families = ["*"] @@ -12,21 +22,42 @@ class AddSyncSite(load.LoaderPlugin): icon = "download" color = "#999999" + _sync_server = None + is_add_site_loader = True + + @property + def sync_server(self): + if not self._sync_server: + manager = ModulesManager() + self._sync_server = manager.modules_by_name["sync_server"] + + return self._sync_server + def load(self, context, name=None, namespace=None, data=None): self.log.info("Adding {} to representation: {}".format( data["site_name"], data["_id"])) - self.add_site_to_representation(data["project_name"], - data["_id"], - data["site_name"]) - self.log.debug("Site added.") + family = context["representation"]["context"]["family"] + project_name = data["project_name"] + repre_id = data["_id"] + site_name = data["site_name"] - @staticmethod - def add_site_to_representation(project_name, representation_id, site_name): - """Adds new site to representation_id, resets if exists""" - manager = ModulesManager() - sync_server = manager.modules_by_name["sync_server"] - sync_server.add_site(project_name, representation_id, site_name, - force=True) + self.sync_server.add_site(project_name, repre_id, site_name, + force=True) + + if family == "workfile": + links = get_linked_ids_for_representations(project_name, + [repre_id], + link_type="reference") + for link_repre_id in links: + try: + self.sync_server.add_site(project_name, link_repre_id, + site_name, + force=False) + except SiteAlreadyPresentError: + # do not add/reset working site for references + self.log.debug("Site present", exc_info=True) + + self.log.debug("Site added.") def filepath_from_context(self, context): """No real file loading""" diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py index 692acdec02..c3e9e9fa0a 100644 --- a/openpype/plugins/load/delete_old_versions.py +++ b/openpype/plugins/load/delete_old_versions.py @@ -8,9 +8,8 @@ import ftrack_api import qargparse from Qt import QtWidgets, QtCore -from avalon.api import AvalonMongoDB from openpype import style -from
openpype.pipeline import load +from openpype.pipeline import load, AvalonMongoDB from openpype.lib import StringTemplate from openpype.api import Anatomy @@ -126,7 +125,8 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): os.remove(file_path) self.log.debug("Removed file: {}".format(file_path)) - remainders.remove(file_path_base) + if file_path_base in remainders: + remainders.remove(file_path_base) continue seq_path_base = os.path.split(seq_path)[1] @@ -333,6 +333,8 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): def main(self, data, remove_publish_folder): # Size of files. size = 0 + if not data: + return size if remove_publish_folder: size = self.delete_whole_dir_paths(data["dir_paths"].values()) @@ -418,6 +420,8 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): ) data = self.get_data(context, versions_to_keep) + if not data: + continue size += self.main(data, remove_publish_folder) print("Progressing {}/{}".format(count + 1, len(contexts))) diff --git a/openpype/plugins/load/delivery.py b/openpype/plugins/load/delivery.py index 04080053e3..7df07e3f64 100644 --- a/openpype/plugins/load/delivery.py +++ b/openpype/plugins/load/delivery.py @@ -3,9 +3,7 @@ from collections import defaultdict from Qt import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB - -from openpype.pipeline import load +from openpype.pipeline import load, AvalonMongoDB from openpype.api import Anatomy, config from openpype import resources, style diff --git a/openpype/plugins/load/remove_site.py b/openpype/plugins/load/remove_site.py index adffec9986..c5f442b2f5 100644 --- a/openpype/plugins/load/remove_site.py +++ b/openpype/plugins/load/remove_site.py @@ -12,22 +12,26 @@ class RemoveSyncSite(load.LoaderPlugin): icon = "download" color = "#999999" + _sync_server = None + is_remove_site_loader = True + + @property + def sync_server(self): + if not self._sync_server: + manager = ModulesManager() + self._sync_server = manager.modules_by_name["sync_server"] + + return self._sync_server + def load(self, context, name=None, namespace=None, data=None): self.log.info("Removing {} on representation: {}".format( data["site_name"], data["_id"])) - self.remove_site_on_representation(data["project_name"], - data["_id"], - data["site_name"]) + self.sync_server.remove_site(data["project_name"], + data["_id"], + data["site_name"], + True) self.log.debug("Site removed.") - @staticmethod - def remove_site_on_representation(project_name, representation_id, - site_name): - manager = ModulesManager() - sync_server = manager.modules_by_name["sync_server"] - sync_server.remove_site(project_name, representation_id, - site_name, True) - def filepath_from_context(self, context): """No real file loading""" return "" diff --git a/openpype/plugins/publish/cleanup_farm.py b/openpype/plugins/publish/cleanup_farm.py index ab0c6e469e..2c6c1625bb 100644 --- a/openpype/plugins/publish/cleanup_farm.py +++ b/openpype/plugins/publish/cleanup_farm.py @@ -3,7 +3,8 @@ import os import shutil import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io class CleanUpFarm(pyblish.api.ContextPlugin): @@ -22,7 +23,7 @@ class CleanUpFarm(pyblish.api.ContextPlugin): def process(self, context): # Get source host from which farm publishing was started - src_host_name = avalon.api.Session.get("AVALON_APP") + src_host_name = legacy_io.Session.get("AVALON_APP") self.log.debug("Host name from session is {}".format(src_host_name)) # Skip process if it is not in the list of source hosts in which # this plugin should run diff --git
a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py index bd8d9e50c4..0794adfb67 100644 --- a/openpype/plugins/publish/collect_anatomy_context_data.py +++ b/openpype/plugins/publish/collect_anatomy_context_data.py @@ -13,11 +13,12 @@ Provides: """ import json +import pyblish.api + from openpype.lib import ( get_system_general_anatomy_data ) -from avalon import api -import pyblish.api +from openpype.pipeline import legacy_io class CollectAnatomyContextData(pyblish.api.ContextPlugin): @@ -65,7 +66,7 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): asset_entity = context.data.get("assetEntity") if asset_entity: - task_name = api.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] asset_tasks = asset_entity["data"]["tasks"] task_type = asset_tasks.get(task_name, {}).get("type") diff --git a/openpype/plugins/publish/collect_anatomy_instance_data.py b/openpype/plugins/publish/collect_anatomy_instance_data.py index 42836e796b..6a6ea170b5 100644 --- a/openpype/plugins/publish/collect_anatomy_instance_data.py +++ b/openpype/plugins/publish/collect_anatomy_instance_data.py @@ -25,9 +25,10 @@ import copy import json import collections -from avalon import io import pyblish.api +from openpype.pipeline import legacy_io + class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): """Collect Instance specific Anatomy data. @@ -83,7 +84,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): self.log.debug("Querying asset documents with names: {}".format( ", ".join(["\"{}\"".format(name) for name in asset_names]) )) - asset_docs = io.find({ + asset_docs = legacy_io.find({ "type": "asset", "name": {"$in": asset_names} }) @@ -153,7 +154,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): subset_docs = [] if subset_filters: - subset_docs = list(io.find({ + subset_docs = list(legacy_io.find({ "type": "subset", "$or": subset_filters })) @@ -202,7 +203,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): ] last_version_by_subset_id = {} - for doc in io.aggregate(_pipeline): + for doc in legacy_io.aggregate(_pipeline): subset_id = doc["_id"] last_version_by_subset_id[subset_id] = doc["name"] diff --git a/openpype/plugins/publish/collect_avalon_entities.py b/openpype/plugins/publish/collect_avalon_entities.py index c099a2cf75..3e7843407f 100644 --- a/openpype/plugins/publish/collect_avalon_entities.py +++ b/openpype/plugins/publish/collect_avalon_entities.py @@ -8,9 +8,10 @@ Provides: context -> assetEntity - asset entity from database """ -from avalon import io, api import pyblish.api +from openpype.pipeline import legacy_io + class CollectAvalonEntities(pyblish.api.ContextPlugin): """Collect Anatomy into Context""" @@ -19,12 +20,12 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): label = "Collect Avalon Entities" def process(self, context): - io.install() - project_name = api.Session["AVALON_PROJECT"] - asset_name = api.Session["AVALON_ASSET"] - task_name = api.Session["AVALON_TASK"] + legacy_io.install() + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] - project_entity = io.find_one({ + project_entity = legacy_io.find_one({ "type": "project", "name": project_name }) @@ -38,7 +39,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): if not asset_name: self.log.info("Context is not set. 
Can't collect global data.") return - asset_entity = io.find_one({ + asset_entity = legacy_io.find_one({ "type": "asset", "name": asset_name, "parent": project_entity["_id"] diff --git a/openpype/plugins/publish/collect_cleanup_keys.py b/openpype/plugins/publish/collect_cleanup_keys.py new file mode 100644 index 0000000000..635b038387 --- /dev/null +++ b/openpype/plugins/publish/collect_cleanup_keys.py @@ -0,0 +1,21 @@ +""" +Requires: + None +Provides: + context + - cleanupFullPaths (list) + - cleanupEmptyDirs (list) +""" + +import pyblish.api + + +class CollectCleanupKeys(pyblish.api.ContextPlugin): + """Prepare keys for 'ExplicitCleanUp' plugin.""" + + label = "Collect Cleanup Keys" + order = pyblish.api.CollectorOrder + + def process(self, context): + context.data["cleanupFullPaths"] = [] + context.data["cleanupEmptyDirs"] = [] diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py index 16e3f669c3..f6ead98809 100644 --- a/openpype/plugins/publish/collect_from_create_context.py +++ b/openpype/plugins/publish/collect_from_create_context.py @@ -3,7 +3,8 @@ """ import os import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io class CollectFromCreateContext(pyblish.api.ContextPlugin): @@ -25,12 +26,12 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): # Update global data to context context.data.update(create_context.context_data_to_store()) - + context.data["newPublishing"] = True # Update context data for key in ("AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK"): value = create_context.dbcon.Session.get(key) if value is not None: - avalon.api.Session[key] = value + legacy_io.Session[key] = value os.environ[key] = value def create_instance(self, context, in_data): diff --git a/openpype/plugins/publish/collect_hierarchy.py b/openpype/plugins/publish/collect_hierarchy.py index efb40407d9..4e94acce4a 100644 --- a/openpype/plugins/publish/collect_hierarchy.py +++ b/openpype/plugins/publish/collect_hierarchy.py @@ -1,5 +1,6 @@ import pyblish.api -import avalon.api as avalon + +from openpype.pipeline import legacy_io class CollectHierarchy(pyblish.api.ContextPlugin): @@ -19,7 +20,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): def process(self, context): temp_context = {} - project_name = avalon.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] final_context = {} final_context[project_name] = {} final_context[project_name]['entity_type'] = 'Project' diff --git a/openpype/plugins/publish/collect_host_name.py b/openpype/plugins/publish/collect_host_name.py index b731e3ed26..d64af4d049 100644 --- a/openpype/plugins/publish/collect_host_name.py +++ b/openpype/plugins/publish/collect_host_name.py @@ -18,20 +18,30 @@ class CollectHostName(pyblish.api.ContextPlugin): def process(self, context): host_name = context.data.get("hostName") + app_name = context.data.get("appName") + app_label = context.data.get("appLabel") # Don't override value if is already set - if host_name: + if host_name and app_name and app_label: return - # Use AVALON_APP as first if available it is the same as host name - # - only if is not defined use AVALON_APP_NAME (e.g. 
on Farm) and - # set it back to AVALON_APP env variable - host_name = os.environ.get("AVALON_APP") + # Use AVALON_APP to get host name if available if not host_name: + host_name = os.environ.get("AVALON_APP") + + # Use AVALON_APP_NAME to get full app name + if not app_name: app_name = os.environ.get("AVALON_APP_NAME") - if app_name: - app_manager = ApplicationManager() - app = app_manager.applications.get(app_name) - if app: + + # Fill missing values based on app full name + if (not host_name or not app_label) and app_name: + app_manager = ApplicationManager() + app = app_manager.applications.get(app_name) + if app: + if not host_name: host_name = app.host_name + if not app_label: + app_label = app.full_label context.data["hostName"] = host_name + context.data["appName"] = app_name + context.data["appLabel"] = app_label diff --git a/openpype/plugins/publish/collect_rendered_files.py b/openpype/plugins/publish/collect_rendered_files.py index 1005c38b9d..670e57ed10 100644 --- a/openpype/plugins/publish/collect_rendered_files.py +++ b/openpype/plugins/publish/collect_rendered_files.py @@ -11,7 +11,8 @@ import os import json import pyblish.api -from avalon import api + +from openpype.pipeline import legacy_io class CollectRenderedFiles(pyblish.api.ContextPlugin): @@ -150,7 +151,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): session_data["AVALON_WORKDIR"] = remapped self.log.info("Setting session using data from file") - api.Session.update(session_data) + legacy_io.Session.update(session_data) os.environ.update(session_data) session_is_set = True self._process_path(data, anatomy) diff --git a/openpype/plugins/publish/collect_resources_path.py b/openpype/plugins/publish/collect_resources_path.py index fa181301ee..89df031fb0 100644 --- a/openpype/plugins/publish/collect_resources_path.py +++ b/openpype/plugins/publish/collect_resources_path.py @@ -12,7 +12,8 @@ import os import copy import pyblish.api -from avalon import api + +from openpype.pipeline import legacy_io class CollectResourcesPath(pyblish.api.InstancePlugin): @@ -53,7 +54,10 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): "textures", "action", "background", - "effect" + "effect", + "staticMesh", + "skeletalMesh" + ] def process(self, instance): @@ -81,7 +85,7 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): else: # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." 
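The fallback chain in `CollectHostName` above can be summarized as follows; a sketch using the same environment variables, with hypothetical example values in the comments:

```python
import os

from openpype.lib import ApplicationManager

# Resolve names the way CollectHostName does: context data first,
# then environment variables, then the application manager.
host_name = os.environ.get("AVALON_APP")        # e.g. "maya" (hypothetical)
app_name = os.environ.get("AVALON_APP_NAME")    # e.g. "maya/2022" (hypothetical)
app_label = None

if app_name:
    app = ApplicationManager().applications.get(app_name)
    if app:
        # Fill missing values from the application definition
        host_name = host_name or app.host_name
        app_label = app.full_label
```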
diff --git a/openpype/plugins/publish/collect_scene_loaded_versions.py b/openpype/plugins/publish/collect_scene_loaded_versions.py index d8119846c6..bb34e3ce31 100644 --- a/openpype/plugins/publish/collect_scene_loaded_versions.py +++ b/openpype/plugins/publish/collect_scene_loaded_versions.py @@ -1,6 +1,11 @@ +from bson.objectid import ObjectId import pyblish.api -from avalon import api, io + +from openpype.pipeline import ( + registered_host, + legacy_io, +) class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): @@ -23,7 +28,7 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): ] def process(self, context): - host = api.registered_host() + host = registered_host() if host is None: self.log.warn("No registered host.") return @@ -35,19 +40,34 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): loaded_versions = [] _containers = list(host.ls()) - _repr_ids = [io.ObjectId(c["representation"]) for c in _containers] + _repr_ids = [ObjectId(c["representation"]) for c in _containers] + repre_docs = legacy_io.find( + {"_id": {"$in": _repr_ids}}, + projection={"_id": 1, "parent": 1} + ) version_by_repr = { - str(doc["_id"]): doc["parent"] for doc in - io.find({"_id": {"$in": _repr_ids}}, projection={"parent": 1}) + str(doc["_id"]): doc["parent"] + for doc in repre_docs } + # QUESTION should we add same representation id when loaded multiple + # times? for con in _containers: + repre_id = con["representation"] + version_id = version_by_repr.get(repre_id) + if version_id is None: + self.log.warning(( + "Skipping container," + " did not find representation document. {}" + ).format(str(con))) + continue + # NOTE: # may have more then one representation that are same version version = { "subsetName": con["name"], - "representation": io.ObjectId(con["representation"]), - "version": version_by_repr[con["representation"]], # _id + "representation": ObjectId(repre_id), + "version": version_id, } loaded_versions.append(version) diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py index b2ca8850b6..544c763b52 100644 --- a/openpype/plugins/publish/extract_burnin.py +++ b/openpype/plugins/publish/extract_burnin.py @@ -16,7 +16,7 @@ from openpype.lib import ( run_openpype_process, get_transcode_temp_directory, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, should_convert_for_ffmpeg, CREATE_NO_WINDOW @@ -187,8 +187,13 @@ class ExtractBurnin(openpype.api.Extractor): repre_files = repre["files"] if isinstance(repre_files, (tuple, list)): filename = repre_files[0] + src_filepaths = [ + os.path.join(src_repre_staging_dir, filename) + for filename in repre_files + ] else: filename = repre_files + src_filepaths = [os.path.join(src_repre_staging_dir, filename)] first_input_path = os.path.join(src_repre_staging_dir, filename) # Determine if representation requires pre conversion for ffmpeg @@ -209,11 +214,9 @@ class ExtractBurnin(openpype.api.Extractor): new_staging_dir = get_transcode_temp_directory() repre["stagingDir"] = new_staging_dir - convert_for_ffmpeg( - first_input_path, + convert_input_paths_for_ffmpeg( + src_filepaths, new_staging_dir, - _temp_data["frameStart"], - _temp_data["frameEnd"], self.log ) @@ -221,11 +224,17 @@ class ExtractBurnin(openpype.api.Extractor): filled_anatomy = anatomy.format_all(burnin_data) burnin_data["anatomy"] = filled_anatomy.get_solved() - # Add context data burnin_data. 
- burnin_data["custom"] = ( + custom_data = copy.deepcopy( + instance.data.get("customData") or {} + ) + # Backwards compatibility (since 2022/04/07) + custom_data.update( instance.data.get("custom_burnin_data") or {} ) + # Add context data burnin_data. + burnin_data["custom"] = custom_data + # Add source camera name to burnin data camera_name = repre.get("camera_name") if camera_name: diff --git a/openpype/plugins/publish/extract_hierarchy_avalon.py b/openpype/plugins/publish/extract_hierarchy_avalon.py index e263edd931..2f528d4469 100644 --- a/openpype/plugins/publish/extract_hierarchy_avalon.py +++ b/openpype/plugins/publish/extract_hierarchy_avalon.py @@ -1,7 +1,10 @@ -import pyblish.api -from avalon import io from copy import deepcopy +import pyblish.api + +from openpype.pipeline import legacy_io + + class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): """Create entities in Avalon based on collected data.""" @@ -16,8 +19,8 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): return hierarchy_context = deepcopy(context.data["hierarchyContext"]) - if not io.Session: - io.install() + if not legacy_io.Session: + legacy_io.install() active_assets = [] # filter only the active publishing insatnces @@ -64,7 +67,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): data["tasks"] = tasks parents = [] visualParent = None - # do not store project"s id as visualParent (silo asset) + # do not store project"s id as visualParent if self.project is not None: if self.project["_id"] != parent["_id"]: visualParent = parent["_id"] @@ -78,7 +81,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): update_data = True # Process project if entity_type.lower() == "project": - entity = io.find_one({"type": "project"}) + entity = legacy_io.find_one({"type": "project"}) # TODO: should be in validator? assert (entity is not None), "Did not find project in DB" @@ -95,7 +98,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): ) # Else process assset else: - entity = io.find_one({"type": "asset", "name": name}) + entity = legacy_io.find_one({"type": "asset", "name": name}) if entity: # Do not override data, only update cur_entity_data = entity.get("data") or {} @@ -119,7 +122,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): # Skip updating data update_data = False - archived_entities = io.find({ + archived_entities = legacy_io.find({ "type": "archived_asset", "name": name }) @@ -143,7 +146,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if update_data: # Update entity data with input data - io.update_many( + legacy_io.update_many( {"_id": entity["_id"]}, {"$set": {"data": data}} ) @@ -161,7 +164,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): "type": "asset", "data": data } - io.replace_one( + legacy_io.replace_one( {"_id": entity["_id"]}, new_entity ) @@ -176,9 +179,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): "data": data } self.log.debug("Creating asset: {}".format(item)) - entity_id = io.insert_one(item).inserted_id + entity_id = legacy_io.insert_one(item).inserted_id - return io.find_one({"_id": entity_id}) + return legacy_io.find_one({"_id": entity_id}) def _get_assets(self, input_dict): """ Returns only asset dictionary. 
diff --git a/openpype/plugins/publish/extract_jpeg_exr.py b/openpype/plugins/publish/extract_jpeg_exr.py index 468ed96199..d6d6854092 100644 --- a/openpype/plugins/publish/extract_jpeg_exr.py +++ b/openpype/plugins/publish/extract_jpeg_exr.py @@ -8,7 +8,7 @@ from openpype.lib import ( path_to_subprocess_arg, get_transcode_temp_directory, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, should_convert_for_ffmpeg ) @@ -79,11 +79,9 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): if do_convert: convert_dir = get_transcode_temp_directory() filename = os.path.basename(full_input_path) - convert_for_ffmpeg( - full_input_path, + convert_input_paths_for_ffmpeg( + [full_input_path], convert_dir, - None, - None, self.log ) full_input_path = os.path.join(convert_dir, filename) diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index 3ecea1f8bd..f2473839d9 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -18,7 +18,7 @@ from openpype.lib import ( path_to_subprocess_arg, should_convert_for_ffmpeg, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, get_transcode_temp_directory ) import speedcopy @@ -188,23 +188,26 @@ class ExtractReview(pyblish.api.InstancePlugin): outputs_per_repres = self._get_outputs_per_representations( instance, profile_outputs ) - fill_data = copy.deepcopy(instance.data["anatomyData"]) - for repre, outputs in outputs_per_repres: + for repre, outpu_defs in outputs_per_repres: # Check if input should be preconverted before processing # Store original staging dir (it's value may change) src_repre_staging_dir = repre["stagingDir"] # Receive filepath to first file in representation first_input_path = None + input_filepaths = [] if not self.input_is_sequence(repre): first_input_path = os.path.join( src_repre_staging_dir, repre["files"] ) + input_filepaths.append(first_input_path) else: for filename in repre["files"]: - first_input_path = os.path.join( + filepath = os.path.join( src_repre_staging_dir, filename ) - break + input_filepaths.append(filepath) + if first_input_path is None: + first_input_path = filepath # Skip if file is not set if first_input_path is None: @@ -231,136 +234,149 @@ class ExtractReview(pyblish.api.InstancePlugin): new_staging_dir = get_transcode_temp_directory() repre["stagingDir"] = new_staging_dir - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - convert_for_ffmpeg( - first_input_path, + convert_input_paths_for_ffmpeg( + input_filepaths, new_staging_dir, - frame_start, - frame_end, self.log ) - for _output_def in outputs: - output_def = copy.deepcopy(_output_def) - # Make sure output definition has "tags" key - if "tags" not in output_def: - output_def["tags"] = [] - - if "burnins" not in output_def: - output_def["burnins"] = [] - - # Create copy of representation - new_repre = copy.deepcopy(repre) - # Make sure new representation has origin staging dir - # - this is because source representation may change - # it's staging dir because of ffmpeg conversion - new_repre["stagingDir"] = src_repre_staging_dir - - # Remove "delete" tag from new repre if there is - if "delete" in new_repre["tags"]: - new_repre["tags"].remove("delete") - - # Add additional tags from output definition to representation - for tag in output_def["tags"]: - if tag not in new_repre["tags"]: - new_repre["tags"].append(tag) - - # Add burnin link from output definition to representation - for burnin in output_def["burnins"]: - if 
burnin not in new_repre.get("burnins", []): - if not new_repre.get("burnins"): - new_repre["burnins"] = [] - new_repre["burnins"].append(str(burnin)) - - self.log.debug( - "Linked burnins: `{}`".format(new_repre.get("burnins")) + try: + self._render_output_definitions( + instance, repre, src_repre_staging_dir, outpu_defs ) - self.log.debug( - "New representation tags: `{}`".format( - new_repre.get("tags")) + finally: + # Make sure temporary staging is cleaned up and representation + # has set origin stagingDir + if do_convert: + # Set staging dir of source representation back to previous + # value + repre["stagingDir"] = src_repre_staging_dir + if os.path.exists(new_staging_dir): + shutil.rmtree(new_staging_dir) + + def _render_output_definitions( + self, instance, repre, src_repre_staging_dir, outpu_defs + ): + fill_data = copy.deepcopy(instance.data["anatomyData"]) + for _output_def in outpu_defs: + output_def = copy.deepcopy(_output_def) + # Make sure output definition has "tags" key + if "tags" not in output_def: + output_def["tags"] = [] + + if "burnins" not in output_def: + output_def["burnins"] = [] + + # Create copy of representation + new_repre = copy.deepcopy(repre) + # Make sure new representation has origin staging dir + # - this is because source representation may change + # it's staging dir because of ffmpeg conversion + new_repre["stagingDir"] = src_repre_staging_dir + + # Remove "delete" tag from new repre if there is + if "delete" in new_repre["tags"]: + new_repre["tags"].remove("delete") + + # Add additional tags from output definition to representation + for tag in output_def["tags"]: + if tag not in new_repre["tags"]: + new_repre["tags"].append(tag) + + # Add burnin link from output definition to representation + for burnin in output_def["burnins"]: + if burnin not in new_repre.get("burnins", []): + if not new_repre.get("burnins"): + new_repre["burnins"] = [] + new_repre["burnins"].append(str(burnin)) + + self.log.debug( + "Linked burnins: `{}`".format(new_repre.get("burnins")) + ) + + self.log.debug( + "New representation tags: `{}`".format( + new_repre.get("tags")) + ) + + temp_data = self.prepare_temp_data(instance, repre, output_def) + files_to_clean = [] + if temp_data["input_is_sequence"]: + self.log.info("Filling gaps in sequence.") + files_to_clean = self.fill_sequence_gaps( + temp_data["origin_repre"]["files"], + new_repre["stagingDir"], + temp_data["frame_start"], + temp_data["frame_end"]) + + # create or update outputName + output_name = new_repre.get("outputName", "") + output_ext = new_repre["ext"] + if output_name: + output_name += "_" + output_name += output_def["filename_suffix"] + if temp_data["without_handles"]: + output_name += "_noHandles" + + # add outputName to anatomy format fill_data + fill_data.update({ + "output": output_name, + "ext": output_ext + }) + + try: # temporary until oiiotool is supported cross platform + ffmpeg_args = self._ffmpeg_arguments( + output_def, instance, new_repre, temp_data, fill_data ) - - temp_data = self.prepare_temp_data( - instance, repre, output_def) - files_to_clean = [] - if temp_data["input_is_sequence"]: - self.log.info("Filling gaps in sequence.") - files_to_clean = self.fill_sequence_gaps( - temp_data["origin_repre"]["files"], - new_repre["stagingDir"], - temp_data["frame_start"], - temp_data["frame_end"]) - - # create or update outputName - output_name = new_repre.get("outputName", "") - output_ext = new_repre["ext"] - if output_name: - output_name += "_" - output_name += output_def["filename_suffix"] - if 
temp_data["without_handles"]: - output_name += "_noHandles" - - # add outputName to anatomy format fill_data - fill_data.update({ - "output": output_name, - "ext": output_ext - }) - - try: # temporary until oiiotool is supported cross platform - ffmpeg_args = self._ffmpeg_arguments( - output_def, instance, new_repre, temp_data, fill_data + except ZeroDivisionError: + # TODO recalculate width and height using OIIO before + # conversion + if 'exr' in temp_data["origin_repre"]["ext"]: + self.log.warning( + ( + "Unsupported compression on input files." + " Skipping!!!" + ), + exc_info=True ) - except ZeroDivisionError: - if 'exr' in temp_data["origin_repre"]["ext"]: - self.log.debug("Unsupported compression on input " + - "files. Skipping!!!") - return - raise NotImplementedError + return + raise NotImplementedError - subprcs_cmd = " ".join(ffmpeg_args) + subprcs_cmd = " ".join(ffmpeg_args) - # run subprocess - self.log.debug("Executing: {}".format(subprcs_cmd)) + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) - openpype.api.run_subprocess( - subprcs_cmd, shell=True, logger=self.log - ) + openpype.api.run_subprocess( + subprcs_cmd, shell=True, logger=self.log + ) - # delete files added to fill gaps - if files_to_clean: - for f in files_to_clean: - os.unlink(f) + # delete files added to fill gaps + if files_to_clean: + for f in files_to_clean: + os.unlink(f) - new_repre.update({ - "name": "{}_{}".format(output_name, output_ext), - "outputName": output_name, - "outputDef": output_def, - "frameStartFtrack": temp_data["output_frame_start"], - "frameEndFtrack": temp_data["output_frame_end"], - "ffmpeg_cmd": subprcs_cmd - }) + new_repre.update({ + "name": "{}_{}".format(output_name, output_ext), + "outputName": output_name, + "outputDef": output_def, + "frameStartFtrack": temp_data["output_frame_start"], + "frameEndFtrack": temp_data["output_frame_end"], + "ffmpeg_cmd": subprcs_cmd + }) - # Force to pop these key if are in new repre - new_repre.pop("preview", None) - new_repre.pop("thumbnail", None) - if "clean_name" in new_repre.get("tags", []): - new_repre.pop("outputName") + # Force to pop these key if are in new repre + new_repre.pop("preview", None) + new_repre.pop("thumbnail", None) + if "clean_name" in new_repre.get("tags", []): + new_repre.pop("outputName") - # adding representation - self.log.debug( - "Adding new representation: {}".format(new_repre) - ) - instance.data["representations"].append(new_repre) - - # Cleanup temp staging dir after procesisng of output definitions - if do_convert: - temp_dir = repre["stagingDir"] - shutil.rmtree(temp_dir) - # Set staging dir of source representation back to previous - # value - repre["stagingDir"] = src_repre_staging_dir + # adding representation + self.log.debug( + "Adding new representation: {}".format(new_repre) + ) + instance.data["representations"].append(new_repre) def input_is_sequence(self, repre): """Deduce from representation data if input is sequence.""" diff --git a/openpype/plugins/publish/extract_review_slate.py b/openpype/plugins/publish/extract_review_slate.py index 505ae75169..49f0eac41d 100644 --- a/openpype/plugins/publish/extract_review_slate.py +++ b/openpype/plugins/publish/extract_review_slate.py @@ -158,13 +158,15 @@ class ExtractReviewSlate(openpype.api.Extractor): ]) if use_legacy_code: + format_args = [] codec_args = repre["_profile"].get('codec', []) output_args.extend(codec_args) # preset's output data output_args.extend(repre["_profile"].get('output', [])) else: # Codecs are copied from source 
for whole input - codec_args = self._get_codec_args(repre) + format_args, codec_args = self._get_format_codec_args(repre) + output_args.extend(format_args) output_args.extend(codec_args) # make sure colors are correct @@ -266,8 +268,14 @@ class ExtractReviewSlate(openpype.api.Extractor): "-safe", "0", "-i", conc_text_path, "-c", "copy", - output_path ] + # NOTE: Added because of OP Atom demuxers + # Add format arguments if there are any + # - keep format of output + if format_args: + concat_args.extend(format_args) + # Add final output path + concat_args.append(output_path) # ffmpeg concat subprocess self.log.debug( @@ -338,7 +346,7 @@ class ExtractReviewSlate(openpype.api.Extractor): return vf_back - def _get_codec_args(self, repre): + def _get_format_codec_args(self, repre): """Detect possible codec arguments from representation.""" codec_args = [] @@ -361,13 +369,9 @@ class ExtractReviewSlate(openpype.api.Extractor): return codec_args source_ffmpeg_cmd = repre.get("ffmpeg_cmd") - codec_args.extend( - get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd) - ) - codec_args.extend( - get_ffmpeg_codec_args( - ffprobe_data, source_ffmpeg_cmd, logger=self.log - ) + format_args = get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd) + codec_args = get_ffmpeg_codec_args( + ffprobe_data, source_ffmpeg_cmd, logger=self.log ) - return codec_args + return format_args, codec_args diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index 60245314f4..a706b653c4 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -4,10 +4,18 @@ import clique import errno import shutil +from bson.objectid import ObjectId from pymongo import InsertOne, ReplaceOne import pyblish.api -from avalon import api, io, schema -from openpype.lib import create_hard_link + +from openpype.lib import ( + create_hard_link, + filter_profiles +) +from openpype.pipeline import ( + schema, + legacy_io, +) class IntegrateHeroVersion(pyblish.api.InstancePlugin): @@ -16,7 +24,9 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): order = pyblish.api.IntegratorOrder + 0.1 optional = True + active = True + # Families are modified using settings families = [ "model", "rig", @@ -32,11 +42,13 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): "project", "asset", "task", "subset", "representation", "family", "hierarchy", "task", "username" ] - # TODO add family filtering # QUESTION/TODO this process should happen on server if crashed due to # permissions error on files (files were used or user didn't have perms) # *but all other plugins must be sucessfully completed + template_name_profiles = [] + _default_template_name = "hero" + def process(self, instance): self.log.debug( "--- Integration of Hero version for subset `{}` begins.".format( @@ -50,27 +62,35 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): ) return - project_name = api.Session["AVALON_PROJECT"] + template_key = self._get_template_key(instance) - # TODO raise error if Hero not set? anatomy = instance.context.data["anatomy"] - if "hero" not in anatomy.templates: - self.log.warning("!!! Anatomy does not have set `hero` key!") - return - - if "path" not in anatomy.templates["hero"]: + project_name = legacy_io.Session["AVALON_PROJECT"] + if template_key not in anatomy.templates: self.log.warning(( - "!!! There is not set `path` template in `hero` anatomy" - " for project \"{}\"." - ).format(project_name)) + "!!! 
Anatomy of project \"{}\" does not have set" + " \"{}\" template key!" + ).format(project_name, template_key)) return - hero_template = anatomy.templates["hero"]["path"] + if "path" not in anatomy.templates[template_key]: + self.log.warning(( + "!!! There is not set \"path\" template in \"{}\" anatomy" + " for project \"{}\"." + ).format(template_key, project_name)) + return + + hero_template = anatomy.templates[template_key]["path"] self.log.debug("`hero` template check was successful. `{}`".format( hero_template )) - hero_publish_dir = self.get_publish_dir(instance) + self.integrate_instance(instance, template_key, hero_template) + + def integrate_instance(self, instance, template_key, hero_template): + anatomy = instance.context.data["anatomy"] + published_repres = instance.data["published_representations"] + hero_publish_dir = self.get_publish_dir(instance, template_key) src_version_entity = instance.data.get("versionEntity") filtered_repre_ids = [] @@ -161,7 +181,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): if old_version: new_version_id = old_version["_id"] else: - new_version_id = io.ObjectId() + new_version_id = ObjectId() new_hero_version = { "_id": new_version_id, @@ -203,7 +223,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): if old_repres_by_name: old_repres_to_delete = old_repres_by_name - archived_repres = list(io.find({ + archived_repres = list(legacy_io.find({ # Check what is type of archived representation "type": "archived_repsentation", "parent": new_version_id @@ -270,12 +290,12 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): continue # Prepare anatomy data - anatomy_data = repre_info["anatomy_data"] + anatomy_data = copy.deepcopy(repre_info["anatomy_data"]) anatomy_data.pop("version", None) # Get filled path to repre context anatomy_filled = anatomy.format(anatomy_data) - template_filled = anatomy_filled["hero"]["path"] + template_filled = anatomy_filled[template_key]["path"] repre_data = { "path": str(template_filled), @@ -307,11 +327,11 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): collections, remainders = clique.assemble(published_files) if remainders or not collections or len(collections) > 1: raise Exception(( - "Integrity error. Files of published representation " - "is combination of frame collections and single files." - "Collections: `{}` Single files: `{}`" - ).format(str(collections), - str(remainders))) + "Integrity error. Files of published" + " representation is combination of frame" + " collections and single files. 
Collections:" + " `{}` Single files: `{}`" + ).format(str(collections), str(remainders))) src_col = collections[0] @@ -319,13 +339,10 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): frame_splitter = "_-_FRAME_SPLIT_-_" anatomy_data["frame"] = frame_splitter _anatomy_filled = anatomy.format(anatomy_data) - _template_filled = _anatomy_filled["hero"]["path"] + _template_filled = _anatomy_filled[template_key]["path"] head, tail = _template_filled.split(frame_splitter) padding = int( - anatomy.templates["render"].get( - "frame_padding", - anatomy.templates["render"].get("padding") - ) + anatomy.templates[template_key]["frame_padding"] ) dst_col = clique.Collection( @@ -384,7 +401,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): # Create representation else: - repre["_id"] = io.ObjectId() + repre["_id"] = ObjectId() bulk_writes.append( InsertOne(repre) ) @@ -420,14 +437,15 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): else: repre["old_id"] = repre["_id"] - repre["_id"] = io.ObjectId() + repre["_id"] = ObjectId() repre["type"] = "archived_representation" bulk_writes.append( InsertOne(repre) ) if bulk_writes: - io._database[io.Session["AVALON_PROJECT"]].bulk_write( + project_name = legacy_io.Session["AVALON_PROJECT"] + legacy_io.database[project_name].bulk_write( bulk_writes ) @@ -443,6 +461,8 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): backup_hero_publish_dir is not None and os.path.exists(backup_hero_publish_dir) ): + if os.path.exists(hero_publish_dir): + shutil.rmtree(hero_publish_dir) os.rename(backup_hero_publish_dir, hero_publish_dir) self.log.error(( "!!! Creating of hero version failed." @@ -465,13 +485,18 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): files.append(_path) return files - def get_publish_dir(self, instance): + def get_publish_dir(self, instance, template_key): anatomy = instance.context.data["anatomy"] template_data = copy.deepcopy(instance.data["anatomyData"]) - if "folder" in anatomy.templates["hero"]: + if "originalBasename" in instance.data: + template_data.update({ + "originalBasename": instance.data.get("originalBasename") + }) + + if "folder" in anatomy.templates[template_key]: anatomy_filled = anatomy.format(template_data) - publish_folder = anatomy_filled["hero"]["folder"] + publish_folder = anatomy_filled[template_key]["folder"] else: # This is for cases of Deprecated anatomy without `folder` # TODO remove when all clients have solved this issue @@ -482,13 +507,13 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): anatomy_filled = anatomy.format(template_data) # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." ).format(project_name)) - file_path = anatomy_filled["hero"]["path"] + file_path = anatomy_filled[template_key]["path"] # Directory publish_folder = os.path.dirname(file_path) @@ -498,6 +523,38 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): return publish_folder + def _get_template_key(self, instance): + anatomy_data = instance.data["anatomyData"] + task_data = anatomy_data.get("task") or {} + task_name = task_data.get("name") + task_type = task_data.get("type") + host_name = instance.context.data["hostName"] + # TODO raise error if Hero not set? 
+ family = self.main_family_from_instance(instance) + key_values = { + "families": family, + "task_names": task_name, + "task_types": task_type, + "hosts": host_name + } + profile = filter_profiles( + self.template_name_profiles, + key_values, + logger=self.log + ) + if profile: + template_name = profile["template_name"] + else: + template_name = self._default_template_name + return template_name + + def main_family_from_instance(self, instance): + """Returns main family of entered instance.""" + family = instance.data.get("family") + if not family: + family = instance.data["families"][0] + return family + def copy_file(self, src_path, dst_path): # TODO check drives if are the same to check if cas hardlink dirname = os.path.dirname(dst_path) @@ -531,12 +588,12 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): def version_from_representations(self, repres): for repre in repres: - version = io.find_one({"_id": repre["parent"]}) + version = legacy_io.find_one({"_id": repre["parent"]}) if version: return version def current_hero_ents(self, version): - hero_version = io.find_one({ + hero_version = legacy_io.find_one({ "parent": version["parent"], "type": "hero_version" }) @@ -544,7 +601,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): if not hero_version: return (None, []) - hero_repres = list(io.find({ + hero_repres = list(legacy_io.find({ "parent": hero_version["_id"], "type": "representation" })) @@ -563,22 +620,16 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): src_file (string) - original file path dst_file (string) - hero file path """ - _, rootless = anatomy.find_root_template_from_path( - dst_file - ) - _, rtls_src = anatomy.find_root_template_from_path( - src_file - ) + _, rootless = anatomy.find_root_template_from_path(dst_file) + _, rtls_src = anatomy.find_root_template_from_path(src_file) return path.replace(rtls_src, rootless) def _update_hash(self, hash, src_file_name, dst_file): """ Updates hash value with proper hero name """ - src_file_name = self._get_name_without_ext( - src_file_name) - hero_file_name = self._get_name_without_ext( - dst_file) + src_file_name = self._get_name_without_ext(src_file_name) + hero_file_name = self._get_name_without_ext(dst_file) return hash.replace(src_file_name, hero_file_name) def _get_name_without_ext(self, value): diff --git a/openpype/plugins/publish/integrate_inputlinks.py b/openpype/plugins/publish/integrate_inputlinks.py index f973dfc963..6964f2d938 100644 --- a/openpype/plugins/publish/integrate_inputlinks.py +++ b/openpype/plugins/publish/integrate_inputlinks.py @@ -1,8 +1,10 @@ - from collections import OrderedDict -from avalon import io + +from bson.objectid import ObjectId import pyblish.api +from openpype.pipeline import legacy_io + class IntegrateInputLinks(pyblish.api.ContextPlugin): """Connecting version level dependency links""" @@ -104,7 +106,7 @@ class IntegrateInputLinks(pyblish.api.ContextPlugin): # future. 
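# [Illustration, not part of the patch] Shape of the dependency-link entry
# the code below appends to version_doc["data"]["inputLinks"]; the "type"
# value and the id are hypothetical here:
#
#     {
#         "type": "generative",
#         "id": ObjectId("62408a7a9f3a4d2a8c3e0f1b"),
#         "linkedBy": "publish"
#     }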
link = OrderedDict() link["type"] = link_type - link["id"] = io.ObjectId(input_id) + link["id"] = ObjectId(input_id) link["linkedBy"] = "publish" if "inputLinks" not in version_doc["data"]: @@ -127,5 +129,7 @@ class IntegrateInputLinks(pyblish.api.ContextPlugin): if input_links is None: continue - io.update_one({"_id": version_doc["_id"]}, - {"$set": {"data.inputLinks": input_links}}) + legacy_io.update_one( + {"_id": version_doc["_id"]}, + {"$set": {"data.inputLinks": input_links}} + ) diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py index 6ca6125cb2..bf13a4050e 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_new.py @@ -8,13 +8,14 @@ import errno import six import re import shutil +from collections import deque, defaultdict +from datetime import datetime +from bson.objectid import ObjectId from pymongo import DeleteOne, InsertOne import pyblish.api -from avalon import io + import openpype.api -from datetime import datetime -# from pype.modules import ModulesManager from openpype.lib.profiles_filtering import filter_profiles from openpype.lib import ( prepare_template_data, @@ -22,6 +23,7 @@ from openpype.lib import ( StringTemplate, TemplateUnsolved ) +from openpype.pipeline import legacy_io # this is needed until speedcopy for linux is fixed if sys.platform == "win32": @@ -104,9 +106,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "effect", "xgen", "hda", - "usd" + "usd", + "staticMesh", + "skeletalMesh", + "usdComposition", + "usdOverride", + "simpleUnrealTexture" ] - exclude_families = ["clip"] + exclude_families = ["clip", "render.farm"] db_representation_context_keys = [ "project", "asset", "task", "subset", "version", "representation", "family", "hierarchy", "task", "username" @@ -124,11 +131,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset_grouping_profiles = None def process(self, instance): - self.integrated_file_sizes = {} - if [ef for ef in self.exclude_families - if instance.data["family"] in ef]: - return + for ef in self.exclude_families: + if ( + instance.data["family"] == ef or + ef in instance.data["families"]): + self.log.debug("Excluded family '{}' in '{}' or {}".format( + ef, instance.data["family"], instance.data["families"])) + return + self.integrated_file_sizes = {} try: self.register(instance) self.log.info("Integrated Asset in to the database ...") @@ -145,7 +156,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Required environment variables anatomy_data = instance.data["anatomyData"] - io.install() + legacy_io.install() context = instance.context @@ -159,7 +170,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): asset_name = instance.data["asset"] asset_entity = instance.data.get("assetEntity") if not asset_entity or asset_entity["name"] != context_asset_name: - asset_entity = io.find_one({ + asset_entity = legacy_io.find_one({ "type": "asset", "name": asset_name, "parent": project_entity["_id"] @@ -221,7 +232,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Ensure at least one file is set up for transfer in staging dir. 
repres = instance.data.get("representations") - assert repres, "Instance has no files to transfer" + repres = instance.data.get("representations") + msg = "Instance {} has no files to transfer".format( + instance.data["family"]) + assert repres, msg assert isinstance(repres, (list, tuple)), ( "Instance 'files' must be a list, got: {0} {1}".format( str(type(repres)), str(repres) @@ -252,14 +266,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): new_repre_names_low = [_repre["name"].lower() for _repre in repres] - existing_version = io.find_one({ + existing_version = legacy_io.find_one({ 'type': 'version', 'parent': subset["_id"], 'name': version_number }) if existing_version is None: - version_id = io.insert_one(version).inserted_id + version_id = legacy_io.insert_one(version).inserted_id else: # Check if instance have set `append` mode which cause that # only replicated representations are set to archive @@ -267,7 +281,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Update version data # TODO query by _id and - io.update_many({ + legacy_io.update_many({ 'type': 'version', 'parent': subset["_id"], 'name': version_number @@ -277,7 +291,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): version_id = existing_version['_id'] # Find representations of existing version and archive them - current_repres = list(io.find({ + current_repres = list(legacy_io.find({ "type": "representation", "parent": version_id })) @@ -294,20 +308,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): bulk_writes.append(DeleteOne({"_id": repre_id})) repre["orig_id"] = repre_id - repre["_id"] = io.ObjectId() + repre["_id"] = ObjectId() repre["type"] = "archived_representation" bulk_writes.append(InsertOne(repre)) # bulk updates if bulk_writes: - io._database[io.Session["AVALON_PROJECT"]].bulk_write( + project_name = legacy_io.Session["AVALON_PROJECT"] + legacy_io.database[project_name].bulk_write( bulk_writes ) - version = io.find_one({"_id": version_id}) + version = legacy_io.find_one({"_id": version_id}) instance.data["versionEntity"] = version - existing_repres = list(io.find({ + existing_repres = list(legacy_io.find({ "parent": version_id, "type": "archived_representation" })) @@ -352,6 +367,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if profile: template_name = profile["template_name"] + + published_representations = {} for idx, repre in enumerate(instance.data["representations"]): # reset transfers for next representation @@ -380,6 +397,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if resolution_width: template_data["fps"] = fps + if "originalBasename" in instance.data: + template_data.update({ + "originalBasename": instance.data.get("originalBasename") + }) + files = repre['files'] if repre.get('stagingDir'): stagingdir = repre['stagingDir'] @@ -551,6 +573,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): repre['published_path'] = dst self.log.debug("__ dst: {}".format(dst)) + if not instance.data.get("publishDir"): + instance.data["publishDir"] = ( + anatomy_filled + [template_name] + ["folder"] + ) if repre.get("udim"): repre_context["udim"] = repre.get("udim") # store list @@ -573,7 +601,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Create new id if existing representations does not match if repre_id is None: - repre_id = io.ObjectId() + repre_id = ObjectId() data = repre.get("data") or {} data.update({'path': dst, 'template': template}) @@ -634,12 +662,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): repre_ids_to_remove 
= [] for repre in existing_repres: repre_ids_to_remove.append(repre["_id"]) - io.delete_many({"_id": {"$in": repre_ids_to_remove}}) + legacy_io.delete_many({"_id": {"$in": repre_ids_to_remove}}) for rep in instance.data["representations"]: self.log.debug("__ rep: {}".format(rep)) - io.insert_many(representations) + legacy_io.insert_many(representations) instance.data["published_representations"] = ( published_representations ) @@ -741,7 +769,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): def get_subset(self, asset, instance): subset_name = instance.data["subset"] - subset = io.find_one({ + subset = legacy_io.find_one({ "type": "subset", "parent": asset["_id"], "name": subset_name @@ -762,7 +790,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if _family not in families: families.append(_family) - _id = io.insert_one({ + _id = legacy_io.insert_one({ "schema": "openpype:subset-3.0", "type": "subset", "name": subset_name, @@ -772,7 +800,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "parent": asset["_id"] }).inserted_id - subset = io.find_one({"_id": _id}) + subset = legacy_io.find_one({"_id": _id}) # QUESTION Why is changing of group and updating it's # families in 'get_subset'? @@ -781,8 +809,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Update families on subset. families = [instance.data["family"]] families.extend(instance.data.get("families", [])) - io.update_many( - {"type": "subset", "_id": io.ObjectId(subset["_id"])}, + legacy_io.update_many( + {"type": "subset", "_id": ObjectId(subset["_id"])}, {"$set": {"data.families": families}} ) @@ -805,9 +833,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset_group = self._get_subset_group(instance) if subset_group: - io.update_many({ + legacy_io.update_many({ 'type': 'subset', - '_id': io.ObjectId(subset_id) + '_id': ObjectId(subset_id) }, {'$set': {'data.subsetGroup': subset_group}}) def _get_subset_group(self, instance): @@ -1054,7 +1082,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): sync_project_presets = None rec = { - "_id": io.ObjectId(), + "_id": ObjectId(), "path": path } if size: @@ -1097,18 +1125,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): rec["sites"].append(meta) already_attached_sites[meta["name"]] = None + # add alternative sites + rec, already_attached_sites = self._add_alternative_sites( + system_sync_server_presets, already_attached_sites, rec) + # add skeleton for site where it should be always synced to - for always_on_site in always_accesible: + for always_on_site in set(always_accesible): if always_on_site not in already_attached_sites.keys(): meta = {"name": always_on_site.strip()} rec["sites"].append(meta) already_attached_sites[meta["name"]] = None - # add alternative sites - rec = self._add_alternative_sites(system_sync_server_presets, - already_attached_sites, - rec) - log.debug("final sites:: {}".format(rec["sites"])) return rec @@ -1139,22 +1166,60 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): """ conf_sites = system_sync_server_presets.get("sites", {}) + alt_site_pairs = self._get_alt_site_pairs(conf_sites) + + already_attached_keys = list(already_attached_sites.keys()) + for added_site in already_attached_keys: + real_created = already_attached_sites[added_site] + for alt_site in alt_site_pairs.get(added_site, []): + if alt_site in already_attached_sites.keys(): + continue + meta = {"name": alt_site} + # alt site inherits state of 'created_dt' + if real_created: + meta["created_dt"] = real_created + 
rec["sites"].append(meta) + already_attached_sites[meta["name"]] = real_created + + return rec, already_attached_sites + + def _get_alt_site_pairs(self, conf_sites): + """Returns dict of site and its alternative sites. + + If `site` has alternative site, it means that alt_site has 'site' as + alternative site + Args: + conf_sites (dict) + Returns: + (dict): {'site': [alternative sites]...} + """ + alt_site_pairs = defaultdict(list) for site_name, site_info in conf_sites.items(): alt_sites = set(site_info.get("alternative_sites", [])) - already_attached_keys = list(already_attached_sites.keys()) - for added_site in already_attached_keys: - if added_site in alt_sites: - if site_name in already_attached_keys: - continue - meta = {"name": site_name} - real_created = already_attached_sites[added_site] - # alt site inherits state of 'created_dt' - if real_created: - meta["created_dt"] = real_created - rec["sites"].append(meta) - already_attached_sites[meta["name"]] = real_created + alt_site_pairs[site_name].extend(alt_sites) - return rec + for alt_site in alt_sites: + alt_site_pairs[alt_site].append(site_name) + + for site_name, alt_sites in alt_site_pairs.items(): + sites_queue = deque(alt_sites) + while sites_queue: + alt_site = sites_queue.popleft() + + # safety against wrong config + # {"SFTP": {"alternative_site": "SFTP"} + if alt_site == site_name or alt_site not in alt_site_pairs: + continue + + for alt_alt_site in alt_site_pairs[alt_site]: + if ( + alt_alt_site != site_name + and alt_alt_site not in alt_sites + ): + alt_sites.append(alt_alt_site) + sites_queue.append(alt_alt_site) + + return alt_site_pairs def handle_destination_files(self, integrated_file_sizes, mode): """ Clean destination files diff --git a/openpype/plugins/publish/integrate_thumbnail.py b/openpype/plugins/publish/integrate_thumbnail.py index 28a93efb9a..5d6fc561ea 100644 --- a/openpype/plugins/publish/integrate_thumbnail.py +++ b/openpype/plugins/publish/integrate_thumbnail.py @@ -8,7 +8,7 @@ import six import pyblish.api from bson.objectid import ObjectId -from avalon import api, io +from openpype.pipeline import legacy_io class IntegrateThumbnails(pyblish.api.InstancePlugin): @@ -38,7 +38,7 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): ) return - project_name = api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] anatomy = instance.context.data["anatomy"] if "publish" not in anatomy.templates: @@ -66,11 +66,11 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): ) return - io.install() + legacy_io.install() thumbnail_template = anatomy.templates["publish"]["thumbnail"] - version = io.find_one({"_id": thumb_repre["parent"]}) + version = legacy_io.find_one({"_id": thumb_repre["parent"]}) if not version: raise AssertionError( "There does not exist version with id {}".format( @@ -137,12 +137,12 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): } } # Create thumbnail entity - io.insert_one(thumbnail_entity) + legacy_io.insert_one(thumbnail_entity) self.log.debug( "Creating entity in database {}".format(str(thumbnail_entity)) ) # Set thumbnail id for version - io.update_many( + legacy_io.update_many( {"_id": version["_id"]}, {"$set": {"data.thumbnail_id": thumbnail_id}} ) @@ -151,7 +151,7 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): )) asset_entity = instance.data["assetEntity"] - io.update_many( + legacy_io.update_many( {"_id": asset_entity["_id"]}, {"$set": {"data.thumbnail_id": thumbnail_id}} ) diff --git 
a/openpype/plugins/publish/validate_aseset_docs.py b/openpype/plugins/publish/validate_asset_docs.py similarity index 69% rename from openpype/plugins/publish/validate_aseset_docs.py rename to openpype/plugins/publish/validate_asset_docs.py index eed75cdf8a..bc1f9b9e6c 100644 --- a/openpype/plugins/publish/validate_aseset_docs.py +++ b/openpype/plugins/publish/validate_asset_docs.py @@ -2,8 +2,8 @@ import pyblish.api from openpype.pipeline import PublishValidationError -class ValidateContainers(pyblish.api.InstancePlugin): - """Validate existence of asset asset documents on instances. +class ValidateAssetDocs(pyblish.api.InstancePlugin): + """Validate existence of asset documents on instances. Without asset document it is not possible to publish the instance. @@ -22,10 +22,10 @@ class ValidateContainers(pyblish.api.InstancePlugin): return if instance.data.get("assetEntity"): - self.log.info("Instance have set asset document in it's data.") + self.log.info("Instance has set asset document in its data.") else: raise PublishValidationError(( - "Instance \"{}\" don't have set asset" - " document which is needed for publishing." + "Instance \"{}\" doesn't have asset document " + "set which is needed for publishing." ).format(instance.data["name"])) diff --git a/openpype/plugins/publish/validate_editorial_asset_name.py b/openpype/plugins/publish/validate_editorial_asset_name.py index 4a65f3c64a..f9cdaebf0c 100644 --- a/openpype/plugins/publish/validate_editorial_asset_name.py +++ b/openpype/plugins/publish/validate_editorial_asset_name.py @@ -1,7 +1,9 @@ -import pyblish.api -from avalon import io from pprint import pformat +import pyblish.api + +from openpype.pipeline import legacy_io + class ValidateEditorialAssetName(pyblish.api.ContextPlugin): """ Validating if editorial's asset names are not already created in db. @@ -24,10 +26,10 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin): asset_and_parents = self.get_parents(context) self.log.debug("__ asset_and_parents: {}".format(asset_and_parents)) - if not io.Session: - io.install() + if not legacy_io.Session: + legacy_io.install() - db_assets = list(io.find( + db_assets = list(legacy_io.find( {"type": "asset"}, {"name": 1, "data.parents": 1})) self.log.debug("__ db_assets: {}".format(db_assets)) diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index c05eece2be..d945a1f697 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -25,7 +25,7 @@ class PypeCommands: Most of its methods are called by :mod:`cli` module. """ @staticmethod - def launch_tray(debug=False): + def launch_tray(): PypeLogger.set_process_name("Tray") from openpype.tools import tray @@ -101,7 +101,8 @@ class PypeCommands: RuntimeError: When there is no path to process. 
""" from openpype.modules import ModulesManager - from openpype import install, uninstall + from openpype.pipeline import install_openpype_plugins + from openpype.api import Logger from openpype.tools.utils.host_tools import show_publish from openpype.tools.utils.lib import qt_app_context @@ -112,7 +113,7 @@ class PypeCommands: log = Logger.get_logger() - install() + install_openpype_plugins() manager = ModulesManager() @@ -124,13 +125,14 @@ class PypeCommands: if not any(paths): raise RuntimeError("No publish paths specified") - env = get_app_environments_for_context( - os.environ["AVALON_PROJECT"], - os.environ["AVALON_ASSET"], - os.environ["AVALON_TASK"], - os.environ["AVALON_APP_NAME"] - ) - os.environ.update(env) + if os.getenv("AVALON_APP_NAME"): + env = get_app_environments_for_context( + os.environ["AVALON_PROJECT"], + os.environ["AVALON_ASSET"], + os.environ["AVALON_TASK"], + os.environ["AVALON_APP_NAME"] + ) + os.environ.update(env) pyblish.api.register_host("shell") @@ -294,7 +296,8 @@ class PypeCommands: # Register target and host import pyblish.api import pyblish.util - import avalon.api + + from openpype.pipeline import install_host from openpype.hosts.webpublisher import api as webpublisher log = PypeLogger.get_logger() @@ -315,7 +318,7 @@ class PypeCommands: for target in targets: pyblish.api.register_target(target) - avalon.api.install(webpublisher) + install_host(webpublisher) log.info("Running publish ...") diff --git a/openpype/scripts/fusion_switch_shot.py b/openpype/scripts/fusion_switch_shot.py index 6db8ff36a8..245fc665f0 100644 --- a/openpype/scripts/fusion_switch_shot.py +++ b/openpype/scripts/fusion_switch_shot.py @@ -4,12 +4,16 @@ import sys import logging # Pipeline imports -from avalon import api, io -import avalon.fusion +from openpype.hosts.fusion import api +import openpype.hosts.fusion.api.lib as fusion_lib # Config imports -import openpype.lib as pype -import openpype.hosts.fusion.lib as fusion_lib +from openpype.lib import version_up +from openpype.pipeline import ( + install_host, + registered_host, + legacy_io, +) from openpype.lib.avalon_context import get_workdir_from_session @@ -79,7 +83,7 @@ def _format_filepath(session): # Create new unqiue filepath if os.path.exists(new_filepath): - new_filepath = pype.version_up(new_filepath) + new_filepath = version_up(new_filepath) return new_filepath @@ -102,7 +106,7 @@ def _update_savers(comp, session): comp.Print("New renders to: %s\n" % renders) - with avalon.fusion.comp_lock_and_undo_chunk(comp): + with api.comp_lock_and_undo_chunk(comp): savers = comp.GetToolList(False, "Saver").values() for saver in savers: filepath = saver.GetAttrs("TOOLST_Clip_Name")[1.0] @@ -127,7 +131,7 @@ def update_frame_range(comp, representations): """ version_ids = [r["parent"] for r in representations] - versions = io.find({"type": "version", "_id": {"$in": version_ids}}) + versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}}) versions = list(versions) start = min(v["data"]["frameStart"] for v in versions) @@ -158,25 +162,25 @@ def switch(asset_name, filepath=None, new=True): # Assert asset name exists # It is better to do this here then to wait till switch_shot does it - asset = io.find_one({"type": "asset", "name": asset_name}) + asset = legacy_io.find_one({"type": "asset", "name": asset_name}) assert asset, "Could not find '%s' in the database" % asset_name # Get current project - self._project = io.find_one({ + self._project = legacy_io.find_one({ "type": "project", - "name": 
api.Session["AVALON_PROJECT"] + "name": legacy_io.Session["AVALON_PROJECT"] }) # Go to comp if not filepath: - current_comp = avalon.fusion.get_current_comp() + current_comp = api.get_current_comp() assert current_comp is not None, "Could not find current comp" else: fusion = _get_fusion_instance() current_comp = fusion.LoadComp(filepath, quiet=True) assert current_comp is not None, "Fusion could not load '%s'" % filepath - host = api.registered_host() + host = registered_host() containers = list(host.ls()) assert containers, "Nothing to update" @@ -194,7 +198,7 @@ def switch(asset_name, filepath=None, new=True): current_comp.Print(message) # Build the session to switch to - switch_to_session = api.Session.copy() + switch_to_session = legacy_io.Session.copy() switch_to_session["AVALON_ASSET"] = asset['name'] if new: @@ -203,7 +207,7 @@ def switch(asset_name, filepath=None, new=True): # Update savers output based on new session _update_savers(current_comp, switch_to_session) else: - comp_path = pype.version_up(filepath) + comp_path = version_up(filepath) current_comp.Print(comp_path) @@ -234,7 +238,7 @@ if __name__ == '__main__': args, unknown = parser.parse_args() - api.install(avalon.fusion) + install_host(api) switch(args.asset_name, args.file_path) sys.exit(0) diff --git a/openpype/scripts/non_python_host_launch.py b/openpype/scripts/non_python_host_launch.py index 43921f0483..f795af7bb3 100644 --- a/openpype/scripts/non_python_host_launch.py +++ b/openpype/scripts/non_python_host_launch.py @@ -15,7 +15,7 @@ CURRENT_FILE = os.path.abspath(__file__) def show_error_messagebox(title, message, detail_message=None): """Function will show message and process ends after closing it.""" from Qt import QtWidgets, QtCore - from avalon import style + from openpype import style app = QtWidgets.QApplication([]) app.setStyleSheet(style.load_stylesheet()) diff --git a/openpype/settings/constants.py b/openpype/settings/constants.py index 8b8acf5714..19ff953eb4 100644 --- a/openpype/settings/constants.py +++ b/openpype/settings/constants.py @@ -8,11 +8,11 @@ M_ENVIRONMENT_KEY = "__environment_keys__" # Metadata key for storing dynamic created labels M_DYNAMIC_KEY_LABEL = "__dynamic_keys_labels__" -METADATA_KEYS = ( +METADATA_KEYS = frozenset([ M_OVERRIDDEN_KEY, M_ENVIRONMENT_KEY, M_DYNAMIC_KEY_LABEL -) +]) # Keys where studio's system overrides are stored GLOBAL_SETTINGS_KEY = "global_settings" diff --git a/openpype/settings/defaults/project_anatomy/imageio.json b/openpype/settings/defaults/project_anatomy/imageio.json index 1c86509155..7a3f49452e 100644 --- a/openpype/settings/defaults/project_anatomy/imageio.json +++ b/openpype/settings/defaults/project_anatomy/imageio.json @@ -185,8 +185,8 @@ "linux": [] }, "renderSpace": "ACEScg", - "viewName": "ACES 1.0 SDR-video", - "displayName": "sRGB" + "displayName": "sRGB", + "viewName": "ACES 1.0 SDR-video" }, "colorManagementPreference": { "configFilePath": { diff --git a/openpype/settings/defaults/project_anatomy/templates.json b/openpype/settings/defaults/project_anatomy/templates.json index d46d449c77..caf399a903 100644 --- a/openpype/settings/defaults/project_anatomy/templates.json +++ b/openpype/settings/defaults/project_anatomy/templates.json @@ -28,9 +28,30 @@ }, "delivery": {}, "unreal": { - "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}", - "file": "{subset}_{@version}<_{output}><.{@frame}>.{ext}", + "folder": "{root[work]}/{project[name]}/unreal/{task[name]}", + "file": "{project[code]}_{asset}", 
"path": "{@folder}/{@file}" }, - "others": {} + "others": { + "maya2unreal": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}", + "file": "{subset}_{@version}<_{output}><.{@frame}>.{ext}", + "path": "{@folder}/{@file}" + }, + "simpleUnrealTextureHero": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/hero", + "file": "{originalBasename}.{ext}", + "path": "{@folder}/{@file}" + }, + "simpleUnrealTexture": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{@version}", + "file": "{originalBasename}_{@version}.{ext}", + "path": "{@folder}/{@file}" + }, + "__dynamic_keys_labels__": { + "maya2unreal": "Maya to Unreal", + "simpleUnrealTextureHero": "Simple Unreal Texture - Hero", + "simpleUnrealTexture": "Simple Unreal Texture" + } + } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/aftereffects.json b/openpype/settings/defaults/project_settings/aftereffects.json index 6a9a399069..8083aa0972 100644 --- a/openpype/settings/defaults/project_settings/aftereffects.json +++ b/openpype/settings/defaults/project_settings/aftereffects.json @@ -1,4 +1,11 @@ { + "create": { + "RenderCreator": { + "defaults": [ + "Main" + ] + } + }, "publish": { "ValidateSceneSettings": { "enabled": true, diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index 5bb0a4022e..f0b2a7e555 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -4,6 +4,10 @@ "CollectDefaultDeadlineServer": { "pass_mongo_url": false }, + "CollectDeadlinePools": { + "primary_pool": "", + "secondary_pool": "" + }, "ValidateExpectedFiles": { "enabled": true, "active": true, @@ -15,33 +19,6 @@ "deadline" ] }, - "ProcessSubmittedJobOnFarm": { - "enabled": true, - "deadline_department": "", - "deadline_pool": "", - "deadline_group": "", - "deadline_chunk_size": 1, - "deadline_priority": 50, - "publishing_script": "", - "skip_integration_repre_list": [], - "aov_filter": { - "maya": [ - ".+(?:\\.|_)([Bb]eauty)(?:\\.|_).*" - ], - "nuke": [ - ".*" - ], - "aftereffects": [ - ".*" - ], - "celaction": [ - ".*" - ], - "harmony": [ - ".*" - ] - } - }, "MayaSubmitDeadline": { "enabled": true, "optional": false, @@ -49,6 +26,8 @@ "tile_assembler_plugin": "OpenPypeTileAssembler", "use_published": true, "asset_dependencies": true, + "priority": 50, + "tile_priority": 50, "group": "none", "limit": [], "jobInfo": {}, @@ -62,8 +41,7 @@ "use_published": true, "priority": 50, "chunk_size": 10, - "primary_pool": "", - "secondary_pool": "", + "concurrent_tasks": 1, "group": "", "department": "", "use_gpu": true, @@ -78,8 +56,6 @@ "use_published": true, "priority": 50, "chunk_size": 10000, - "primary_pool": "", - "secondary_pool": "", "group": "", "department": "" }, @@ -90,11 +66,36 @@ "use_published": true, "priority": 50, "chunk_size": 10000, - "primary_pool": "", - "secondary_pool": "", "group": "", "department": "", "multiprocess": true + }, + "ProcessSubmittedJobOnFarm": { + "enabled": true, + "deadline_department": "", + "deadline_pool": "", + "deadline_group": "", + "deadline_chunk_size": 1, + "deadline_priority": 50, + "publishing_script": "", + "skip_integration_repre_list": [], + "aov_filter": { + "maya": [ + ".*([Bb]eauty).*" + ], + "nuke": [ + ".*" + ], + "aftereffects": [ + ".*" + ], + "celaction": [ + ".*" + ], + "harmony": [ + ".*" + ] + } } } -} \ No newline at end of 
file +} diff --git a/openpype/settings/defaults/project_settings/flame.json b/openpype/settings/defaults/project_settings/flame.json index c7188b10b5..ef7a2a4467 100644 --- a/openpype/settings/defaults/project_settings/flame.json +++ b/openpype/settings/defaults/project_settings/flame.json @@ -20,6 +20,37 @@ } }, "publish": { + "CollectTimelineInstances": { + "xml_preset_attrs_from_comments": [ + { + "name": "width", + "type": "number" + }, + { + "name": "height", + "type": "number" + }, + { + "name": "pixelRatio", + "type": "float" + }, + { + "name": "resizeType", + "type": "string" + }, + { + "name": "resizeFilter", + "type": "string" + } + ], + "add_tasks": [ + { + "name": "compositing", + "type": "Compositing", + "create_batch_group": true + } + ] + }, "ExtractSubsetResources": { "keep_original_representation": false, "export_presets_mapping": { @@ -31,7 +62,9 @@ "ignore_comment_attrs": false, "colorspace_out": "ACES - ACEScg", "representation_add_range": true, - "representation_tags": [] + "representation_tags": [], + "load_to_batch_group": true, + "batch_group_loader_name": "LoadClip" } } } @@ -58,7 +91,29 @@ ], "reel_group_name": "OpenPype_Reels", "reel_name": "Loaded", - "clip_name_template": "{asset}_{subset}_{representation}" + "clip_name_template": "{asset}_{subset}_{output}" + }, + "LoadClipBatch": { + "enabled": true, + "families": [ + "render2d", + "source", + "plate", + "render", + "review" + ], + "representations": [ + "exr", + "dpx", + "jpg", + "jpeg", + "png", + "h264", + "mov", + "mp4" + ], + "reel_name": "OP_LoadedReel", + "clip_name_template": "{asset}_{subset}_{output}" } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index 89bb41a164..a846a596c2 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -349,14 +349,36 @@ "tasks": [], "add_ftrack_family": true, "advanced_filtering": [] + }, + { + "hosts": [ + "photoshop" + ], + "families": [ + "review" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": true, + "advanced_filtering": [] } ] }, + "CollectFtrackCustomAttributeData": { + "enabled": false, + "custom_attribute_keys": [] + }, "IntegrateFtrackNote": { "enabled": true, - "note_with_intent_template": "{intent}: {comment}", + "note_template": "{intent}: {comment}", "note_labels": [] }, + "IntegrateFtrackDescription": { + "enabled": false, + "optional": true, + "active": true, + "description_template": "{comment}" + }, "ValidateFtrackAttributes": { "enabled": false, "ftrack_custom_attributes": {} @@ -395,7 +417,8 @@ "vrayproxy": "cache", "redshiftproxy": "cache", "usd": "usd" - } + }, + "keep_first_subset_name_for_review": true } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index 30a71b044a..7317a3da1c 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -33,20 +33,6 @@ "enabled": false, "profiles": [] }, - "IntegrateHeroVersion": { - "enabled": true, - "optional": true, - "families": [ - "model", - "rig", - "look", - "pointcache", - "animation", - "setdress", - "layout", - "mayaScene" - ] - }, "ExtractJpegEXR": { "enabled": true, "ffmpeg_args": { @@ -192,6 +178,29 @@ "task_types": [], "tasks": [], "template_name": "render" + }, + { + "families": [ + "simpleUnrealTexture" + ], 
+ "hosts": [ + "standalonepublisher" + ], + "task_types": [], + "tasks": [], + "template_name": "simpleUnrealTexture" + }, + { + "families": [ + "staticMesh", + "skeletalMesh" + ], + "hosts": [ + "maya" + ], + "task_types": [], + "tasks": [], + "template_name": "maya2unreal" } ], "subset_grouping_profiles": [ @@ -204,6 +213,35 @@ } ] }, + "IntegrateHeroVersion": { + "enabled": true, + "optional": true, + "active": true, + "families": [ + "model", + "rig", + "look", + "pointcache", + "animation", + "setdress", + "layout", + "mayaScene", + "simpleUnrealTexture" + ], + "template_name_profiles": [ + { + "families": [ + "simpleUnrealTexture" + ], + "hosts": [ + "standalonepublisher" + ], + "task_types": [], + "task_names": [], + "template_name": "simpleUnrealTextureHero" + } + ] + }, "CleanUp": { "paterns": [], "remove_temp_renders": false @@ -241,6 +279,15 @@ "tasks": [], "template": "{family}{variant}" }, + { + "families": [ + "workfile" + ], + "hosts": [], + "task_types": [], + "tasks": [], + "template": "{family}{Task}" + }, { "families": [ "render" @@ -268,6 +315,7 @@ "workfile" ], "hosts": [ + "aftereffects", "tvpaint" ], "task_types": [], @@ -287,7 +335,7 @@ }, { "families": [ - "unrealStaticMesh" + "staticMesh" ], "hosts": [ "maya" @@ -295,6 +343,17 @@ "task_types": [], "tasks": [], "template": "S_{asset}{variant}" + }, + { + "families": [ + "skeletalMesh" + ], + "hosts": [ + "maya" + ], + "task_types": [], + "tasks": [], + "template": "SK_{asset}{variant}" } ] }, @@ -304,6 +363,13 @@ "task_types": [], "hosts": [], "workfile_template": "work" + }, + { + "task_types": [], + "hosts": [ + "unreal" + ], + "workfile_template": "unreal" } ], "last_workfile_on_startup": [ diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 19d9a95595..4cdfe1ca5d 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -52,7 +52,7 @@ "", "_Main" ], - "static_mesh_prefix": "S_", + "static_mesh_prefix": "S", "collision_prefixes": [ "UBX", "UCP", @@ -60,6 +60,11 @@ "UCX" ] }, + "CreateUnrealSkeletalMesh": { + "enabled": true, + "defaults": [], + "joint_hints": "jnt_org" + }, "CreateAnimation": { "enabled": true, "defaults": [ diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 6992fb6e3e..ab015271ff 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -106,6 +106,9 @@ ] } }, + "ExtractReviewData": { + "enabled": false + }, "ExtractReviewDataLut": { "enabled": false }, @@ -119,11 +122,10 @@ "families": [], "sebsets": [] }, - "extension": "mov", + "read_raw": false, "viewer_process_override": "", "bake_viewer_process": true, "bake_viewer_input_process": true, - "add_tags": [], "reformat_node_add": false, "reformat_node_config": [ { @@ -151,12 +153,28 @@ "name": "pbb", "value": false } - ] + ], + "extension": "mov", + "add_tags": [] } } }, "ExtractSlateFrame": { - "viewer_lut_raw": false + "viewer_lut_raw": false, + "key_value_mapping": { + "f_submission_note": [ + true, + "{comment}" + ], + "f_submitting_for": [ + true, + "{intent[value]}" + ], + "f_vfx_scope_of_work": [ + false, + "" + ] + } }, "IncrementScriptVersion": { "enabled": true, diff --git a/openpype/settings/defaults/project_settings/photoshop.json b/openpype/settings/defaults/project_settings/photoshop.json index 118b9c721e..d9b7a8083f 100644 --- 
a/openpype/settings/defaults/project_settings/photoshop.json +++ b/openpype/settings/defaults/project_settings/photoshop.json @@ -12,13 +12,16 @@ "flatten_subset_template": "", "color_code_mapping": [] }, + "CollectInstances": { + "flatten_subset_template": "" + }, "ValidateContainers": { "enabled": true, "optional": true, "active": true }, "ValidateNaming": { - "invalid_chars": "[ \\\\/+\\*\\?\\(\\)\\[\\]\\{\\}:,]", + "invalid_chars": "[ \\\\/+\\*\\?\\(\\)\\[\\]\\{\\}:,;]", "replace_char": "_" }, "ExtractImage": { @@ -44,4 +47,4 @@ "create_first_version": false, "custom_templates": [] } -} +} \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/slack.json b/openpype/settings/defaults/project_settings/slack.json index d77b8c2208..c156fed08e 100644 --- a/openpype/settings/defaults/project_settings/slack.json +++ b/openpype/settings/defaults/project_settings/slack.json @@ -11,6 +11,7 @@ "task_types": [], "tasks": [], "subsets": [], + "review_upload_limit": 50.0, "channel_messages": [] } ] diff --git a/openpype/settings/defaults/project_settings/standalonepublisher.json b/openpype/settings/defaults/project_settings/standalonepublisher.json index 6858c4f34d..e36232d3f7 100644 --- a/openpype/settings/defaults/project_settings/standalonepublisher.json +++ b/openpype/settings/defaults/project_settings/standalonepublisher.json @@ -133,6 +133,22 @@ ], "help": "Texture files with UDIM together with worfile" }, + "create_simple_unreal_texture": { + "name": "simple_unreal_texture", + "label": "Simple Unreal Texture", + "family": "simpleUnrealTexture", + "icon": "Image", + "defaults": [], + "help": "Texture files with Unreal naming convention" + }, + "create_vdb": { + "name": "vdb", + "label": "VDB Volumetric Data", + "family": "vdbcache", + "icon": "cloud", + "defaults": [], + "help": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids" + }, "__dynamic_keys_labels__": { "create_workfile": "Workfile", "create_model": "Model", @@ -145,7 +161,9 @@ "create_matchmove": "Matchmove", "create_render": "Render", "create_mov_batch": "Batch Mov", - "create_texture_batch": "Batch Texture" + "create_texture_batch": "Batch Texture", + "create_simple_unreal_texture": "Simple Unreal Texture", + "create_vdb": "VDB Cache" } }, "publish": { diff --git a/openpype/settings/defaults/project_settings/tvpaint.json b/openpype/settings/defaults/project_settings/tvpaint.json index 528bf6de8e..88b5a598cd 100644 --- a/openpype/settings/defaults/project_settings/tvpaint.json +++ b/openpype/settings/defaults/project_settings/tvpaint.json @@ -1,6 +1,10 @@ { "stop_timer_on_application_exit": false, "publish": { + "CollectRenderScene": { + "enabled": false, + "render_layer": "Main" + }, "ExtractSequence": { "review_bg": [ 255, @@ -28,6 +32,11 @@ "enabled": true, "optional": true, "active": true + }, + "ExtractConvertToEXR": { + "enabled": false, + "replace_pngs": true, + "exr_compression": "ZIP" } }, "load": { diff --git a/openpype/settings/defaults/system_settings/general.json b/openpype/settings/defaults/system_settings/general.json index 5a3e39e5b6..a06947ba77 100644 --- a/openpype/settings/defaults/system_settings/general.json +++ b/openpype/settings/defaults/system_settings/general.json @@ -7,11 +7,13 @@ "global": [] } }, + "log_to_server": true, "disk_mapping": { "windows": [], "linux": [], "darwin": [] }, + "local_env_white_list": [], "openpype_path": { "windows": [], "darwin": [], diff --git 
a/openpype/settings/defaults/system_settings/tools.json b/openpype/settings/defaults/system_settings/tools.json index 181236abe8..243cde40cc 100644 --- a/openpype/settings/defaults/system_settings/tools.json +++ b/openpype/settings/defaults/system_settings/tools.json @@ -25,10 +25,18 @@ }, "variants": { "3-2": { - "MTOA_VERSION": "3.2" + "host_names": [], + "app_variants": [], + "environment": { + "MTOA_VERSION": "3.2" + } }, "3-1": { - "MTOA_VERSION": "3.1" + "host_names": [], + "app_variants": [], + "environment": { + "MTOA_VERSION": "3.1" + } }, "__dynamic_keys_labels__": { "3-2": "3.2", @@ -44,10 +52,39 @@ "environment": {}, "variants": {} }, + "renderman": { + "environment": {}, + "variants": { + "24-3-maya": { + "host_names": [ + "maya" + ], + "app_variants": [ + "maya/2022" + ], + "environment": { + "RFMTREE": { + "windows": "C:\\Program Files\\Pixar\\RenderManForMaya-24.3", + "darwin": "/Applications/Pixar/RenderManForMaya-24.3", + "linux": "/opt/pixar/RenderManForMaya-24.3" + }, + "RMANTREE": { + "windows": "C:\\Program Files\\Pixar\\RenderManProServer-24.3", + "darwin": "/Applications/Pixar/RenderManProServer-24.3", + "linux": "/opt/pixar/RenderManProServer-24.3" + } + } + }, + "__dynamic_keys_labels__": { + "24-3-maya": "24.3 RFM" + } + } + }, "__dynamic_keys_labels__": { "mtoa": "Autodesk Arnold", "vray": "Chaos Group Vray", - "yeti": "Pergrine Labs Yeti" + "yeti": "Peregrine Labs Yeti", + "renderman": "Pixar Renderman" } } } \ No newline at end of file diff --git a/openpype/settings/entities/base_entity.py b/openpype/settings/entities/base_entity.py index 76700d605d..21ee44ae77 100644 --- a/openpype/settings/entities/base_entity.py +++ b/openpype/settings/entities/base_entity.py @@ -173,6 +173,10 @@ class BaseItemEntity(BaseEntity): # Entity has set `_project_override_value` (is not NOT_SET) self.had_project_override = False + self._default_log_invalid_types = True + self._studio_log_invalid_types = True + self._project_log_invalid_types = True + # Callbacks that are called on change. # - main current purspose is to register GUI callbacks self.on_change_callbacks = [] @@ -419,7 +423,7 @@ class BaseItemEntity(BaseEntity): raise InvalidValueType(self.valid_value_types, type(value), self.path) # TODO convert to private method - def _check_update_value(self, value, value_source): + def _check_update_value(self, value, value_source, log_invalid_types=True): """Validation of value on update methods. Update methods update data from currently saved settings so it is @@ -447,16 +451,17 @@ class BaseItemEntity(BaseEntity): if new_value is not NOT_SET: return new_value - # Warning log about invalid value type. - self.log.warning( - ( - "{} Got invalid value type for {} values." - " Expected types: {} | Got Type: {} | Value: \"{}\"" - ).format( - self.path, value_source, - self.valid_value_types, type(value), str(value) + if log_invalid_types: + # Warning log about invalid value type. + self.log.warning( + ( + "{} Got invalid value type for {} values." + " Expected types: {} | Got Type: {} | Value: \"{}\"" + ).format( + self.path, value_source, + self.valid_value_types, type(value), str(value) + ) ) - ) return NOT_SET def available_for_role(self, role_name=None): @@ -985,7 +990,7 @@ class ItemEntity(BaseItemEntity): return self.root_item.get_entity_from_path(path) @abstractmethod - def update_default_value(self, parent_values): + def update_default_value(self, parent_values, log_invalid_types=True): """Fill default values on startup or on refresh. 
Default values stored in `openpype` repository should update all items @@ -995,11 +1000,13 @@ class ItemEntity(BaseItemEntity): Args: parent_values (dict): Values of parent's item. But in case item is used as widget, `parent_values` contain value for item. + log_invalid_types (bool): Log invalid type of value. Used when + entity can have children with same keys and different types. """ pass @abstractmethod - def update_studio_value(self, parent_values): + def update_studio_value(self, parent_values, log_invalid_types=True): """Fill studio override values on startup or on refresh. Set studio value if is not set to NOT_SET, in that case studio @@ -1008,11 +1015,13 @@ class ItemEntity(BaseItemEntity): Args: parent_values (dict): Values of parent's item. But in case item is used as widget, `parent_values` contain value for item. + log_invalid_types (bool): Log invalid type of value. Used when + entity can have children with same keys and different types. """ pass @abstractmethod - def update_project_value(self, parent_values): + def update_project_value(self, parent_values, log_invalid_types=True): """Fill project override values on startup, refresh or project change. Set project value if is not set to NOT_SET, in that case project @@ -1021,5 +1030,7 @@ class ItemEntity(BaseItemEntity): Args: parent_values (dict): Values of parent's item. But in case item is used as widget, `parent_values` contain value for item. + log_invalid_types (bool): Log invalid type of value. Used when + entity can have children with same keys and different types. """ pass diff --git a/openpype/settings/entities/dict_conditional.py b/openpype/settings/entities/dict_conditional.py index 19f326aea7..88d2dc8296 100644 --- a/openpype/settings/entities/dict_conditional.py +++ b/openpype/settings/entities/dict_conditional.py @@ -518,12 +518,18 @@ class DictConditionalEntity(ItemEntity): output.update(self._current_metadata) return output - def _prepare_value(self, value): + def _prepare_value(self, value, log_invalid_types): if value is NOT_SET or self.enum_key not in value: return NOT_SET, NOT_SET enum_value = value.get(self.enum_key) if enum_value not in self.non_gui_children: + if log_invalid_types: + self.log.warning( + "{} Unknown enum key in default values: {}".format( + self.path, enum_value + ) + ) return NOT_SET, NOT_SET # Create copy of value before poping values @@ -551,22 +557,25 @@ class DictConditionalEntity(ItemEntity): return value, metadata - def update_default_value(self, value): + def update_default_value(self, value, log_invalid_types=True): """Update default values. Not an api method, should be called by parent. 
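The `log_invalid_types` flag threaded through these `update_*` methods exists for one case, spelled out in the docstrings: a conditional dict holds children for several enum branches that may reuse keys with different value types, and only the branch matching the stored enum value should emit "invalid value type" warnings. A condensed, self-contained illustration of the suppression pattern used by `DictConditionalEntity` below (simplified stand-ins, not the real entity classes; real children are entity objects, not bare types):

```python
# Condensed illustration of the branch-aware warning suppression: only
# children under the enum branch that matches the stored value may log
# type warnings; every other branch stays quiet.
import logging

log = logging.getLogger("settings")
NOT_SET = object()


def update_branches(non_gui_children, enum_value, value, log_invalid_types=True):
    for enum_key, children_by_key in non_gui_children.items():
        # Mirrors `_log_invalid_types = enum_key == enum_value` in the diff.
        _log = log_invalid_types and enum_key == enum_value
        for key, expected_type in children_by_key.items():
            child_value = value.get(key, NOT_SET)
            if child_value is NOT_SET or isinstance(child_value, expected_type):
                continue
            if _log:
                log.warning("%s: expected %s, got %s",
                            key, expected_type, type(child_value))


branches = {"number": {"value": int}, "string": {"value": str}}
update_branches(branches, "number", {"value": 1.5})
# -> warns for the active "number" branch only; the "string" branch is silent
```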
""" - value = self._check_update_value(value, "default") + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) self.has_default_value = value is not NOT_SET # TODO add value validation - value, metadata = self._prepare_value(value) + value, metadata = self._prepare_value(value, log_invalid_types) self._default_metadata = metadata if value is NOT_SET: - self.enum_entity.update_default_value(value) + self.enum_entity.update_default_value(value, log_invalid_types) for children_by_key in self.non_gui_children.values(): for child_obj in children_by_key.values(): - child_obj.update_default_value(value) + child_obj.update_default_value(value, log_invalid_types) return value_keys = set(value.keys()) @@ -574,7 +583,7 @@ class DictConditionalEntity(ItemEntity): expected_keys = set(self.non_gui_children[enum_value].keys()) expected_keys.add(self.enum_key) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in default values: {}".format( self.path, @@ -582,28 +591,37 @@ class DictConditionalEntity(ItemEntity): ) ) - self.enum_entity.update_default_value(enum_value) - for children_by_key in self.non_gui_children.values(): + self.enum_entity.update_default_value(enum_value, log_invalid_types) + + for enum_key, children_by_key in self.non_gui_children.items(): + _log_invalid_types = log_invalid_types + if _log_invalid_types: + _log_invalid_types = enum_key == enum_value + value_copy = copy.deepcopy(value) for key, child_obj in children_by_key.items(): child_value = value_copy.get(key, NOT_SET) - child_obj.update_default_value(child_value) + child_obj.update_default_value(child_value, _log_invalid_types) - def update_studio_value(self, value): + def update_studio_value(self, value, log_invalid_types=True): """Update studio override values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "studio override") - value, metadata = self._prepare_value(value) + + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) + value, metadata = self._prepare_value(value, log_invalid_types) self._studio_override_metadata = metadata self.had_studio_override = metadata is not NOT_SET if value is NOT_SET: - self.enum_entity.update_studio_value(value) + self.enum_entity.update_studio_value(value, log_invalid_types) for children_by_key in self.non_gui_children.values(): for child_obj in children_by_key.values(): - child_obj.update_studio_value(value) + child_obj.update_studio_value(value, log_invalid_types) return value_keys = set(value.keys()) @@ -611,7 +629,7 @@ class DictConditionalEntity(ItemEntity): expected_keys = set(self.non_gui_children[enum_value]) expected_keys.add(self.enum_key) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in studio overrides: {}".format( self.path, @@ -619,28 +637,36 @@ class DictConditionalEntity(ItemEntity): ) ) - self.enum_entity.update_studio_value(enum_value) - for children_by_key in self.non_gui_children.values(): + self.enum_entity.update_studio_value(enum_value, log_invalid_types) + for enum_key, children_by_key in self.non_gui_children.items(): + _log_invalid_types = log_invalid_types + if _log_invalid_types: + _log_invalid_types = enum_key == enum_value + value_copy = copy.deepcopy(value) for key, child_obj in children_by_key.items(): child_value = value_copy.get(key, NOT_SET) - child_obj.update_studio_value(child_value) + child_obj.update_studio_value(child_value, _log_invalid_types) - def update_project_value(self, value): + def update_project_value(self, value, log_invalid_types=True): """Update project override values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "project override") - value, metadata = self._prepare_value(value) + + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) + value, metadata = self._prepare_value(value, log_invalid_types) self._project_override_metadata = metadata self.had_project_override = metadata is not NOT_SET if value is NOT_SET: - self.enum_entity.update_project_value(value) + self.enum_entity.update_project_value(value, log_invalid_types) for children_by_key in self.non_gui_children.values(): for child_obj in children_by_key.values(): - child_obj.update_project_value(value) + child_obj.update_project_value(value, log_invalid_types) return value_keys = set(value.keys()) @@ -648,7 +674,7 @@ class DictConditionalEntity(ItemEntity): expected_keys = set(self.non_gui_children[enum_value]) expected_keys.add(self.enum_key) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in project overrides: {}".format( self.path, @@ -656,12 +682,16 @@ class DictConditionalEntity(ItemEntity): ) ) - self.enum_entity.update_project_value(enum_value) - for children_by_key in self.non_gui_children.values(): + self.enum_entity.update_project_value(enum_value, log_invalid_types) + for enum_key, children_by_key in self.non_gui_children.items(): + _log_invalid_types = log_invalid_types + if _log_invalid_types: + _log_invalid_types = enum_key == enum_value + value_copy = copy.deepcopy(value) for key, child_obj in children_by_key.items(): child_value = value_copy.get(key, NOT_SET) - child_obj.update_project_value(child_value) + child_obj.update_project_value(child_value, _log_invalid_types) def _discard_changes(self, on_change_trigger): self._ignore_child_changes = True diff --git a/openpype/settings/entities/dict_immutable_keys_entity.py b/openpype/settings/entities/dict_immutable_keys_entity.py index 060f8d522e..0209681e95 100644 --- a/openpype/settings/entities/dict_immutable_keys_entity.py +++ b/openpype/settings/entities/dict_immutable_keys_entity.py @@ -414,12 +414,16 @@ class DictImmutableKeysEntity(ItemEntity): return value, metadata - def update_default_value(self, value): + def update_default_value(self, value, log_invalid_types=True): """Update default values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "default") + + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) self.has_default_value = value is not NOT_SET # TODO add value validation value, metadata = self._prepare_value(value) @@ -427,13 +431,13 @@ class DictImmutableKeysEntity(ItemEntity): if value is NOT_SET: for child_obj in self.non_gui_children.values(): - child_obj.update_default_value(value) + child_obj.update_default_value(value, log_invalid_types) return value_keys = set(value.keys()) expected_keys = set(self.non_gui_children) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in default values: {}".format( self.path, @@ -443,27 +447,31 @@ class DictImmutableKeysEntity(ItemEntity): for key, child_obj in self.non_gui_children.items(): child_value = value.get(key, NOT_SET) - child_obj.update_default_value(child_value) + child_obj.update_default_value(child_value, log_invalid_types) - def update_studio_value(self, value): + def update_studio_value(self, value, log_invalid_types=True): """Update studio override values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "studio override") + + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) value, metadata = self._prepare_value(value) self._studio_override_metadata = metadata self.had_studio_override = metadata is not NOT_SET if value is NOT_SET: for child_obj in self.non_gui_children.values(): - child_obj.update_studio_value(value) + child_obj.update_studio_value(value, log_invalid_types) return value_keys = set(value.keys()) expected_keys = set(self.non_gui_children) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in studio overrides: {}".format( self.path, @@ -472,27 +480,31 @@ class DictImmutableKeysEntity(ItemEntity): ) for key, child_obj in self.non_gui_children.items(): child_value = value.get(key, NOT_SET) - child_obj.update_studio_value(child_value) + child_obj.update_studio_value(child_value, log_invalid_types) - def update_project_value(self, value): + def update_project_value(self, value, log_invalid_types=True): """Update project override values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "project override") + + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) value, metadata = self._prepare_value(value) self._project_override_metadata = metadata self.had_project_override = metadata is not NOT_SET if value is NOT_SET: for child_obj in self.non_gui_children.values(): - child_obj.update_project_value(value) + child_obj.update_project_value(value, log_invalid_types) return value_keys = set(value.keys()) expected_keys = set(self.non_gui_children) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in project overrides: {}".format( self.path, @@ -502,7 +514,7 @@ class DictImmutableKeysEntity(ItemEntity): for key, child_obj in self.non_gui_children.items(): child_value = value.get(key, NOT_SET) - child_obj.update_project_value(child_value) + child_obj.update_project_value(child_value, log_invalid_types) def _discard_changes(self, on_change_trigger): self._ignore_child_changes = True @@ -694,37 +706,48 @@ class RootsDictEntity(DictImmutableKeysEntity): self._metadata_are_modified = False self._current_metadata = {} - def update_default_value(self, value): + def update_default_value(self, value, log_invalid_types=True): """Update default values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "default") + + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) value, _ = self._prepare_value(value) self._default_value = value self._default_metadata = {} self.has_default_value = value is not NOT_SET - def update_studio_value(self, value): + def update_studio_value(self, value, log_invalid_types=True): """Update studio override values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "studio override") + + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) value, _ = self._prepare_value(value) self._studio_value = value self._studio_override_metadata = {} self.had_studio_override = value is not NOT_SET - def update_project_value(self, value): + def update_project_value(self, value, log_invalid_types=True): """Update project override values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "project override") + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) value, _metadata = self._prepare_value(value) self._project_value = value @@ -886,37 +909,48 @@ class SyncServerSites(DictImmutableKeysEntity): self._metadata_are_modified = False self._current_metadata = {} - def update_default_value(self, value): + def update_default_value(self, value, log_invalid_types=True): """Update default values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "default") + + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) value, _ = self._prepare_value(value) self._default_value = value self._default_metadata = {} self.has_default_value = value is not NOT_SET - def update_studio_value(self, value): + def update_studio_value(self, value, log_invalid_types=True): """Update studio override values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "studio override") + + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) value, _ = self._prepare_value(value) self._studio_value = value self._studio_override_metadata = {} self.had_studio_override = value is not NOT_SET - def update_project_value(self, value): + def update_project_value(self, value, log_invalid_types=True): """Update project override values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "project override") + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) value, _metadata = self._prepare_value(value) self._project_value = value diff --git a/openpype/settings/entities/dict_mutable_keys_entity.py b/openpype/settings/entities/dict_mutable_keys_entity.py index 6b9c0bc7ed..a0c93b97a7 100644 --- a/openpype/settings/entities/dict_mutable_keys_entity.py +++ b/openpype/settings/entities/dict_mutable_keys_entity.py @@ -393,11 +393,15 @@ class DictMutableKeysEntity(EndpointEntity): value = self.value_on_not_set using_values_from_state = False + log_invalid_types = True if state is OverrideState.PROJECT: + log_invalid_types = self._project_log_invalid_types using_values_from_state = using_project_overrides elif state is OverrideState.STUDIO: + log_invalid_types = self._studio_log_invalid_types using_values_from_state = using_studio_overrides elif state is OverrideState.DEFAULTS: + log_invalid_types = self._default_log_invalid_types using_values_from_state = using_default_values new_value = copy.deepcopy(value) @@ -437,11 +441,11 @@ class DictMutableKeysEntity(EndpointEntity): if not label: label = metadata_labels.get(new_key) - child_entity.update_default_value(_value) + child_entity.update_default_value(_value, log_invalid_types) if using_project_overrides: - child_entity.update_project_value(_value) + child_entity.update_project_value(_value, log_invalid_types) elif using_studio_overrides: - child_entity.update_studio_value(_value) + child_entity.update_studio_value(_value, log_invalid_types) if label: children_label_by_id[child_entity.id] = label @@ -598,8 +602,11 @@ class DictMutableKeysEntity(EndpointEntity): metadata[key] = value.pop(key) return value, metadata - def update_default_value(self, value): - value = self._check_update_value(value, "default") + def update_default_value(self, value, log_invalid_types=True): + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) has_default_value = value is not NOT_SET if has_default_value: for required_key in self.required_keys: @@ -611,15 +618,21 @@ class DictMutableKeysEntity(EndpointEntity): self._default_value = value self._default_metadata = metadata - def update_studio_value(self, value): - value = self._check_update_value(value, "studio override") + def update_studio_value(self, 
value, log_invalid_types=True): + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) value, metadata = self._prepare_value(value) self._studio_override_value = value self._studio_override_metadata = metadata self.had_studio_override = value is not NOT_SET - def update_project_value(self, value): - value = self._check_update_value(value, "project override") + def update_project_value(self, value, log_invalid_types=True): + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) value, metadata = self._prepare_value(value) self._project_override_value = value self._project_override_metadata = metadata @@ -686,9 +699,12 @@ class DictMutableKeysEntity(EndpointEntity): if not self._can_remove_from_project_override: return + log_invalid_types = True if self._has_studio_override: + log_invalid_types = self._studio_log_invalid_types value = self._studio_override_value elif self.has_default_value: + log_invalid_types = self._default_log_invalid_types value = self._default_value else: value = self.value_on_not_set @@ -709,9 +725,9 @@ class DictMutableKeysEntity(EndpointEntity): for _key, _value in new_value.items(): new_key = self._convert_to_regex_valid_key(_key) child_entity = self._add_key(new_key) - child_entity.update_default_value(_value) + child_entity.update_default_value(_value, log_invalid_types) if self._has_studio_override: - child_entity.update_studio_value(_value) + child_entity.update_studio_value(_value, log_invalid_types) label = metadata_labels.get(_key) if label: diff --git a/openpype/settings/entities/input_entities.py b/openpype/settings/entities/input_entities.py index 7512d7bfcc..3dcd238672 100644 --- a/openpype/settings/entities/input_entities.py +++ b/openpype/settings/entities/input_entities.py @@ -90,18 +90,27 @@ class EndpointEntity(ItemEntity): def require_restart(self): return self.has_unsaved_changes - def update_default_value(self, value): - value = self._check_update_value(value, "default") + def update_default_value(self, value, log_invalid_types=True): + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) self._default_value = value self.has_default_value = value is not NOT_SET - def update_studio_value(self, value): - value = self._check_update_value(value, "studio override") + def update_studio_value(self, value, log_invalid_types=True): + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) self._studio_override_value = value self.had_studio_override = bool(value is not NOT_SET) - def update_project_value(self, value): - value = self._check_update_value(value, "project override") + def update_project_value(self, value, log_invalid_types=True): + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) self._project_override_value = value self.had_project_override = bool(value is not NOT_SET) @@ -590,22 +599,26 @@ class RawJsonEntity(InputEntity): metadata[key] = value.pop(key) return value, metadata - def update_default_value(self, value): - value = self._check_update_value(value, "default") + def update_default_value(self, value, log_invalid_types=True): + value = self._check_update_value(value, "default", log_invalid_types) self.has_default_value = value 
is not NOT_SET value, metadata = self._prepare_value(value) self._default_value = value self.default_metadata = metadata - def update_studio_value(self, value): - value = self._check_update_value(value, "studio override") + def update_studio_value(self, value, log_invalid_types=True): + value = self._check_update_value( + value, "studio override", log_invalid_types + ) self.had_studio_override = value is not NOT_SET value, metadata = self._prepare_value(value) self._studio_override_value = value self.studio_override_metadata = metadata - def update_project_value(self, value): - value = self._check_update_value(value, "project override") + def update_project_value(self, value, log_invalid_types=True): + value = self._check_update_value( + value, "project override", log_invalid_types + ) self.had_project_override = value is not NOT_SET value, metadata = self._prepare_value(value) self._project_override_value = value diff --git a/openpype/settings/entities/item_entities.py b/openpype/settings/entities/item_entities.py index 9c6f428b97..3b756e4ede 100644 --- a/openpype/settings/entities/item_entities.py +++ b/openpype/settings/entities/item_entities.py @@ -173,14 +173,17 @@ class PathEntity(ItemEntity): self._ignore_missing_defaults = ignore_missing_defaults self.child_obj.set_override_state(state, ignore_missing_defaults) - def update_default_value(self, value): - self.child_obj.update_default_value(value) + def update_default_value(self, value, log_invalid_types=True): + self._default_log_invalid_types = log_invalid_types + self.child_obj.update_default_value(value, log_invalid_types) - def update_project_value(self, value): - self.child_obj.update_project_value(value) + def update_project_value(self, value, log_invalid_types=True): + self._studio_log_invalid_types = log_invalid_types + self.child_obj.update_project_value(value, log_invalid_types) - def update_studio_value(self, value): - self.child_obj.update_studio_value(value) + def update_studio_value(self, value, log_invalid_types=True): + self._project_log_invalid_types = log_invalid_types + self.child_obj.update_studio_value(value, log_invalid_types) def _discard_changes(self, *args, **kwargs): self.child_obj.discard_changes(*args, **kwargs) @@ -472,9 +475,9 @@ class ListStrictEntity(ItemEntity): self._has_project_override = False - def _check_update_value(self, value, value_type): + def _check_update_value(self, value, value_type, log_invalid_types=True): value = super(ListStrictEntity, self)._check_update_value( - value, value_type + value, value_type, log_invalid_types ) if value is NOT_SET: return value @@ -484,15 +487,16 @@ class ListStrictEntity(ItemEntity): if value_len == child_len: return value - self.log.warning( - ( - "{} Amount of strict list items in {} values is" - " not same as expected. Expected {} items. Got {} items. {}" - ).format( - self.path, value_type, - child_len, value_len, str(value) + if log_invalid_types: + self.log.warning( + ( + "{} Amount of strict list items in {} values is not same" + " as expected. Expected {} items. Got {} items. 
{}" + ).format( + self.path, value_type, + child_len, value_len, str(value) + ) ) - ) if value_len < child_len: # Fill missing values with NOT_SET @@ -504,36 +508,51 @@ class ListStrictEntity(ItemEntity): value.pop(child_len) return value - def update_default_value(self, value): - value = self._check_update_value(value, "default") + def update_default_value(self, value, log_invalid_types=True): + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) self.has_default_value = value is not NOT_SET if value is NOT_SET: for child_obj in self.children: - child_obj.update_default_value(value) + child_obj.update_default_value(value, log_invalid_types) else: for idx, item_value in enumerate(value): - self.children[idx].update_default_value(item_value) + self.children[idx].update_default_value( + item_value, log_invalid_types + ) - def update_studio_value(self, value): - value = self._check_update_value(value, "studio override") + def update_studio_value(self, value, log_invalid_types=True): + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) if value is NOT_SET: for child_obj in self.children: - child_obj.update_studio_value(value) + child_obj.update_studio_value(value, log_invalid_types) else: for idx, item_value in enumerate(value): - self.children[idx].update_studio_value(item_value) + self.children[idx].update_studio_value( + item_value, log_invalid_types + ) - def update_project_value(self, value): - value = self._check_update_value(value, "project override") + def update_project_value(self, value, log_invalid_types=True): + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) if value is NOT_SET: for child_obj in self.children: - child_obj.update_project_value(value) + child_obj.update_project_value(value, log_invalid_types) else: for idx, item_value in enumerate(value): - self.children[idx].update_project_value(item_value) + self.children[idx].update_project_value( + item_value, log_invalid_types + ) def reset_callbacks(self): super(ListStrictEntity, self).reset_callbacks() diff --git a/openpype/settings/entities/list_entity.py b/openpype/settings/entities/list_entity.py index 0268c208bb..5d6a64b3ea 100644 --- a/openpype/settings/entities/list_entity.py +++ b/openpype/settings/entities/list_entity.py @@ -325,16 +325,24 @@ class ListEntity(EndpointEntity): for item in value: child_obj = self._add_new_item() - child_obj.update_default_value(item) + child_obj.update_default_value( + item, self._default_log_invalid_types + ) if self._override_state is OverrideState.PROJECT: if self.had_project_override: - child_obj.update_project_value(item) + child_obj.update_project_value( + item, self._project_log_invalid_types + ) elif self.had_studio_override: - child_obj.update_studio_value(item) + child_obj.update_studio_value( + item, self._studio_log_invalid_types + ) elif self._override_state is OverrideState.STUDIO: if self.had_studio_override: - child_obj.update_studio_value(item) + child_obj.update_studio_value( + item, self._studio_log_invalid_types + ) for child_obj in self.children: child_obj.set_override_state( @@ -466,16 +474,24 @@ class ListEntity(EndpointEntity): for item in value: child_obj = self._add_new_item() - child_obj.update_default_value(item) + child_obj.update_default_value( + item, self._default_log_invalid_types + ) if 
self._override_state is OverrideState.PROJECT: if self.had_project_override: - child_obj.update_project_value(item) + child_obj.update_project_value( + item, self._project_log_invalid_types + ) elif self.had_studio_override: - child_obj.update_studio_value(item) + child_obj.update_studio_value( + item, self._studio_log_invalid_types + ) elif self._override_state is OverrideState.STUDIO: if self.had_studio_override: - child_obj.update_studio_value(item) + child_obj.update_studio_value( + item, self._studio_log_invalid_types + ) child_obj.set_override_state( self._override_state, self._ignore_missing_defaults diff --git a/openpype/settings/entities/schemas/README.md b/openpype/settings/entities/schemas/README.md index fbfd699937..b4bfef2972 100644 --- a/openpype/settings/entities/schemas/README.md +++ b/openpype/settings/entities/schemas/README.md @@ -745,6 +745,7 @@ How output of the schema could look like on save: ### label - add label with note or explanations - it is possible to use html tags inside the label +- set `work_wrap` to `true`/`false` if you want to enable word wrapping in UI (default: `false`) ``` { diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json b/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json index 4c4cd225ab..1a3eaef540 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json @@ -5,6 +5,29 @@ "label": "AfterEffects", "is_file": true, "children": [ + { + "type": "dict", + "collapsible": true, + "key": "create", + "label": "Creator plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "RenderCreator", + "label": "Create render", + "children": [ + { + "type": "list", + "key": "defaults", + "label": "Default Variants", + "object_type": "text", + "docstring": "Fill default variant(s) (like 'Main' or 'Default') used in subset name creation." 
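The "Default Variants" list added for the AfterEffects `RenderCreator` above feeds the subset-name templates configured elsewhere in this diff (e.g. `{family}{variant}`, `SK_{asset}{variant}`). A minimal illustration of that substitution, assuming plain `str.format` semantics; the real resolver supports additional keys and casing variants:

```python
# Assumed minimal example of variant -> subset name substitution.
print("{family}{variant}".format(family="render", variant="Main"))
# -> renderMain

# The Unreal-oriented templates added above prefix the asset name instead:
print("SK_{asset}{variant}".format(asset="hero", variant="Main"))
# -> SK_heroMain
```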
+ } + ] + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index e6097a2b14..cd1741ba8b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -30,6 +30,24 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CollectDeadlinePools", + "label": "Default Deadline Pools", + "children": [ + { + "type": "text", + "key": "primary_pool", + "label": "Primary Pool" + }, + { + "type": "text", + "key": "secondary_pool", + "label": "Secondary Pool" + } + ] + }, { "type": "dict", "collapsible": true, @@ -117,6 +135,16 @@ "key": "asset_dependencies", "label": "Use Asset dependencies" }, + { + "type": "number", + "key": "priority", + "label": "Priority" + }, + { + "type": "number", + "key": "tile_priority", + "label": "Tile Assembler Priority" + }, { "type": "text", "key": "group", @@ -192,6 +220,9 @@ "key": "use_published", "label": "Use Published scene" }, + { + "type": "splitter" + }, { "type": "number", "key": "priority", @@ -203,20 +234,21 @@ "label": "Chunk Size" }, { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" + "type": "number", + "key": "concurrent_tasks", + "label": "Number of concurrent tasks" }, { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" + "type": "splitter" }, { "type": "text", "key": "group", "label": "Group" }, + { + "type": "splitter" + }, { "type": "text", "key": "department", @@ -289,16 +321,6 @@ "key": "chunk_size", "label": "Chunk Size" }, - { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" - }, - { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" - }, { "type": "text", "key": "group", @@ -348,16 +370,6 @@ "key": "chunk_size", "label": "Chunk Size" }, - { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" - }, - { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" - }, { "type": "text", "key": "group", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json index e352f8b132..fe11d63ac2 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json @@ -136,6 +136,87 @@ "key": "publish", "label": "Publish plugins", "children": [ + { + "type": "dict", + "collapsible": true, + "key": "CollectTimelineInstances", + "label": "Collect Timeline Instances", + "is_group": true, + "children": [ + { + "type": "collapsible-wrap", + "label": "XML presets attributes parsable from segment comments", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "xml_preset_attrs_from_comments", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "name", + "label": "Attribute name" + }, + { + "key": "type", + "label": "Attribute type", + "type": "enum", + "default": "number", + "enum_items": [ + { + "number": "number" + }, + { + "float": "float" + }, + { + "string": "string" + } + ] + } + ] + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Add tasks", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "add_tasks", + 
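For context on `xml_preset_attrs_from_comments` defined just above: each entry names an attribute expected in a Flame segment comment and the type it should be cast to (`number`, `float` or `string`). A hedged sketch of that cast step — only the attribute/type shape comes from the setting; the `name:value` comment syntax and the helper itself are assumptions:

```python
# Hedged sketch: cast attributes found in a segment comment according to
# the configured definitions. The "name:value" token format is an assumption.
CASTS = {"number": int, "float": float, "string": str}


def parse_comment_attrs(comment, attr_defs):
    configured = {attr["name"]: attr["type"] for attr in attr_defs}
    attrs = {}
    for token in comment.split():
        name, sep, raw = token.partition(":")
        if sep and name in configured:
            attrs[name] = CASTS[configured[name]](raw)
    return attrs


defs = [{"name": "width", "type": "number"}, {"name": "pixelRatio", "type": "float"}]
print(parse_comment_attrs("width:1920 pixelRatio:1.5", defs))
# -> {'width': 1920, 'pixelRatio': 1.5}
```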
"object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "name", + "label": "Task name" + }, + { + "key": "type", + "label": "Task type", + "multiselection": false, + "type": "task-types-enum" + }, + { + "type": "boolean", + "key": "create_batch_group", + "label": "Create batch group" + } + ] + } + } + ] + } + ] + }, { "type": "dict", "collapsible": true, @@ -221,6 +302,20 @@ "type": "text", "multiline": false } + }, + { + "type": "separator" + }, + { + "type": "boolean", + "key": "load_to_batch_group", + "label": "Load to batch group reel", + "default": false + }, + { + "type": "text", + "key": "batch_group_loader_name", + "label": "Use loader name" } ] } @@ -281,6 +376,48 @@ "label": "Clip name template" } ] + }, + { + "type": "dict", + "collapsible": true, + "key": "LoadClipBatch", + "label": "Load as clip to current batch", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "families", + "label": "Families", + "object_type": "text" + }, + { + "type": "list", + "key": "representations", + "label": "Representations", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "reel_name", + "label": "Reel name" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "clip_name_template", + "label": "Clip name template" + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index cb59e9d67e..47effb3dbd 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -725,6 +725,31 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "CollectFtrackCustomAttributeData", + "label": "Collect Custom Attribute Data", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Collect custom attributes from ftrack for ftrack entities that can be used in some templates during publishing." + }, + { + "type": "list", + "key": "custom_attribute_keys", + "label": "Custom attribute keys", + "object_type": "text" + } + ] + }, { "type": "dict", "collapsible": true, @@ -738,10 +763,15 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "label", + "label": "Template may contain formatting keys intent, comment, host_name, app_name, app_label and published_paths." + }, { "type": "text", - "key": "note_with_intent_template", - "label": "Note with intent template" + "key": "note_template", + "label": "Note template", + "multiline": true }, { "type": "list", @@ -751,6 +781,44 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateFtrackDescription", + "label": "Integrate Ftrack Description", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Add description to integrated AssetVersion." + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "type": "label", + "label": "Template may contain formatting keys intent and comment." 
+ }, + { + "type": "text", + "key": "description_template", + "label": "Description template" + } + ] + }, { "type": "dict", "collapsible": true, @@ -784,6 +852,12 @@ "object_type": { "type": "text" } + }, + { + "type": "boolean", + "key": "keep_first_subset_name_for_review", + "label": "Make subset name as first asset name", + "default": true } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json b/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json index b499ccc4be..badf94229b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json @@ -42,7 +42,7 @@ "children": [ { "type": "label", - "label": "Set color for publishable layers, set its resulting family and template for subset name. Can create flatten image from published instances" + "label": "Set color for publishable layers, set its resulting family and template for subset name. \nCan create flatten image from published instances.(Applicable only for remote publishing!)" }, { "type": "boolean", @@ -108,6 +108,23 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CollectInstances", + "label": "Collect Instances", + "children": [ + { + "type": "label", + "label": "Name for flatten image created if no image instance present" + }, + { + "type": "text", + "key": "flatten_subset_template", + "label": "Subset template for flatten image" + } + ] + }, { "type": "schema_template", "name": "template_publish_plugin", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_slack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_slack.json index 14814d8b01..1a9804cd4f 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_slack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_slack.json @@ -75,6 +75,15 @@ "type": "list", "object_type": "text" }, + { + "type": "number", + "key": "review_upload_limit", + "label": "Upload review maximum file size (MB)", + "decimal": 2, + "default": 50, + "minimum": 0, + "maximum": 1000000 + }, { "type": "separator" }, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json index 8286ed1193..20fe5b0855 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json @@ -16,6 +16,30 @@ "key": "publish", "label": "Publish plugins", "children": [ + { + "type": "dict", + "collapsible": true, + "key": "CollectRenderScene", + "label": "Collect Render Scene", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "It is possible to fill 'render_layer' or 'variant' in subset name template with custom value.
- value of 'render_pass' is always \"beauty\"." + }, + { + "type": "text", + "key": "render_layer", + "label": "Render Layer" + } + ] + }, { "type": "dict", "collapsible": true, @@ -78,6 +102,47 @@ "docstring": "Validate if shot on instances metadata is same as workfiles shot" } ] + }, + { + "type": "dict", + "key": "ExtractConvertToEXR", + "label": "Extract Convert To EXR", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "WARNING: This plugin does not work on MacOS (using OIIO tool)." + }, + { + "type": "boolean", + "key": "replace_pngs", + "label": "Replace source PNG" + }, + { + "type": "enum", + "key": "exr_compression", + "label": "EXR Compression", + "multiselection": false, + "enum_items": [ + {"ZIP": "ZIP"}, + {"ZIPS": "ZIPS"}, + {"DWAA": "DWAA"}, + {"DWAB": "DWAB"}, + {"PIZ": "PIZ"}, + {"RLE": "RLE"}, + {"PXR24": "PXR24"}, + {"B44": "B44"}, + {"B44A": "B44A"}, + {"none": "None"} + ] + } + ] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index 12043d4205..061874e31c 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -122,32 +122,6 @@ } ] }, - { - "type": "dict", - "collapsible": true, - "checkbox_key": "enabled", - "key": "IntegrateHeroVersion", - "label": "IntegrateHeroVersion", - "is_group": true, - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "boolean", - "key": "optional", - "label": "Optional" - }, - { - "key": "families", - "label": "Families", - "type": "list", - "object_type": "text" - } - ] - }, { "type": "dict", "collapsible": true, @@ -652,6 +626,80 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateHeroVersion", + "label": "IntegrateHeroVersion", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "list", + "key": "template_name_profiles", + "label": "Template name profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template_name", + "label": "Template name", + "tooltip": "Name of template from Anatomy templates" + } + ] + } + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json index 0544b4bab7..6dc10ed2a5 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json +++ 
b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json @@ -97,6 +97,32 @@ } ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateUnrealSkeletalMesh", + "label": "Create Unreal - Skeletal Mesh", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + }, + { + "type": "text", + "key": "joint_hints", + "label": "Joint root hint" + } + ] + }, { "type": "schema_template", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json index 1636a8d700..4a796f1933 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json @@ -138,6 +138,21 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "ExtractReviewData", + "label": "ExtractReviewData", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "dict", "collapsible": true, @@ -208,9 +223,10 @@ "type": "separator" }, { - "type": "text", - "key": "extension", - "label": "File extension" + "type": "boolean", + "key": "read_raw", + "label": "Read colorspace RAW", + "default": false }, { "type": "text", @@ -227,12 +243,6 @@ "key": "bake_viewer_input_process", "label": "Bake Viewer Input Process (LUTs)" }, - { - "key": "add_tags", - "label": "Add additional tags to representations", - "type": "list", - "object_type": "text" - }, { "type": "separator" }, @@ -246,7 +256,7 @@ "type": "collapsible-wrap", "label": "Reformat Node Knobs", "collapsible": true, - "collapsed": false, + "collapsed": true, "children": [ { "type": "list", @@ -347,6 +357,20 @@ } } ] + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "extension", + "label": "Write node file type" + }, + { + "key": "add_tags", + "label": "Add additional tags to representations", + "type": "list", + "object_type": "text" } ] } @@ -365,6 +389,59 @@ "type": "boolean", "key": "viewer_lut_raw", "label": "Viewer LUT raw" + }, + { + "type": "separator" + }, + { + "type": "label", + "label": "Fill specific slate node values with templates. 
Uncheck the checkbox to not change the value.", + "word_wrap": true + }, + { + "type": "dict", + "key": "key_value_mapping", + "children": [ + { + "type": "list-strict", + "key": "f_submission_note", + "label": "Submission Note:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + }, + { + "type": "list-strict", + "key": "f_submitting_for", + "label": "Submission For:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + }, + { + "type": "list-strict", + "key": "f_vfx_scope_of_work", + "label": "VFX Scope Of Work:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + } + ] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json index 7607e1a8c1..484fbf9d07 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json @@ -24,6 +24,9 @@ }, { "sequence": "Output as image sequence" + }, + { + "no-audio": "Do not add audio" } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_color.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_color.json deleted file mode 100644 index af8fd9dae4..0000000000 --- a/openpype/settings/entities/schemas/projects_schema/schemas/template_color.json +++ /dev/null @@ -1,30 +0,0 @@ -[ - { - "type": "list-strict", - "key": "{name}", - "label": "{label}", - "object_types": [ - { - "label": "Red", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - }, - { - "label": "Green", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - }, - { - "label": "Blue", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - } - ] - } -] diff --git a/openpype/settings/entities/schemas/system_schema/schema_general.json b/openpype/settings/entities/schemas/system_schema/schema_general.json index 6306317df8..0090c54386 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_general.json +++ b/openpype/settings/entities/schemas/system_schema/schema_general.json @@ -40,6 +40,11 @@ { "type": "splitter" }, + { + "type": "boolean", + "key": "log_to_server", + "label": "Log to mongo" + }, { "type": "dict", "key": "disk_mapping", @@ -110,6 +115,17 @@ { "type": "splitter" }, + { + "type": "list", + "key": "local_env_white_list", + "label": "Local overrides of environment variable keys", + "tooltip": "Environment variable keys that can be changed per machine using Local settings UI.\nKey changes are applied only on applications and tools environments.", + "use_label_wrap": true, + "object_type": "text" + }, + { + "type": "splitter" + }, { "type": "collapsible-wrap", "label": "OpenPype deployment control", diff --git a/openpype/settings/entities/schemas/system_schema/schema_tools.json b/openpype/settings/entities/schemas/system_schema/schema_tools.json index 2346bef36d..7962fdd465 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_tools.json +++ b/openpype/settings/entities/schemas/system_schema/schema_tools.json @@ -25,7 +25,30 @@ "key": "variants", "collapsible_key": true, "object_type": { - "type": "raw-json" + "type": "dict", + "children": [ + { + "key": "host_names", + "label": "Hosts", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "app_variants", + "label": "Applications", + 
"type": "apps-enum", + "multiselection": true, + "tooltip": "Applications are not \"live\" and may require to Save and refresh settings UI to update values." + }, + { + "type": "separator" + }, + { + "key": "environment", + "label": "Environments", + "type": "raw-json" + } + ] } } ] diff --git a/openpype/settings/handlers.py b/openpype/settings/handlers.py index 2109b53b09..c99fc6080b 100644 --- a/openpype/settings/handlers.py +++ b/openpype/settings/handlers.py @@ -324,6 +324,7 @@ class MongoSettingsHandler(SettingsHandler): global_general_keys = ( "openpype_path", "admin_password", + "log_to_server", "disk_mapping", "production_version", "staging_version" @@ -337,7 +338,7 @@ class MongoSettingsHandler(SettingsHandler): def __init__(self): # Get mongo connection from openpype.lib import OpenPypeMongoConnection - from avalon.api import AvalonMongoDB + from openpype.pipeline import AvalonMongoDB settings_collection = OpenPypeMongoConnection.get_mongo_client() diff --git a/openpype/settings/lib.py b/openpype/settings/lib.py index 1d303564d5..937329b417 100644 --- a/openpype/settings/lib.py +++ b/openpype/settings/lib.py @@ -265,11 +265,43 @@ def save_project_anatomy(project_name, anatomy_data): raise SaveWarningExc(warnings) +def _system_settings_backwards_compatible_conversion(studio_overrides): + # Backwards compatibility of tools 3.9.1 - 3.9.2 to keep + # "tools" environments + if ( + "tools" in studio_overrides + and "tool_groups" in studio_overrides["tools"] + ): + tool_groups = studio_overrides["tools"]["tool_groups"] + for tool_group, group_value in tool_groups.items(): + if tool_group in METADATA_KEYS: + continue + + variants = group_value.get("variants") + if not variants: + continue + + for key in set(variants.keys()): + if key in METADATA_KEYS: + continue + + variant_value = variants[key] + if "environment" not in variant_value: + variants[key] = { + "environment": variant_value + } + + @require_handler def get_studio_system_settings_overrides(return_version=False): - return _SETTINGS_HANDLER.get_studio_system_settings_overrides( + output = _SETTINGS_HANDLER.get_studio_system_settings_overrides( return_version ) + value = output + if return_version: + value, version = output + _system_settings_backwards_compatible_conversion(value) + return output @require_handler @@ -1081,6 +1113,14 @@ def get_general_environments(): clear_metadata_from_settings(environments) + whitelist_envs = result["general"].get("local_env_white_list") + if whitelist_envs: + local_settings = get_local_settings() + local_envs = local_settings.get("environments") or {} + for key, value in local_envs.items(): + if key in whitelist_envs and key in environments: + environments[key] = value + return environments diff --git a/openpype/style/data.json b/openpype/style/data.json index a76a77015b..15d9472e3e 100644 --- a/openpype/style/data.json +++ b/openpype/style/data.json @@ -61,7 +61,11 @@ "icon-entity-default": "#bfccd6", "icon-entity-disabled": "#808080", "font-entity-deprecated": "#666666", - + "overlay-messages": { + "close-btn": "#D3D8DE", + "bg-success": "#458056", + "bg-success-hover": "#55a066" + }, "tab-widget": { "bg": "#21252B", "bg-selected": "#434a56", diff --git a/openpype/style/style.css b/openpype/style/style.css index df83600973..f2b0cdd6ac 100644 --- a/openpype/style/style.css +++ b/openpype/style/style.css @@ -687,6 +687,26 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { background: none; } +/* Messages overlay */ +#OverlayMessageWidget { + border-radius: 0.2em; + background: 
{color:bg-buttons}; +} + +#OverlayMessageWidget:hover { + background: {color:bg-button-hover}; +} +#OverlayMessageWidget { + background: {color:overlay-messages:bg-success}; +} +#OverlayMessageWidget:hover { + background: {color:overlay-messages:bg-success-hover}; +} + +#OverlayMessageWidget QWidget { + background: transparent; +} + /* Password dialog*/ #PasswordBtn { border: none; @@ -1269,6 +1289,14 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { background: #21252B; } +/* Workfiles */ +#WorkfilesPublishedContextSelect { + background: rgba(0, 0, 0, 127); +} +#WorkfilesPublishedContextSelect QLabel { + font-size: 17pt; +} + /* Tray */ #TrayRestartButton { background: {color:restart-btn-bg}; diff --git a/openpype/tests/test_avalon_plugin_presets.py b/openpype/tests/test_avalon_plugin_presets.py index f1b1a94713..464c216d6f 100644 --- a/openpype/tests/test_avalon_plugin_presets.py +++ b/openpype/tests/test_avalon_plugin_presets.py @@ -1,6 +1,9 @@ -import avalon.api as api -import openpype -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + install_host, + LegacyCreator, + register_creator_plugin, + discover_creator_plugins, +) class MyTestCreator(LegacyCreator): @@ -19,16 +22,15 @@ class Test: __name__ = "test" ls = len - def __call__(self): - pass + @staticmethod + def install(): + register_creator_plugin(MyTestCreator) def test_avalon_plugin_presets(monkeypatch, printer): + install_host(Test) - openpype.install() - api.register_host(Test()) - api.register_plugin(LegacyCreator, MyTestCreator) - plugins = api.discover(LegacyCreator) + plugins = discover_creator_plugins() printer("Test if we got our test plugin") assert MyTestCreator in plugins for p in plugins: diff --git a/openpype/tools/adobe_webserver/app.py b/openpype/tools/adobe_webserver/app.py index b79d6c6c60..3911baf7ac 100644 --- a/openpype/tools/adobe_webserver/app.py +++ b/openpype/tools/adobe_webserver/app.py @@ -16,7 +16,7 @@ from wsrpc_aiohttp import ( WSRPCClient ) -from avalon import api +from openpype.pipeline import legacy_io log = logging.getLogger(__name__) @@ -80,9 +80,9 @@ class WebServerTool: loop=asyncio.get_event_loop()) await client.connect() - project = api.Session["AVALON_PROJECT"] - asset = api.Session["AVALON_ASSET"] - task = api.Session["AVALON_TASK"] + project = legacy_io.Session["AVALON_PROJECT"] + asset = legacy_io.Session["AVALON_ASSET"] + task = legacy_io.Session["AVALON_TASK"] log.info("Sending context change to {}-{}-{}".format(project, asset, task)) diff --git a/openpype/tools/context_dialog/window.py b/openpype/tools/context_dialog/window.py index c8464faa3e..3b544bd375 100644 --- a/openpype/tools/context_dialog/window.py +++ b/openpype/tools/context_dialog/window.py @@ -2,9 +2,9 @@ import os import json from Qt import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB from openpype import style +from openpype.pipeline import AvalonMongoDB from openpype.tools.utils.lib import center_window from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from openpype.tools.utils.constants import ( @@ -308,7 +308,6 @@ class ContextDialog(QtWidgets.QDialog): self._validate_strict() def _set_asset_to_tasks_widget(self): - # filter None docs they are silo asset_id = self._assets_widget.get_selected_asset_id() self._tasks_widget.set_asset_id(asset_id) diff --git a/openpype/tools/creator/model.py b/openpype/tools/creator/model.py index ef61c6e0f0..d3d60b96f2 100644 --- a/openpype/tools/creator/model.py +++ b/openpype/tools/creator/model.py @@ 
-1,8 +1,7 @@ import uuid from Qt import QtGui, QtCore -from avalon import api -from openpype.pipeline import LegacyCreator +from openpype.pipeline import discover_legacy_creator_plugins from . constants import ( FAMILY_ROLE, @@ -22,7 +21,7 @@ class CreatorsModel(QtGui.QStandardItemModel): self._creators_by_id = {} items = [] - creators = api.discover(LegacyCreator) + creators = discover_legacy_creator_plugins() for creator in creators: item_id = str(uuid.uuid4()) self._creators_by_id[item_id] = creator diff --git a/openpype/tools/creator/window.py b/openpype/tools/creator/window.py index 51cc66e715..e0c329fb78 100644 --- a/openpype/tools/creator/window.py +++ b/openpype/tools/creator/window.py @@ -4,16 +4,14 @@ import re from Qt import QtWidgets, QtCore -from avalon import api, io - from openpype import style from openpype.api import get_current_project_settings from openpype.tools.utils.lib import qt_app_context +from openpype.pipeline import legacy_io from openpype.pipeline.create import ( SUBSET_NAME_ALLOWED_SYMBOLS, legacy_create, CreatorError, - LegacyCreator, ) from .model import CreatorsModel @@ -220,7 +218,7 @@ class CreatorWindow(QtWidgets.QDialog): asset_doc = None if creator_plugin: # Get the asset from the database which match with the name - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"name": asset_name, "type": "asset"}, projection={"_id": 1} ) @@ -237,9 +235,9 @@ class CreatorWindow(QtWidgets.QDialog): self._set_valid_state(False) return - project_name = io.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] asset_id = asset_doc["_id"] - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] # Calculate subset name with Creator plugin subset_name = creator_plugin.get_subset_name( @@ -271,7 +269,7 @@ class CreatorWindow(QtWidgets.QDialog): self._subset_name_input.setText(subset_name) # Get all subsets of the current asset - subset_docs = io.find( + subset_docs = legacy_io.find( { "type": "subset", "parent": asset_id @@ -372,7 +370,7 @@ class CreatorWindow(QtWidgets.QDialog): self.setStyleSheet(style.load_stylesheet()) def refresh(self): - self._asset_name_input.setText(io.Session["AVALON_ASSET"]) + self._asset_name_input.setText(legacy_io.Session["AVALON_ASSET"]) self._creators_model.reset() @@ -385,7 +383,7 @@ class CreatorWindow(QtWidgets.QDialog): ) current_index = None family = None - task_name = io.Session.get("AVALON_TASK", None) + task_name = legacy_io.Session.get("AVALON_TASK", None) lowered_task_name = task_name.lower() if task_name: for _family, _task_names in pype_project_setting.items(): @@ -471,7 +469,7 @@ class CreatorWindow(QtWidgets.QDialog): self._msg_timer.start() -def show(debug=False, parent=None): +def show(parent=None): """Display asset creator GUI Arguments: @@ -488,24 +486,6 @@ def show(debug=False, parent=None): except (AttributeError, RuntimeError): pass - if debug: - from avalon import mock - for creator in mock.creators: - api.register_plugin(LegacyCreator, creator) - - import traceback - sys.excepthook = lambda typ, val, tb: traceback.print_last() - - io.install() - - any_project = next( - project for project in io.projects() - if project.get("active", True) is not False - ) - - api.Session["AVALON_PROJECT"] = any_project["name"] - module.project = any_project["name"] - with qt_app_context(): window = CreatorWindow(parent) window.refresh() diff --git a/openpype/tools/launcher/actions.py b/openpype/tools/launcher/actions.py index fbaef05261..546bda1c34 100644 --- 
a/openpype/tools/launcher/actions.py +++ b/openpype/tools/launcher/actions.py @@ -1,6 +1,7 @@ import os -from avalon import api +from Qt import QtWidgets, QtGui + from openpype import PLUGINS_DIR from openpype import style from openpype.api import Logger, resources @@ -8,7 +9,10 @@ from openpype.lib import ( ApplictionExecutableNotFound, ApplicationLaunchFailed ) -from Qt import QtWidgets, QtGui +from openpype.pipeline import ( + LauncherAction, + register_launcher_action_path, +) def register_actions_from_paths(paths): @@ -29,14 +33,15 @@ def register_actions_from_paths(paths): print("Path was not found: {}".format(path)) continue - api.register_plugin_path(api.Action, path) + register_launcher_action_path(path) def register_config_actions(): """Register actions from the configuration for Launcher""" actions_dir = os.path.join(PLUGINS_DIR, "actions") - register_actions_from_paths([actions_dir]) + if os.path.exists(actions_dir): + register_actions_from_paths([actions_dir]) def register_environment_actions(): @@ -46,7 +51,9 @@ def register_environment_actions(): register_actions_from_paths(paths_str.split(os.pathsep)) -class ApplicationAction(api.Action): +# TODO move to 'openpype.pipeline.actions' +# - remove Qt related stuff and implement exceptions to show error in launcher +class ApplicationAction(LauncherAction): """Pype's application launcher Application action based on pype's ApplicationManager system. @@ -74,7 +81,7 @@ class ApplicationAction(api.Action): @property def log(self): if self._log is None: - self._log = Logger().get_logger(self.__class__.__name__) + self._log = Logger.get_logger(self.__class__.__name__) return self._log def is_compatible(self, session): diff --git a/openpype/tools/launcher/lib.py b/openpype/tools/launcher/lib.py index 68c759f295..c1392b7b8f 100644 --- a/openpype/tools/launcher/lib.py +++ b/openpype/tools/launcher/lib.py @@ -1,19 +1,3 @@ -"""Utility script for updating database with configuration files - -Until assets are created entirely in the database, this script -provides a bridge between the file-based project inventory and configuration. - -- Migrating an old project: - $ python -m avalon.inventory --extract --silo-parent=f02_prod - $ python -m avalon.inventory --upload - -- Managing an existing project: - 1. Run `python -m avalon.inventory --load` - 2. Update the .inventory.toml or .config.toml - 3. Run `python -m avalon.inventory --save` - -""" - import os from Qt import QtGui import qtawesome diff --git a/openpype/tools/launcher/models.py b/openpype/tools/launcher/models.py index 85d553fca4..13567e7916 100644 --- a/openpype/tools/launcher/models.py +++ b/openpype/tools/launcher/models.py @@ -8,12 +8,13 @@ import time import appdirs from Qt import QtCore, QtGui import qtawesome -from avalon import api + from openpype.lib import JSONSettingRegistry from openpype.lib.applications import ( CUSTOM_LAUNCH_APP_GROUPS, ApplicationManager ) +from openpype.pipeline import discover_launcher_actions from openpype.tools.utils.lib import ( DynamicQThread, get_project_icon, @@ -68,7 +69,7 @@ class ActionModel(QtGui.QStandardItemModel): def discover(self): """Set up Actions cache. 
Run this for each new project.""" # Discover all registered actions - actions = api.discover(api.Action) + actions = discover_launcher_actions() # Get available project actions and the application actions app_actions = self.get_application_actions() diff --git a/openpype/tools/launcher/window.py b/openpype/tools/launcher/window.py index d80b3eabf0..dab6949613 100644 --- a/openpype/tools/launcher/window.py +++ b/openpype/tools/launcher/window.py @@ -3,10 +3,9 @@ import logging from Qt import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB - from openpype import style from openpype.api import resources +from openpype.pipeline import AvalonMongoDB import qtawesome from .models import ( diff --git a/openpype/tools/libraryloader/app.py b/openpype/tools/libraryloader/app.py index 9f8845f30f..7fda6bd6f9 100644 --- a/openpype/tools/libraryloader/app.py +++ b/openpype/tools/libraryloader/app.py @@ -2,22 +2,20 @@ import sys from Qt import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB from openpype import style +from openpype.pipeline import AvalonMongoDB from openpype.tools.utils import lib as tools_lib from openpype.tools.loader.widgets import ( ThumbnailWidget, VersionWidget, FamilyListView, - RepresentationWidget + RepresentationWidget, + SubsetWidget ) from openpype.tools.utils.assets_widget import MultiSelectAssetsWidget from openpype.modules import ModulesManager -from . import lib -from .widgets import LibrarySubsetWidget - module = sys.modules[__name__] module.window = None @@ -92,7 +90,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): # --- Middle part --- # Subsets widget - subsets_widget = LibrarySubsetWidget( + subsets_widget = SubsetWidget( dbcon, self.groups_config, self.family_config_cache, @@ -260,14 +258,6 @@ class LibraryLoaderWindow(QtWidgets.QDialog): self.dbcon.Session["AVALON_PROJECT"] = project_name - _config = lib.find_config() - if hasattr(_config, "install"): - _config.install() - else: - print( - "Config `%s` has no function `install`" % _config.__name__ - ) - self._subsets_widget.on_project_change(project_name) if self._repres_widget: self._repres_widget.on_project_change(project_name) @@ -448,10 +438,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): def _set_context(self, context, refresh=True): """Set the selection in the interface using a context. The context must contain `asset` data by name. - Note: Prior to setting context ensure `refresh` is triggered so that - the "silos" are listed correctly, aside from that setting the - context will force a refresh further down because it changes - the active silo and asset. + Args: context (dict): The context to apply. Returns: @@ -463,12 +450,6 @@ class LibraryLoaderWindow(QtWidgets.QDialog): return if refresh: - # Workaround: - # Force a direct (non-scheduled) refresh prior to setting the - # asset widget's silo and asset selection to ensure it's correctly - # displaying the silo tabs. Calling `window.refresh()` and directly - # `window.set_context()` the `set_context()` seems to override the - # scheduled refresh and the silo tabs are not shown. 
self._refresh_assets() self._assets_widget.select_asset_by_name(asset_name) diff --git a/openpype/tools/libraryloader/lib.py b/openpype/tools/libraryloader/lib.py deleted file mode 100644 index 6a497a6a16..0000000000 --- a/openpype/tools/libraryloader/lib.py +++ /dev/null @@ -1,33 +0,0 @@ -import os -import importlib -import logging -from openpype.api import Anatomy - -log = logging.getLogger(__name__) - - -# `find_config` from `pipeline` -def find_config(): - log.info("Finding configuration for project..") - - config = os.environ["AVALON_CONFIG"] - - if not config: - raise EnvironmentError( - "No configuration found in " - "the project nor environment" - ) - - log.info("Found %s, loading.." % config) - return importlib.import_module(config) - - -class RegisteredRoots: - roots_per_project = {} - - @classmethod - def registered_root(cls, project_name): - if project_name not in cls.roots_per_project: - cls.roots_per_project[project_name] = Anatomy(project_name).roots - - return cls.roots_per_project[project_name] diff --git a/openpype/tools/libraryloader/widgets.py b/openpype/tools/libraryloader/widgets.py deleted file mode 100644 index 45f9ea2048..0000000000 --- a/openpype/tools/libraryloader/widgets.py +++ /dev/null @@ -1,18 +0,0 @@ -from Qt import QtWidgets - -from .lib import RegisteredRoots -from openpype.tools.loader.widgets import SubsetWidget - - -class LibrarySubsetWidget(SubsetWidget): - def on_copy_source(self): - """Copy formatted source path to clipboard""" - source = self.data.get("source", None) - if not source: - return - - project_name = self.dbcon.Session["AVALON_PROJECT"] - root = RegisteredRoots.registered_root(project_name) - path = source.format(root=root) - clipboard = QtWidgets.QApplication.clipboard() - clipboard.setText(path) diff --git a/openpype/tools/loader/__main__.py b/openpype/tools/loader/__main__.py index 146ba7fd10..acf357aa97 100644 --- a/openpype/tools/loader/__main__.py +++ b/openpype/tools/loader/__main__.py @@ -19,12 +19,10 @@ def my_exception_hook(exctype, value, traceback): if __name__ == '__main__': - os.environ["AVALON_MONGO"] = "mongodb://localhost:27017" os.environ["OPENPYPE_MONGO"] = "mongodb://localhost:27017" os.environ["AVALON_DB"] = "avalon" os.environ["AVALON_TIMEOUT"] = "1000" os.environ["OPENPYPE_DEBUG"] = "1" - os.environ["AVALON_CONFIG"] = "pype" os.environ["AVALON_ASSET"] = "Jungle" # Set the exception hook to our wrapping function diff --git a/openpype/tools/loader/app.py b/openpype/tools/loader/app.py index d73a977ac6..bb589c199d 100644 --- a/openpype/tools/loader/app.py +++ b/openpype/tools/loader/app.py @@ -1,10 +1,14 @@ import sys +import traceback from Qt import QtWidgets, QtCore -from avalon import api, io from openpype import style from openpype.lib import register_event_callback +from openpype.pipeline import ( + install_openpype_plugins, + legacy_io, +) from openpype.tools.utils import ( lib, PlaceholderLineEdit @@ -35,14 +39,14 @@ class LoaderWindow(QtWidgets.QDialog): def __init__(self, parent=None): super(LoaderWindow, self).__init__(parent) title = "Asset Loader 2.1" - project_name = api.Session.get("AVALON_PROJECT") + project_name = legacy_io.Session.get("AVALON_PROJECT") if project_name: title += " - {}".format(project_name) self.setWindowTitle(title) # Groups config - self.groups_config = lib.GroupsConfig(io) - self.family_config_cache = lib.FamilyConfigCache(io) + self.groups_config = lib.GroupsConfig(legacy_io) + self.family_config_cache = lib.FamilyConfigCache(legacy_io) # Enable minimize and maximize for app 
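# `legacy_io` is the module-level stand-in for the removed `avalon.io`.
# A standalone sketch of the same bootstrap this patch keeps using in
# `show()` at the end of this file (the project name is hypothetical):
from openpype.pipeline import legacy_io
legacy_io.install()
legacy_io.Session["AVALON_PROJECT"] = "MyProject"
project_doc = legacy_io.find_one({"type": "project"})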
window_flags = QtCore.Qt.Window
@@ -59,13 +63,13 @@
# Assets widget
assets_widget = MultiSelectAssetsWidget(
- io, parent=left_side_splitter
+ legacy_io, parent=left_side_splitter
)
assets_widget.set_current_asset_btn_visibility(True)
# Families widget
families_filter_view = FamilyListView(
- io, self.family_config_cache, left_side_splitter
+ legacy_io, self.family_config_cache, left_side_splitter
)
left_side_splitter.addWidget(assets_widget)
left_side_splitter.addWidget(families_filter_view)
@@ -75,7 +79,7 @@
# --- Middle part ---
# Subsets widget
subsets_widget = SubsetWidget(
- io,
+ legacy_io,
self.groups_config,
self.family_config_cache,
tool_name=self.tool_name,
@@ -86,8 +90,12 @@
thumb_ver_splitter = QtWidgets.QSplitter(main_splitter)
thumb_ver_splitter.setOrientation(QtCore.Qt.Vertical)
- thumbnail_widget = ThumbnailWidget(io, parent=thumb_ver_splitter)
- version_info_widget = VersionWidget(io, parent=thumb_ver_splitter)
+ thumbnail_widget = ThumbnailWidget(
+ legacy_io, parent=thumb_ver_splitter
+ )
+ version_info_widget = VersionWidget(
+ legacy_io, parent=thumb_ver_splitter
+ )
thumb_ver_splitter.addWidget(thumbnail_widget)
thumb_ver_splitter.addWidget(version_info_widget)
@@ -104,7 +112,7 @@
repres_widget = None
if sync_server_enabled:
repres_widget = RepresentationWidget(
- io, self.tool_name, parent=thumb_ver_splitter
+ legacy_io, self.tool_name, parent=thumb_ver_splitter
)
thumb_ver_splitter.addWidget(repres_widget)
@@ -258,13 +266,15 @@
# Refresh families config
self._families_filter_view.refresh()
# Change to context asset on context change
- self._assets_widget.select_asset_by_name(io.Session["AVALON_ASSET"])
+ self._assets_widget.select_asset_by_name(
+ legacy_io.Session["AVALON_ASSET"]
+ )
def _refresh(self):
"""Load assets from database"""
# Ensure a project is loaded
- project = io.find_one({"type": "project"}, {"type": 1})
+ project = legacy_io.find_one({"type": "project"}, {"type": 1})
assert project, "Project was not found! This is a bug"
self._assets_widget.refresh()
@@ -290,7 +300,6 @@
subsets_model.clear()
self.clear_assets_underlines()
- # filter None docs they are silo
asset_ids = self._assets_widget.get_selected_asset_ids()
# Start loading
subsets_widget.set_loading_state(
@@ -381,17 +390,9 @@
The context must contain `asset` data by name.
- Note: Prior to setting context ensure `refresh` is triggered so that
- the "silos" are listed correctly, aside from that setting the
- context will force a refresh further down because it changes
- the active silo and asset.
-
Args:
context (dict): The context to apply.
-
- Returns:
- None
-
+ refresh (bool): Trigger refresh on context set.
"""
asset = context.get("asset", None)
if asset is None:
return
if refresh:
- # Workaround:
- # Force a direct (non-scheduled) refresh prior to setting the
- # asset widget's silo and asset selection to ensure it's correctly
- # displaying the silo tabs. Calling `window.refresh()` and directly
- # `window.set_context()` the `set_context()` seems to override the
- # scheduled refresh and the silo tabs are not shown. 
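# Usage sketch for `set_context`, mirroring what `show()` below does when
# `use_context` is enabled (both lines appear verbatim later in this patch;
# `window` is a LoaderWindow instance):
context = {"asset": legacy_io.Session["AVALON_ASSET"]}
window.set_context(context, refresh=True)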
self._refresh() self._assets_widget.select_asset_by_name(asset) @@ -576,17 +571,16 @@ def show(debug=False, parent=None, use_context=False): module.window = None if debug: - import traceback sys.excepthook = lambda typ, val, tb: traceback.print_last() - io.install() + legacy_io.install() any_project = next( - project for project in io.projects() + project for project in legacy_io.projects() if project.get("active", True) is not False ) - api.Session["AVALON_PROJECT"] = any_project["name"] + legacy_io.Session["AVALON_PROJECT"] = any_project["name"] module.project = any_project["name"] with lib.qt_app_context(): @@ -594,7 +588,7 @@ def show(debug=False, parent=None, use_context=False): window.show() if use_context: - context = {"asset": api.Session["AVALON_ASSET"]} + context = {"asset": legacy_io.Session["AVALON_ASSET"]} window.set_context(context, refresh=True) else: window.refresh() @@ -618,19 +612,11 @@ def cli(args): print("Entering Project: %s" % project) - io.install() + legacy_io.install() # Store settings - api.Session["AVALON_PROJECT"] = project + legacy_io.Session["AVALON_PROJECT"] = project - from avalon import pipeline - - # Find the set config - _config = pipeline.find_config() - if hasattr(_config, "install"): - _config.install() - else: - print("Config `%s` has no function `install`" % - _config.__name__) + install_openpype_plugins(project) show() diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py index 6cc6fae1fb..8cb8f30013 100644 --- a/openpype/tools/loader/model.py +++ b/openpype/tools/loader/model.py @@ -6,8 +6,10 @@ from uuid import uuid4 from Qt import QtCore, QtGui import qtawesome -from avalon import schema -from openpype.pipeline import HeroVersionType +from openpype.pipeline import ( + HeroVersionType, + schema, +) from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, Item diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py index b14bdd0e93..42fb62b632 100644 --- a/openpype/tools/loader/widgets.py +++ b/openpype/tools/loader/widgets.py @@ -7,9 +7,9 @@ import collections from Qt import QtWidgets, QtCore, QtGui -from avalon import api, pipeline - +from openpype.api import Anatomy from openpype.pipeline import HeroVersionType +from openpype.pipeline.thumbnail import get_thumbnail_binary from openpype.pipeline.load import ( discover_loader_plugins, SubsetLoaderPlugin, @@ -640,6 +640,7 @@ class VersionTextEdit(QtWidgets.QTextEdit): "source": None, "raw": None } + self._anatomy = None # Reset self.set_version(None) @@ -730,20 +731,20 @@ class VersionTextEdit(QtWidgets.QTextEdit): # Add additional actions when any text so we can assume # the version is set. 
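# The "Copy source path" action below now resolves roots through `Anatomy`
# instead of the removed `api.registered_root()`. A sketch, assuming the
# stored `source` value uses the usual `{root[...]}` template convention
# (`project_name` and the template string are hypothetical):
from openpype.api import Anatomy
anatomy = Anatomy(project_name)
path = "{root[work]}/shots/sh010/scene.ma".format(root=anatomy.roots)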
if self.toPlainText().strip(): - menu.addSeparator() - action = QtWidgets.QAction("Copy source path to clipboard", - menu) + action = QtWidgets.QAction( + "Copy source path to clipboard", menu + ) action.triggered.connect(self.on_copy_source) menu.addAction(action) - action = QtWidgets.QAction("Copy raw data to clipboard", - menu) + action = QtWidgets.QAction( + "Copy raw data to clipboard", menu + ) action.triggered.connect(self.on_copy_raw) menu.addAction(action) menu.exec_(event.globalPos()) - del menu def on_copy_source(self): """Copy formatted source path to clipboard""" @@ -751,7 +752,11 @@ class VersionTextEdit(QtWidgets.QTextEdit): if not source: return - path = source.format(root=api.registered_root()) + project_name = self.dbcon.Session["AVALON_PROJECT"] + if self._anatomy is None or self._anatomy.project_name != project_name: + self._anatomy = Anatomy(project_name) + + path = source.format(root=self._anatomy.roots) clipboard = QtWidgets.QApplication.clipboard() clipboard.setText(path) @@ -771,7 +776,6 @@ class VersionTextEdit(QtWidgets.QTextEdit): class ThumbnailWidget(QtWidgets.QLabel): - aspect_ratio = (16, 9) max_width = 300 @@ -863,7 +867,7 @@ class ThumbnailWidget(QtWidgets.QLabel): if not thumbnail_ent: return - thumbnail_bin = pipeline.get_thumbnail_binary( + thumbnail_bin = get_thumbnail_binary( thumbnail_ent, "thumbnail", self.dbcon ) if not thumbnail_bin: diff --git a/openpype/tools/mayalookassigner/app.py b/openpype/tools/mayalookassigner/app.py index 0e633a21e3..1b6cad77a8 100644 --- a/openpype/tools/mayalookassigner/app.py +++ b/openpype/tools/mayalookassigner/app.py @@ -4,8 +4,8 @@ import logging from Qt import QtWidgets, QtCore -from avalon import io from openpype import style +from openpype.pipeline import legacy_io from openpype.tools.utils.lib import qt_app_context from openpype.hosts.maya.api.lib import assign_look_by_version @@ -227,9 +227,13 @@ class MayaLookAssignerWindow(QtWidgets.QWidget): continue # Get the latest version of this asset's look subset - version = io.find_one({"type": "version", - "parent": assign_look["_id"]}, - sort=[("name", -1)]) + version = legacy_io.find_one( + { + "type": "version", + "parent": assign_look["_id"] + }, + sort=[("name", -1)] + ) subset_name = assign_look["name"] self.echo("{} Assigning {} to {}\t".format(prefix, diff --git a/openpype/tools/mayalookassigner/commands.py b/openpype/tools/mayalookassigner/commands.py index df72e41354..d41d8ca5a2 100644 --- a/openpype/tools/mayalookassigner/commands.py +++ b/openpype/tools/mayalookassigner/commands.py @@ -2,11 +2,14 @@ from collections import defaultdict import logging import os +from bson.objectid import ObjectId import maya.cmds as cmds -from avalon import io, api - -from openpype.pipeline import remove_container +from openpype.pipeline import ( + legacy_io, + remove_container, + registered_host, +) from openpype.hosts.maya.api import lib from .vray_proxies import get_alembic_ids_cache @@ -78,7 +81,7 @@ def get_all_asset_nodes(): list: list of dictionaries """ - host = api.registered_host() + host = registered_host() nodes = [] for container in host.ls(): @@ -157,8 +160,10 @@ def create_items_from_nodes(nodes): return asset_view_items for _id, id_nodes in id_hashes.items(): - asset = io.find_one({"_id": io.ObjectId(_id)}, - projection={"name": True}) + asset = legacy_io.find_one( + {"_id": ObjectId(_id)}, + projection={"name": True} + ) # Skip if asset id is not found if not asset: @@ -191,7 +196,7 @@ def remove_unused_looks(): """ - host = api.registered_host() + host = 
registered_host() unused = [] for container in host.ls(): diff --git a/openpype/tools/mayalookassigner/vray_proxies.py b/openpype/tools/mayalookassigner/vray_proxies.py index 6a9347449a..3523b24bf3 100644 --- a/openpype/tools/mayalookassigner/vray_proxies.py +++ b/openpype/tools/mayalookassigner/vray_proxies.py @@ -6,17 +6,18 @@ import logging import json import six +from bson.objectid import ObjectId import alembic.Abc from maya import cmds -from avalon import io, api - from openpype.pipeline import ( + legacy_io, load_container, loaders_from_representation, discover_loader_plugins, get_representation_path, + registered_host, ) from openpype.hosts.maya.api import lib @@ -156,9 +157,11 @@ def get_look_relationships(version_id): dict: Dictionary of relations. """ - json_representation = io.find_one({"type": "representation", - "parent": version_id, - "name": "json"}) + json_representation = legacy_io.find_one({ + "type": "representation", + "parent": version_id, + "name": "json" + }) # Load relationships shader_relation = get_representation_path(json_representation) @@ -182,12 +185,14 @@ def load_look(version_id): """ # Get representations of shader file and relationships - look_representation = io.find_one({"type": "representation", - "parent": version_id, - "name": "ma"}) + look_representation = legacy_io.find_one({ + "type": "representation", + "parent": version_id, + "name": "ma" + }) # See if representation is already loaded, if so reuse it. - host = api.registered_host() + host = registered_host() representation_id = str(look_representation['_id']) for container in host.ls(): if (container['loader'] == "LookLoader" and @@ -230,15 +235,21 @@ def get_latest_version(asset_id, subset): RuntimeError: When subset or version doesn't exist. """ - subset = io.find_one({"name": subset, - "parent": io.ObjectId(asset_id), - "type": "subset"}) + subset = legacy_io.find_one({ + "name": subset, + "parent": ObjectId(asset_id), + "type": "subset" + }) if not subset: raise RuntimeError("Subset does not exist: %s" % subset) - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = legacy_io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) if not version: raise RuntimeError("Version does not exist.") diff --git a/openpype/tools/project_manager/project_manager/model.py b/openpype/tools/project_manager/project_manager/model.py index 1c3ec089f6..871704e13c 100644 --- a/openpype/tools/project_manager/project_manager/model.py +++ b/openpype/tools/project_manager/project_manager/model.py @@ -7,6 +7,11 @@ from pymongo import UpdateOne, DeleteOne from Qt import QtCore, QtGui +from openpype.lib import ( + CURRENT_DOC_SCHEMAS, + PypeLogger, +) + from .constants import ( IDENTIFIER_ROLE, ITEM_TYPE_ROLE, @@ -18,8 +23,6 @@ from .constants import ( ) from .style import ResourceCache -from openpype.lib import CURRENT_DOC_SCHEMAS - class ProjectModel(QtGui.QStandardItemModel): """Load possible projects to modify from MongoDB. 
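# The save path reworked in the hunks below batches all document changes
# into one Mongo bulk write and logs a created/updated/removed summary.
# Conceptually (plain pymongo; `project_col`, ids and values hypothetical):
from pymongo import DeleteOne, UpdateOne
bulk_writes = [
    UpdateOne({"_id": asset_id}, {"$set": {"data.fps": 25}}),
    DeleteOne({"_id": removed_asset_id}),
]
if bulk_writes:
    project_col.bulk_write(bulk_writes)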
@@ -185,6 +188,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): for key in self.multiselection_columns } + self._log = None # TODO Reset them on project change self._current_project = None self._root_item = None @@ -194,6 +198,12 @@ class HierarchyModel(QtCore.QAbstractItemModel): self._reset_root_item() + @property + def log(self): + if self._log is None: + self._log = PypeLogger.get_logger("ProjectManagerModel") + return self._log + @property def items_by_id(self): return self._items_by_id @@ -1367,6 +1377,9 @@ class HierarchyModel(QtCore.QAbstractItemModel): to_process = collections.deque() to_process.append(project_item) + created_count = 0 + updated_count = 0 + removed_count = 0 bulk_writes = [] while to_process: parent = to_process.popleft() @@ -1381,6 +1394,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): insert_list.append(item) elif item.data(REMOVED_ROLE): + removed_count += 1 if item.data(HIERARCHY_CHANGE_ABLE_ROLE): bulk_writes.append(DeleteOne( {"_id": item.asset_id} @@ -1394,6 +1408,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): else: update_data = item.update_data() if update_data: + updated_count += 1 bulk_writes.append(UpdateOne( {"_id": item.asset_id}, update_data @@ -1406,11 +1421,21 @@ class HierarchyModel(QtCore.QAbstractItemModel): result = project_col.insert_many(new_docs) for idx, mongo_id in enumerate(result.inserted_ids): + created_count += 1 insert_list[idx].mongo_id = mongo_id + if sum([created_count, updated_count, removed_count]) == 0: + self.log.info("Nothing has changed") + return + if bulk_writes: project_col.bulk_write(bulk_writes) + self.log.info(( + "Save finished." + " Created {} | Updated {} | Removed {} asset documents" + ).format(created_count, updated_count, removed_count)) + self.refresh_project() def copy_mime_data(self, indexes): @@ -1819,12 +1844,16 @@ class AssetItem(BaseItem): } query_projection = { "_id": 1, - "data.tasks": 1, - "data.visualParent": 1, - "schema": 1, - "name": 1, + "schema": 1, "type": 1, + "parent": 1, + + "data.visualParent": 1, + "data.parents": 1, + + "data.tasks": 1, + "data.frameStart": 1, "data.frameEnd": 1, "data.fps": 1, @@ -1835,7 +1864,7 @@ class AssetItem(BaseItem): "data.clipIn": 1, "data.clipOut": 1, "data.pixelAspect": 1, - "data.tools_env": 1 + "data.tools_env": 1, } def __init__(self, asset_doc): diff --git a/openpype/tools/project_manager/project_manager/widgets.py b/openpype/tools/project_manager/project_manager/widgets.py index 39ea833961..dc75b30bd7 100644 --- a/openpype/tools/project_manager/project_manager/widgets.py +++ b/openpype/tools/project_manager/project_manager/widgets.py @@ -10,11 +10,11 @@ from openpype.lib import ( PROJECT_NAME_REGEX ) from openpype.style import load_stylesheet +from openpype.pipeline import AvalonMongoDB from openpype.tools.utils import ( PlaceholderLineEdit, get_warning_pixmap ) -from avalon.api import AvalonMongoDB from Qt import QtWidgets, QtCore, QtGui diff --git a/openpype/tools/project_manager/project_manager/window.py b/openpype/tools/project_manager/project_manager/window.py index bdf32c7415..c281479d4f 100644 --- a/openpype/tools/project_manager/project_manager/window.py +++ b/openpype/tools/project_manager/project_manager/window.py @@ -16,6 +16,7 @@ from .style import ResourceCache from openpype.style import load_stylesheet from openpype.lib import is_admin_password_required from openpype.widgets import PasswordDialog +from openpype.pipeline import AvalonMongoDB from openpype import resources from openpype.api import ( @@ -23,7 +24,6 @@ from 
openpype.api import ( create_project_folders, Logger ) -from avalon.api import AvalonMongoDB class ProjectManagerWindow(QtWidgets.QWidget): diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 6707feac9c..2973d6a5bb 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -11,10 +11,12 @@ try: except Exception: from openpype.lib.python_2_comp import WeakMethod -import avalon.api import pyblish.api -from openpype.pipeline import PublishValidationError +from openpype.pipeline import ( + PublishValidationError, + registered_host, +) from openpype.pipeline.create import CreateContext from Qt import QtCore @@ -353,7 +355,7 @@ class PublisherController: """ def __init__(self, dbcon=None, headless=False): self.log = logging.getLogger("PublisherController") - self.host = avalon.api.registered_host() + self.host = registered_host() self.headless = headless self.create_context = CreateContext( diff --git a/openpype/tools/publisher/widgets/create_dialog.py b/openpype/tools/publisher/widgets/create_dialog.py index 27ce97955a..7d98609c2c 100644 --- a/openpype/tools/publisher/widgets/create_dialog.py +++ b/openpype/tools/publisher/widgets/create_dialog.py @@ -271,7 +271,7 @@ class CreateDialog(QtWidgets.QDialog): create_btn.setEnabled(False) form_layout = QtWidgets.QFormLayout() - form_layout.addRow("Name:", variant_layout) + form_layout.addRow("Variant:", variant_layout) form_layout.addRow("Subset:", subset_name_input) mid_widget = QtWidgets.QWidget(self) diff --git a/openpype/tools/sceneinventory/model.py b/openpype/tools/sceneinventory/model.py index 7173ae751e..8d72020c98 100644 --- a/openpype/tools/sceneinventory/model.py +++ b/openpype/tools/sceneinventory/model.py @@ -5,9 +5,14 @@ from collections import defaultdict from Qt import QtCore, QtGui import qtawesome +from bson.objectid import ObjectId -from avalon import api, io, schema -from openpype.pipeline import HeroVersionType +from openpype.pipeline import ( + legacy_io, + schema, + HeroVersionType, + registered_host, +) from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, Item from openpype.modules import ModulesManager @@ -50,7 +55,7 @@ class InventoryModel(TreeModel): if not self.sync_enabled: return - project_name = io.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] active_site = sync_server.get_active_site(project_name) remote_site = sync_server.get_remote_site(project_name) @@ -180,7 +185,7 @@ class InventoryModel(TreeModel): def refresh(self, selected=None, items=None): """Refresh the model""" - host = api.registered_host() + host = registered_host() if not items: # for debugging or testing, injecting items from outside items = host.ls() @@ -299,32 +304,32 @@ class InventoryModel(TreeModel): for repre_id, group_dict in sorted(grouped.items()): group_items = group_dict["items"] # Get parenthood per group - representation = io.find_one({"_id": io.ObjectId(repre_id)}) + representation = legacy_io.find_one({"_id": ObjectId(repre_id)}) if not representation: not_found["representation"].append(group_items) not_found_ids.append(repre_id) continue - version = io.find_one({"_id": representation["parent"]}) + version = legacy_io.find_one({"_id": representation["parent"]}) if not version: not_found["version"].append(group_items) not_found_ids.append(repre_id) continue elif version["type"] == "hero_version": - _version = io.find_one({ + _version = legacy_io.find_one({ "_id": 
version["version_id"] }) version["name"] = HeroVersionType(_version["name"]) version["data"] = _version["data"] - subset = io.find_one({"_id": version["parent"]}) + subset = legacy_io.find_one({"_id": version["parent"]}) if not subset: not_found["subset"].append(group_items) not_found_ids.append(repre_id) continue - asset = io.find_one({"_id": subset["parent"]}) + asset = legacy_io.find_one({"_id": subset["parent"]}) if not asset: not_found["asset"].append(group_items) not_found_ids.append(repre_id) @@ -385,7 +390,7 @@ class InventoryModel(TreeModel): # Store the highest available version so the model can know # whether current version is currently up-to-date. - highest_version = io.find_one({ + highest_version = legacy_io.find_one({ "type": "version", "parent": version["parent"] }, sort=[("name", -1)]) diff --git a/openpype/tools/sceneinventory/switch_dialog.py b/openpype/tools/sceneinventory/switch_dialog.py index 0e7b1b759a..b2d770330f 100644 --- a/openpype/tools/sceneinventory/switch_dialog.py +++ b/openpype/tools/sceneinventory/switch_dialog.py @@ -2,12 +2,14 @@ import collections import logging from Qt import QtWidgets, QtCore import qtawesome +from bson.objectid import ObjectId -from avalon import io, pipeline -from openpype.pipeline import ( +from openpype.pipeline import legacy_io +from openpype.pipeline.load import ( discover_loader_plugins, switch_container, get_repres_contexts, + loaders_from_repre_context, ) from .widgets import ( @@ -146,10 +148,10 @@ class SwitchAssetDialog(QtWidgets.QDialog): repre_ids = set() content_loaders = set() for item in self._items: - repre_ids.add(io.ObjectId(item["representation"])) + repre_ids.add(ObjectId(item["representation"])) content_loaders.add(item["loader"]) - repres = list(io.find({ + repres = list(legacy_io.find({ "type": {"$in": ["representation", "archived_representation"]}, "_id": {"$in": list(repre_ids)} })) @@ -177,7 +179,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): content_repres[repre_id] = repres_by_id[repre_id] version_ids.append(repre["parent"]) - versions = io.find({ + versions = legacy_io.find({ "type": {"$in": ["version", "hero_version"]}, "_id": {"$in": list(set(version_ids))} }) @@ -196,7 +198,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): else: subset_ids.append(content_versions[version_id]["parent"]) - subsets = io.find({ + subsets = legacy_io.find({ "type": {"$in": ["subset", "archived_subset"]}, "_id": {"$in": subset_ids} }) @@ -218,7 +220,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): asset_ids.append(subset["parent"]) content_subsets[subset_id] = subset - assets = io.find({ + assets = legacy_io.find({ "type": {"$in": ["asset", "archived_asset"]}, "_id": {"$in": list(asset_ids)} }) @@ -369,7 +371,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): loaders = None for repre_context in repre_contexts.values(): - _loaders = set(pipeline.loaders_from_repre_context( + _loaders = set(loaders_from_repre_context( available_loaders, repre_context )) if loaders is None: @@ -470,7 +472,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): # Prepare asset document if asset is selected asset_doc = None if selected_asset: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": True} ) @@ -521,7 +523,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): def _get_current_output_repre_ids_xxx( self, asset_doc, selected_subset, selected_repre ): - subset_doc = io.find_one( + subset_doc = legacy_io.find_one( { "type": "subset", "name": selected_subset, @@ -535,7 +537,7 @@ class 
SwitchAssetDialog(QtWidgets.QDialog): if not version_doc: return [] - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": version_doc["_id"], @@ -546,7 +548,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): return [repre_doc["_id"] for repre_doc in repre_docs] def _get_current_output_repre_ids_xxo(self, asset_doc, selected_subset): - subset_doc = io.find_one( + subset_doc = legacy_io.find_one( { "type": "subset", "parent": asset_doc["_id"], @@ -561,7 +563,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): for repre_doc in self.content_repres.values(): repre_names.add(repre_doc["name"]) - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": subset_doc["_id"], @@ -576,7 +578,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): for subset_doc in self.content_subsets.values(): susbet_names.add(subset_doc["name"]) - subset_docs = io.find( + subset_docs = legacy_io.find( { "type": "subset", "name": {"$in": list(susbet_names)}, @@ -585,7 +587,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): {"_id": True} ) subset_ids = [subset_doc["_id"] for subset_doc in subset_docs] - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": {"$in": subset_ids}, @@ -604,7 +606,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): subset_name = subset_doc["name"] repres_by_subset_name[subset_name].add(repre_name) - subset_docs = list(io.find( + subset_docs = list(legacy_io.find( { "type": "subset", "parent": asset_doc["_id"], @@ -635,7 +637,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): "parent": version_id, "name": {"$in": list(repre_names)} }) - repre_docs = io.find( + repre_docs = legacy_io.find( {"$or": repre_or_query}, {"_id": True} ) @@ -644,7 +646,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): def _get_current_output_repre_ids_oxx( self, selected_subset, selected_repre ): - subset_docs = list(io.find({ + subset_docs = list(legacy_io.find({ "type": "subset", "parent": {"$in": list(self.content_assets.keys())}, "name": selected_subset @@ -655,7 +657,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): last_version["_id"] for last_version in last_versions_by_subset_id.values() ] - repre_docs = io.find({ + repre_docs = legacy_io.find({ "type": "representation", "parent": {"$in": last_version_ids}, "name": selected_repre @@ -664,7 +666,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): return [repre_doc["_id"] for repre_doc in repre_docs] def _get_current_output_repre_ids_oxo(self, selected_subset): - subset_docs = list(io.find( + subset_docs = list(legacy_io.find( { "type": "subset", "parent": {"$in": list(self.content_assets.keys())}, @@ -711,7 +713,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): "parent": last_version_id, "name": {"$in": list(repre_names)} }) - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "$or": repre_or_query @@ -722,7 +724,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): return [repre_doc["_id"] for repre_doc in repre_docs] def _get_current_output_repre_ids_oox(self, selected_repre): - repre_docs = io.find( + repre_docs = legacy_io.find( { "name": selected_repre, "parent": {"$in": list(self.content_versions.keys())} @@ -732,7 +734,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): return [repre_doc["_id"] for repre_doc in repre_docs] def _get_asset_box_values(self): - asset_docs = io.find( + asset_docs = legacy_io.find( {"type": "asset"}, {"_id": 1, "name": 1} ) @@ -740,7 +742,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): asset_doc["_id"]: asset_doc["name"] 
for asset_doc in asset_docs } - subsets = io.find( + subsets = legacy_io.find( { "type": "subset", "parent": {"$in": list(asset_names_by_id.keys())} @@ -760,12 +762,15 @@ class SwitchAssetDialog(QtWidgets.QDialog): def _get_subset_box_values(self): selected_asset = self._assets_box.get_valid_value() if selected_asset: - asset_doc = io.find_one({"type": "asset", "name": selected_asset}) + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": selected_asset + }) asset_ids = [asset_doc["_id"]] else: asset_ids = list(self.content_assets.keys()) - subsets = io.find( + subsets = legacy_io.find( { "type": "subset", "parent": {"$in": asset_ids} @@ -802,7 +807,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [ ] [ ] [?] if not selected_asset and not selected_subset: # Find all representations of selection's subsets - possible_repres = list(io.find( + possible_repres = list(legacy_io.find( { "type": "representation", "parent": {"$in": list(self.content_versions.keys())} @@ -831,11 +836,11 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [x] [x] [?] if selected_asset and selected_subset: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": 1} ) - subset_doc = io.find_one( + subset_doc = legacy_io.find_one( { "type": "subset", "name": selected_subset, @@ -846,7 +851,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): subset_id = subset_doc["_id"] last_versions_by_subset_id = self.find_last_versions([subset_id]) version_doc = last_versions_by_subset_id.get(subset_id) - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": version_doc["_id"] @@ -863,7 +868,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [x] [ ] [?] # If asset only is selected if selected_asset: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": 1} ) @@ -874,7 +879,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): subset_names = set() for subset_doc in self.content_subsets.values(): subset_names.add(subset_doc["name"]) - subset_docs = io.find( + subset_docs = legacy_io.find( { "type": "subset", "parent": asset_doc["_id"], @@ -898,7 +903,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): if not subset_id_by_version_id: return list() - repre_docs = list(io.find( + repre_docs = list(legacy_io.find( { "type": "representation", "parent": {"$in": list(subset_id_by_version_id.keys())} @@ -928,7 +933,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): return list(available_repres) # [ ] [x] [?] - subset_docs = list(io.find( + subset_docs = list(legacy_io.find( { "type": "subset", "parent": {"$in": list(self.content_assets.keys())}, @@ -955,7 +960,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): if not subset_id_by_version_id: return list() - repre_docs = list(io.find( + repre_docs = list(legacy_io.find( { "type": "representation", "parent": {"$in": list(subset_id_by_version_id.keys())} @@ -1011,11 +1016,11 @@ class SwitchAssetDialog(QtWidgets.QDialog): return # [x] [ ] [?] 
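# Reading aid: the `[x] [ ] [?]` comments and the `_xxx`/`_xxo`/`_oxx`/...
# method suffixes in this dialog appear to encode which of the three combo
# boxes hold a concrete selection, in (asset, subset, representation)
# order -- `x` selected, `o` (or `[ ]`) empty, `?` either. For example:
#     _get_current_output_repre_ids_xxo(asset_doc, selected_subset)
#     # -> asset [x], subset [x], representation [ ]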
- asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": 1} ) - subset_docs = io.find( + subset_docs = legacy_io.find( {"type": "subset", "parent": asset_doc["_id"]}, {"name": 1} ) @@ -1046,7 +1051,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): }} ] last_versions_by_subset_id = dict() - for doc in io.aggregate(_pipeline): + for doc in legacy_io.aggregate(_pipeline): doc["parent"] = doc["_id"] doc["_id"] = doc.pop("_version_id") last_versions_by_subset_id[doc["parent"]] = doc @@ -1074,11 +1079,11 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [x] [x] [ ] if selected_asset is not None and selected_subset is not None: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": 1} ) - subset_doc = io.find_one( + subset_doc = legacy_io.find_one( { "type": "subset", "parent": asset_doc["_id"], @@ -1094,7 +1099,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): validation_state.repre_ok = False return - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": last_version["_id"] @@ -1114,11 +1119,11 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [x] [ ] [ ] if selected_asset is not None: - asset_doc = io.find_one( + asset_doc = legacy_io.find_one( {"type": "asset", "name": selected_asset}, {"_id": 1} ) - subset_docs = list(io.find( + subset_docs = list(legacy_io.find( { "type": "subset", "parent": asset_doc["_id"] @@ -1140,7 +1145,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): version_id = last_version["_id"] subset_id_by_version_id[version_id] = subset_id - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": {"$in": list(subset_id_by_version_id.keys())} @@ -1171,7 +1176,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): # [ ] [x] [ ] # Subset documents - subset_docs = io.find( + subset_docs = legacy_io.find( { "type": "subset", "parent": {"$in": list(self.content_assets.keys())}, @@ -1192,7 +1197,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): version_id = last_version["_id"] subset_id_by_version_id[version_id] = subset_id - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "parent": {"$in": list(subset_id_by_version_id.keys())} @@ -1223,7 +1228,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): def _on_current_asset(self): # Set initial asset as current. 
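# Ids in this dialog now come straight from bson instead of the removed
# `io.ObjectId`; a sketch of the pattern used with the containers above:
from bson.objectid import ObjectId
repre_id = ObjectId(container["representation"])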
- asset_name = io.Session["AVALON_ASSET"] + asset_name = legacy_io.Session["AVALON_ASSET"] index = self._assets_box.findText( asset_name, QtCore.Qt.MatchFixedString ) @@ -1241,7 +1246,10 @@ class SwitchAssetDialog(QtWidgets.QDialog): selected_representation = self._representations_box.get_valid_value() if selected_asset: - asset_doc = io.find_one({"type": "asset", "name": selected_asset}) + asset_doc = legacy_io.find_one({ + "type": "asset", + "name": selected_asset + }) asset_docs_by_id = {asset_doc["_id"]: asset_doc} else: asset_docs_by_id = self.content_assets @@ -1260,7 +1268,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): if selected_subset: subset_query["name"] = selected_subset - subset_docs = list(io.find(subset_query)) + subset_docs = list(legacy_io.find(subset_query)) subset_ids = [] subset_docs_by_parent_and_name = collections.defaultdict(dict) for subset in subset_docs: @@ -1270,12 +1278,12 @@ class SwitchAssetDialog(QtWidgets.QDialog): subset_docs_by_parent_and_name[parent_id][name] = subset # versions - version_docs = list(io.find({ + version_docs = list(legacy_io.find({ "type": "version", "parent": {"$in": subset_ids} }, sort=[("name", -1)])) - hero_version_docs = list(io.find({ + hero_version_docs = list(legacy_io.find({ "type": "hero_version", "parent": {"$in": subset_ids} })) @@ -1295,7 +1303,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): parent_id = hero_version_doc["parent"] hero_version_docs_by_parent_id[parent_id] = hero_version_doc - repre_docs = io.find({ + repre_docs = legacy_io.find({ "type": "representation", "parent": {"$in": version_ids} }) @@ -1306,7 +1314,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): repre_docs_by_parent_id_by_name[parent_id][name] = repre_doc for container in self._items: - container_repre_id = io.ObjectId(container["representation"]) + container_repre_id = ObjectId(container["representation"]) container_repre = self.content_repres[container_repre_id] container_repre_name = container_repre["name"] diff --git a/openpype/tools/sceneinventory/view.py b/openpype/tools/sceneinventory/view.py index c38390c614..448e3f4e6f 100644 --- a/openpype/tools/sceneinventory/view.py +++ b/openpype/tools/sceneinventory/view.py @@ -4,14 +4,15 @@ from functools import partial from Qt import QtWidgets, QtCore import qtawesome - -from avalon import io, api +from bson.objectid import ObjectId from openpype import style from openpype.pipeline import ( + legacy_io, HeroVersionType, update_container, remove_container, + discover_inventory_actions, ) from openpype.modules import ModulesManager from openpype.tools.utils.lib import ( @@ -78,11 +79,11 @@ class SceneInventoryView(QtWidgets.QTreeView): repre_ids = [] for item in items: - item_id = io.ObjectId(item["representation"]) + item_id = ObjectId(item["representation"]) if item_id not in repre_ids: repre_ids.append(item_id) - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "_id": {"$in": repre_ids} @@ -96,7 +97,7 @@ class SceneInventoryView(QtWidgets.QTreeView): if version_id not in version_ids: version_ids.append(version_id) - loaded_versions = io.find({ + loaded_versions = legacy_io.find({ "_id": {"$in": version_ids}, "type": {"$in": ["version", "hero_version"]} }) @@ -113,7 +114,7 @@ class SceneInventoryView(QtWidgets.QTreeView): if parent_id not in version_parents: version_parents.append(parent_id) - all_versions = io.find({ + all_versions = legacy_io.find({ "type": {"$in": ["hero_version", "version"]}, "parent": {"$in": version_parents} }) @@ -145,11 +146,11 @@ class 
SceneInventoryView(QtWidgets.QTreeView): def _on_switch_to_versioned(items): repre_ids = [] for item in items: - item_id = io.ObjectId(item["representation"]) + item_id = ObjectId(item["representation"]) if item_id not in repre_ids: repre_ids.append(item_id) - repre_docs = io.find( + repre_docs = legacy_io.find( { "type": "representation", "_id": {"$in": repre_ids} @@ -164,7 +165,7 @@ class SceneInventoryView(QtWidgets.QTreeView): version_id_by_repre_id[repre_doc["_id"]] = version_id if version_id not in version_ids: version_ids.append(version_id) - hero_versions = io.find( + hero_versions = legacy_io.find( { "_id": {"$in": version_ids}, "type": "hero_version" @@ -182,7 +183,7 @@ class SceneInventoryView(QtWidgets.QTreeView): if current_version_id == hero_version_id: version_id_by_repre_id[_repre_id] = version_id - version_docs = io.find( + version_docs = legacy_io.find( { "_id": {"$in": list(version_ids)}, "type": "version" @@ -195,7 +196,7 @@ class SceneInventoryView(QtWidgets.QTreeView): version_doc["name"] for item in items: - repre_id = io.ObjectId(item["representation"]) + repre_id = ObjectId(item["representation"]) version_id = version_id_by_repre_id.get(repre_id) version_name = version_name_by_id.get(version_id) if version_name is not None: @@ -365,11 +366,11 @@ class SceneInventoryView(QtWidgets.QTreeView): repre_ids (list) side (str): 'active_site'|'remote_site' """ - project_name = io.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] active_site = self.sync_server.get_active_site(project_name) remote_site = self.sync_server.get_remote_site(project_name) - repre_docs = io.find({ + repre_docs = legacy_io.find({ "type": "representation", "_id": {"$in": repre_ids} }) @@ -487,7 +488,7 @@ class SceneInventoryView(QtWidgets.QTreeView): containers = containers or [dict()] # Check which action will be available in the menu - Plugins = api.discover(api.InventoryAction) + Plugins = discover_inventory_actions() compatible = [p() for p in Plugins if any(p.is_compatible(c) for c in containers)] @@ -658,13 +659,13 @@ class SceneInventoryView(QtWidgets.QTreeView): active = items[-1] # Get available versions for active representation - representation_id = io.ObjectId(active["representation"]) - representation = io.find_one({"_id": representation_id}) - version = io.find_one({ + representation_id = ObjectId(active["representation"]) + representation = legacy_io.find_one({"_id": representation_id}) + version = legacy_io.find_one({ "_id": representation["parent"] }) - versions = list(io.find( + versions = list(legacy_io.find( { "parent": version["parent"], "type": "version" @@ -672,7 +673,7 @@ class SceneInventoryView(QtWidgets.QTreeView): sort=[("name", 1)] )) - hero_version = io.find_one({ + hero_version = legacy_io.find_one({ "parent": version["parent"], "type": "hero_version" }) diff --git a/openpype/tools/sceneinventory/window.py b/openpype/tools/sceneinventory/window.py index b40fbb69e4..054c2a2daa 100644 --- a/openpype/tools/sceneinventory/window.py +++ b/openpype/tools/sceneinventory/window.py @@ -3,8 +3,8 @@ import sys from Qt import QtWidgets, QtCore import qtawesome -from avalon import io, api +from openpype.pipeline import legacy_io from openpype import style from openpype.tools.utils.delegates import VersionDelegate from openpype.tools.utils.lib import ( @@ -72,7 +72,7 @@ class SceneInventoryWindow(QtWidgets.QDialog): control_layout.addWidget(refresh_button) # endregion control - family_config_cache = FamilyConfigCache(io) + family_config_cache = 
FamilyConfigCache(legacy_io) model = InventoryModel(family_config_cache) proxy = FilterProxyModel() @@ -91,7 +91,7 @@ class SceneInventoryWindow(QtWidgets.QDialog): view.setColumnWidth(4, 100) # namespace # apply delegates - version_delegate = VersionDelegate(io, self) + version_delegate = VersionDelegate(legacy_io, self) column = model.Columns.index("version") view.setItemDelegateForColumn(column, version_delegate) @@ -191,17 +191,18 @@ def show(root=None, debug=False, parent=None, items=None): pass if debug is True: - io.install() + legacy_io.install() if not os.environ.get("AVALON_PROJECT"): any_project = next( - project for project in io.projects() + project for project in legacy_io.projects() if project.get("active", True) is not False ) - api.Session["AVALON_PROJECT"] = any_project["name"] + project_name = any_project["name"] else: - api.Session["AVALON_PROJECT"] = os.environ.get("AVALON_PROJECT") + project_name = os.environ.get("AVALON_PROJECT") + legacy_io.Session["AVALON_PROJECT"] = project_name with qt_app_context(): window = SceneInventoryWindow(parent) diff --git a/openpype/tools/settings/local_settings/constants.py b/openpype/tools/settings/local_settings/constants.py index 1836c579af..16f87b6f05 100644 --- a/openpype/tools/settings/local_settings/constants.py +++ b/openpype/tools/settings/local_settings/constants.py @@ -9,6 +9,7 @@ LABEL_DISCARD_CHANGES = "Discard changes" # TODO move to settings constants LOCAL_GENERAL_KEY = "general" LOCAL_PROJECTS_KEY = "projects" +LOCAL_ENV_KEY = "environments" LOCAL_APPS_KEY = "applications" # Roots key constant diff --git a/openpype/tools/settings/local_settings/environments_widget.py b/openpype/tools/settings/local_settings/environments_widget.py new file mode 100644 index 0000000000..14ca517851 --- /dev/null +++ b/openpype/tools/settings/local_settings/environments_widget.py @@ -0,0 +1,93 @@ +from Qt import QtWidgets + +from openpype.tools.utils import PlaceholderLineEdit + + +class LocalEnvironmentsWidgets(QtWidgets.QWidget): + def __init__(self, system_settings_entity, parent): + super(LocalEnvironmentsWidgets, self).__init__(parent) + + self._widgets_by_env_key = {} + self.system_settings_entity = system_settings_entity + + content_widget = QtWidgets.QWidget(self) + content_layout = QtWidgets.QGridLayout(content_widget) + content_layout.setContentsMargins(0, 0, 0, 0) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + + self._layout = layout + self._content_layout = content_layout + self._content_widget = content_widget + + def _clear_layout(self, layout): + while layout.count() > 0: + item = layout.itemAt(0) + widget = item.widget() + layout.removeItem(item) + if widget is not None: + widget.setVisible(False) + widget.deleteLater() + + def _reset_env_widgets(self): + self._clear_layout(self._content_layout) + self._clear_layout(self._layout) + + content_widget = QtWidgets.QWidget(self) + content_layout = QtWidgets.QGridLayout(content_widget) + content_layout.setContentsMargins(0, 0, 0, 0) + white_list_entity = ( + self.system_settings_entity["general"]["local_env_white_list"] + ) + row = -1 + for row, item in enumerate(white_list_entity): + key = item.value + label_widget = QtWidgets.QLabel(key, self) + input_widget = PlaceholderLineEdit(self) + input_widget.setPlaceholderText("< Keep studio value >") + + content_layout.addWidget(label_widget, row, 0) + content_layout.addWidget(input_widget, row, 1) + + self._widgets_by_env_key[key] = input_widget + + if row < 0: + label_widget = QtWidgets.QLabel( + 
(
+                    "Your studio does not allow changing"
+                    " environment variables locally."
+                ),
+                self
+            )
+            content_layout.addWidget(label_widget, 0, 0)
+            content_layout.setColumnStretch(0, 1)
+
+        else:
+            content_layout.setColumnStretch(0, 0)
+            content_layout.setColumnStretch(1, 1)
+
+        self._layout.addWidget(content_widget, 1)
+
+        self._content_layout = content_layout
+        self._content_widget = content_widget
+
+    def update_local_settings(self, value):
+        if not value:
+            value = {}
+
+        self._reset_env_widgets()
+
+        for env_key, widget in self._widgets_by_env_key.items():
+            env_value = value.get(env_key) or ""
+            widget.setText(env_value)
+
+    def settings_value(self):
+        output = {}
+        for env_key, widget in self._widgets_by_env_key.items():
+            value = widget.text()
+            if value:
+                output[env_key] = value
+        if not output:
+            return None
+        return output
diff --git a/openpype/tools/settings/local_settings/window.py b/openpype/tools/settings/local_settings/window.py
index fb47e69a17..6a2db3fff5 100644
--- a/openpype/tools/settings/local_settings/window.py
+++ b/openpype/tools/settings/local_settings/window.py
@@ -8,6 +8,7 @@ from openpype.settings.lib import (
     save_local_settings
 )
 from openpype.tools.settings import CHILD_OFFSET
+from openpype.tools.utils import MessageOverlayObject
 from openpype.api import (
     Logger,
     SystemSettings,
@@ -25,11 +26,13 @@ from .experimental_widget import (
     LOCAL_EXPERIMENTAL_KEY
 )
 from .apps_widget import LocalApplicationsWidgets
+from .environments_widget import LocalEnvironmentsWidgets
 from .projects_widget import ProjectSettingsWidget
 
 from .constants import (
     LOCAL_GENERAL_KEY,
     LOCAL_PROJECTS_KEY,
+    LOCAL_ENV_KEY,
     LOCAL_APPS_KEY
 )
@@ -49,18 +52,20 @@ class LocalSettingsWidget(QtWidgets.QWidget):
         self.pype_mongo_widget = None
         self.general_widget = None
         self.experimental_widget = None
+        self.envs_widget = None
         self.apps_widget = None
         self.projects_widget = None
 
-        self._create_pype_mongo_ui()
+        self._create_mongo_url_ui()
         self._create_general_ui()
         self._create_experimental_ui()
+        self._create_environments_ui()
         self._create_app_ui()
         self._create_project_ui()
 
         self.main_layout.addStretch(1)
 
-    def _create_pype_mongo_ui(self):
+    def _create_mongo_url_ui(self):
         pype_mongo_expand_widget = ExpandingWidget("OpenPype Mongo URL", self)
         pype_mongo_content = QtWidgets.QWidget(self)
         pype_mongo_layout = QtWidgets.QVBoxLayout(pype_mongo_content)
@@ -110,6 +115,22 @@ class LocalSettingsWidget(QtWidgets.QWidget):
 
         self.experimental_widget = experimental_widget
 
+    def _create_environments_ui(self):
+        envs_expand_widget = ExpandingWidget("Environments", self)
+        envs_content = QtWidgets.QWidget(self)
+        envs_layout = QtWidgets.QVBoxLayout(envs_content)
+        envs_layout.setContentsMargins(CHILD_OFFSET, 5, 0, 0)
+        envs_expand_widget.set_content_widget(envs_content)
+
+        envs_widget = LocalEnvironmentsWidgets(
+            self.system_settings, envs_content
+        )
+        envs_layout.addWidget(envs_widget)
+
+        self.main_layout.addWidget(envs_expand_widget)
+
+        self.envs_widget = envs_widget
+
     def _create_app_ui(self):
         # Applications
         app_expand_widget = ExpandingWidget("Applications", self)
@@ -154,6 +175,9 @@ class LocalSettingsWidget(QtWidgets.QWidget):
         self.general_widget.update_local_settings(
             value.get(LOCAL_GENERAL_KEY)
         )
+        self.envs_widget.update_local_settings(
+            value.get(LOCAL_ENV_KEY)
+        )
         self.app_widget.update_local_settings(
             value.get(LOCAL_APPS_KEY)
         )
@@ -170,6 +194,10 @@ class LocalSettingsWidget(QtWidgets.QWidget):
         if general_value:
             output[LOCAL_GENERAL_KEY] = general_value
 
+        envs_value =
self.envs_widget.settings_value() + if envs_value: + output[LOCAL_ENV_KEY] = envs_value + app_value = self.app_widget.settings_value() if app_value: output[LOCAL_APPS_KEY] = app_value @@ -194,6 +222,8 @@ class LocalSettingsWindow(QtWidgets.QWidget): self.setWindowTitle("OpenPype Local settings") + overlay_object = MessageOverlayObject(self) + stylesheet = style.load_stylesheet() self.setStyleSheet(stylesheet) self.setWindowIcon(QtGui.QIcon(style.app_icon_path())) @@ -220,6 +250,7 @@ class LocalSettingsWindow(QtWidgets.QWidget): save_btn.clicked.connect(self._on_save_clicked) reset_btn.clicked.connect(self._on_reset_clicked) + self._overlay_object = overlay_object # Do not create local settings widget in init phase as it's using # settings objects that must be OK to be able create this widget # - we want to show dialog if anything goes wrong @@ -285,8 +316,10 @@ class LocalSettingsWindow(QtWidgets.QWidget): def _on_reset_clicked(self): self.reset() + self._overlay_object.add_message("Refreshed...") def _on_save_clicked(self): value = self._settings_widget.settings_value() save_local_settings(value) + self._overlay_object.add_message("Saved...", message_type="success") self.reset() diff --git a/openpype/tools/settings/settings/base.py b/openpype/tools/settings/settings/base.py index bd48b3a966..44ec09b2ca 100644 --- a/openpype/tools/settings/settings/base.py +++ b/openpype/tools/settings/settings/base.py @@ -567,7 +567,9 @@ class GUIWidget(BaseWidget): def _create_label_ui(self): label = self.entity["label"] + word_wrap = self.entity.schema_data.get("word_wrap", False) label_widget = QtWidgets.QLabel(label, self) + label_widget.setWordWrap(word_wrap) label_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) label_widget.setObjectName("SettingsLabel") label_widget.linkActivated.connect(self._on_link_activate) diff --git a/openpype/tools/settings/settings/categories.py b/openpype/tools/settings/settings/categories.py index a5b5cd40f0..c8ade5fcdb 100644 --- a/openpype/tools/settings/settings/categories.py +++ b/openpype/tools/settings/settings/categories.py @@ -216,7 +216,7 @@ class SettingsCategoryWidget(QtWidgets.QWidget): def create_ui(self): self.modify_defaults_checkbox = None - conf_wrapper_widget = QtWidgets.QWidget(self) + conf_wrapper_widget = QtWidgets.QSplitter(self) configurations_widget = QtWidgets.QWidget(conf_wrapper_widget) # Breadcrumbs/Path widget @@ -294,10 +294,7 @@ class SettingsCategoryWidget(QtWidgets.QWidget): configurations_layout.addWidget(scroll_widget, 1) - conf_wrapper_layout = QtWidgets.QHBoxLayout(conf_wrapper_widget) - conf_wrapper_layout.setContentsMargins(0, 0, 0, 0) - conf_wrapper_layout.setSpacing(0) - conf_wrapper_layout.addWidget(configurations_widget, 1) + conf_wrapper_widget.addWidget(configurations_widget) main_layout = QtWidgets.QVBoxLayout(self) main_layout.setContentsMargins(0, 0, 0, 0) @@ -327,7 +324,7 @@ class SettingsCategoryWidget(QtWidgets.QWidget): self.breadcrumbs_model = None self.refresh_btn = refresh_btn - self.conf_wrapper_layout = conf_wrapper_layout + self.conf_wrapper_widget = conf_wrapper_widget self.main_layout = main_layout self.ui_tweaks() @@ -818,7 +815,9 @@ class ProjectWidget(SettingsCategoryWidget): project_list_widget = ProjectListWidget(self) - self.conf_wrapper_layout.insertWidget(0, project_list_widget, 0) + self.conf_wrapper_widget.insertWidget(0, project_list_widget) + self.conf_wrapper_widget.setStretchFactor(0, 0) + self.conf_wrapper_widget.setStretchFactor(1, 1) 
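+        # Splitter stretch factors: the project list (index 0) keeps its
+        # size hint while the settings content (index 1) absorbs resizes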
project_list_widget.project_changed.connect(self._on_project_change) project_list_widget.version_change_requested.connect( diff --git a/openpype/tools/settings/settings/widgets.py b/openpype/tools/settings/settings/widgets.py index 577c2630ab..45c21d5685 100644 --- a/openpype/tools/settings/settings/widgets.py +++ b/openpype/tools/settings/settings/widgets.py @@ -1,13 +1,9 @@ -import os import copy import uuid from Qt import QtWidgets, QtCore, QtGui import qtawesome -from avalon.mongodb import ( - AvalonMongoConnection, - AvalonMongoDB -) +from openpype.pipeline import AvalonMongoDB from openpype.style import get_objected_colors from openpype.tools.utils.widgets import ImageButton from openpype.tools.utils.lib import paint_image_with_color @@ -97,6 +93,9 @@ class CompleterView(QtWidgets.QListView): QtCore.Qt.FramelessWindowHint | QtCore.Qt.Tool ) + + # Open the widget unactivated + self.setAttribute(QtCore.Qt.WA_ShowWithoutActivating) delegate = QtWidgets.QStyledItemDelegate() self.setItemDelegate(delegate) @@ -225,10 +224,18 @@ class SettingsLineEdit(PlaceholderLineEdit): def __init__(self, *args, **kwargs): super(SettingsLineEdit, self).__init__(*args, **kwargs) - self._completer = None + # Timer which will get started on focus in and stopped on focus out + # - callback checks if line edit or completer have focus + # and hide completer if not + focus_timer = QtCore.QTimer() + focus_timer.setInterval(50) + focus_timer.timeout.connect(self._on_focus_timer) self.textChanged.connect(self._on_text_change) + self._completer = None + self._focus_timer = focus_timer + def _on_text_change(self, text): if self._completer is not None: self._completer.set_text_filter(text) @@ -240,19 +247,19 @@ class SettingsLineEdit(PlaceholderLineEdit): new_point = self.mapToGlobal(point) self._completer.move(new_point) + def _on_focus_timer(self): + if not self.hasFocus() and not self._completer.hasFocus(): + self._completer.hide() + self._focus_timer.stop() + def focusInEvent(self, event): super(SettingsLineEdit, self).focusInEvent(event) self.focused_in.emit() - if self._completer is None: - return - self._completer.show() - self._update_completer() - - def focusOutEvent(self, event): - super(SettingsLineEdit, self).focusOutEvent(event) if self._completer is not None: - self._completer.hide() + self._focus_timer.start() + self._completer.show() + self._update_completer() def paintEvent(self, event): super(SettingsLineEdit, self).paintEvent(event) @@ -1198,15 +1205,6 @@ class ProjectListWidget(QtWidgets.QWidget): selected_project = index.data(PROJECT_NAME_ROLE) break - mongo_url = os.environ["OPENPYPE_MONGO"] - - # Force uninstall of whole avalon connection if url does not match - # to current environment and set it as environment - if mongo_url != os.environ["AVALON_MONGO"]: - AvalonMongoConnection.uninstall(self.dbcon, force=True) - os.environ["AVALON_MONGO"] = mongo_url - self.dbcon = None - if not self.dbcon: try: self.dbcon = AvalonMongoDB() diff --git a/openpype/tools/settings/settings/wrapper_widgets.py b/openpype/tools/settings/settings/wrapper_widgets.py index 7370fcf945..b14a226912 100644 --- a/openpype/tools/settings/settings/wrapper_widgets.py +++ b/openpype/tools/settings/settings/wrapper_widgets.py @@ -92,7 +92,8 @@ class CollapsibleWrapper(WrapperWidget): self.content_layout = content_layout if self.collapsible: - body_widget.toggle_content(self.collapsed) + if not self.collapsed: + body_widget.toggle_content() else: body_widget.hide_toolbox(hide_content=False) diff --git 
a/openpype/tools/standalonepublish/app.py b/openpype/tools/standalonepublish/app.py index 3630d92c83..1ad5cd119e 100644 --- a/openpype/tools/standalonepublish/app.py +++ b/openpype/tools/standalonepublish/app.py @@ -12,7 +12,7 @@ from .widgets import ( from .widgets.constants import HOST_NAME from openpype import style from openpype.api import resources -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from openpype.modules import ModulesManager diff --git a/openpype/tools/standalonepublish/publish.py b/openpype/tools/standalonepublish/publish.py index 582e7eccf8..e1e9edebb9 100644 --- a/openpype/tools/standalonepublish/publish.py +++ b/openpype/tools/standalonepublish/publish.py @@ -1,14 +1,14 @@ import os import sys -import openpype import pyblish.api +from openpype.pipeline import install_openpype_plugins from openpype.tools.utils.host_tools import show_publish def main(env): # Registers pype's Global pyblish plugins - openpype.install() + install_openpype_plugins() # Register additional paths addition_paths_str = env.get("PUBLISH_PATHS") or "" diff --git a/openpype/tools/standalonepublish/widgets/model_asset.py b/openpype/tools/standalonepublish/widgets/model_asset.py index a7316a2aa7..02e9073555 100644 --- a/openpype/tools/standalonepublish/widgets/model_asset.py +++ b/openpype/tools/standalonepublish/widgets/model_asset.py @@ -35,7 +35,7 @@ def _iter_model_rows(model, class AssetModel(TreeModel): - """A model listing assets in the silo in the active project. + """A model listing assets in the active project. The assets are displayed in a treeview, they are visually parented by a `visualParent` field in the database containing an `_id` to a parent @@ -64,7 +64,7 @@ class AssetModel(TreeModel): self.refresh() - def _add_hierarchy(self, assets, parent=None, silos=None): + def _add_hierarchy(self, assets, parent=None): """Add the assets that are related to the parent as children items. This method does *not* query the database. These instead are queried @@ -72,27 +72,8 @@ class AssetModel(TreeModel): queries. Resulting in up to 10x speed increase. Args: - assets (dict): All assets in the currently active silo stored - by key/value - - Returns: - None - + assets (dict): All assets from current project. 
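+
+        Example (illustrative sketch; ``model`` and the asset documents
+            are hypothetical, grouping mirrors ``refresh``):
+            >>> assets_by_parent = {
+            ...     None: [root_asset_doc],
+            ...     root_asset_doc["_id"]: [child_asset_doc],
+            ... }
+            >>> model._add_hierarchy(assets_by_parent)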
""" - if silos: - # WARNING: Silo item "_id" is set to silo value - # mainly because GUI issue with preserve selection and expanded row - # and because of easier hierarchy parenting (in "assets") - for silo in silos: - node = Node({ - "_id": silo, - "name": silo, - "label": silo, - "type": "silo" - }) - self.add_child(node, parent=parent) - self._add_hierarchy(assets, parent=node) - parent_id = parent["_id"] if parent else None current_assets = assets.get(parent_id, list()) @@ -132,27 +113,19 @@ class AssetModel(TreeModel): self.beginResetModel() - # Get all assets in current silo sorted by name + # Get all assets in current project sorted by name db_assets = self.dbcon.find({"type": "asset"}).sort("name", 1) - silos = db_assets.distinct("silo") or None - # if any silo is set to None then it's expected it should not be used - if silos and None in silos: - silos = None # Group the assets by their visual parent's id assets_by_parent = collections.defaultdict(list) for asset in db_assets: - parent_id = ( - asset.get("data", {}).get("visualParent") or - asset.get("silo") - ) + parent_id = asset.get("data", {}).get("visualParent") assets_by_parent[parent_id].append(asset) # Build the hierarchical tree items recursively self._add_hierarchy( assets_by_parent, - parent=None, - silos=silos + parent=None ) self.endResetModel() @@ -174,8 +147,6 @@ class AssetModel(TreeModel): # Allow a custom icon and custom icon color to be defined data = node.get("_document", {}).get("data", {}) icon = data.get("icon", None) - if icon is None and node.get("type") == "silo": - icon = "database" color = data.get("color", self._default_asset_icon_color) if icon is None: diff --git a/openpype/tools/standalonepublish/widgets/widget_asset.py b/openpype/tools/standalonepublish/widgets/widget_asset.py index e6b74f8f82..8b43cd7cf8 100644 --- a/openpype/tools/standalonepublish/widgets/widget_asset.py +++ b/openpype/tools/standalonepublish/widgets/widget_asset.py @@ -229,7 +229,6 @@ class AssetWidget(QtWidgets.QWidget): data = { 'project': project['name'], 'asset': asset['name'], - 'silo': asset.get("silo"), 'parents': self.get_parents(asset), 'task': task } diff --git a/openpype/tools/standalonepublish/widgets/widget_components.py b/openpype/tools/standalonepublish/widgets/widget_components.py index 4d7f94f825..fbafc7142a 100644 --- a/openpype/tools/standalonepublish/widgets/widget_components.py +++ b/openpype/tools/standalonepublish/widgets/widget_components.py @@ -5,16 +5,18 @@ import random import string from Qt import QtWidgets, QtCore -from . import DropDataFrame -from .constants import HOST_NAME -from avalon import io + from openpype.api import execute, Logger +from openpype.pipeline import legacy_io from openpype.lib import ( get_openpype_execute_args, apply_project_environments_value ) -log = Logger().get_logger("standalonepublisher") +from . 
import DropDataFrame +from .constants import HOST_NAME + +log = Logger.get_logger("standalonepublisher") class ComponentsWidget(QtWidgets.QWidget): @@ -152,18 +154,18 @@ def set_context(project, asset, task): :type asset: str ''' os.environ["AVALON_PROJECT"] = project - io.Session["AVALON_PROJECT"] = project + legacy_io.Session["AVALON_PROJECT"] = project os.environ["AVALON_ASSET"] = asset - io.Session["AVALON_ASSET"] = asset + legacy_io.Session["AVALON_ASSET"] = asset if not task: task = '' os.environ["AVALON_TASK"] = task - io.Session["AVALON_TASK"] = task + legacy_io.Session["AVALON_TASK"] = task - io.Session["current_dir"] = os.path.normpath(os.getcwd()) + legacy_io.Session["current_dir"] = os.path.normpath(os.getcwd()) os.environ["AVALON_APP"] = HOST_NAME - io.Session["AVALON_APP"] = HOST_NAME + legacy_io.Session["AVALON_APP"] = HOST_NAME def cli_publish(data, publish_paths, gui=True): @@ -171,7 +173,7 @@ def cli_publish(data, publish_paths, gui=True): os.path.dirname(os.path.dirname(__file__)), "publish.py" ) - io.install() + legacy_io.install() # Create hash name folder in temp chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) @@ -203,6 +205,6 @@ def cli_publish(data, publish_paths, gui=True): log.info(f"Publish result: {result}") - io.uninstall() + legacy_io.uninstall() return False diff --git a/openpype/tools/standalonepublish/widgets/widget_drop_frame.py b/openpype/tools/standalonepublish/widgets/widget_drop_frame.py index c1c59d65b6..e6c7328e88 100644 --- a/openpype/tools/standalonepublish/widgets/widget_drop_frame.py +++ b/openpype/tools/standalonepublish/widgets/widget_drop_frame.py @@ -37,6 +37,10 @@ class DropDataFrame(QtWidgets.QFrame): "video_file": video_extensions } + sequence_types = [ + ".bgeo", ".vdb" + ] + def __init__(self, parent): super().__init__() self.parent_widget = parent @@ -176,7 +180,7 @@ class DropDataFrame(QtWidgets.QFrame): non_collectionable_paths = [] for path in in_paths: ext = os.path.splitext(path)[1] - if ext in self.image_extensions: + if ext in self.image_extensions or ext in self.sequence_types: collectionable_paths.append(path) else: non_collectionable_paths.append(path) @@ -289,7 +293,7 @@ class DropDataFrame(QtWidgets.QFrame): def get_file_data(self, data): filepath = data['files'][0] ext = data['ext'].lower() - output = {} + output = {"fps": None} file_info = None if 'file_info' in data: diff --git a/openpype/tools/standalonepublish/widgets/widget_family_desc.py b/openpype/tools/standalonepublish/widgets/widget_family_desc.py index 79681615b9..2095b332bd 100644 --- a/openpype/tools/standalonepublish/widgets/widget_family_desc.py +++ b/openpype/tools/standalonepublish/widgets/widget_family_desc.py @@ -52,6 +52,7 @@ class FamilyDescriptionWidget(QtWidgets.QWidget): family.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft) help = QtWidgets.QLabel("help") + help.setWordWrap(True) help.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft) label_layout.addWidget(family) diff --git a/openpype/tools/stdout_broker/window.py b/openpype/tools/stdout_broker/window.py index a2190e0491..f5720ca05b 100644 --- a/openpype/tools/stdout_broker/window.py +++ b/openpype/tools/stdout_broker/window.py @@ -1,7 +1,9 @@ -from avalon import style -from Qt import QtWidgets, QtCore -import collections import re +import collections + +from Qt import QtWidgets + +from openpype import style class ConsoleDialog(QtWidgets.QDialog): diff --git a/openpype/tools/subsetmanager/model.py b/openpype/tools/subsetmanager/model.py index 
b76c3c2343..760a167b42 100644 --- a/openpype/tools/subsetmanager/model.py +++ b/openpype/tools/subsetmanager/model.py @@ -2,7 +2,7 @@ import uuid from Qt import QtCore, QtGui -from avalon import api +from openpype.pipeline import registered_host ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 @@ -21,7 +21,7 @@ class InstanceModel(QtGui.QStandardItemModel): self._instances_by_item_id = {} instances = None - host = api.registered_host() + host = registered_host() list_instances = getattr(host, "list_instances", None) if list_instances: instances = list_instances() diff --git a/openpype/tools/subsetmanager/window.py b/openpype/tools/subsetmanager/window.py index a53af52174..6314e67015 100644 --- a/openpype/tools/subsetmanager/window.py +++ b/openpype/tools/subsetmanager/window.py @@ -4,9 +4,8 @@ import sys from Qt import QtWidgets, QtCore import qtawesome -from avalon import api - from openpype import style +from openpype.pipeline import registered_host from openpype.tools.utils import PlaceholderLineEdit from openpype.tools.utils.lib import ( iter_model_rows, @@ -106,7 +105,7 @@ class SubsetManagerWindow(QtWidgets.QDialog): self._details_widget.set_details(container, item_id) def _on_save(self): - host = api.registered_host() + host = registered_host() if not hasattr(host, "save_instances"): print("BUG: Host does not have \"save_instances\" method") return @@ -141,7 +140,7 @@ class SubsetManagerWindow(QtWidgets.QDialog): # Prepare menu menu = QtWidgets.QMenu(self) actions = [] - host = api.registered_host() + host = registered_host() if hasattr(host, "remove_instance"): action = QtWidgets.QAction("Remove instance", menu) action.setData(host.remove_instance) @@ -176,7 +175,7 @@ class SubsetManagerWindow(QtWidgets.QDialog): self._details_widget.set_details(None, None) self._model.refresh() - host = api.registered_host() + host = registered_host() dev_mode = os.environ.get("AVALON_DEVELOP_MODE") or "" editable = False if dev_mode.lower() in ("1", "yes", "true", "on"): diff --git a/openpype/tools/texture_copy/app.py b/openpype/tools/texture_copy/app.py index ceca98a082..fd8d6dc02e 100644 --- a/openpype/tools/texture_copy/app.py +++ b/openpype/tools/texture_copy/app.py @@ -1,14 +1,12 @@ import os import re import click -from avalon import io, api -from pprint import pprint + +import speedcopy from openpype.lib import Terminal from openpype.api import Anatomy - -import shutil -import speedcopy +from openpype.pipeline import legacy_io t = Terminal() @@ -20,8 +18,8 @@ texture_extensions = ['.tif', '.tiff', '.jpg', '.jpeg', '.tx', '.png', '.tga', class TextureCopy: def __init__(self): - if not io.Session: - io.install() + if not legacy_io.Session: + legacy_io.install() def _get_textures(self, path): textures = [] @@ -32,14 +30,14 @@ class TextureCopy: return textures def _get_project(self, project_name): - project = io.find_one({ + project = legacy_io.find_one({ 'type': 'project', 'name': project_name }) return project def _get_asset(self, asset_name): - asset = io.find_one({ + asset = legacy_io.find_one({ 'type': 'asset', 'name': asset_name }) @@ -57,7 +55,6 @@ class TextureCopy: "name": project_name, "code": project['data']['code'] }, - "silo": asset.get('silo'), "asset": asset['name'], "family": 'texture', "subset": 'Main', @@ -155,7 +152,6 @@ def texture_copy(asset, project, path): t.echo(">>> Initializing avalon session ...") os.environ["AVALON_PROJECT"] = project os.environ["AVALON_ASSET"] = asset - os.environ["AVALON_SILO"] = "" TextureCopy().process(asset, project, path) diff --git 
a/openpype/tools/traypublisher/window.py b/openpype/tools/traypublisher/window.py index d0453c4f23..972e89a3ae 100644 --- a/openpype/tools/traypublisher/window.py +++ b/openpype/tools/traypublisher/window.py @@ -8,8 +8,10 @@ publishing plugins. from Qt import QtWidgets, QtCore -import avalon.api -from avalon.api import AvalonMongoDB +from openpype.pipeline import ( + install_host, + AvalonMongoDB, +) from openpype.hosts.traypublisher import ( api as traypublisher ) @@ -163,7 +165,7 @@ class TrayPublishWindow(PublisherWindow): def main(): - avalon.api.install(traypublisher) + install_host(traypublisher) app = QtWidgets.QApplication([]) window = TrayPublishWindow() window.show() diff --git a/openpype/tools/utils/__init__.py b/openpype/tools/utils/__init__.py index ea1133c442..0f367510bd 100644 --- a/openpype/tools/utils/__init__.py +++ b/openpype/tools/utils/__init__.py @@ -22,6 +22,10 @@ from .lib import ( from .models import ( RecursiveSortFilterProxyModel, ) +from .overlay_messages import ( + MessageOverlayObject, +) + __all__ = ( "PlaceholderLineEdit", @@ -45,4 +49,6 @@ __all__ = ( "get_asset_icon", "RecursiveSortFilterProxyModel", + + "MessageOverlayObject", ) diff --git a/openpype/tools/utils/delegates.py b/openpype/tools/utils/delegates.py index d3718b1734..71f817a1d7 100644 --- a/openpype/tools/utils/delegates.py +++ b/openpype/tools/utils/delegates.py @@ -287,9 +287,5 @@ class PrettyTimeDelegate(QtWidgets.QStyledItemDelegate): """ def displayText(self, value, locale): - - if value is None: - # Ignore None value - return - - return pretty_timestamp(value) + if value is not None: + return pretty_timestamp(value) diff --git a/openpype/tools/utils/host_tools.py b/openpype/tools/utils/host_tools.py index 2d9733ec94..d8f4570120 100644 --- a/openpype/tools/utils/host_tools.py +++ b/openpype/tools/utils/host_tools.py @@ -4,8 +4,14 @@ It is possible to create `HostToolsHelper` in host implementation or use singleton approach with global functions (using helper anyway). 
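+
+Example (illustrative sketch; ``main_window`` stands in for a host's
+Qt main window):
+    >>> helper = HostToolsHelper()
+    >>> helper.show_workfiles(parent=main_window)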
""" import os -import avalon.api + import pyblish.api + +from openpype.pipeline import ( + registered_host, + legacy_io, +) + from .lib import qt_app_context @@ -47,7 +53,7 @@ class HostToolsHelper: Window, validate_host_requirements ) # Host validation - host = avalon.api.registered_host() + host = registered_host() validate_host_requirements(host) workfiles_window = Window(parent=parent) @@ -72,8 +78,8 @@ class HostToolsHelper: if use_context: context = { - "asset": avalon.api.Session["AVALON_ASSET"], - "task": avalon.api.Session["AVALON_TASK"] + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] } workfiles_tool.set_context(context) @@ -104,7 +110,7 @@ class HostToolsHelper: use_context = False if use_context: - context = {"asset": avalon.api.Session["AVALON_ASSET"]} + context = {"asset": legacy_io.Session["AVALON_ASSET"]} loader_tool.set_context(context, refresh=True) else: loader_tool.refresh() diff --git a/openpype/tools/utils/lib.py b/openpype/tools/utils/lib.py index 93b156bef8..20fea6600b 100644 --- a/openpype/tools/utils/lib.py +++ b/openpype/tools/utils/lib.py @@ -6,16 +6,19 @@ import collections from Qt import QtWidgets, QtCore, QtGui import qtawesome -import avalon.api - -from openpype.style import get_default_entity_icon_color +from openpype.style import ( + get_default_entity_icon_color, + get_objected_colors, +) +from openpype.resources import get_image_path +from openpype.lib import filter_profiles from openpype.api import ( get_project_settings, Logger ) -from openpype.lib import filter_profiles -from openpype.style import get_objected_colors -from openpype.resources import get_image_path +from openpype.pipeline import registered_host + +log = Logger.get_logger(__name__) def center_window(window): @@ -111,13 +114,23 @@ def get_qta_icon_by_name_and_color(icon_name, icon_color): variants.append("{0}.{1}".format(key, icon_name)) icon = None + used_variant = None for variant in variants: try: icon = qtawesome.icon(variant, color=icon_color) + used_variant = variant break except Exception: pass + if used_variant is None: + log.info("Didn't find icon \"{}\"".format(icon_name)) + + elif used_variant != icon_name: + log.debug("Icon \"{}\" was not found \"{}\" is used instead".format( + icon_name, used_variant + )) + SharedObjects.icons[full_icon_name] = icon return icon @@ -140,8 +153,8 @@ def get_asset_icon_name(asset_doc, has_children=True): return icon_name if has_children: - return "folder" - return "folder-o" + return "fa.folder" + return "fa.folder-o" def get_asset_icon_color(asset_doc): @@ -390,13 +403,14 @@ class FamilyConfigCache: self.family_configs.clear() # Skip if we're not in host context - if not avalon.api.registered_host(): + if not registered_host(): return # Update the icons from the project configuration project_name = os.environ.get("AVALON_PROJECT") asset_name = os.environ.get("AVALON_ASSET") task_name = os.environ.get("AVALON_TASK") + host_name = os.environ.get("AVALON_APP") if not all((project_name, asset_name, task_name)): return @@ -410,15 +424,21 @@ class FamilyConfigCache: ["family_filter_profiles"] ) if profiles: - asset_doc = self.dbcon.find_one( + # Make sure connection is installed + # - accessing attribute which does not have auto-install + self.dbcon.install() + database = getattr(self.dbcon, "database", None) + if database is None: + database = self.dbcon._database + asset_doc = database[project_name].find_one( {"type": "asset", "name": asset_name}, {"data.tasks": True} - ) + ) or {} tasks_info = 
asset_doc.get("data", {}).get("tasks") or {}
             task_type = tasks_info.get(task_name, {}).get("type")
 
             profiles_filter = {
                 "task_types": task_type,
-                "hosts": os.environ["AVALON_APP"]
+                "hosts": host_name
             }
             matching_item = filter_profiles(profiles, profiles_filter)
@@ -707,11 +727,11 @@ def is_sync_loader(loader):
 
 
 def is_remove_site_loader(loader):
-    return hasattr(loader, "remove_site_on_representation")
+    return hasattr(loader, "is_remove_site_loader")
 
 
 def is_add_site_loader(loader):
-    return hasattr(loader, "add_site_to_representation")
+    return hasattr(loader, "is_add_site_loader")
 
 
 class WrappedCallbackItem:
diff --git a/openpype/tools/utils/overlay_messages.py b/openpype/tools/utils/overlay_messages.py
new file mode 100644
index 0000000000..62de2cf272
--- /dev/null
+++ b/openpype/tools/utils/overlay_messages.py
@@ -0,0 +1,324 @@
+import uuid
+
+from Qt import QtWidgets, QtCore, QtGui
+
+from openpype.style import get_objected_colors
+
+from .lib import set_style_property
+
+
+class CloseButton(QtWidgets.QFrame):
+    """Close button drawn manually."""
+
+    clicked = QtCore.Signal()
+
+    def __init__(self, parent):
+        super(CloseButton, self).__init__(parent)
+        colors = get_objected_colors()
+        close_btn_color = colors["overlay-messages"]["close-btn"]
+        self._color = close_btn_color.get_qcolor()
+        self._mouse_pressed = False
+        policy = QtWidgets.QSizePolicy(
+            QtWidgets.QSizePolicy.Fixed,
+            QtWidgets.QSizePolicy.Fixed
+        )
+        self.setSizePolicy(policy)
+
+    def sizeHint(self):
+        size = self.fontMetrics().height()
+        return QtCore.QSize(size, size)
+
+    def mousePressEvent(self, event):
+        if event.button() == QtCore.Qt.LeftButton:
+            self._mouse_pressed = True
+        super(CloseButton, self).mousePressEvent(event)
+
+    def mouseReleaseEvent(self, event):
+        if self._mouse_pressed:
+            self._mouse_pressed = False
+            if self.rect().contains(event.pos()):
+                self.clicked.emit()
+
+        super(CloseButton, self).mouseReleaseEvent(event)
+
+    def paintEvent(self, event):
+        rect = self.rect()
+        painter = QtGui.QPainter(self)
+        painter.setClipRect(event.rect())
+        pen = QtGui.QPen()
+        pen.setWidth(2)
+        pen.setColor(self._color)
+        pen.setStyle(QtCore.Qt.SolidLine)
+        pen.setCapStyle(QtCore.Qt.RoundCap)
+        painter.setPen(pen)
+        offset = int(rect.height() / 4)
+        top = rect.top() + offset
+        left = rect.left() + offset
+        right = rect.right() - offset
+        bottom = rect.bottom() - offset
+        painter.drawLine(
+            left, top,
+            right, bottom
+        )
+        painter.drawLine(
+            left, bottom,
+            right, top
+        )
+
+
+class OverlayMessageWidget(QtWidgets.QFrame):
+    """Message widget shown as an overlay.
+
+    Message is hidden after a timeout, but hiding can be postponed by mouse
+    hover. Each hover can add an additional 2 seconds of visibility.
+
+    Args:
+        message_id (str): Unique identifier of message widget for
+            'MessageOverlayObject'.
+        message (str): Text shown in message.
+        parent (QWidget): Parent widget where message is visible.
+        timeout (int): Timeout of message's visibility (default 5000).
+        message_type (str): Property which can be used in styles for a
+            specific kind of message.
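+
+    Example (illustrative sketch; ``parent_window`` is hypothetical):
+        >>> widget = OverlayMessageWidget(
+        ...     "unique-id", "Saved...", parent_window,
+        ...     message_type="success", timeout=3000
+        ... )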
+    """
+
+    close_requested = QtCore.Signal(str)
+    _default_timeout = 5000
+
+    def __init__(
+        self, message_id, message, parent, message_type=None, timeout=None
+    ):
+        super(OverlayMessageWidget, self).__init__(parent)
+        self.setObjectName("OverlayMessageWidget")
+
+        if message_type:
+            set_style_property(self, "type", message_type)
+
+        if not timeout:
+            timeout = self._default_timeout
+        timeout_timer = QtCore.QTimer()
+        timeout_timer.setInterval(timeout)
+        timeout_timer.setSingleShot(True)
+
+        hover_timer = QtCore.QTimer()
+        hover_timer.setInterval(2000)
+        hover_timer.setSingleShot(True)
+
+        label_widget = QtWidgets.QLabel(message, self)
+        label_widget.setAlignment(QtCore.Qt.AlignCenter)
+        label_widget.setWordWrap(True)
+        close_btn = CloseButton(self)
+
+        layout = QtWidgets.QHBoxLayout(self)
+        layout.setContentsMargins(5, 5, 0, 5)
+        layout.addWidget(label_widget, 1)
+        layout.addWidget(close_btn, 0)
+
+        close_btn.clicked.connect(self._on_close_clicked)
+        timeout_timer.timeout.connect(self._on_timer_timeout)
+        hover_timer.timeout.connect(self._on_hover_timeout)
+
+        self._label_widget = label_widget
+        self._message_id = message_id
+        self._timeout_timer = timeout_timer
+        self._hover_timer = hover_timer
+
+    def size_hint_without_word_wrap(self):
+        """Size hint for cases when label word wrap is disabled."""
+        self._label_widget.setWordWrap(False)
+        size_hint = self.sizeHint()
+        self._label_widget.setWordWrap(True)
+        return size_hint
+
+    def showEvent(self, event):
+        """Start timeout on show."""
+        super(OverlayMessageWidget, self).showEvent(event)
+        self._timeout_timer.start()
+
+    def _on_timer_timeout(self):
+        """On message timeout."""
+        # Skip closing if hover timer is active
+        if not self._hover_timer.isActive():
+            self._close_message()
+
+    def _on_hover_timeout(self):
+        """Hover timer timed out."""
+        # Check if mouse is still over the widget
+        if self.underMouse():
+            self._hover_timer.start()
+        else:
+            self._close_message()
+
+    def _on_close_clicked(self):
+        self._close_message()
+
+    def _close_message(self):
+        """Emit close request to 'MessageOverlayObject'."""
+        self.close_requested.emit(self._message_id)
+
+    def enterEvent(self, event):
+        """Start hover timer on mouse enter."""
+        super(OverlayMessageWidget, self).enterEvent(event)
+        self._hover_timer.start()
+
+    def leaveEvent(self, event):
+        """Restart hover timer on mouse leave."""
+        super(OverlayMessageWidget, self).leaveEvent(event)
+        self._hover_timer.start()
+
+
+class MessageOverlayObject(QtCore.QObject):
+    """Object that can be used to add overlay messages.
+
+    Args:
+        widget (QWidget): Widget on which overlay messages are shown.
+        default_timeout (int): Default timeout of a message in milliseconds.
+    """
+
+    def __init__(self, widget, default_timeout=None):
+        super(MessageOverlayObject, self).__init__()
+
+        widget.installEventFilter(self)
+
+        # Timer which triggers recalculation of message positions
+        recalculate_timer = QtCore.QTimer()
+        recalculate_timer.setInterval(10)
+
+        recalculate_timer.timeout.connect(self._recalculate_positions)
+
+        self._widget = widget
+        self._recalculate_timer = recalculate_timer
+
+        self._messages_order = []
+        self._closing_messages = set()
+        self._messages = {}
+        self._spacing = 5
+        self._move_size = 4
+        self._move_size_remove = 8
+        self._default_timeout = default_timeout
+
+    def add_message(self, message, message_type=None, timeout=None):
+        """Add a single message to the overlay.
+
+        Args:
+            message (str): Message that will be shown.
+            timeout (int): Message timeout.
+            message_type (str): Message type can be used as property in
+                stylesheets.
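+
+        Example (mirrors the usage in the local settings window):
+            >>> overlay = MessageOverlayObject(window)
+            >>> overlay.add_message("Saved...", message_type="success")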
+        """
+        # Skip empty messages
+        if not message:
+            return
+
+        if timeout is None:
+            timeout = self._default_timeout
+
+        # Create unique id of message
+        label_id = str(uuid.uuid4())
+        # Create message widget
+        widget = OverlayMessageWidget(
+            label_id, message, self._widget, message_type, timeout
+        )
+        widget.close_requested.connect(self._on_message_close_request)
+        widget.show()
+
+        # Move widget above the window so it can slide in from the top
+        pos = widget.pos()
+        pos.setY(pos.y() - widget.height())
+        widget.move(pos)
+        # Store message
+        self._messages[label_id] = widget
+        self._messages_order.append(label_id)
+        # Trigger recalculation timer
+        self._recalculate_timer.start()
+
+    def _on_message_close_request(self, label_id):
+        """Message widget requested removal."""
+
+        widget = self._messages.get(label_id)
+        if widget is not None:
+            # Add message to closing messages and start recalculation
+            self._closing_messages.add(label_id)
+            self._recalculate_timer.start()
+
+    def _recalculate_positions(self):
+        """Recalculate positions of widgets."""
+
+        # Skip if there are no messages to process
+        if not self._messages_order:
+            self._recalculate_timer.stop()
+            return
+
+        # Track whether all message widgets are in their expected positions
+        all_at_place = True
+        # Starting y position
+        pos_y = self._spacing
+        # Current widget width
+        widget_width = self._widget.width()
+        max_width = widget_width - (2 * self._spacing)
+        widget_half_width = widget_width / 2
+
+        # Store message ids that should be removed
+        message_ids_to_remove = set()
+        for message_id in reversed(self._messages_order):
+            widget = self._messages[message_id]
+            pos = widget.pos()
+            # Messages to remove are moved upwards
+            if message_id in self._closing_messages:
+                bottom = pos.y() + widget.height()
+                # Mark message for removal once it is no longer visible
+                if bottom < 0 or self._move_size_remove < 1:
+                    message_ids_to_remove.add(message_id)
+                    continue
+
+                # Calculate new y position of message
+                dst_pos_y = pos.y() - self._move_size_remove
+
+            else:
+                # Calculate y position of message
+                # - use y position of previous message widget and add
+                #   move size if it is not at its final destination yet
+                if widget.underMouse():
+                    dst_pos_y = pos.y()
+                elif pos.y() == pos_y or self._move_size < 1:
+                    dst_pos_y = pos_y
+                elif pos.y() < pos_y:
+                    dst_pos_y = min(pos_y, pos.y() + self._move_size)
+                else:
+                    dst_pos_y = max(pos_y, pos.y() - self._move_size)
+
+            # Remember if any widget is not yet where it should be
+            if all_at_place and dst_pos_y != pos_y:
+                all_at_place = False
+
+            # Calculate ideal width and height of message widget
+            height = widget.heightForWidth(max_width)
+            w_size_hint = widget.size_hint_without_word_wrap()
+            widget.resize(min(max_width, w_size_hint.width()), height)
+
+            # Center message widget
+            size = widget.size()
+            pos_x = widget_half_width - (size.width() / 2)
+            # Move widget to destination position
+            widget.move(pos_x, dst_pos_y)
+
+            # Add message widget height and spacing for next message widget
+            pos_y += size.height() + self._spacing
+
+        # Drop widgets marked for removal
+        for message_id in message_ids_to_remove:
+            self._messages_order.remove(message_id)
+            self._closing_messages.remove(message_id)
+            widget = self._messages.pop(message_id)
+            widget.hide()
+            widget.deleteLater()
+
+        # Stop recalculation timer if all widgets are where they should be
+        if all_at_place:
+            self._recalculate_timer.stop()
+
+    def eventFilter(self, source, event):
+        # Trigger position recalculation when the widget is resized
+        if source is self._widget and event.type() == QtCore.QEvent.Resize:
+            self._recalculate_timer.start()
+
+        return
super(MessageOverlayObject, self).eventFilter(source, event) diff --git a/openpype/tools/workfiles/__init__.py b/openpype/tools/workfiles/__init__.py index cde7293931..5fbc71797d 100644 --- a/openpype/tools/workfiles/__init__.py +++ b/openpype/tools/workfiles/__init__.py @@ -1,9 +1,12 @@ +from .window import Window from .app import ( show, - Window + validate_host_requirements, ) __all__ = [ + "Window", + "show", - "Window" + "validate_host_requirements", ] diff --git a/openpype/tools/workfiles/app.py b/openpype/tools/workfiles/app.py index 713992bc4b..352847ede8 100644 --- a/openpype/tools/workfiles/app.py +++ b/openpype/tools/workfiles/app.py @@ -1,40 +1,12 @@ import sys -import os -import re -import copy -import shutil import logging -import datetime -import Qt -from Qt import QtWidgets, QtCore -from avalon import io, api - -from openpype import style -from openpype.tools.utils.lib import ( - qt_app_context +from openpype.pipeline import ( + registered_host, + legacy_io, ) -from openpype.tools.utils import PlaceholderLineEdit -from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget -from openpype.tools.utils.tasks_widget import TasksWidget -from openpype.tools.utils.delegates import PrettyTimeDelegate -from openpype.lib import ( - emit_event, - Anatomy, - get_workfile_doc, - create_workfile_doc, - save_workfile_data_to_doc, - get_workfile_template_key, - create_workdir_extra_folders, - get_workdir_data, - get_last_workfile_with_version -) -from openpype.lib.avalon_context import ( - update_current_task, - compute_session_changes -) -from .model import FilesModel -from .view import FilesView +from openpype.tools.utils import qt_app_context +from .window import Window log = logging.getLogger(__name__) @@ -42,1181 +14,6 @@ module = sys.modules[__name__] module.window = None -def build_workfile_data(session): - """Get the data required for workfile formatting from avalon `session`""" - - # Set work file data for template formatting - asset_name = session["AVALON_ASSET"] - task_name = session["AVALON_TASK"] - host_name = session["AVALON_APP"] - project_doc = io.find_one( - {"type": "project"}, - { - "name": True, - "data.code": True, - "config.tasks": True, - } - ) - - asset_doc = io.find_one( - { - "type": "asset", - "name": asset_name - }, - { - "name": True, - "data.tasks": True, - "data.parents": True - } - ) - data = get_workdir_data(project_doc, asset_doc, task_name, host_name) - data.update({ - "version": 1, - "comment": "", - "ext": None - }) - - return data - - -class CommentMatcher(object): - """Use anatomy and work file data to parse comments from filenames""" - def __init__(self, anatomy, template_key, data): - - self.fname_regex = None - - template = anatomy.templates[template_key]["file"] - if "{comment}" not in template: - # Don't look for comment if template doesn't allow it - return - - # Create a regex group for extensions - extensions = api.registered_host().file_extensions() - any_extension = "(?:{})".format( - "|".join(re.escape(ext[1:]) for ext in extensions) - ) - - # Use placeholders that will never be in the filename - temp_data = copy.deepcopy(data) - temp_data["comment"] = "<>" - temp_data["version"] = "<>" - temp_data["ext"] = "<>" - - formatted = anatomy.format(temp_data) - fname_pattern = formatted[template_key]["file"] - fname_pattern = re.escape(fname_pattern) - - # Replace comment and version with something we can match with regex - replacements = { - "<>": "(.+)", - "<>": "[0-9]+", - "<>": any_extension, - } - for src, dest in 
replacements.items(): - fname_pattern = fname_pattern.replace(re.escape(src), dest) - - # Match from beginning to end of string to be safe - fname_pattern = "^{}$".format(fname_pattern) - - self.fname_regex = re.compile(fname_pattern) - - def parse_comment(self, filepath): - """Parse the {comment} part from a filename""" - if not self.fname_regex: - return - - fname = os.path.basename(filepath) - match = self.fname_regex.match(fname) - if match: - return match.group(1) - - -class SubversionLineEdit(QtWidgets.QWidget): - """QLineEdit with QPushButton for drop down selection of list of strings""" - def __init__(self, parent=None): - super(SubversionLineEdit, self).__init__(parent=parent) - - layout = QtWidgets.QHBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.setSpacing(3) - - self._input = PlaceholderLineEdit() - self._button = QtWidgets.QPushButton("") - self._button.setFixedWidth(18) - self._menu = QtWidgets.QMenu(self) - self._button.setMenu(self._menu) - - layout.addWidget(self._input) - layout.addWidget(self._button) - - @property - def input(self): - return self._input - - def set_values(self, values): - self._update(values) - - def _on_button_clicked(self): - self._menu.exec_() - - def _on_action_clicked(self, action): - self._input.setText(action.text()) - - def _update(self, values): - """Create optional predefined subset names - - Args: - default_names(list): all predefined names - - Returns: - None - """ - - menu = self._menu - button = self._button - - state = any(values) - button.setEnabled(state) - if state is False: - return - - # Include an empty string - values = [""] + sorted(values) - - # Get and destroy the action group - group = button.findChild(QtWidgets.QActionGroup) - if group: - group.deleteLater() - - # Build new action group - group = QtWidgets.QActionGroup(button) - for name in values: - action = group.addAction(name) - menu.addAction(action) - - group.triggered.connect(self._on_action_clicked) - - -class NameWindow(QtWidgets.QDialog): - """Name Window to define a unique filename inside a root folder - - The filename will be based on the "workfile" template defined in the - project["config"]["template"]. 
- - """ - - def __init__(self, parent, root, anatomy, template_key, session=None): - super(NameWindow, self).__init__(parent=parent) - self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) - - self.result = None - self.host = api.registered_host() - self.root = root - self.work_file = None - - if not session: - # Fallback to active session - session = api.Session - - self.data = build_workfile_data(session) - - # Store project anatomy - self.anatomy = anatomy - self.template = anatomy.templates[template_key]["file"] - self.template_key = template_key - - # Btns widget - btns_widget = QtWidgets.QWidget(self) - - btn_ok = QtWidgets.QPushButton("Ok", btns_widget) - btn_cancel = QtWidgets.QPushButton("Cancel", btns_widget) - - btns_layout = QtWidgets.QHBoxLayout(btns_widget) - btns_layout.addWidget(btn_ok) - btns_layout.addWidget(btn_cancel) - - # Inputs widget - inputs_widget = QtWidgets.QWidget(self) - - # Version widget - version_widget = QtWidgets.QWidget(inputs_widget) - - # Version number input - version_input = QtWidgets.QSpinBox(version_widget) - version_input.setMinimum(1) - version_input.setMaximum(9999) - - # Last version checkbox - last_version_check = QtWidgets.QCheckBox( - "Next Available Version", version_widget - ) - last_version_check.setChecked(True) - - version_layout = QtWidgets.QHBoxLayout(version_widget) - version_layout.setContentsMargins(0, 0, 0, 0) - version_layout.addWidget(version_input) - version_layout.addWidget(last_version_check) - - # Preview widget - preview_label = QtWidgets.QLabel("Preview filename", inputs_widget) - - # Subversion input - subversion = SubversionLineEdit(inputs_widget) - subversion.input.setPlaceholderText("Will be part of filename.") - - # Extensions combobox - ext_combo = QtWidgets.QComboBox(inputs_widget) - # Add styled delegate to use stylesheets - ext_delegate = QtWidgets.QStyledItemDelegate() - ext_combo.setItemDelegate(ext_delegate) - ext_combo.addItems(self.host.file_extensions()) - - # Build inputs - inputs_layout = QtWidgets.QFormLayout(inputs_widget) - # Add version only if template contains version key - # - since the version can be padded with "{version:0>4}" we only search - # for "{version". - if "{version" in self.template: - inputs_layout.addRow("Version:", version_widget) - else: - version_widget.setVisible(False) - - # Add subversion only if template contains `{comment}` - if "{comment}" in self.template: - inputs_layout.addRow("Subversion:", subversion) - - # Detect whether a {comment} is in the current filename - if so, - # preserve it by default and set it in the comment/subversion field - current_filepath = self.host.current_file() - if current_filepath: - # We match the current filename against the current session - # instead of the session where the user is saving to. 
- current_data = build_workfile_data(api.Session) - matcher = CommentMatcher(anatomy, template_key, current_data) - comment = matcher.parse_comment(current_filepath) - if comment: - log.info("Detected subversion comment: {}".format(comment)) - self.data["comment"] = comment - subversion.input.setText(comment) - - existing_comments = self.get_existing_comments() - subversion.set_values(existing_comments) - - else: - subversion.setVisible(False) - inputs_layout.addRow("Extension:", ext_combo) - inputs_layout.addRow("Preview:", preview_label) - - # Build layout - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.addWidget(inputs_widget) - main_layout.addWidget(btns_widget) - - # Signal callback registration - version_input.valueChanged.connect(self.on_version_spinbox_changed) - last_version_check.stateChanged.connect( - self.on_version_checkbox_changed - ) - - subversion.input.textChanged.connect(self.on_comment_changed) - ext_combo.currentIndexChanged.connect(self.on_extension_changed) - - btn_ok.pressed.connect(self.on_ok_pressed) - btn_cancel.pressed.connect(self.on_cancel_pressed) - - # Allow "Enter" key to accept the save. - btn_ok.setDefault(True) - - # Force default focus to comment, some hosts didn't automatically - # apply focus to this line edit (e.g. Houdini) - subversion.input.setFocus() - - # Store widgets - self.btn_ok = btn_ok - - self.version_widget = version_widget - - self.version_input = version_input - self.last_version_check = last_version_check - - self.preview_label = preview_label - self.subversion = subversion - self.ext_combo = ext_combo - self._ext_delegate = ext_delegate - - self.refresh() - - def get_existing_comments(self): - - matcher = CommentMatcher(self.anatomy, self.template_key, self.data) - host_extensions = set(self.host.file_extensions()) - comments = set() - if os.path.isdir(self.root): - for fname in os.listdir(self.root): - if not os.path.isfile(os.path.join(self.root, fname)): - continue - - ext = os.path.splitext(fname)[-1] - if ext not in host_extensions: - continue - - comment = matcher.parse_comment(fname) - if comment: - comments.add(comment) - - return list(comments) - - def on_version_spinbox_changed(self, value): - self.data["version"] = value - self.refresh() - - def on_version_checkbox_changed(self, _value): - self.refresh() - - def on_comment_changed(self, text): - self.data["comment"] = text - self.refresh() - - def on_extension_changed(self): - ext = self.ext_combo.currentText() - if ext == self.data["ext"]: - return - self.data["ext"] = ext - self.refresh() - - def on_ok_pressed(self): - self.result = self.work_file - self.close() - - def on_cancel_pressed(self): - self.close() - - def get_result(self): - return self.result - - def get_work_file(self): - data = copy.deepcopy(self.data) - if not data["comment"]: - data.pop("comment", None) - - data["ext"] = data["ext"][1:] - - anatomy_filled = self.anatomy.format(data) - return anatomy_filled[self.template_key]["file"] - - def refresh(self): - extensions = self.host.file_extensions() - extension = self.data["ext"] - if extension is None: - # Define saving file extension - current_file = self.host.current_file() - if current_file: - # Match the extension of current file - _, extension = os.path.splitext(current_file) - else: - extension = extensions[0] - - if extension != self.data["ext"]: - self.data["ext"] = extension - index = self.ext_combo.findText( - extension, QtCore.Qt.MatchFixedString - ) - if index >= 0: - self.ext_combo.setCurrentIndex(index) - - if not 
self.last_version_check.isChecked(): - self.version_input.setEnabled(True) - self.data["version"] = self.version_input.value() - - work_file = self.get_work_file() - - else: - self.version_input.setEnabled(False) - - data = copy.deepcopy(self.data) - template = str(self.template) - - if not data["comment"]: - data.pop("comment", None) - - data["ext"] = data["ext"][1:] - - version = get_last_workfile_with_version( - self.root, template, data, extensions - )[1] - - if version is None: - version = 1 - else: - version += 1 - - found_valid_version = False - # Check if next version is valid version and give a chance to try - # next 100 versions - for idx in range(100): - # Store version to data - self.data["version"] = version - - work_file = self.get_work_file() - # Safety check - path = os.path.join(self.root, work_file) - if not os.path.exists(path): - found_valid_version = True - break - - # Try next version - version += 1 - # Log warning - if idx == 0: - log.warning(( - "BUG: Function `get_last_workfile_with_version` " - "didn't return last version." - )) - # Raise exception if even 100 version fallback didn't help - if not found_valid_version: - raise AssertionError( - "This is a bug. Couldn't find valid version!" - ) - - self.work_file = work_file - - path_exists = os.path.exists(os.path.join(self.root, work_file)) - - self.btn_ok.setEnabled(not path_exists) - - if path_exists: - self.preview_label.setText( - "Cannot create \"{0}\" because file exists!" - "".format(work_file) - ) - else: - self.preview_label.setText( - "{0}".format(work_file) - ) - - -class FilesWidget(QtWidgets.QWidget): - """A widget displaying files that allows to save and open files.""" - file_selected = QtCore.Signal(str) - workfile_created = QtCore.Signal(str) - file_opened = QtCore.Signal() - - def __init__(self, parent=None): - super(FilesWidget, self).__init__(parent=parent) - - # Setup - self._asset_id = None - self._asset_doc = None - self._task_name = None - self._task_type = None - - # Pype's anatomy object for current project - self.anatomy = Anatomy(io.Session["AVALON_PROJECT"]) - # Template key used to get work template from anatomy templates - self.template_key = "work" - - # This is not root but workfile directory - self._workfiles_root = None - self._workdir_path = None - self.host = api.registered_host() - - # Whether to automatically select the latest modified - # file on a refresh of the files model. - self.auto_select_latest_modified = True - - # Avoid crash in Blender and store the message box - # (setting parent doesn't work as it hides the message box) - self._messagebox = None - - files_view = FilesView(self) - - # Create the Files model - extensions = set(self.host.file_extensions()) - files_model = FilesModel(file_extensions=extensions) - - # Create proxy model for files to be able sort and filter - proxy_model = QtCore.QSortFilterProxyModel() - proxy_model.setSourceModel(files_model) - proxy_model.setDynamicSortFilter(True) - proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) - - # Set up the file list tree view - files_view.setModel(proxy_model) - files_view.setSortingEnabled(True) - files_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - - # Date modified delegate - time_delegate = PrettyTimeDelegate() - files_view.setItemDelegateForColumn(1, time_delegate) - files_view.setIndentation(3) # smaller indentation - - # Default to a wider first filename column it is what we mostly care - # about and the date modified is relatively small anyway. 
- files_view.setColumnWidth(0, 330) - - # Filtering input - filter_input = PlaceholderLineEdit(self) - filter_input.setPlaceholderText("Filter files..") - filter_input.textChanged.connect(proxy_model.setFilterFixedString) - - # Home Page - # Build buttons widget for files widget - btns_widget = QtWidgets.QWidget(self) - btn_save = QtWidgets.QPushButton("Save As", btns_widget) - btn_browse = QtWidgets.QPushButton("Browse", btns_widget) - btn_open = QtWidgets.QPushButton("Open", btns_widget) - - btns_layout = QtWidgets.QHBoxLayout(btns_widget) - btns_layout.setContentsMargins(0, 0, 0, 0) - btns_layout.addWidget(btn_open) - btns_layout.addWidget(btn_browse) - btns_layout.addWidget(btn_save) - - # Build files widgets for home page - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setContentsMargins(0, 0, 0, 0) - main_layout.addWidget(filter_input) - main_layout.addWidget(files_view) - main_layout.addWidget(btns_widget) - - # Register signal callbacks - files_view.doubleClickedLeft.connect(self.on_open_pressed) - files_view.customContextMenuRequested.connect(self.on_context_menu) - files_view.selectionModel().selectionChanged.connect( - self.on_file_select - ) - - btn_open.pressed.connect(self.on_open_pressed) - btn_browse.pressed.connect(self.on_browse_pressed) - btn_save.pressed.connect(self.on_save_as_pressed) - - # Store attributes - self.time_delegate = time_delegate - - self.filter_input = filter_input - - self.files_view = files_view - self.files_model = files_model - - self.btns_widget = btns_widget - self.btn_open = btn_open - self.btn_browse = btn_browse - self.btn_save = btn_save - - def set_asset_task(self, asset_id, task_name, task_type): - if asset_id != self._asset_id: - self._asset_doc = None - self._asset_id = asset_id - self._task_name = task_name - self._task_type = task_type - - # Define a custom session so we can query the work root - # for a "Work area" that is not our current Session. - # This way we can browse it even before we enter it. 
- if self._asset_id and self._task_name and self._task_type: - session = self._get_session() - self._workdir_path = session["AVALON_WORKDIR"] - self._workfiles_root = self.host.work_root(session) - self.files_model.set_root(self._workfiles_root) - - else: - self.files_model.set_root(None) - - # Disable/Enable buttons based on available files in model - has_filenames = self.files_model.has_filenames() - self.btn_browse.setEnabled(has_filenames) - self.btn_open.setEnabled(has_filenames) - if not has_filenames: - # Manually trigger file selection - self.on_file_select() - - def _get_asset_doc(self): - if self._asset_id is None: - return None - - if self._asset_doc is None: - self._asset_doc = io.find_one({"_id": self._asset_id}) - return self._asset_doc - - def _get_session(self): - """Return a modified session for the current asset and task""" - - session = api.Session.copy() - self.template_key = get_workfile_template_key( - self._task_type, - session["AVALON_APP"], - project_name=session["AVALON_PROJECT"] - ) - changes = compute_session_changes( - session, - asset=self._get_asset_doc(), - task=self._task_name, - template_key=self.template_key - ) - session.update(changes) - - return session - - def _enter_session(self): - """Enter the asset and task session currently selected""" - - session = api.Session.copy() - changes = compute_session_changes( - session, - asset=self._get_asset_doc(), - task=self._task_name, - template_key=self.template_key - ) - if not changes: - # Return early if we're already in the right Session context - # to avoid any unwanted Task Changed callbacks to be triggered. - return - - update_current_task( - asset=self._get_asset_doc(), - task=self._task_name, - template_key=self.template_key - ) - - def open_file(self, filepath): - host = self.host - if host.has_unsaved_changes(): - result = self.save_changes_prompt() - if result is None: - # Cancel operation - return False - - # Save first if has changes - if result: - current_file = host.current_file() - if not current_file: - # If the user requested to save the current scene - # we can't actually automatically do so if the current - # file has not been saved with a name yet. So we'll have - # to opt out. - log.error("Can't save scene with no filename. Please " - "first save your work file using 'Save As'.") - return - - # Save current scene, continue to open file - host.save_file(current_file) - - self._enter_session() - host.open_file(filepath) - self.file_opened.emit() - - def save_changes_prompt(self): - self._messagebox = messagebox = QtWidgets.QMessageBox(parent=self) - messagebox.setWindowFlags(messagebox.windowFlags() | - QtCore.Qt.FramelessWindowHint) - messagebox.setIcon(messagebox.Warning) - messagebox.setWindowTitle("Unsaved Changes!") - messagebox.setText( - "There are unsaved changes to the current file." - "\nDo you want to save the changes?" - ) - messagebox.setStandardButtons( - messagebox.Yes | messagebox.No | messagebox.Cancel - ) - - result = messagebox.exec_() - if result == messagebox.Yes: - return True - if result == messagebox.No: - return False - return None - - def get_filename(self): - """Show save dialog to define filename for save or duplicate - - Returns: - str: The filename to create. 
- - """ - session = self._get_session() - - window = NameWindow( - parent=self, - root=self._workfiles_root, - anatomy=self.anatomy, - template_key=self.template_key, - session=session - ) - window.exec_() - - return window.get_result() - - def on_duplicate_pressed(self): - work_file = self.get_filename() - if not work_file: - return - - src = self._get_selected_filepath() - dst = os.path.join(self._workfiles_root, work_file) - shutil.copy(src, dst) - - self.workfile_created.emit(dst) - - self.refresh() - - def _get_selected_filepath(self): - """Return current filepath selected in view""" - selection = self.files_view.selectionModel() - index = selection.currentIndex() - if not index.isValid(): - return - - return index.data(self.files_model.FilePathRole) - - def on_open_pressed(self): - path = self._get_selected_filepath() - if not path: - print("No file selected to open..") - return - - self.open_file(path) - - def on_browse_pressed(self): - ext_filter = "Work File (*{0})".format( - " *".join(self.host.file_extensions()) - ) - kwargs = { - "caption": "Work Files", - "filter": ext_filter - } - if Qt.__binding__ in ("PySide", "PySide2"): - kwargs["dir"] = self._workfiles_root - else: - kwargs["directory"] = self._workfiles_root - - work_file = QtWidgets.QFileDialog.getOpenFileName(**kwargs)[0] - if work_file: - self.open_file(work_file) - - def on_save_as_pressed(self): - work_filename = self.get_filename() - if not work_filename: - return - - # Trigger before save event - emit_event( - "workfile.save.before", - {"filename": work_filename, "workdir_path": self._workdir_path}, - source="workfiles.tool" - ) - - # Make sure workfiles root is updated - # - this triggers 'workio.work_root(...)' which may change value of - # '_workfiles_root' - self.set_asset_task( - self._asset_id, self._task_name, self._task_type - ) - - # Create workfiles root folder - if not os.path.exists(self._workfiles_root): - log.debug("Initializing Work Directory: %s", self._workfiles_root) - os.makedirs(self._workfiles_root) - - # Update session if context has changed - self._enter_session() - # Prepare full path to workfile and save it - filepath = os.path.join( - os.path.normpath(self._workfiles_root), work_filename - ) - self.host.save_file(filepath) - # Create extra folders - create_workdir_extra_folders( - self._workdir_path, - api.Session["AVALON_APP"], - self._task_type, - self._task_name, - api.Session["AVALON_PROJECT"] - ) - # Trigger after save events - emit_event( - "workfile.save.after", - {"filename": work_filename, "workdir_path": self._workdir_path}, - source="workfiles.tool" - ) - - self.workfile_created.emit(filepath) - # Refresh files model - self.refresh() - - def on_file_select(self): - self.file_selected.emit(self._get_selected_filepath()) - - def refresh(self): - """Refresh listed files for current selection in the interface""" - self.files_model.refresh() - - if self.auto_select_latest_modified: - self._select_last_modified_file() - - def on_context_menu(self, point): - index = self.files_view.indexAt(point) - if not index.isValid(): - return - - is_enabled = index.data(FilesModel.IsEnabled) - if not is_enabled: - return - - menu = QtWidgets.QMenu(self) - - # Duplicate - action = QtWidgets.QAction("Duplicate", menu) - tip = "Duplicate selected file." 
- action.setToolTip(tip) - action.setStatusTip(tip) - action.triggered.connect(self.on_duplicate_pressed) - menu.addAction(action) - - # Show the context action menu - global_point = self.files_view.mapToGlobal(point) - action = menu.exec_(global_point) - if not action: - return - - def _select_last_modified_file(self): - """Utility function to select the file with latest date modified""" - role = self.files_model.DateModifiedRole - model = self.files_view.model() - - highest_index = None - highest = 0 - for row in range(model.rowCount()): - index = model.index(row, 0, parent=QtCore.QModelIndex()) - if not index.isValid(): - continue - - modified = index.data(role) - if modified is not None and modified > highest: - highest_index = index - highest = modified - - if highest_index: - self.files_view.setCurrentIndex(highest_index) - - -class SidePanelWidget(QtWidgets.QWidget): - save_clicked = QtCore.Signal() - - def __init__(self, parent=None): - super(SidePanelWidget, self).__init__(parent) - - details_label = QtWidgets.QLabel("Details", self) - details_input = QtWidgets.QPlainTextEdit(self) - details_input.setReadOnly(True) - - note_label = QtWidgets.QLabel("Artist note", self) - note_input = QtWidgets.QPlainTextEdit(self) - btn_note_save = QtWidgets.QPushButton("Save note", self) - - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setContentsMargins(0, 0, 0, 0) - main_layout.addWidget(details_label, 0) - main_layout.addWidget(details_input, 0) - main_layout.addWidget(note_label, 0) - main_layout.addWidget(note_input, 1) - main_layout.addWidget(btn_note_save, alignment=QtCore.Qt.AlignRight) - - note_input.textChanged.connect(self.on_note_change) - btn_note_save.clicked.connect(self.on_save_click) - - self.details_input = details_input - self.note_input = note_input - self.btn_note_save = btn_note_save - - self._orig_note = "" - self._workfile_doc = None - - def on_note_change(self): - text = self.note_input.toPlainText() - self.btn_note_save.setEnabled(self._orig_note != text) - - def on_save_click(self): - self._orig_note = self.note_input.toPlainText() - self.on_note_change() - self.save_clicked.emit() - - def set_context(self, asset_id, task_name, filepath, workfile_doc): - # Check if asset, task and file are selected - # NOTE workfile document is not requirement - enabled = bool(asset_id) and bool(task_name) and bool(filepath) - - self.details_input.setEnabled(enabled) - self.note_input.setEnabled(enabled) - self.btn_note_save.setEnabled(enabled) - - # Make sure workfile doc is overridden - self._workfile_doc = workfile_doc - # Disable inputs and remove texts if any required arguments are missing - if not enabled: - self._orig_note = "" - self.details_input.setPlainText("") - self.note_input.setPlainText("") - return - - orig_note = "" - if workfile_doc: - orig_note = workfile_doc["data"].get("note") or orig_note - - self._orig_note = orig_note - self.note_input.setPlainText(orig_note) - # Set as empty string - self.details_input.setPlainText("") - - filestat = os.stat(filepath) - size_ending_mapping = { - "KB": 1024 ** 1, - "MB": 1024 ** 2, - "GB": 1024 ** 3 - } - size = filestat.st_size - ending = "B" - for _ending, _size in size_ending_mapping.items(): - if filestat.st_size < _size: - break - size = filestat.st_size / _size - ending = _ending - - # Append html string - datetime_format = "%b %d %Y %H:%M:%S" - creation_time = datetime.datetime.fromtimestamp(filestat.st_ctime) - modification_time = datetime.datetime.fromtimestamp(filestat.st_mtime) - lines = ( - "Size:", - 
"{:.2f} {}".format(size, ending), - "Created:", - creation_time.strftime(datetime_format), - "Modified:", - modification_time.strftime(datetime_format) - ) - self.details_input.appendHtml("
".join(lines)) - - def get_workfile_data(self): - data = { - "note": self.note_input.toPlainText() - } - return self._workfile_doc, data - - -class Window(QtWidgets.QMainWindow): - """Work Files Window""" - title = "Work Files" - - def __init__(self, parent=None): - super(Window, self).__init__(parent=parent) - self.setWindowTitle(self.title) - window_flags = QtCore.Qt.Window | QtCore.Qt.WindowCloseButtonHint - if not parent: - window_flags |= QtCore.Qt.WindowStaysOnTopHint - self.setWindowFlags(window_flags) - - # Create pages widget and set it as central widget - pages_widget = QtWidgets.QStackedWidget(self) - self.setCentralWidget(pages_widget) - - home_page_widget = QtWidgets.QWidget(pages_widget) - home_body_widget = QtWidgets.QWidget(home_page_widget) - - assets_widget = SingleSelectAssetsWidget(io, parent=home_body_widget) - assets_widget.set_current_asset_btn_visibility(True) - - tasks_widget = TasksWidget(io, home_body_widget) - files_widget = FilesWidget(home_body_widget) - side_panel = SidePanelWidget(home_body_widget) - - pages_widget.addWidget(home_page_widget) - - # Build home - home_page_layout = QtWidgets.QVBoxLayout(home_page_widget) - home_page_layout.addWidget(home_body_widget) - - # Build home - body - body_layout = QtWidgets.QVBoxLayout(home_body_widget) - split_widget = QtWidgets.QSplitter(home_body_widget) - split_widget.addWidget(assets_widget) - split_widget.addWidget(tasks_widget) - split_widget.addWidget(files_widget) - split_widget.addWidget(side_panel) - split_widget.setSizes([255, 160, 455, 175]) - - body_layout.addWidget(split_widget) - - # Add top margin for tasks to align it visually with files as - # the files widget has a filter field which tasks does not. - tasks_widget.setContentsMargins(0, 32, 0, 0) - - # Set context after asset widget is refreshed - # - to do so it is necessary to wait until refresh is done - set_context_timer = QtCore.QTimer() - set_context_timer.setInterval(100) - - # Connect signals - set_context_timer.timeout.connect(self._on_context_set_timeout) - assets_widget.selection_changed.connect(self._on_asset_changed) - tasks_widget.task_changed.connect(self._on_task_changed) - files_widget.file_selected.connect(self.on_file_select) - files_widget.workfile_created.connect(self.on_workfile_create) - files_widget.file_opened.connect(self._on_file_opened) - side_panel.save_clicked.connect(self.on_side_panel_save) - - self._set_context_timer = set_context_timer - self.home_page_widget = home_page_widget - self.pages_widget = pages_widget - self.home_body_widget = home_body_widget - self.split_widget = split_widget - - self.assets_widget = assets_widget - self.tasks_widget = tasks_widget - self.files_widget = files_widget - self.side_panel = side_panel - - # Force focus on the open button by default, required for Houdini. - files_widget.btn_open.setFocus() - - self.resize(1200, 600) - - self._first_show = True - self._context_to_set = None - - def showEvent(self, event): - super(Window, self).showEvent(event) - if self._first_show: - self._first_show = False - self.refresh() - self.setStyleSheet(style.load_stylesheet()) - - def keyPressEvent(self, event): - """Custom keyPressEvent. - - Override keyPressEvent to do nothing so that Maya's panels won't - take focus when pressing "SHIFT" whilst mouse is over viewport or - outliner. This way users don't accidentally perform Maya commands - whilst trying to name an instance. 
- - """ - - def set_save_enabled(self, enabled): - self.files_widget.btn_save.setEnabled(enabled) - - def on_file_select(self, filepath): - asset_id = self.assets_widget.get_selected_asset_id() - task_name = self.tasks_widget.get_selected_task_name() - - workfile_doc = None - if asset_id and task_name and filepath: - filename = os.path.split(filepath)[1] - workfile_doc = get_workfile_doc( - asset_id, task_name, filename, io - ) - self.side_panel.set_context( - asset_id, task_name, filepath, workfile_doc - ) - - def on_workfile_create(self, filepath): - self._create_workfile_doc(filepath) - - def _on_file_opened(self): - self.close() - - def on_side_panel_save(self): - workfile_doc, data = self.side_panel.get_workfile_data() - if not workfile_doc: - filepath = self.files_widget._get_selected_filepath() - self._create_workfile_doc(filepath, force=True) - workfile_doc = self._get_current_workfile_doc() - - save_workfile_data_to_doc(workfile_doc, data, io) - - def _get_current_workfile_doc(self, filepath=None): - if filepath is None: - filepath = self.files_widget._get_selected_filepath() - task_name = self.tasks_widget.get_selected_task_name() - asset_id = self.assets_widget.get_selected_asset_id() - if not task_name or not asset_id or not filepath: - return - - filename = os.path.split(filepath)[1] - return get_workfile_doc( - asset_id, task_name, filename, io - ) - - def _create_workfile_doc(self, filepath, force=False): - workfile_doc = None - if not force: - workfile_doc = self._get_current_workfile_doc(filepath) - - if not workfile_doc: - workdir, filename = os.path.split(filepath) - asset_id = self.assets_widget.get_selected_asset_id() - asset_doc = io.find_one({"_id": asset_id}) - task_name = self.tasks_widget.get_selected_task_name() - create_workfile_doc(asset_doc, task_name, filename, workdir, io) - - def refresh(self): - # Refresh asset widget - self.assets_widget.refresh() - - self._on_task_changed() - - def set_context(self, context): - self._context_to_set = context - self._set_context_timer.start() - - def _on_context_set_timeout(self): - if self._context_to_set is None: - self._set_context_timer.stop() - return - - if self.assets_widget.refreshing: - return - - self._context_to_set, context = None, self._context_to_set - if "asset" in context: - asset_doc = io.find_one( - { - "name": context["asset"], - "type": "asset" - }, - {"_id": 1} - ) or {} - asset_id = asset_doc.get("_id") - # Select the asset - self.assets_widget.select_asset(asset_id) - self.tasks_widget.set_asset_id(asset_id) - - if "task" in context: - self.tasks_widget.select_task_name(context["task"]) - self._on_task_changed() - - def _on_asset_changed(self): - asset_id = self.assets_widget.get_selected_asset_id() - if asset_id: - self.tasks_widget.setEnabled(True) - else: - # Force disable the other widgets if no - # active selection - self.tasks_widget.setEnabled(False) - self.files_widget.setEnabled(False) - - self.tasks_widget.set_asset_id(asset_id) - - def _on_task_changed(self): - asset_id = self.assets_widget.get_selected_asset_id() - task_name = self.tasks_widget.get_selected_task_name() - task_type = self.tasks_widget.get_selected_task_type() - - asset_is_valid = asset_id is not None - self.tasks_widget.setEnabled(asset_is_valid) - - self.files_widget.setEnabled(bool(task_name) and asset_is_valid) - self.files_widget.set_asset_task(asset_id, task_name, task_type) - self.files_widget.refresh() - - def validate_host_requirements(host): if host is None: raise RuntimeError("No registered host.") @@ -1252,12 
+49,12 @@ def show(root=None, debug=False, parent=None, use_context=True, save=True): except (AttributeError, RuntimeError): pass - host = api.registered_host() + host = registered_host() validate_host_requirements(host) if debug: - api.Session["AVALON_ASSET"] = "Mock" - api.Session["AVALON_TASK"] = "Testing" + legacy_io.Session["AVALON_ASSET"] = "Mock" + legacy_io.Session["AVALON_TASK"] = "Testing" with qt_app_context(): window = Window(parent=parent) @@ -1265,9 +62,8 @@ def show(root=None, debug=False, parent=None, use_context=True, save=True): if use_context: context = { - "asset": api.Session["AVALON_ASSET"], - "silo": api.Session["AVALON_SILO"], - "task": api.Session["AVALON_TASK"] + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] } window.set_context(context) diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py new file mode 100644 index 0000000000..977111b71b --- /dev/null +++ b/openpype/tools/workfiles/files_widget.py @@ -0,0 +1,724 @@ +import os +import logging +import shutil + +import Qt +from Qt import QtWidgets, QtCore + +from openpype.tools.utils import PlaceholderLineEdit +from openpype.tools.utils.delegates import PrettyTimeDelegate +from openpype.lib import ( + emit_event, + Anatomy, + get_workfile_template_key, + create_workdir_extra_folders, +) +from openpype.lib.avalon_context import ( + update_current_task, + compute_session_changes +) +from openpype.pipeline import ( + registered_host, + legacy_io, +) +from .model import ( + WorkAreaFilesModel, + PublishFilesModel, + + FILEPATH_ROLE, + DATE_MODIFIED_ROLE, +) +from .save_as_dialog import SaveAsDialog + +log = logging.getLogger(__name__) + + +class FilesView(QtWidgets.QTreeView): + doubleClickedLeft = QtCore.Signal() + doubleClickedRight = QtCore.Signal() + + def mouseDoubleClickEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self.doubleClickedLeft.emit() + + elif event.button() == QtCore.Qt.RightButton: + self.doubleClickedRight.emit() + + return super(FilesView, self).mouseDoubleClickEvent(event) + + +class SelectContextOverlay(QtWidgets.QFrame): + def __init__(self, parent): + super(SelectContextOverlay, self).__init__(parent) + + self.setObjectName("WorkfilesPublishedContextSelect") + label_widget = QtWidgets.QLabel( + "Please choose context on the left
<", + self + ) + label_widget.setAlignment(QtCore.Qt.AlignCenter) + + layout = QtWidgets.QHBoxLayout(self) + layout.addWidget(label_widget, 1, QtCore.Qt.AlignCenter) + + label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + parent.installEventFilter(self) + + def eventFilter(self, obj, event): + if event.type() == QtCore.QEvent.Resize: + self.resize(obj.size()) + + return super(SelectContextOverlay, self).eventFilter(obj, event) + + +class FilesWidget(QtWidgets.QWidget): + """A widget displaying files that allows to save and open files.""" + file_selected = QtCore.Signal(str) + file_opened = QtCore.Signal() + workfile_created = QtCore.Signal(str) + published_visible_changed = QtCore.Signal(bool) + + def __init__(self, parent): + super(FilesWidget, self).__init__(parent) + + # Setup + self._asset_id = None + self._asset_doc = None + self._task_name = None + self._task_type = None + + # Pype's anatomy object for current project + self.anatomy = Anatomy(legacy_io.Session["AVALON_PROJECT"]) + # Template key used to get work template from anatomy templates + self.template_key = "work" + + # This is not root but workfile directory + self._workfiles_root = None + self._workdir_path = None + self.host = registered_host() + + # Whether to automatically select the latest modified + # file on a refresh of the files model. + self.auto_select_latest_modified = True + + # Avoid crash in Blender and store the message box + # (setting parent doesn't work as it hides the message box) + self._messagebox = None + + # Filtering input + filter_widget = QtWidgets.QWidget(self) + + published_checkbox = QtWidgets.QCheckBox("Published", filter_widget) + + filter_input = PlaceholderLineEdit(filter_widget) + filter_input.setPlaceholderText("Filter files..") + + filter_layout = QtWidgets.QHBoxLayout(filter_widget) + filter_layout.setContentsMargins(0, 0, 0, 0) + filter_layout.addWidget(filter_input, 1) + filter_layout.addWidget(published_checkbox, 0) + + # Create the Files models + extensions = set(self.host.file_extensions()) + + views_widget = QtWidgets.QWidget(self) + # --- Workarea view --- + workarea_files_model = WorkAreaFilesModel(extensions) + + # Create proxy model for files to be able sort and filter + workarea_proxy_model = QtCore.QSortFilterProxyModel() + workarea_proxy_model.setSourceModel(workarea_files_model) + workarea_proxy_model.setDynamicSortFilter(True) + workarea_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + # Set up the file list tree view + workarea_files_view = FilesView(views_widget) + workarea_files_view.setModel(workarea_proxy_model) + workarea_files_view.setSortingEnabled(True) + workarea_files_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + + # Date modified delegate + workarea_time_delegate = PrettyTimeDelegate() + workarea_files_view.setItemDelegateForColumn(1, workarea_time_delegate) + # smaller indentation + workarea_files_view.setIndentation(3) + + # Default to a wider first filename column it is what we mostly care + # about and the date modified is relatively small anyway. 
+ workarea_files_view.setColumnWidth(0, 330) + + # --- Publish files view --- + publish_files_model = PublishFilesModel( + extensions, legacy_io, self.anatomy + ) + + publish_proxy_model = QtCore.QSortFilterProxyModel() + publish_proxy_model.setSourceModel(publish_files_model) + publish_proxy_model.setDynamicSortFilter(True) + publish_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + publish_files_view = FilesView(views_widget) + publish_files_view.setModel(publish_proxy_model) + + publish_files_view.setSortingEnabled(True) + publish_files_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + + # Date modified delegate + publish_time_delegate = PrettyTimeDelegate() + publish_files_view.setItemDelegateForColumn(1, publish_time_delegate) + # smaller indentation + publish_files_view.setIndentation(3) + + # Default to a wider first filename column it is what we mostly care + # about and the date modified is relatively small anyway. + publish_files_view.setColumnWidth(0, 330) + + publish_context_overlay = SelectContextOverlay(views_widget) + publish_context_overlay.setVisible(False) + + views_layout = QtWidgets.QHBoxLayout(views_widget) + views_layout.setContentsMargins(0, 0, 0, 0) + views_layout.addWidget(workarea_files_view, 1) + views_layout.addWidget(publish_files_view, 1) + + # Home Page + # Build buttons widget for files widget + btns_widget = QtWidgets.QWidget(self) + + workarea_btns_widget = QtWidgets.QWidget(btns_widget) + btn_save = QtWidgets.QPushButton("Save As", workarea_btns_widget) + btn_browse = QtWidgets.QPushButton("Browse", workarea_btns_widget) + btn_open = QtWidgets.QPushButton("Open", workarea_btns_widget) + + workarea_btns_layout = QtWidgets.QHBoxLayout(workarea_btns_widget) + workarea_btns_layout.setContentsMargins(0, 0, 0, 0) + workarea_btns_layout.addWidget(btn_open, 1) + workarea_btns_layout.addWidget(btn_browse, 1) + workarea_btns_layout.addWidget(btn_save, 1) + + publish_btns_widget = QtWidgets.QWidget(btns_widget) + btn_save_as_published = QtWidgets.QPushButton( + "Copy && Open", publish_btns_widget + ) + btn_change_context = QtWidgets.QPushButton( + "Choose different context", publish_btns_widget + ) + btn_select_context_published = QtWidgets.QPushButton( + "Copy && Open", publish_btns_widget + ) + btn_cancel_published = QtWidgets.QPushButton( + "Cancel", publish_btns_widget + ) + + publish_btns_layout = QtWidgets.QHBoxLayout(publish_btns_widget) + publish_btns_layout.setContentsMargins(0, 0, 0, 0) + publish_btns_layout.addWidget(btn_save_as_published, 1) + publish_btns_layout.addWidget(btn_change_context, 1) + publish_btns_layout.addWidget(btn_select_context_published, 1) + publish_btns_layout.addWidget(btn_cancel_published, 1) + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.setContentsMargins(0, 0, 0, 0) + btns_layout.addWidget(workarea_btns_widget, 1) + btns_layout.addWidget(publish_btns_widget, 1) + + # Build files widgets for home page + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(filter_widget, 0) + main_layout.addWidget(views_widget, 1) + main_layout.addWidget(btns_widget, 0) + + # Register signal callbacks + published_checkbox.stateChanged.connect(self._on_published_change) + filter_input.textChanged.connect(self._on_filter_text_change) + + workarea_files_view.doubleClickedLeft.connect( + self._on_workarea_open_pressed + ) + workarea_files_view.customContextMenuRequested.connect( + self._on_workarea_context_menu + ) + 
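
Both file views above read their data through a QSortFilterProxyModel, so the filter field only has to update the proxy's fixed-string filter and the source models are never touched. A minimal, self-contained illustration of that pattern in plain Qt:

    from Qt import QtCore, QtGui, QtWidgets

    app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])

    source = QtGui.QStandardItemModel()
    for name in ("scene_v001.ma", "scene_v002.ma", "props_v001.ma"):
        source.appendRow(QtGui.QStandardItem(name))

    proxy = QtCore.QSortFilterProxyModel()
    proxy.setSourceModel(source)
    proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
    # This is what the filter field's textChanged hookup ends up calling
    proxy.setFilterFixedString("scene")

    # Only the two "scene_*" rows remain visible through the proxy
    print([proxy.index(row, 0).data() for row in range(proxy.rowCount())])
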
workarea_files_view.selectionModel().selectionChanged.connect( + self.on_file_select + ) + + btn_open.pressed.connect(self._on_workarea_open_pressed) + btn_browse.pressed.connect(self.on_browse_pressed) + btn_save.pressed.connect(self._on_save_as_pressed) + btn_save_as_published.pressed.connect( + self._on_published_save_as_pressed + ) + btn_change_context.pressed.connect( + self._on_publish_change_context_pressed + ) + btn_select_context_published.pressed.connect( + self._on_publish_select_context_pressed + ) + btn_cancel_published.pressed.connect( + self._on_publish_cancel_pressed + ) + + # Store attributes + self._published_checkbox = published_checkbox + self._filter_input = filter_input + + self._workarea_time_delegate = workarea_time_delegate + self._workarea_files_view = workarea_files_view + self._workarea_files_model = workarea_files_model + self._workarea_proxy_model = workarea_proxy_model + + self._publish_time_delegate = publish_time_delegate + self._publish_files_view = publish_files_view + self._publish_files_model = publish_files_model + self._publish_proxy_model = publish_proxy_model + + self._publish_context_overlay = publish_context_overlay + + self._workarea_btns_widget = workarea_btns_widget + self._publish_btns_widget = publish_btns_widget + self._btn_open = btn_open + self._btn_browse = btn_browse + self._btn_save = btn_save + + self._btn_save_as_published = btn_save_as_published + self._btn_change_context = btn_change_context + self._btn_select_context_published = btn_select_context_published + self._btn_cancel_published = btn_cancel_published + + # Create a proxy widget for files widget + self.setFocusProxy(btn_open) + + # Hide publish files widgets + publish_files_view.setVisible(False) + publish_btns_widget.setVisible(False) + btn_select_context_published.setVisible(False) + btn_cancel_published.setVisible(False) + + self._publish_context_select_mode = False + + @property + def published_enabled(self): + return self._published_checkbox.isChecked() + + def _on_published_change(self): + published_enabled = self.published_enabled + + self._workarea_files_view.setVisible(not published_enabled) + self._workarea_btns_widget.setVisible(not published_enabled) + + self._publish_files_view.setVisible(published_enabled) + self._publish_btns_widget.setVisible(published_enabled) + + self._update_filtering() + self._update_asset_task() + + self.published_visible_changed.emit(published_enabled) + + self._select_last_modified_file() + + def _on_filter_text_change(self): + self._update_filtering() + + def _update_filtering(self): + text = self._filter_input.text() + if self.published_enabled: + self._publish_proxy_model.setFilterFixedString(text) + else: + self._workarea_proxy_model.setFilterFixedString(text) + + def set_save_enabled(self, enabled): + self._btn_save.setEnabled(enabled) + if not enabled and self._published_checkbox.isChecked(): + self._published_checkbox.setChecked(False) + self._published_checkbox.setVisible(enabled) + + def set_asset_task(self, asset_id, task_name, task_type): + if asset_id != self._asset_id: + self._asset_doc = None + self._asset_id = asset_id + self._task_name = task_name + self._task_type = task_type + self._update_asset_task() + + def _update_asset_task(self): + if self.published_enabled and not self._publish_context_select_mode: + self._publish_files_model.set_context( + self._asset_id, self._task_name + ) + has_valid_items = self._publish_files_model.has_valid_items() + self._btn_save_as_published.setEnabled(has_valid_items) + 
self._btn_change_context.setEnabled(has_valid_items) + + else: + # Define a custom session so we can query the work root + # for a "Work area" that is not our current Session. + # This way we can browse it even before we enter it. + if self._asset_id and self._task_name and self._task_type: + session = self._get_session() + self._workdir_path = session["AVALON_WORKDIR"] + self._workfiles_root = self.host.work_root(session) + self._workarea_files_model.set_root(self._workfiles_root) + + else: + self._workarea_files_model.set_root(None) + + # Disable/Enable buttons based on available files in model + has_valid_items = self._workarea_files_model.has_valid_items() + self._btn_browse.setEnabled(has_valid_items) + self._btn_open.setEnabled(has_valid_items) + + if self._publish_context_select_mode: + self._btn_select_context_published.setEnabled( + bool(self._asset_id) and bool(self._task_name) + ) + return + + # Manually trigger file selection + if not has_valid_items: + self.on_file_select() + + def _get_asset_doc(self): + if self._asset_id is None: + return None + + if self._asset_doc is None: + self._asset_doc = legacy_io.find_one({"_id": self._asset_id}) + return self._asset_doc + + def _get_session(self): + """Return a modified session for the current asset and task""" + + session = legacy_io.Session.copy() + self.template_key = get_workfile_template_key( + self._task_type, + session["AVALON_APP"], + project_name=session["AVALON_PROJECT"] + ) + changes = compute_session_changes( + session, + asset=self._get_asset_doc(), + task=self._task_name, + template_key=self.template_key + ) + session.update(changes) + + return session + + def _enter_session(self): + """Enter the asset and task session currently selected""" + + session = legacy_io.Session.copy() + changes = compute_session_changes( + session, + asset=self._get_asset_doc(), + task=self._task_name, + template_key=self.template_key + ) + if not changes: + # Return early if we're already in the right Session context + # to avoid any unwanted Task Changed callbacks to be triggered. + return + + update_current_task( + asset=self._get_asset_doc(), + task=self._task_name, + template_key=self.template_key + ) + + def open_file(self, filepath): + host = self.host + if host.has_unsaved_changes(): + result = self.save_changes_prompt() + if result is None: + # Cancel operation + return False + + # Save first if has changes + if result: + current_file = host.current_file() + if not current_file: + # If the user requested to save the current scene + # we can't actually automatically do so if the current + # file has not been saved with a name yet. So we'll have + # to opt out. + log.error("Can't save scene with no filename. Please " + "first save your work file using 'Save As'.") + return + + # Save current scene, continue to open file + host.save_file(current_file) + + self._enter_session() + host.open_file(filepath) + self.file_opened.emit() + + def save_changes_prompt(self): + self._messagebox = messagebox = QtWidgets.QMessageBox(parent=self) + messagebox.setWindowFlags(messagebox.windowFlags() | + QtCore.Qt.FramelessWindowHint) + messagebox.setIcon(messagebox.Warning) + messagebox.setWindowTitle("Unsaved Changes!") + messagebox.setText( + "There are unsaved changes to the current file." + "\nDo you want to save the changes?" 
+ ) + messagebox.setStandardButtons( + messagebox.Yes | messagebox.No | messagebox.Cancel + ) + + result = messagebox.exec_() + if result == messagebox.Yes: + return True + if result == messagebox.No: + return False + return None + + def get_filename(self): + """Show save dialog to define filename for save or duplicate + + Returns: + str: The filename to create. + + """ + session = self._get_session() + + if self.published_enabled: + filepath = self._get_selected_filepath() + extensions = [os.path.splitext(filepath)[1]] + else: + extensions = self.host.file_extensions() + + window = SaveAsDialog( + parent=self, + root=self._workfiles_root, + anatomy=self.anatomy, + template_key=self.template_key, + extensions=extensions, + session=session + ) + window.exec_() + + return window.get_result() + + def on_duplicate_pressed(self): + work_file = self.get_filename() + if not work_file: + return + + src = self._get_selected_filepath() + dst = os.path.join(self._workfiles_root, work_file) + shutil.copy(src, dst) + + self.workfile_created.emit(dst) + + self.refresh() + + def _get_selected_filepath(self): + """Return current filepath selected in view""" + if self.published_enabled: + source_view = self._publish_files_view + else: + source_view = self._workarea_files_view + selection = source_view.selectionModel() + index = selection.currentIndex() + if not index.isValid(): + return + + return index.data(FILEPATH_ROLE) + + def _on_workarea_open_pressed(self): + path = self._get_selected_filepath() + if not path: + print("No file selected to open..") + return + + self.open_file(path) + + def on_browse_pressed(self): + ext_filter = "Work File (*{0})".format( + " *".join(self.host.file_extensions()) + ) + kwargs = { + "caption": "Work Files", + "filter": ext_filter + } + if Qt.__binding__ in ("PySide", "PySide2"): + kwargs["dir"] = self._workfiles_root + else: + kwargs["directory"] = self._workfiles_root + + work_file = QtWidgets.QFileDialog.getOpenFileName(**kwargs)[0] + if work_file: + self.open_file(work_file) + + def _on_save_as_pressed(self): + self._save_as_with_dialog() + + def _save_as_with_dialog(self): + work_filename = self.get_filename() + if not work_filename: + return None + + src_path = self._get_selected_filepath() + + # Trigger before save event + emit_event( + "workfile.save.before", + {"filename": work_filename, "workdir_path": self._workdir_path}, + source="workfiles.tool" + ) + + # Make sure workfiles root is updated + # - this triggers 'workio.work_root(...)' which may change value of + # '_workfiles_root' + self.set_asset_task( + self._asset_id, self._task_name, self._task_type + ) + + # Create workfiles root folder + if not os.path.exists(self._workfiles_root): + log.debug("Initializing Work Directory: %s", self._workfiles_root) + os.makedirs(self._workfiles_root) + + # Prepare full path to workfile and save it + filepath = os.path.join( + os.path.normpath(self._workfiles_root), work_filename + ) + + # Update session if context has changed + self._enter_session() + + if not self.published_enabled: + self.host.save_file(filepath) + else: + shutil.copy(src_path, filepath) + self.host.open_file(filepath) + + # Create extra folders + create_workdir_extra_folders( + self._workdir_path, + legacy_io.Session["AVALON_APP"], + self._task_type, + self._task_name, + legacy_io.Session["AVALON_PROJECT"] + ) + # Trigger after save events + emit_event( + "workfile.save.after", + {"filename": work_filename, "workdir_path": self._workdir_path}, + source="workfiles.tool" + ) + + 
self.workfile_created.emit(filepath) + # Refresh files model + if self.published_enabled: + self._published_checkbox.setChecked(False) + else: + self.refresh() + return filepath + + def _on_published_save_as_pressed(self): + self._save_as_with_dialog() + + def _set_publish_context_select_mode(self, enabled): + self._publish_context_select_mode = enabled + + # Show buttons related to context selection + self._publish_context_overlay.setVisible(enabled) + self._btn_cancel_published.setVisible(enabled) + self._btn_select_context_published.setVisible(enabled) + # Change enabled state based on select context + self._btn_select_context_published.setEnabled( + bool(self._asset_id) and bool(self._task_name) + ) + + self._btn_save_as_published.setVisible(not enabled) + self._btn_change_context.setVisible(not enabled) + + # Change views and disable workarea view if enabled + self._workarea_files_view.setEnabled(not enabled) + if self.published_enabled: + self._workarea_files_view.setVisible(enabled) + self._publish_files_view.setVisible(not enabled) + else: + self._workarea_files_view.setVisible(True) + self._publish_files_view.setVisible(False) + + # Disable filter widgets + self._published_checkbox.setEnabled(not enabled) + self._filter_input.setEnabled(not enabled) + + def _on_publish_change_context_pressed(self): + self._set_publish_context_select_mode(True) + + def _on_publish_select_context_pressed(self): + result = self._save_as_with_dialog() + if result is not None: + self._set_publish_context_select_mode(False) + self._update_asset_task() + + def _on_publish_cancel_pressed(self): + self._set_publish_context_select_mode(False) + self._update_asset_task() + + def on_file_select(self): + self.file_selected.emit(self._get_selected_filepath()) + + def refresh(self): + """Refresh listed files for current selection in the interface""" + if self.published_enabled: + self._publish_files_model.refresh() + else: + self._workarea_files_model.refresh() + + if self.auto_select_latest_modified: + self._select_last_modified_file() + + def _on_workarea_context_menu(self, point): + index = self._workarea_files_view.indexAt(point) + if not index.isValid(): + return + + if not index.flags() & QtCore.Qt.ItemIsEnabled: + return + + menu = QtWidgets.QMenu(self) + + # Duplicate + action = QtWidgets.QAction("Duplicate", menu) + tip = "Duplicate selected file." 
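
The save flow above wraps the actual write in "workfile.save.before" and "workfile.save.after" events, so host integrations can react without the tool knowing about them. A sketch of a listener; this assumes the register_event_callback helper from openpype.lib and that the callback receives an event object exposing the emitted payload as .data:

    from openpype.lib import register_event_callback

    def on_workfile_save_before(event):
        # Keys mirror the payload emitted above; the exact event API is assumed
        payload = event.data
        print("About to save:", payload.get("filename"))
        print("Workdir:", payload.get("workdir_path"))

    register_event_callback("workfile.save.before", on_workfile_save_before)
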
+ action.setToolTip(tip) + action.setStatusTip(tip) + action.triggered.connect(self.on_duplicate_pressed) + menu.addAction(action) + + # Show the context action menu + global_point = self._workarea_files_view.mapToGlobal(point) + action = menu.exec_(global_point) + if not action: + return + + def _select_last_modified_file(self): + """Utility function to select the file with latest date modified""" + if self.published_enabled: + source_view = self._publish_files_view + else: + source_view = self._workarea_files_view + model = source_view.model() + + highest_index = None + highest = 0 + for row in range(model.rowCount()): + index = model.index(row, 0, parent=QtCore.QModelIndex()) + if not index.isValid(): + continue + + modified = index.data(DATE_MODIFIED_ROLE) + if modified is not None and modified > highest: + highest_index = index + highest = modified + + if highest_index: + source_view.setCurrentIndex(highest_index) diff --git a/openpype/tools/workfiles/model.py b/openpype/tools/workfiles/model.py index e9184842fc..8f9dd8c6ba 100644 --- a/openpype/tools/workfiles/model.py +++ b/openpype/tools/workfiles/model.py @@ -1,153 +1,179 @@ import os import logging -from Qt import QtCore +from Qt import QtCore, QtGui import qtawesome from openpype.style import ( get_default_entity_icon_color, get_disabled_entity_icon_color, ) - -from openpype.tools.utils.models import TreeModel, Item +from openpype.pipeline import get_representation_path log = logging.getLogger(__name__) -class FilesModel(TreeModel): - """Model listing files with specified extensions in a root folder""" - Columns = ["filename", "date"] +FILEPATH_ROLE = QtCore.Qt.UserRole + 2 +DATE_MODIFIED_ROLE = QtCore.Qt.UserRole + 3 +ITEM_ID_ROLE = QtCore.Qt.UserRole + 4 - FileNameRole = QtCore.Qt.UserRole + 2 - DateModifiedRole = QtCore.Qt.UserRole + 3 - FilePathRole = QtCore.Qt.UserRole + 4 - IsEnabled = QtCore.Qt.UserRole + 5 - def __init__(self, file_extensions, parent=None): - super(FilesModel, self).__init__(parent=parent) +class WorkAreaFilesModel(QtGui.QStandardItemModel): + """Model is looking into one folder for files with extension.""" + + def __init__(self, extensions, *args, **kwargs): + super(WorkAreaFilesModel, self).__init__(*args, **kwargs) + + self.setColumnCount(2) self._root = None - self._file_extensions = file_extensions - self._icons = { - "file": qtawesome.icon( - "fa.file-o", - color=get_default_entity_icon_color() + self._file_extensions = extensions + self._invalid_path_item = None + self._empty_root_item = None + self._file_icon = qtawesome.icon( + "fa.file-o", + color=get_default_entity_icon_color() + ) + self._invalid_item_visible = False + self._items_by_filename = {} + + def _get_invalid_path_item(self): + if self._invalid_path_item is None: + message = "Work Area does not exist. Use Save As to create it." + item = QtGui.QStandardItem(message) + icon = qtawesome.icon( + "fa.times", + color=get_disabled_entity_icon_color() ) - } + item.setData(icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.NoItemFlags) + item.setColumnCount(self.columnCount()) + self._invalid_path_item = item + return self._invalid_path_item + + def _get_empty_root_item(self): + if self._empty_root_item is None: + message = "Work Area is empty." 
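
The reworked model module above drops the TreeModel subclass and instead publishes per-row values through module-level custom data roles, so any index (including proxy indexes) can be queried for them directly. A standalone round-trip using the same role offsets:

    from Qt import QtCore, QtGui, QtWidgets

    app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])

    FILEPATH_ROLE = QtCore.Qt.UserRole + 2
    DATE_MODIFIED_ROLE = QtCore.Qt.UserRole + 3

    model = QtGui.QStandardItemModel()
    item = QtGui.QStandardItem("scene_v001.ma")
    item.setData("/work/scene_v001.ma", FILEPATH_ROLE)
    item.setData(1650000000.0, DATE_MODIFIED_ROLE)
    model.appendRow(item)

    index = model.index(0, 0)
    print(index.data(FILEPATH_ROLE))       # /work/scene_v001.ma
    print(index.data(DATE_MODIFIED_ROLE))  # 1650000000.0
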
+ item = QtGui.QStandardItem(message) + icon = qtawesome.icon( + "fa.times", + color=get_disabled_entity_icon_color() + ) + item.setData(icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.NoItemFlags) + item.setColumnCount(self.columnCount()) + self._empty_root_item = item + return self._empty_root_item def set_root(self, root): + """Change directory where to look for file.""" self._root = root + if root and not os.path.exists(root): + log.debug("Work Area does not exist: {}".format(root)) self.refresh() - def _add_empty(self): - item = Item() - item.update({ - # Put a display message in 'filename' - "filename": "No files found.", - # Not-selectable - "enabled": False, - "date": None, - "filepath": None - }) - - self.add_child(item) + def _clear(self): + root_item = self.invisibleRootItem() + rows = root_item.rowCount() + if rows > 0: + if self._invalid_item_visible: + for row in range(rows): + root_item.takeRow(row) + else: + root_item.removeRows(0, rows) + self._items_by_filename = {} def refresh(self): - self.clear() - self.beginResetModel() - - root = self._root - - if not root: - self.endResetModel() - return - - if not os.path.exists(root): + """Refresh and update model items.""" + root_item = self.invisibleRootItem() + # If path is not set or does not exist then add invalid path item + if not self._root or not os.path.exists(self._root): + self._clear() # Add Work Area does not exist placeholder - log.debug("Work Area does not exist: %s", root) - message = "Work Area does not exist. Use Save As to create it." - item = Item({ - "filename": message, - "date": None, - "filepath": None, - "enabled": False, - "icon": qtawesome.icon( - "fa.times", - color=get_disabled_entity_icon_color() - ) - }) - self.add_child(item) - self.endResetModel() + item = self._get_invalid_path_item() + root_item.appendRow(item) + self._invalid_item_visible = True return - extensions = self._file_extensions + # Clear items if previous refresh set '_invalid_item_visible' to True + # - Invalid items are not stored to '_items_by_filename' so they would + # not be removed + if self._invalid_item_visible: + self._clear() - for filename in os.listdir(root): - path = os.path.join(root, filename) - if os.path.isdir(path): + # Check for new items that should be added and items that should be + # removed + new_items = [] + items_to_remove = set(self._items_by_filename.keys()) + for filename in os.listdir(self._root): + filepath = os.path.join(self._root, filename) + if os.path.isdir(filepath): continue ext = os.path.splitext(filename)[1] - if extensions and ext not in extensions: + if ext not in self._file_extensions: continue - modified = os.path.getmtime(path) + modified = os.path.getmtime(filepath) - item = Item({ - "filename": filename, - "date": modified, - "filepath": path - }) + # Use existing item or create new one + if filename in items_to_remove: + items_to_remove.remove(filename) + item = self._items_by_filename[filename] + else: + item = QtGui.QStandardItem(filename) + item.setColumnCount(self.columnCount()) + item.setFlags( + QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable + ) + item.setData(self._file_icon, QtCore.Qt.DecorationRole) + new_items.append(item) + self._items_by_filename[filename] = item + # Update data that may be different + item.setData(filepath, FILEPATH_ROLE) + item.setData(modified, DATE_MODIFIED_ROLE) - self.add_child(item) + # Add new items if there are any + if new_items: + root_item.appendRows(new_items) - if self.rowCount() == 0: - self._add_empty() + # Remove items that 
are no longer available
+        for filename in items_to_remove:
+            item = self._items_by_filename.pop(filename)
+            root_item.removeRow(item.row())

-        self.endResetModel()
-
-    def has_filenames(self):
-        for item in self._root_item.children():
-            if item.get("enabled", True):
-                return True
-        return False
-
-    def rowCount(self, parent=None):
-        if parent is None or not parent.isValid():
-            parent_item = self._root_item
+        # Add empty root item if there are no filenames that could be shown
+        if root_item.rowCount() > 0:
+            self._invalid_item_visible = False
        else:
-            parent_item = parent.internalPointer()
-        return parent_item.childCount()
+            self._invalid_item_visible = True
+            item = self._get_empty_root_item()
+            root_item.appendRow(item)

-    def data(self, index, role):
-        if not index.isValid():
-            return
+    def has_valid_items(self):
+        """Whether the directory contains files listed as items."""
+        return not self._invalid_item_visible

-        if role == QtCore.Qt.DecorationRole:
-            # Add icon to filename column
-            item = index.internalPointer()
-            if index.column() == 0:
-                if item["filepath"]:
-                    return self._icons["file"]
-                return item.get("icon", None)
+    def flags(self, index):
+        # Use flags of first column for all columns
+        if index.column() != 0:
+            index = self.index(index.row(), 0, index.parent())
+        return super(WorkAreaFilesModel, self).flags(index)

-        if role == self.FileNameRole:
-            item = index.internalPointer()
-            return item["filename"]
+    def data(self, index, role=None):
+        if role is None:
+            role = QtCore.Qt.DisplayRole

-        if role == self.DateModifiedRole:
-            item = index.internalPointer()
-            return item["date"]
+        # Handle roles for first column
+        if index.column() == 1:
+            if role == QtCore.Qt.DecorationRole:
+                return None

-        if role == self.FilePathRole:
-            item = index.internalPointer()
-            return item["filepath"]
+            if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
+                role = DATE_MODIFIED_ROLE
+                index = self.index(index.row(), 0, index.parent())

-        if role == self.IsEnabled:
-            item = index.internalPointer()
-            return item.get("enabled", True)
-
-        return super(FilesModel, self).data(index, role)
+        return super(WorkAreaFilesModel, self).data(index, role)

    def headerData(self, section, orientation, role):
        # Show nice labels in the header
@@ -160,4 +186,274 @@ class FilesModel(TreeModel):
            elif section == 1:
                return "Date modified"

-        return super(FilesModel, self).headerData(section, orientation, role)
+        return super(WorkAreaFilesModel, self).headerData(
+            section, orientation, role
+        )
+
+
+class PublishFilesModel(QtGui.QStandardItemModel):
+    """Model filled with published workfiles resolved from representations.
+
+    This model looks for workfile family representations based on the
+    selected asset and task.
+
+    Asset must be set to be able to look for representations that could be
+    used. Task is used to filter representations by task.
+    The model applies a few filter criteria when filling:
+    - First criterion is that the version document must have "workfile" in
+      "data.families".
+    - Second criterion is that the representation must have an extension
+      matching the defined extensions.
+    - If task is set then the representation must have 'task["name"]' with
+      the same name.
+ """ + + def __init__(self, extensions, dbcon, anatomy, *args, **kwargs): + super(PublishFilesModel, self).__init__(*args, **kwargs) + + self.setColumnCount(2) + + self._dbcon = dbcon + self._anatomy = anatomy + self._file_extensions = extensions + + self._invalid_context_item = None + self._empty_root_item = None + self._file_icon = qtawesome.icon( + "fa.file-o", + color=get_default_entity_icon_color() + ) + self._invalid_icon = qtawesome.icon( + "fa.times", + color=get_disabled_entity_icon_color() + ) + self._invalid_item_visible = False + + self._items_by_id = {} + + self._asset_id = None + self._task_name = None + + def _set_item_invalid(self, item): + item.setFlags(QtCore.Qt.NoItemFlags) + item.setData(self._invalid_icon, QtCore.Qt.DecorationRole) + + def _set_item_valid(self, item): + item.setFlags( + QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable + ) + item.setData(self._file_icon, QtCore.Qt.DecorationRole) + + def _get_invalid_context_item(self): + if self._invalid_context_item is None: + item = QtGui.QStandardItem("Selected context is not valid.") + item.setColumnCount(self.columnCount()) + self._set_item_invalid(item) + self._invalid_context_item = item + return self._invalid_context_item + + def _get_empty_root_item(self): + if self._empty_root_item is None: + item = QtGui.QStandardItem("Didn't find any published workfiles.") + item.setColumnCount(self.columnCount()) + self._set_item_invalid(item) + self._empty_root_item = item + return self._empty_root_item + + def set_context(self, asset_id, task_name): + """Change context to asset and task. + + Args: + asset_id (ObjectId): Id of selected asset. + task_name (str): Name of selected task. + """ + self._asset_id = asset_id + self._task_name = task_name + self.refresh() + + def _clear(self): + root_item = self.invisibleRootItem() + rows = root_item.rowCount() + if rows > 0: + if self._invalid_item_visible: + for row in range(rows): + root_item.takeRow(row) + else: + root_item.removeRows(0, rows) + self._items_by_id = {} + + def _get_workfie_representations(self): + output = [] + # Get subset docs of asset + subset_docs = self._dbcon.find( + { + "type": "subset", + "parent": self._asset_id + }, + { + "_id": True, + "name": True + } + ) + + subset_ids = [subset_doc["_id"] for subset_doc in subset_docs] + if not subset_ids: + return output + + # Get version docs of subsets with their families + version_docs = self._dbcon.find( + { + "type": "version", + "parent": {"$in": subset_ids} + }, + { + "_id": True, + "data.families": True, + "parent": True + } + ) + # Filter versions if they contain 'workfile' family + filtered_versions = [] + for version_doc in version_docs: + data = version_doc.get("data") or {} + families = data.get("families") or [] + if "workfile" in families: + filtered_versions.append(version_doc) + + version_ids = [version_doc["_id"] for version_doc in filtered_versions] + if not version_ids: + return output + + # Query representations of filtered versions and add filter for + # extension + extensions = [ext.replace(".", "") for ext in self._file_extensions] + repre_docs = self._dbcon.find( + { + "type": "representation", + "parent": {"$in": version_ids}, + "context.ext": {"$in": extensions} + } + ) + # Filter queried representations by task name if task is set + filtered_repre_docs = [] + for repre_doc in repre_docs: + if self._task_name is None: + filtered_repre_docs.append(repre_doc) + continue + + task_info = repre_doc["context"].get("task") + if not task_info: + print("Not task info") + continue + + if 
isinstance(task_info, dict): + task_name = task_info.get("name") + else: + task_name = task_info + + if task_name == self._task_name: + filtered_repre_docs.append(repre_doc) + + # Collect paths of representations + for repre_doc in filtered_repre_docs: + path = get_representation_path( + repre_doc, root=self._anatomy.roots + ) + output.append((path, repre_doc["_id"])) + return output + + def refresh(self): + root_item = self.invisibleRootItem() + if not self._asset_id: + self._clear() + # Add Work Area does not exist placeholder + item = self._get_invalid_context_item() + root_item.appendRow(item) + self._invalid_item_visible = True + return + + if self._invalid_item_visible: + self._clear() + + new_items = [] + items_to_remove = set(self._items_by_id.keys()) + for item in self._get_workfie_representations(): + filepath, repre_id = item + # TODO handle empty filepaths + if not filepath: + continue + filename = os.path.basename(filepath) + + if repre_id in items_to_remove: + items_to_remove.remove(repre_id) + item = self._items_by_id[repre_id] + else: + item = QtGui.QStandardItem(filename) + item.setColumnCount(self.columnCount()) + new_items.append(item) + self._items_by_id[repre_id] = item + + if os.path.exists(filepath): + modified = os.path.getmtime(filepath) + tooltip = None + self._set_item_valid(item) + else: + modified = None + tooltip = "File is not available from this machine" + self._set_item_invalid(item) + + item.setData(tooltip, QtCore.Qt.ToolTipRole) + item.setData(filepath, FILEPATH_ROLE) + item.setData(modified, DATE_MODIFIED_ROLE) + item.setData(repre_id, ITEM_ID_ROLE) + + if new_items: + root_item.appendRows(new_items) + + for filename in items_to_remove: + item = self._items_by_id.pop(filename) + root_item.removeRow(item.row()) + + if root_item.rowCount() > 0: + self._invalid_item_visible = False + else: + self._invalid_item_visible = True + item = self._get_empty_root_item() + root_item.appendRow(item) + + def has_valid_items(self): + return not self._invalid_item_visible + + def flags(self, index): + if index.column() != 0: + index = self.index(index.row(), 0, index.parent()) + return super(PublishFilesModel, self).flags(index) + + def data(self, index, role=None): + if role is None: + role = QtCore.Qt.DisplayRole + + if index.column() == 1: + if role == QtCore.Qt.DecorationRole: + return None + + if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole): + role = DATE_MODIFIED_ROLE + index = self.index(index.row(), 0, index.parent()) + + return super(PublishFilesModel, self).data(index, role) + + def headerData(self, section, orientation, role): + # Show nice labels in the header + if ( + role == QtCore.Qt.DisplayRole + and orientation == QtCore.Qt.Horizontal + ): + if section == 0: + return "Name" + elif section == 1: + return "Date modified" + + return super(PublishFilesModel, self).headerData( + section, orientation, role + ) diff --git a/openpype/tools/workfiles/save_as_dialog.py b/openpype/tools/workfiles/save_as_dialog.py new file mode 100644 index 0000000000..3e97d6c938 --- /dev/null +++ b/openpype/tools/workfiles/save_as_dialog.py @@ -0,0 +1,487 @@ +import os +import re +import copy +import logging + +from Qt import QtWidgets, QtCore + +from openpype.lib import ( + get_last_workfile_with_version, + get_workdir_data, +) +from openpype.pipeline import ( + registered_host, + legacy_io, +) +from openpype.tools.utils import PlaceholderLineEdit + +log = logging.getLogger(__name__) + + +def build_workfile_data(session): + """Get the data required for workfile formatting 
from avalon `session`"""
+
+    # Set work file data for template formatting
+    asset_name = session["AVALON_ASSET"]
+    task_name = session["AVALON_TASK"]
+    host_name = session["AVALON_APP"]
+    project_doc = legacy_io.find_one(
+        {"type": "project"},
+        {
+            "name": True,
+            "data.code": True,
+            "config.tasks": True,
+        }
+    )
+
+    asset_doc = legacy_io.find_one(
+        {
+            "type": "asset",
+            "name": asset_name
+        },
+        {
+            "name": True,
+            "data.tasks": True,
+            "data.parents": True
+        }
+    )
+    data = get_workdir_data(project_doc, asset_doc, task_name, host_name)
+    data.update({
+        "version": 1,
+        "comment": "",
+        "ext": None
+    })
+
+    return data
+
+
+class CommentMatcher(object):
+    """Use anatomy and work file data to parse comments from filenames"""
+    def __init__(self, anatomy, template_key, data):
+
+        self.fname_regex = None
+
+        template = anatomy.templates[template_key]["file"]
+        if "{comment}" not in template:
+            # Don't look for comment if template doesn't allow it
+            return
+
+        # Create a regex group for extensions
+        extensions = registered_host().file_extensions()
+        any_extension = "(?:{})".format(
+            "|".join(re.escape(ext[1:]) for ext in extensions)
+        )
+
+        # Use placeholders that will never be in the filename
+        temp_data = copy.deepcopy(data)
+        temp_data["comment"] = "<<comment>>"
+        temp_data["version"] = "<<version>>"
+        temp_data["ext"] = "<<ext>>"
+
+        formatted = anatomy.format(temp_data)
+        fname_pattern = formatted[template_key]["file"]
+        fname_pattern = re.escape(fname_pattern)
+
+        # Replace comment and version with something we can match with regex
+        replacements = {
+            "<<comment>>": "(.+)",
+            "<<version>>": "[0-9]+",
+            "<<ext>>": any_extension,
+        }
+        for src, dest in replacements.items():
+            fname_pattern = fname_pattern.replace(re.escape(src), dest)
+
+        # Match from beginning to end of string to be safe
+        fname_pattern = "^{}$".format(fname_pattern)
+
+        self.fname_regex = re.compile(fname_pattern)
+
+    def parse_comment(self, filepath):
+        """Parse the {comment} part from a filename"""
+        if not self.fname_regex:
+            return
+
+        fname = os.path.basename(filepath)
+        match = self.fname_regex.match(fname)
+        if match:
+            return match.group(1)
+
+
+class SubversionLineEdit(QtWidgets.QWidget):
+    """QLineEdit with QPushButton for drop down selection of list of strings"""
+
+    text_changed = QtCore.Signal(str)
+
+    def __init__(self, *args, **kwargs):
+        super(SubversionLineEdit, self).__init__(*args, **kwargs)
+
+        input_field = PlaceholderLineEdit(self)
+        menu_btn = QtWidgets.QPushButton(self)
+        menu_btn.setFixedWidth(18)
+
+        menu = QtWidgets.QMenu(self)
+        menu_btn.setMenu(menu)
+
+        layout = QtWidgets.QHBoxLayout(self)
+        layout.setContentsMargins(0, 0, 0, 0)
+        layout.setSpacing(3)
+
+        layout.addWidget(input_field, 1)
+        layout.addWidget(menu_btn, 0)
+
+        input_field.textChanged.connect(self.text_changed)
+
+        self.setFocusProxy(input_field)
+
+        self._input_field = input_field
+        self._menu_btn = menu_btn
+        self._menu = menu
+
+    def set_placeholder(self, placeholder):
+        self._input_field.setPlaceholderText(placeholder)
+
+    def set_text(self, text):
+        self._input_field.setText(text)
+
+    def set_values(self, values):
+        self._update(values)
+
+    def _on_button_clicked(self):
+        self._menu.exec_()
+
+    def _on_action_clicked(self, action):
+        self._input_field.setText(action.text())
+
+    def _update(self, values):
+        """Create optional predefined subset names
+
+        Args:
+            values(list): all predefined names
+
+        Returns:
+            None
+        """
+
+        menu = self._menu
+        button = self._menu_btn
+
+        state = any(values)
+        button.setEnabled(state)
+        if state is False:
+            return
+
+        # 
Include an empty string + values = [""] + sorted(values) + + # Get and destroy the action group + group = button.findChild(QtWidgets.QActionGroup) + if group: + group.deleteLater() + + # Build new action group + group = QtWidgets.QActionGroup(button) + for name in values: + action = group.addAction(name) + menu.addAction(action) + + group.triggered.connect(self._on_action_clicked) + + +class SaveAsDialog(QtWidgets.QDialog): + """Name Window to define a unique filename inside a root folder + + The filename will be based on the "workfile" template defined in the + project["config"]["template"]. + + """ + + def __init__( + self, parent, root, anatomy, template_key, extensions, session=None + ): + super(SaveAsDialog, self).__init__(parent=parent) + self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) + + self.result = None + self.host = registered_host() + self.root = root + self.work_file = None + self._extensions = extensions + + if not session: + # Fallback to active session + session = legacy_io.Session + + self.data = build_workfile_data(session) + + # Store project anatomy + self.anatomy = anatomy + self.template = anatomy.templates[template_key]["file"] + self.template_key = template_key + + # Btns widget + btns_widget = QtWidgets.QWidget(self) + + btn_ok = QtWidgets.QPushButton("Ok", btns_widget) + btn_cancel = QtWidgets.QPushButton("Cancel", btns_widget) + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.addWidget(btn_ok) + btns_layout.addWidget(btn_cancel) + + # Inputs widget + inputs_widget = QtWidgets.QWidget(self) + + # Version widget + version_widget = QtWidgets.QWidget(inputs_widget) + + # Version number input + version_input = QtWidgets.QSpinBox(version_widget) + version_input.setMinimum(1) + version_input.setMaximum(9999) + + # Last version checkbox + last_version_check = QtWidgets.QCheckBox( + "Next Available Version", version_widget + ) + last_version_check.setChecked(True) + + version_layout = QtWidgets.QHBoxLayout(version_widget) + version_layout.setContentsMargins(0, 0, 0, 0) + version_layout.addWidget(version_input) + version_layout.addWidget(last_version_check) + + # Preview widget + preview_label = QtWidgets.QLabel("Preview filename", inputs_widget) + + # Subversion input + subversion = SubversionLineEdit(inputs_widget) + subversion.set_placeholder("Will be part of filename.") + + # Extensions combobox + ext_combo = QtWidgets.QComboBox(inputs_widget) + # Add styled delegate to use stylesheets + ext_delegate = QtWidgets.QStyledItemDelegate() + ext_combo.setItemDelegate(ext_delegate) + ext_combo.addItems(self._extensions) + + # Build inputs + inputs_layout = QtWidgets.QFormLayout(inputs_widget) + # Add version only if template contains version key + # - since the version can be padded with "{version:0>4}" we only search + # for "{version". + if "{version" in self.template: + inputs_layout.addRow("Version:", version_widget) + else: + version_widget.setVisible(False) + + # Add subversion only if template contains `{comment}` + if "{comment}" in self.template: + inputs_layout.addRow("Subversion:", subversion) + + # Detect whether a {comment} is in the current filename - if so, + # preserve it by default and set it in the comment/subversion field + current_filepath = self.host.current_file() + if current_filepath: + # We match the current filename against the current session + # instead of the session where the user is saving to. 
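An aside on the `CommentMatcher` class above: the placeholder-to-regex trick it relies on can be tried standalone. A minimal sketch, assuming a hypothetical template, extension list and filename — none of these come from the real project anatomy:

```python
import re

# Hypothetical workfile template and data -- not the project anatomy.
template = "{asset}_{task}_v{version:0>3}_{comment}.{ext}"
formatted = template.format(
    asset="hero", task="modeling",
    version="<<version>>", comment="<<comment>>", ext="<<ext>>"
)

# Escape the literal parts, then swap the placeholders for patterns,
# mirroring what CommentMatcher does with the anatomy template.
pattern = re.escape(formatted)
for src, dest in (
    ("<<comment>>", "(.+)"),
    ("<<version>>", "[0-9]+"),
    ("<<ext>>", "(?:ma|mb)"),
):
    pattern = pattern.replace(re.escape(src), dest)

fname_regex = re.compile("^{}$".format(pattern))
match = fname_regex.match("hero_modeling_v003_cleanup.ma")
print(match.group(1))  # -> "cleanup"
```

The same `re.escape` is applied to both the formatted string and the placeholder keys, so the replacement stays consistent across Python versions that escape different character sets.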
+ current_data = build_workfile_data(legacy_io.Session) + matcher = CommentMatcher(anatomy, template_key, current_data) + comment = matcher.parse_comment(current_filepath) + if comment: + log.info("Detected subversion comment: {}".format(comment)) + self.data["comment"] = comment + subversion.set_text(comment) + + existing_comments = self.get_existing_comments() + subversion.set_values(existing_comments) + + else: + subversion.setVisible(False) + inputs_layout.addRow("Extension:", ext_combo) + inputs_layout.addRow("Preview:", preview_label) + + # Build layout + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.addWidget(inputs_widget) + main_layout.addWidget(btns_widget) + + # Signal callback registration + version_input.valueChanged.connect(self.on_version_spinbox_changed) + last_version_check.stateChanged.connect( + self.on_version_checkbox_changed + ) + + subversion.text_changed.connect(self.on_comment_changed) + ext_combo.currentIndexChanged.connect(self.on_extension_changed) + + btn_ok.pressed.connect(self.on_ok_pressed) + btn_cancel.pressed.connect(self.on_cancel_pressed) + + # Allow "Enter" key to accept the save. + btn_ok.setDefault(True) + + # Force default focus to comment, some hosts didn't automatically + # apply focus to this line edit (e.g. Houdini) + subversion.setFocus() + + # Store widgets + self.btn_ok = btn_ok + + self.version_widget = version_widget + + self.version_input = version_input + self.last_version_check = last_version_check + + self.preview_label = preview_label + self.subversion = subversion + self.ext_combo = ext_combo + self._ext_delegate = ext_delegate + + self.refresh() + + def get_existing_comments(self): + matcher = CommentMatcher(self.anatomy, self.template_key, self.data) + host_extensions = set(self._extensions) + comments = set() + if os.path.isdir(self.root): + for fname in os.listdir(self.root): + if not os.path.isfile(os.path.join(self.root, fname)): + continue + + ext = os.path.splitext(fname)[-1] + if ext not in host_extensions: + continue + + comment = matcher.parse_comment(fname) + if comment: + comments.add(comment) + + return list(comments) + + def on_version_spinbox_changed(self, value): + self.data["version"] = value + self.refresh() + + def on_version_checkbox_changed(self, _value): + self.refresh() + + def on_comment_changed(self, text): + self.data["comment"] = text + self.refresh() + + def on_extension_changed(self): + ext = self.ext_combo.currentText() + if ext == self.data["ext"]: + return + self.data["ext"] = ext + self.refresh() + + def on_ok_pressed(self): + self.result = self.work_file + self.close() + + def on_cancel_pressed(self): + self.close() + + def get_result(self): + return self.result + + def get_work_file(self): + data = copy.deepcopy(self.data) + if not data["comment"]: + data.pop("comment", None) + + data["ext"] = data["ext"][1:] + + anatomy_filled = self.anatomy.format(data) + return anatomy_filled[self.template_key]["file"] + + def refresh(self): + extensions = list(self._extensions) + extension = self.data["ext"] + if extension is None: + # Define saving file extension + current_file = self.host.current_file() + if current_file: + # Match the extension of current file + _, extension = os.path.splitext(current_file) + else: + extension = extensions[0] + + if extension != self.data["ext"]: + self.data["ext"] = extension + index = self.ext_combo.findText( + extension, QtCore.Qt.MatchFixedString + ) + if index >= 0: + self.ext_combo.setCurrentIndex(index) + + if not self.last_version_check.isChecked(): + 
self.version_input.setEnabled(True) + self.data["version"] = self.version_input.value() + + work_file = self.get_work_file() + + else: + self.version_input.setEnabled(False) + + data = copy.deepcopy(self.data) + template = str(self.template) + + if not data["comment"]: + data.pop("comment", None) + + data["ext"] = data["ext"][1:] + + version = get_last_workfile_with_version( + self.root, template, data, extensions + )[1] + + if version is None: + version = 1 + else: + version += 1 + + found_valid_version = False + # Check if next version is valid version and give a chance to try + # next 100 versions + for idx in range(100): + # Store version to data + self.data["version"] = version + + work_file = self.get_work_file() + # Safety check + path = os.path.join(self.root, work_file) + if not os.path.exists(path): + found_valid_version = True + break + + # Try next version + version += 1 + # Log warning + if idx == 0: + log.warning(( + "BUG: Function `get_last_workfile_with_version` " + "didn't return last version." + )) + # Raise exception if even 100 version fallback didn't help + if not found_valid_version: + raise AssertionError( + "This is a bug. Couldn't find valid version!" + ) + + self.work_file = work_file + + path_exists = os.path.exists(os.path.join(self.root, work_file)) + + self.btn_ok.setEnabled(not path_exists) + + if path_exists: + self.preview_label.setText( + "Cannot create \"{0}\" because file exists!" + "".format(work_file) + ) + else: + self.preview_label.setText( + "{0}".format(work_file) + ) diff --git a/openpype/tools/workfiles/view.py b/openpype/tools/workfiles/view.py deleted file mode 100644 index 8e3993e4c7..0000000000 --- a/openpype/tools/workfiles/view.py +++ /dev/null @@ -1,15 +0,0 @@ -from Qt import QtWidgets, QtCore - - -class FilesView(QtWidgets.QTreeView): - doubleClickedLeft = QtCore.Signal() - doubleClickedRight = QtCore.Signal() - - def mouseDoubleClickEvent(self, event): - if event.button() == QtCore.Qt.LeftButton: - self.doubleClickedLeft.emit() - - elif event.button() == QtCore.Qt.RightButton: - self.doubleClickedRight.emit() - - return super(FilesView, self).mouseDoubleClickEvent(event) diff --git a/openpype/tools/workfiles/window.py b/openpype/tools/workfiles/window.py new file mode 100644 index 0000000000..02a22af26c --- /dev/null +++ b/openpype/tools/workfiles/window.py @@ -0,0 +1,363 @@ +import os +import datetime +from Qt import QtCore, QtWidgets + +from openpype import style +from openpype.lib import ( + get_workfile_doc, + create_workfile_doc, + save_workfile_data_to_doc, +) +from openpype.pipeline import legacy_io +from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget +from openpype.tools.utils.tasks_widget import TasksWidget + +from .files_widget import FilesWidget + + +def file_size_to_string(file_size): + size = 0 + size_ending_mapping = { + "KB": 1024 ** 1, + "MB": 1024 ** 2, + "GB": 1024 ** 3 + } + ending = "B" + for _ending, _size in size_ending_mapping.items(): + if file_size < _size: + break + size = file_size / _size + ending = _ending + return "{:.2f} {}".format(size, ending) + + +class SidePanelWidget(QtWidgets.QWidget): + save_clicked = QtCore.Signal() + published_workfile_message = ( + "INFO: Opened published workfiles will be stored in" + " temp directory on your machine. Current temp size: {}." 
+ ) + + def __init__(self, parent=None): + super(SidePanelWidget, self).__init__(parent) + + details_label = QtWidgets.QLabel("Details", self) + details_input = QtWidgets.QPlainTextEdit(self) + details_input.setReadOnly(True) + + artist_note_widget = QtWidgets.QWidget(self) + note_label = QtWidgets.QLabel("Artist note", artist_note_widget) + note_input = QtWidgets.QPlainTextEdit(artist_note_widget) + btn_note_save = QtWidgets.QPushButton("Save note", artist_note_widget) + + artist_note_layout = QtWidgets.QVBoxLayout(artist_note_widget) + artist_note_layout.setContentsMargins(0, 0, 0, 0) + artist_note_layout.addWidget(note_label, 0) + artist_note_layout.addWidget(note_input, 1) + artist_note_layout.addWidget( + btn_note_save, 0, alignment=QtCore.Qt.AlignRight + ) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(details_label, 0) + main_layout.addWidget(details_input, 1) + main_layout.addWidget(artist_note_widget, 1) + + note_input.textChanged.connect(self._on_note_change) + btn_note_save.clicked.connect(self._on_save_click) + + self._details_input = details_input + self._artist_note_widget = artist_note_widget + self._note_input = note_input + self._btn_note_save = btn_note_save + + self._orig_note = "" + self._workfile_doc = None + + def set_published_visible(self, published_visible): + self._artist_note_widget.setVisible(not published_visible) + + def _on_note_change(self): + text = self._note_input.toPlainText() + self._btn_note_save.setEnabled(self._orig_note != text) + + def _on_save_click(self): + self._orig_note = self._note_input.toPlainText() + self._on_note_change() + self.save_clicked.emit() + + def set_context(self, asset_id, task_name, filepath, workfile_doc): + # Check if asset, task and file are selected + # NOTE workfile document is not requirement + enabled = bool(asset_id) and bool(task_name) and bool(filepath) + + self._details_input.setEnabled(enabled) + self._note_input.setEnabled(enabled) + self._btn_note_save.setEnabled(enabled) + + # Make sure workfile doc is overridden + self._workfile_doc = workfile_doc + # Disable inputs and remove texts if any required arguments are missing + if not enabled: + self._orig_note = "" + self._details_input.setPlainText("") + self._note_input.setPlainText("") + return + + orig_note = "" + if workfile_doc: + orig_note = workfile_doc["data"].get("note") or orig_note + + self._orig_note = orig_note + self._note_input.setPlainText(orig_note) + # Set as empty string + self._details_input.setPlainText("") + + filestat = os.stat(filepath) + size_value = file_size_to_string(filestat.st_size) + + # Append html string + datetime_format = "%b %d %Y %H:%M:%S" + creation_time = datetime.datetime.fromtimestamp(filestat.st_ctime) + modification_time = datetime.datetime.fromtimestamp(filestat.st_mtime) + lines = ( + "Size:", + size_value, + "Created:", + creation_time.strftime(datetime_format), + "Modified:", + modification_time.strftime(datetime_format) + ) + self._details_input.appendHtml("
".join(lines)) + + def get_workfile_data(self): + data = { + "note": self._note_input.toPlainText() + } + return self._workfile_doc, data + + +class Window(QtWidgets.QMainWindow): + """Work Files Window""" + title = "Work Files" + + def __init__(self, parent=None): + super(Window, self).__init__(parent=parent) + self.setWindowTitle(self.title) + window_flags = QtCore.Qt.Window | QtCore.Qt.WindowCloseButtonHint + if not parent: + window_flags |= QtCore.Qt.WindowStaysOnTopHint + self.setWindowFlags(window_flags) + + # Create pages widget and set it as central widget + pages_widget = QtWidgets.QStackedWidget(self) + self.setCentralWidget(pages_widget) + + home_page_widget = QtWidgets.QWidget(pages_widget) + home_body_widget = QtWidgets.QWidget(home_page_widget) + + assets_widget = SingleSelectAssetsWidget( + legacy_io, parent=home_body_widget + ) + assets_widget.set_current_asset_btn_visibility(True) + + tasks_widget = TasksWidget(legacy_io, home_body_widget) + files_widget = FilesWidget(home_body_widget) + side_panel = SidePanelWidget(home_body_widget) + + pages_widget.addWidget(home_page_widget) + + # Build home + home_page_layout = QtWidgets.QVBoxLayout(home_page_widget) + home_page_layout.addWidget(home_body_widget) + + # Build home - body + body_layout = QtWidgets.QVBoxLayout(home_body_widget) + split_widget = QtWidgets.QSplitter(home_body_widget) + split_widget.addWidget(assets_widget) + split_widget.addWidget(tasks_widget) + split_widget.addWidget(files_widget) + split_widget.addWidget(side_panel) + split_widget.setSizes([255, 160, 455, 175]) + + body_layout.addWidget(split_widget) + + # Add top margin for tasks to align it visually with files as + # the files widget has a filter field which tasks does not. + tasks_widget.setContentsMargins(0, 32, 0, 0) + + # Set context after asset widget is refreshed + # - to do so it is necessary to wait until refresh is done + set_context_timer = QtCore.QTimer() + set_context_timer.setInterval(100) + + # Connect signals + set_context_timer.timeout.connect(self._on_context_set_timeout) + assets_widget.selection_changed.connect(self._on_asset_changed) + tasks_widget.task_changed.connect(self._on_task_changed) + files_widget.file_selected.connect(self.on_file_select) + files_widget.workfile_created.connect(self.on_workfile_create) + files_widget.file_opened.connect(self._on_file_opened) + files_widget.published_visible_changed.connect( + self._on_published_change + ) + side_panel.save_clicked.connect(self.on_side_panel_save) + + self._set_context_timer = set_context_timer + self.home_page_widget = home_page_widget + self.pages_widget = pages_widget + self.home_body_widget = home_body_widget + self.split_widget = split_widget + + self.assets_widget = assets_widget + self.tasks_widget = tasks_widget + self.files_widget = files_widget + self.side_panel = side_panel + + # Force focus on the open button by default, required for Houdini. + files_widget.setFocus() + + self.resize(1200, 600) + + self._first_show = True + self._context_to_set = None + + def showEvent(self, event): + super(Window, self).showEvent(event) + if self._first_show: + self._first_show = False + self.refresh() + self.setStyleSheet(style.load_stylesheet()) + + def keyPressEvent(self, event): + """Custom keyPressEvent. + + Override keyPressEvent to do nothing so that Maya's panels won't + take focus when pressing "SHIFT" whilst mouse is over viewport or + outliner. This way users don't accidentally perform Maya commands + whilst trying to name an instance. 
+ + """ + + def set_save_enabled(self, enabled): + self.files_widget.set_save_enabled(enabled) + + def on_file_select(self, filepath): + asset_id = self.assets_widget.get_selected_asset_id() + task_name = self.tasks_widget.get_selected_task_name() + + workfile_doc = None + if asset_id and task_name and filepath: + filename = os.path.split(filepath)[1] + workfile_doc = get_workfile_doc( + asset_id, task_name, filename, legacy_io + ) + self.side_panel.set_context( + asset_id, task_name, filepath, workfile_doc + ) + + def on_workfile_create(self, filepath): + self._create_workfile_doc(filepath) + + def _on_file_opened(self): + self.close() + + def _on_published_change(self, visible): + self.side_panel.set_published_visible(visible) + + def on_side_panel_save(self): + workfile_doc, data = self.side_panel.get_workfile_data() + if not workfile_doc: + filepath = self.files_widget._get_selected_filepath() + self._create_workfile_doc(filepath, force=True) + workfile_doc = self._get_current_workfile_doc() + + save_workfile_data_to_doc(workfile_doc, data, legacy_io) + + def _get_current_workfile_doc(self, filepath=None): + if filepath is None: + filepath = self.files_widget._get_selected_filepath() + task_name = self.tasks_widget.get_selected_task_name() + asset_id = self.assets_widget.get_selected_asset_id() + if not task_name or not asset_id or not filepath: + return + + filename = os.path.split(filepath)[1] + return get_workfile_doc( + asset_id, task_name, filename, legacy_io + ) + + def _create_workfile_doc(self, filepath, force=False): + workfile_doc = None + if not force: + workfile_doc = self._get_current_workfile_doc(filepath) + + if not workfile_doc: + workdir, filename = os.path.split(filepath) + asset_id = self.assets_widget.get_selected_asset_id() + asset_doc = legacy_io.find_one({"_id": asset_id}) + task_name = self.tasks_widget.get_selected_task_name() + create_workfile_doc( + asset_doc, task_name, filename, workdir, legacy_io + ) + + def refresh(self): + # Refresh asset widget + self.assets_widget.refresh() + + self._on_task_changed() + + def set_context(self, context): + self._context_to_set = context + self._set_context_timer.start() + + def _on_context_set_timeout(self): + if self._context_to_set is None: + self._set_context_timer.stop() + return + + if self.assets_widget.refreshing: + return + + self._context_to_set, context = None, self._context_to_set + if "asset" in context: + asset_doc = legacy_io.find_one( + { + "name": context["asset"], + "type": "asset" + }, + {"_id": 1} + ) or {} + asset_id = asset_doc.get("_id") + # Select the asset + self.assets_widget.select_asset(asset_id) + self.tasks_widget.set_asset_id(asset_id) + + if "task" in context: + self.tasks_widget.select_task_name(context["task"]) + self._on_task_changed() + + def _on_asset_changed(self): + asset_id = self.assets_widget.get_selected_asset_id() + if asset_id: + self.tasks_widget.setEnabled(True) + else: + # Force disable the other widgets if no + # active selection + self.tasks_widget.setEnabled(False) + self.files_widget.setEnabled(False) + + self.tasks_widget.set_asset_id(asset_id) + + def _on_task_changed(self): + asset_id = self.assets_widget.get_selected_asset_id() + task_name = self.tasks_widget.get_selected_task_name() + task_type = self.tasks_widget.get_selected_task_type() + + asset_is_valid = asset_id is not None + self.tasks_widget.setEnabled(asset_is_valid) + + self.files_widget.setEnabled(bool(task_name) and asset_is_valid) + self.files_widget.set_asset_task(asset_id, task_name, task_type) + 
self.files_widget.refresh() diff --git a/openpype/version.py b/openpype/version.py index 1ef25e3f48..662adf28ca 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.9.1" +__version__ = "3.10.0-nightly.2" diff --git a/openpype/widgets/attribute_defs/files_widget.py b/openpype/widgets/attribute_defs/files_widget.py index 87b98e2378..34f7d159ad 100644 --- a/openpype/widgets/attribute_defs/files_widget.py +++ b/openpype/widgets/attribute_defs/files_widget.py @@ -641,5 +641,6 @@ class SingleFileWidget(QtWidgets.QWidget): filepaths.append(filepath) # TODO filter check if len(filepaths) == 1: - self.set_value(filepaths[0], False) + self._filepath_input.setText(filepaths[0]) + event.accept() diff --git a/openpype/widgets/attribute_defs/widgets.py b/openpype/widgets/attribute_defs/widgets.py index a6f1b8d6c9..23f025967d 100644 --- a/openpype/widgets/attribute_defs/widgets.py +++ b/openpype/widgets/attribute_defs/widgets.py @@ -2,7 +2,7 @@ import uuid from Qt import QtWidgets, QtCore -from openpype.pipeline.lib import ( +from openpype.lib.attribute_definitions import ( AbtractAttrDef, UnknownDef, NumberDef, diff --git a/openpype/widgets/popup.py b/openpype/widgets/popup.py index e661d3d293..9fc33ccbb8 100644 --- a/openpype/widgets/popup.py +++ b/openpype/widgets/popup.py @@ -1,16 +1,19 @@ import sys -import logging import contextlib from Qt import QtCore, QtWidgets -log = logging.getLogger(__name__) - class Popup(QtWidgets.QDialog): + """A Popup that moves itself to bottom right of screen on show event. - on_show = QtCore.Signal() + The UI contains a message label and a red highlighted button to "show" + or perform another custom action from this pop-up. + + """ + + on_clicked = QtCore.Signal() def __init__(self, parent=None, *args, **kwargs): super(Popup, self).__init__(parent=parent, *args, **kwargs) @@ -19,32 +22,34 @@ class Popup(QtWidgets.QDialog): # Layout layout = QtWidgets.QHBoxLayout(self) layout.setContentsMargins(10, 5, 10, 10) + + # Increase spacing slightly for readability + layout.setSpacing(10) + message = QtWidgets.QLabel("") message.setStyleSheet(""" QLabel { font-size: 12px; } """) - show = QtWidgets.QPushButton("Show") - show.setSizePolicy(QtWidgets.QSizePolicy.Maximum, + button = QtWidgets.QPushButton("Show") + button.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum) - show.setStyleSheet("""QPushButton { background-color: #BB0000 }""") + button.setStyleSheet("""QPushButton { background-color: #BB0000 }""") layout.addWidget(message) - layout.addWidget(show) + layout.addWidget(button) - # Size + # Default size self.resize(400, 40) - geometry = self.calculate_window_geometry() - self.setGeometry(geometry) self.widgets = { "message": message, - "show": show, + "button": button, } # Signals - show.clicked.connect(self._on_show_clicked) + button.clicked.connect(self._on_clicked) # Set default title self.setWindowTitle("Popup") @@ -52,7 +57,10 @@ class Popup(QtWidgets.QDialog): def setMessage(self, message): self.widgets['message'].setText(message) - def _on_show_clicked(self): + def setButtonText(self, text): + self.widgets["button"].setText(text) + + def _on_clicked(self): """Callback for when the 'show' button is clicked. 
Raises the parent (if any) @@ -63,11 +71,19 @@ class Popup(QtWidgets.QDialog): self.close() # Trigger the signal - self.on_show.emit() + self.on_clicked.emit() if parent: parent.raise_() + def showEvent(self, event): + + # Position popup based on contents on show event + geo = self.calculate_window_geometry() + self.setGeometry(geo) + + return super(Popup, self).showEvent(event) + def calculate_window_geometry(self): """Respond to status changes @@ -104,45 +120,29 @@ class Popup(QtWidgets.QDialog): return QtCore.QRect(x, y, width, height) -class Popup2(Popup): +class PopupUpdateKeys(Popup): + """Popup with Update Keys checkbox (intended for Maya)""" - on_show = QtCore.Signal() + on_clicked_state = QtCore.Signal(bool) def __init__(self, parent=None, *args, **kwargs): Popup.__init__(self, parent=parent, *args, **kwargs) layout = self.layout() - # Add toggle + # Insert toggle for Update keys toggle = QtWidgets.QCheckBox("Update Keys") layout.insertWidget(1, toggle) self.widgets["toggle"] = toggle + self.on_clicked.connect(self.emit_click_with_state) + layout.insertStretch(1, 1) - # Update button text - fix = self.widgets["show"] - fix.setText("Fix") - - def calculate_window_geometry(self): - """Respond to status changes - - On creation, align window with screen bottom right. - - """ - parent_widget = self.parent() - - desktop = QtWidgets.QApplication.desktop() - if parent_widget: - screen = desktop.screenNumber(parent_widget) - else: - screen = desktop.screenNumber(desktop.cursor().pos()) - center_point = desktop.screenGeometry(screen).center() - - frame_geo = self.frameGeometry() - frame_geo.moveCenter(center_point) - - return frame_geo + def emit_click_with_state(self): + """Emit the on_clicked_state signal with the toggled state""" + checked = self.widgets["toggle"].isChecked() + self.on_clicked_state.emit(checked) @contextlib.contextmanager diff --git a/poetry.lock b/poetry.lock index ee7b839b8d..7998ede693 100644 --- a/poetry.lock +++ b/poetry.lock @@ -11,7 +11,7 @@ develop = false type = "git" url = "https://github.com/pypeclub/acre.git" reference = "master" -resolved_reference = "55a7c331e6dc5f81639af50ca4a8cc9d73e9273d" +resolved_reference = "126f7a188cfe36718f707f42ebbc597e86aa86c3" [[package]] name = "aiohttp" @@ -680,15 +680,8 @@ category = "main" optional = false python-versions = "*" -[package.dependencies] -attrs = ">=17.4.0" -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} -pyrsistent = ">=0.14.0" -six = ">=1.11.0" - [package.extras] -format = ["idna", "jsonpointer (>1.13)", "rfc3987", "strict-rfc3339", "webcolors"] -format_nongpl = ["idna", "jsonpointer (>1.13)", "webcolors", "rfc3986-validator (>0.1.0)", "rfc3339-validator"] +format = ["rfc3987", "strict-rfc3339", "webcolors"] [[package]] name = "keyring" @@ -784,7 +777,7 @@ pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" [[package]] name = "paramiko" -version = "2.9.2" +version = "2.10.1" description = "SSH2 protocol library" category = "main" optional = false @@ -794,6 +787,7 @@ python-versions = "*" bcrypt = ">=3.1.3" cryptography = ">=2.5" pynacl = ">=1.0.1" +six = "*" [package.extras] all = ["pyasn1 (>=0.1.7)", "pynacl (>=1.0.1)", "bcrypt (>=3.1.3)", "invoke (>=1.3)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] @@ -1087,14 +1081,6 @@ category = "main" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -[[package]] -name = "pyrsistent" -version = "0.18.1" -description = "Persistent/Functional/Immutable data structures" -category = "main" -optional = false -python-versions = ">=3.7" - 
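Returning to the `popup.py` changes above: the old `Popup2` hard-coded the "Fix" button text, while the renamed `PopupUpdateKeys` leaves that to the caller via the new `setButtonText`. A hypothetical usage sketch, assuming a running Qt application and that the module is importable as `openpype.widgets.popup`:

```python
from Qt import QtWidgets

from openpype.widgets.popup import PopupUpdateKeys

app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])

popup = PopupUpdateKeys()
popup.setMessage("Scene frame range does not match the asset.")
popup.setButtonText("Fix")  # was hard-coded inside the old Popup2

# The bool argument carries the "Update Keys" checkbox state.
popup.on_clicked_state.connect(
    lambda checked: print("fix requested, update keys:", checked)
)
popup.show()
```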
[[package]] name = "pysftp" version = "0.2.9" @@ -1633,7 +1619,7 @@ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest- [metadata] lock-version = "1.1" python-versions = "3.7.*" -content-hash = "2f78d48a6aad2d8a88b7dd7f31a76d907bec9fb65f0086fba6b6d2e1605f0f88" +content-hash = "b02313c8255a1897b0f0617ad4884a5943696c363512921aab1cb2dd8f4fdbe0" [metadata.files] acre = [] @@ -2171,12 +2157,28 @@ log4mongo = [ {file = "log4mongo-1.7.0.tar.gz", hash = "sha256:dc374617206162a0b14167fbb5feac01dbef587539a235dadba6200362984a68"}, ] markupsafe = [ + {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-win32.whl", hash = "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724"}, + {file = 
"MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, @@ -2185,14 +2187,27 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9"}, {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = 
"sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee"}, {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, @@ -2202,6 +2217,12 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"}, {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"}, {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1"}, {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = 
"sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, @@ -2277,8 +2298,8 @@ packaging = [ {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, ] paramiko = [ - {file = "paramiko-2.9.2-py2.py3-none-any.whl", hash = "sha256:04097dbd96871691cdb34c13db1883066b8a13a0df2afd4cb0a92221f51c2603"}, - {file = "paramiko-2.9.2.tar.gz", hash = "sha256:944a9e5dbdd413ab6c7951ea46b0ab40713235a9c4c5ca81cfe45c6f14fa677b"}, + {file = "paramiko-2.10.1-py2.py3-none-any.whl", hash = "sha256:f6cbd3e1204abfdbcd40b3ecbc9d32f04027cd3080fe666245e21e7540ccfc1b"}, + {file = "paramiko-2.10.1.tar.gz", hash = "sha256:443f4da23ec24e9a9c0ea54017829c282abdda1d57110bf229360775ccd27a31"}, ] parso = [ {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, @@ -2598,29 +2619,6 @@ pyparsing = [ {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, ] -pyrsistent = [ - {file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"}, - {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26"}, - {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e"}, - {file = "pyrsistent-0.18.1-cp310-cp310-win32.whl", hash = "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6"}, - {file = "pyrsistent-0.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-win32.whl", hash = "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286"}, - {file = "pyrsistent-0.18.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"}, - {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec"}, - {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c"}, - {file = "pyrsistent-0.18.1-cp38-cp38-win32.whl", hash = 
"sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca"}, - {file = "pyrsistent-0.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a"}, - {file = "pyrsistent-0.18.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5"}, - {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045"}, - {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c"}, - {file = "pyrsistent-0.18.1-cp39-cp39-win32.whl", hash = "sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc"}, - {file = "pyrsistent-0.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07"}, - {file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"}, -] pysftp = [ {file = "pysftp-0.2.9.tar.gz", hash = "sha256:fbf55a802e74d663673400acd92d5373c1c7ee94d765b428d9f977567ac4854a"}, ] diff --git a/pyproject.toml b/pyproject.toml index 7c09495a99..f32e385e80 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.9.1" # OpenPype +version = "3.10.0-nightly.2" # OpenPype description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" @@ -136,3 +136,19 @@ hash = "de63a8bf7f6c45ff59ecafeba13123f710c2cbc1783ec9e0b938e980d4f5c37f" [openpype.thirdparty.oiio.darwin] url = "https://distribute.openpype.io/thirdparty/oiio-2.2.0-darwin.tgz" hash = "sha256:..." + +[tool.pyright] +include = [ + "igniter", + "openpype", + "repos", + "vendor" +] +exclude = [ + "**/node_modules", + "**/__pycache__" +] +ignore = ["website", "docs", ".git"] + +reportMissingImports = true +reportMissingTypeStubs = false \ No newline at end of file diff --git a/repos/avalon-core b/repos/avalon-core deleted file mode 160000 index 3188da17b6..0000000000 --- a/repos/avalon-core +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 3188da17b667dc49d007aed56ad20ba01c67f4bc diff --git a/setup.py b/setup.py index 3ee6ad43ea..899e9375c0 100644 --- a/setup.py +++ b/setup.py @@ -123,7 +123,6 @@ bin_includes = [ include_files = [ "igniter", "openpype", - "repos", "schema", "LICENSE", "README.md" @@ -187,5 +186,6 @@ setup( "build_dir": (openpype_root / "docs" / "build").as_posix() } }, - executables=executables + executables=executables, + packages=[] ) diff --git a/start.py b/start.py index f8a01dd9ab..38eb9e9bf4 100644 --- a/start.py +++ b/start.py @@ -191,6 +191,51 @@ else: if os.getenv("OPENPYPE_HEADLESS_MODE") != "1": os.environ.pop("OPENPYPE_HEADLESS_MODE", None) +# Enabled logging debug mode when "--debug" is passed +if "--verbose" in sys.argv: + expected_values = ( + "Expected: notset, debug, info, warning, error, critical" + " or integer [0-50]." + ) + idx = sys.argv.index("--verbose") + sys.argv.pop(idx) + if idx < len(sys.argv): + value = sys.argv.pop(idx) + else: + raise RuntimeError(( + "Expect value after \"--verbose\" argument. 
{}" + ).format(expected_values)) + + log_level = None + low_value = value.lower() + if low_value.isdigit(): + log_level = int(low_value) + elif low_value == "notset": + log_level = 0 + elif low_value == "debug": + log_level = 10 + elif low_value == "info": + log_level = 20 + elif low_value == "warning": + log_level = 30 + elif low_value == "error": + log_level = 40 + elif low_value == "critical": + log_level = 50 + + if log_level is None: + raise RuntimeError(( + "Unexpected value after \"--verbose\" argument \"{}\". {}" + ).format(value, expected_values)) + + os.environ["OPENPYPE_LOG_LEVEL"] = str(log_level) + +# Enable debug mode, may affect log level if log level is not defined +if "--debug" in sys.argv: + sys.argv.remove("--debug") + os.environ["OPENPYPE_DEBUG"] = "1" + + import igniter # noqa: E402 from igniter import BootstrapRepos # noqa: E402 from igniter.tools import ( @@ -320,6 +365,7 @@ def run_disk_mapping_commands(settings): destination)) raise + def set_avalon_environments(): """Set avalon specific environments. @@ -327,28 +373,12 @@ def set_avalon_environments(): before avalon module is imported because avalon works with globals set with environment variables. """ - from openpype import PACKAGE_DIR - # Path to OpenPype's schema - schema_path = os.path.join( - os.path.dirname(PACKAGE_DIR), - "schema" - ) - # Avalon mongo URL - avalon_mongo_url = ( - os.environ.get("AVALON_MONGO") - or os.environ["OPENPYPE_MONGO"] - ) avalon_db = os.environ.get("AVALON_DB") or "avalon" # for tests os.environ.update({ - # Mongo url (use same as OpenPype has) - "AVALON_MONGO": avalon_mongo_url, - - "AVALON_SCHEMA": schema_path, # Mongo DB name where avalon docs are stored "AVALON_DB": avalon_db, # Name of config - "AVALON_CONFIG": "openpype", "AVALON_LABEL": "OpenPype" }) @@ -838,17 +868,15 @@ def _bootstrap_from_code(use_version, use_staging): version_path = Path(_openpype_root) os.environ["OPENPYPE_REPOS_ROOT"] = _openpype_root - repos = os.listdir(os.path.join(_openpype_root, "repos")) - repos = [os.path.join(_openpype_root, "repos", repo) for repo in repos] - # add self to python paths - repos.insert(0, _openpype_root) - for repo in repos: - sys.path.insert(0, repo) + # add self to sys.path of current process + # NOTE: this seems to be duplicate of 'add_paths_from_directory' + sys.path.insert(0, _openpype_root) # add venv 'site-packages' to PYTHONPATH python_path = os.getenv("PYTHONPATH", "") split_paths = python_path.split(os.pathsep) - # Add repos as first in list - split_paths = repos + split_paths + # add self to python paths + split_paths.insert(0, _openpype_root) + # last one should be venv site-packages # this is slightly convoluted as we can get here from frozen code too # in case when we are running without any version installed. @@ -927,6 +955,16 @@ def boot(): _print(">>> run disk mapping command ...") run_disk_mapping_commands(global_settings) + # Logging to server enabled/disabled + log_to_server = global_settings.get("log_to_server", True) + if log_to_server: + os.environ["OPENPYPE_LOG_TO_SERVER"] = "1" + log_to_server_msg = "ON" + else: + os.environ.pop("OPENPYPE_LOG_TO_SERVER", None) + log_to_server_msg = "OFF" + _print(f">>> Logging to server is turned {log_to_server_msg}") + # Get openpype path from database and set it to environment so openpype can # find its versions there and bootstrap them. 
openpype_path = get_openpype_path_from_settings(global_settings) diff --git a/tests/integration/conftest.py b/tests/conftest.py similarity index 100% rename from tests/integration/conftest.py rename to tests/conftest.py diff --git a/tests/lib/assert_classes.py b/tests/lib/assert_classes.py index 98f758767d..9a94f89fd0 100644 --- a/tests/lib/assert_classes.py +++ b/tests/lib/assert_classes.py @@ -24,16 +24,19 @@ class DBAssert: else: args[key] = val - msg = None no_of_docs = dbcon.count_documents(args) - if expected != no_of_docs: - msg = "Not expected no of versions. "\ - "Expected {}, found {}".format(expected, no_of_docs) + msg = None args.pop("type") detail_str = " " if args: - detail_str = " with {}".format(args) + detail_str = " with '{}'".format(args) + + if expected != no_of_docs: + msg = "Not expected no of '{}'{}."\ + "Expected {}, found {}".format(queried_type, + detail_str, + expected, no_of_docs) status = "successful" if msg: @@ -42,7 +45,5 @@ class DBAssert: print("Comparing count of {}{} {}".format(queried_type, detail_str, status)) - if msg: - print(msg) return msg diff --git a/tests/lib/testing_classes.py b/tests/lib/testing_classes.py index 0a9da1aca8..7dfbf6fd0d 100644 --- a/tests/lib/testing_classes.py +++ b/tests/lib/testing_classes.py @@ -273,8 +273,6 @@ class PublishTest(ModuleUnitTest): ) os.environ["AVALON_SCHEMA"] = schema_path - import openpype - openpype.install() os.environ["OPENPYPE_EXECUTABLE"] = sys.executable from openpype.lib import ApplicationManager diff --git a/tests/unit/igniter/test_bootstrap_repos.py b/tests/unit/igniter/test_bootstrap_repos.py index 65cd5a2399..10278c4928 100644 --- a/tests/unit/igniter/test_bootstrap_repos.py +++ b/tests/unit/igniter/test_bootstrap_repos.py @@ -152,8 +152,6 @@ def test_install_live_repos(fix_bootstrap, printer, monkeypatch, pytestconfig): openpype_version = fix_bootstrap.create_version_from_live_code() sep = os.path.sep expected_paths = [ - f"{openpype_version.path}{sep}repos{sep}avalon-core", - f"{openpype_version.path}{sep}repos{sep}avalon-unreal-integration", f"{openpype_version.path}" ] printer("testing zip creation") diff --git a/tests/unit/openpype/modules/sync_server/test_module_api.py b/tests/unit/openpype/modules/sync_server/test_module_api.py new file mode 100644 index 0000000000..a484977758 --- /dev/null +++ b/tests/unit/openpype/modules/sync_server/test_module_api.py @@ -0,0 +1,64 @@ +"""Test file for Sync Server, tests API methods, currently for integrate_new + + File: + creates temporary directory and downloads .zip file from GDrive + unzips .zip file + uses content of .zip file (MongoDB's dumps) to import to new databases + with use of 'monkeypatch_session' modifies required env vars + temporarily + runs battery of tests checking that site operation for Sync Server + module are working + removes temporary folder + removes temporary databases (?) 
+""" +import pytest + +from tests.lib.testing_classes import ModuleUnitTest + + +class TestModuleApi(ModuleUnitTest): + + REPRESENTATION_ID = "60e578d0c987036c6a7b741d" + + TEST_FILES = [("1eCwPljuJeOI8A3aisfOIBKKjcmIycTEt", + "test_site_operations.zip", '')] + + @pytest.fixture(scope="module") + def setup_sync_server_module(self, dbcon): + """Get sync_server_module from ModulesManager""" + from openpype.modules import ModulesManager + + manager = ModulesManager() + sync_server = manager.modules_by_name["sync_server"] + yield sync_server + + def test_get_alt_site_pairs(self, setup_sync_server_module): + conf_sites = {"SFTP": {"alternative_sites": ["studio"]}, + "studio2": {"alternative_sites": ["studio"]}} + + ret = setup_sync_server_module._get_alt_site_pairs(conf_sites) + expected = {"SFTP": {"studio", "studio2"}, + "studio": {"SFTP", "studio2"}, + "studio2": {"studio", "SFTP"}} + assert ret == expected, "Not matching result" + + def test_get_alt_site_pairs_deep(self, setup_sync_server_module): + conf_sites = {"A": {"alternative_sites": ["C"]}, + "B": {"alternative_sites": ["C"]}, + "C": {"alternative_sites": ["D"]}, + "D": {"alternative_sites": ["A"]}, + "F": {"alternative_sites": ["G"]}, + "G": {"alternative_sites": ["F"]}, + } + + ret = setup_sync_server_module._get_alt_site_pairs(conf_sites) + expected = {"A": {"B", "C", "D"}, + "B": {"A", "C", "D"}, + "C": {"A", "B", "D"}, + "D": {"A", "B", "C"}, + "F": {"G"}, + "G": {"F"}} + assert ret == expected, "Not matching result" + + +test_case = TestModuleApi() diff --git a/tools/build.ps1 b/tools/build.ps1 index 10da3d0b83..ff28544954 100644 --- a/tools/build.ps1 +++ b/tools/build.ps1 @@ -180,6 +180,9 @@ $out = & "$($env:POETRY_HOME)\bin\poetry" run python setup.py build 2>&1 Set-Content -Path "$($openpype_root)\build\build.log" -Value $out if ($LASTEXITCODE -ne 0) { + Write-Host "------------------------------------------" -ForegroundColor Red + Get-Content "$($openpype_root)\build\build.log" + Write-Host "------------------------------------------" -ForegroundColor Red Write-Host "!!! " -NoNewLine -ForegroundColor Red Write-Host "Build failed. Check the log: " -NoNewline Write-Host ".\build\build.log" -ForegroundColor Yellow diff --git a/tools/build.sh b/tools/build.sh index 301f26023a..79fb748cd5 100755 --- a/tools/build.sh +++ b/tools/build.sh @@ -185,9 +185,9 @@ if [ "$disable_submodule_update" == 1 ]; then fi echo -e "${BIGreen}>>>${RST} Building ..." 
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
-  "$POETRY_HOME/bin/poetry" run python "$openpype_root/setup.py" build &> "$openpype_root/build/build.log" || { echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return 1; }
+  "$POETRY_HOME/bin/poetry" run python "$openpype_root/setup.py" build &> "$openpype_root/build/build.log" || { echo -e "${BIRed}------------------------------------------${RST}"; cat "$openpype_root/build/build.log"; echo -e "${BIRed}------------------------------------------${RST}"; echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return 1; }
elif [[ "$OSTYPE" == "darwin"* ]]; then
-  "$POETRY_HOME/bin/poetry" run python "$openpype_root/setup.py" bdist_mac &> "$openpype_root/build/build.log" || { echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return 1; }
+  "$POETRY_HOME/bin/poetry" run python "$openpype_root/setup.py" bdist_mac &> "$openpype_root/build/build.log" || { echo -e "${BIRed}------------------------------------------${RST}"; cat "$openpype_root/build/build.log"; echo -e "${BIRed}------------------------------------------${RST}"; echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return 1; }
fi
"$POETRY_HOME/bin/poetry" run python "$openpype_root/tools/build_dependencies.py"
diff --git a/website/docs/admin_openpype_commands.md b/website/docs/admin_openpype_commands.md
index 74cb895ac9..53b4799d6e 100644
--- a/website/docs/admin_openpype_commands.md
+++ b/website/docs/admin_openpype_commands.md
@@ -24,7 +24,11 @@ openpype_console --use-version=3.0.0-foo+bar
`--list-versions [--use-staging]` - to list available versions.
-`--validate-version` to validate integrity of given version
+`--validate-version` - to validate integrity of given version
+
+`--verbose <level>` - change log verbosity level of OpenPype loggers
+
+`--debug` - set debug flag, affects logging
For more information [see here](admin_use.md#run-openpype).
@@ -47,13 +51,9 @@ For more information [see here](admin_use.md#run-openpype).
---
### `tray` arguments {#tray-arguments}
-| Argument | Description |
-| --- | --- |
-| `--debug` | print verbose information useful for debugging (works with `openpype_console`) |
-To launch Tray with debugging information:
```shell
-openpype_console tray --debug
+openpype_console tray
```
---
### `launch` arguments {#eventserver-arguments}
@@ -62,7 +62,6 @@ option to specify them.
| Argument | Description |
| --- | --- |
-| `--debug` | print debug info |
| `--ftrack-url` | URL to ftrack server (can be set with `FTRACK_SERVER`) |
| `--ftrack-user` | user name to log in to ftrack (can be set with `FTRACK_API_USER`) |
| `--ftrack-api-key` | ftrack api key (can be set with `FTRACK_API_KEY`) |
@@ -98,12 +97,16 @@ pype launch --app python --project my_project --asset my_asset --task my_task
---
### `publish` arguments {#publish-arguments}
+Run publishing based on metadata passed in a json file, e.g. on a farm.
+
| Argument | Description |
| --- | --- |
-| `--debug` | print more verbose information |
+| `--targets` | define publishing targets (e.g. "farm") |
+| `--gui` (`-g`) | show publishing UI |
+| Positional argument | path to the metadata json file |
```shell
-pype publish
+openpype publish --targets farm
```
---
diff --git a/website/docs/admin_settings_project_anatomy.md b/website/docs/admin_settings_project_anatomy.md
index 98003dc381..b98819cd8a 100644
--- a/website/docs/admin_settings_project_anatomy.md
+++ b/website/docs/admin_settings_project_anatomy.md
@@ -59,7 +59,7 @@ We have a few required anatomy templates for OpenPype to work properly, however
| `asset` | Name of asset or shot |
| `task[name]` | Name of task |
| `task[type]` | Type of task |
-| `task[short]` | Shortname of task |
+| `task[short]` | Short name of task type (e.g. 'Modeling' > 'mdl') |
| `parent` | Name of hierarchical parent |
| `version` | Version number |
| `subset` | Subset name |
@@ -105,5 +105,8 @@ We have a few required anatomy templates for OpenPype to work properly, however
## Task Types
+Current state of default Task descriptors.
+
+![tasks](assets/settings/anatomy_tasks.png)
## Colour Management and Formats
\ No newline at end of file
diff --git a/website/docs/admin_use.md b/website/docs/admin_use.md
index 178241ad19..f84905c486 100644
--- a/website/docs/admin_use.md
+++ b/website/docs/admin_use.md
@@ -69,6 +69,22 @@ stored in `checksums` file.
Add `--headless` to run OpenPype without graphical UI (useful on server or on automated tasks, etc.)
:::
+`--verbose <level>` - change log verbosity level of OpenPype loggers.
+
+Level value can be an integer in range `0-50` or one of the enum strings `"notset" (0)`, `"debug" (10)`, `"info" (20)`, `"warning" (30)`, `"error" (40)`, `"critical" (50)`. The value is stored to the `OPENPYPE_LOG_LEVEL` environment variable for subsequent processes.
+
+```shell
+openpype_console --verbose debug
+```
+
+`--debug` - set debug flag, affects logging
+
+Enables the debug flag for the OpenPype process by changing the value of environment variable `OPENPYPE_DEBUG` to `"1"`. At this moment it affects only OpenPype loggers. The argument `--verbose` or environment variable `OPENPYPE_LOG_LEVEL` take precedence when setting the log level.
+
+```shell
+openpype_console --debug
+```
+
### Details
When you run OpenPype from the executable, a few checks are made:
diff --git a/website/docs/api.md b/website/docs/api.md
deleted file mode 100644
index 7cad92d603..0000000000
--- a/website/docs/api.md
+++ /dev/null
@@ -1,7 +0,0 @@
----
-id: api
-title: Pype API
-sidebar_label: API
----
-
-Work in progress
diff --git a/website/docs/artist_hosts.md b/website/docs/artist_hosts.md
deleted file mode 100644
index 609f6d97c8..0000000000
--- a/website/docs/artist_hosts.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-id: artist_hosts
-title: Hosts
-sidebar_label: Hosts
----
-
-## Maya
-
-## Houdini
-
-## Nuke
-
-## Fusion
-
-## Unreal
-
-## System
diff --git a/website/docs/artist_hosts_hiero.md b/website/docs/artist_hosts_hiero.md
index f516c3a6e0..dc6f1696e7 100644
--- a/website/docs/artist_hosts_hiero.md
+++ b/website/docs/artist_hosts_hiero.md
@@ -94,6 +94,8 @@ This tool will set any defined colorspace definition from OpenPype `Settings / P
With OpenPype, you can use Hiero/NKS as a starting point for creating a project's **shots** as *assets* from timeline clips with their *hierarchical parents* like **episodes**, **sequences**, **folders**, and their child **tasks**. Most importantly it will create **versions** of plate *subsets*, with or without **reference video**. Publishing naturally creates clip **thumbnails** and assigns them to the shot *asset*.
Hiero also publishes an **audio** *subset* and various **soft-effects**, either as a retiming component that is part of the published plates or as **color-transformations**, which will be available later for compositing artists to use either as a *viewport input-process* or as *loaded nodes* in the graph editor.



+
+
### Preparing timeline for conversion to instances
Because we don't support on-the-fly data conversion, raw camera sources or other formats which need to be converted for 2D/3D work should be converted beforehand and the timeline reconformed. Before any clips in the timeline can be converted to publishable instances, we recommend the following.
1. Merge all tracks which are supposed to be one and are multiplied only because of the editor's style
@@ -191,3 +193,12 @@ If you wish to change any individual properties of the shot then you are able to
+
+### Publishing Effects from Hiero to Nuke
+This video shows a way to publish a shot look as an effect from Hiero to Nuke.
+
+
+
+### Assembling edit from published shot versions
+
+
diff --git a/website/docs/artist_hosts_nuke.md b/website/docs/artist_hosts_nuke.md
deleted file mode 100644
index 1e02599570..0000000000
--- a/website/docs/artist_hosts_nuke.md
+++ /dev/null
@@ -1,145 +0,0 @@
----
-id: artist_hosts_nuke
-title: Nuke
-sidebar_label: Nuke
----
-
-:::important
-After Nuke starts it will automatically **Apply All Settings** for you. If you are sure the settings are wrong just contact your supervisor and he will set them correctly for you in project database.
-:::
-
-:::note
-The workflows are identical for both. We are supporting versions **`11.0`** and above.
-:::
-
-## OpenPype global tools
-
-- [Set Context](artist_tools.md#set-context)
-- [Work Files](artist_tools.md#workfiles)
-- [Create](artist_tools.md#creator)
-- [Load](artist_tools.md#loader)
-- [Manage (Inventory)](artist_tools.md#inventory)
-- [Publish](artist_tools.md#publisher)
-- [Library Loader](artist_tools.md#library-loader)
-
-## Nuke specific tools
-
-
- -### Set Frame Ranges - -Use this feature in case you are not sure the frame range is correct. - -##### Result - -- setting Frame Range in script settings -- setting Frame Range in viewers (timeline) - -
-
- -![Set Frame Ranges](assets/nuke_setFrameRanges.png) - -
-
- - -
- -![Set Frame Ranges Timeline](assets/nuke_setFrameRanges_timeline.png) - -
- -1. limiting to Frame Range without handles -2. **Input** handle on start -3. **Output** handle on end - -
-
- -### Set Resolution - -
-
- - -This menu item will set correct resolution format for you defined by your production. - -##### Result - -- creates new item in formats with project name -- sets the new format as used - -
-
- -![Set Resolution](assets/nuke_setResolution.png) - -
-
- - -### Set Colorspace - -
-
- -This menu item will set correct Colorspace definitions for you. All has to be configured by your production (Project coordinator). - -##### Result - -- set Colorspace in your script settings -- set preview LUT to your viewers -- set correct colorspace to all discovered Read nodes (following expression set in settings) - -
-
- -![Set Colorspace](assets/nuke_setColorspace.png) - -
-
- - -### Apply All Settings - -
-
- -It is usually enough if you once per while use this option just to make yourself sure the workfile is having set correct properties. - -##### Result - -- set Frame Ranges -- set Colorspace -- set Resolution - -
-
- -![Apply All Settings](assets/nuke_applyAllSettings.png) - -
-
- -### Build Workfile - -
-
- -This tool will append all available subsets into an actual node graph. It will look into database and get all last [versions](artist_concepts.md#version) of available [subsets](artist_concepts.md#subset). - - -##### Result - -- adds all last versions of subsets (rendered image sequences) as read nodes -- adds publishable write node as `renderMain` subset - -
-
- -![Build First Work File](assets/nuke_buildFirstWorkfile.png) - -
-
\ No newline at end of file diff --git a/website/docs/artist_hosts_nuke_tut.md b/website/docs/artist_hosts_nuke_tut.md index 4b0ef7a78a..296fdf44d5 100644 --- a/website/docs/artist_hosts_nuke_tut.md +++ b/website/docs/artist_hosts_nuke_tut.md @@ -89,6 +89,8 @@ This menu item will set correct Colorspace definitions for you. All has to be co - set preview LUT to your viewers - set correct colorspace to all discovered Read nodes (following expression set in settings) +See [Nuke Color Management](artist_hosts_nuke_tut.md#nuke-color-management) +
@@ -144,6 +146,8 @@ This tool will append all available subsets into an actual node graph. It will l
This QuickStart is a short introduction to what OpenPype can do for you. It attempts to give an overview for compositing artists, and simplifies processes that are better described in specific parts of the documentation.
+
+
### Launch Nuke - Shot and Task Context
OpenPype has to know what shot and task you are working on. You need to run Nuke in the context of the task, using an Ftrack Action or the OpenPype Launcher to select the task and run Nuke.
@@ -161,7 +165,7 @@ Nuke OpenPype menu shows the current context
Launching Nuke with context stops your timer, and starts the clock on the shot and task you picked.
-Openpype makes initial setup for your Nuke script. It is the same as running [Apply All Settings](artist_hosts_nuke.md#apply-all-settings) from the OpenPype menu.
+Openpype makes initial setup for your Nuke script. It is the same as running [Apply All Settings](artist_hosts_nuke_tut.md#apply-all-settings) from the OpenPype menu.
- Reads frame range and resolution from the Avalon database, sets it in Nuke Project Settings, creates a Viewer node, sets its range and indicates handles by In and Out points.
@@ -226,6 +230,11 @@ This will create a Group with a Write node inside.
You can configure write node parameters in **Studio Settings → Project → Anatomy → Color Management and Output Formats → Nuke → Nodes**
:::
+### Create Prerender Node
+Creating a Prerender is very similar to creating an OpenPype managed Write node.
+
+
+
#### What Nuke Publish Does
From the artist's perspective, Nuke publish gathers all the stuff found in the Nuke script with the Publish checkbox set to on, exports it and raises the Nuke script (workfile) version.
@@ -315,6 +324,8 @@ Main disadvantage of this approach is that you can render only one version of yo
When making quick farm publishes, like making two versions with different color correction, care must be taken to let the first job (first version) completely finish before the second version starts rendering.
+
+
### Managing Versions
![Versionless](assets/nuke_tut/nuke_ManageVersion.png)
@@ -323,15 +334,30 @@ OpenPype checks all the assets loaded to Nuke on script open. All out of date as
Use Manage to switch versions for loaded assets.
+### Loading Effects
+This video shows how to publish an effect from Hiero / Nuke Studio, and use the effect in Nuke.
+
+
+
+
+
+### Nuke Color Management
+
+
+
## Troubleshooting
### Fixing Validate Containers
+If your Pyblish dialog fails on Validate Containers, you might have an old asset loaded. Use OpenPype - Manage... to switch the asset(s) to the latest version.
+
![Versionless](assets/nuke_tut/nuke_ValidateContainers.png)
-If your Pyblish dialog fails on Validate Containers, you might have an old asset loaded. Use OpenPype - Manage... to switch the asset(s) to the latest version.
+
### Fixing Validate Version
If your Pyblish dialog fails on Validate Version, you might be trying to publish an already published version. Raise your version in the OpenPype WorkFiles SaveAs.
-Or maybe you accidentally copied write node from different shot to your current one. Check the write publishes on the left side of the Pyblish dialog. Typically you publish only one write.
Locate and delete the stray write from other shot.
+
+
diff --git a/website/docs/artist_hosts_photoshop.md b/website/docs/artist_hosts_photoshop.md
index b2b5fd58da..36670054ee 100644
--- a/website/docs/artist_hosts_photoshop.md
+++ b/website/docs/artist_hosts_photoshop.md
@@ -49,6 +49,12 @@ With the `Creator` you have a variety of options to create:
- Uncheck `Use selection`.
- This will create a single group named after the `Subset` in the `Creator`.
+#### Simplified publish
+
+There is a simplified workflow for the simple use case where only a single image containing all visible layers should be created.
+No image instances may be present in the workfile, and `project_settings/photoshop/publish/CollectInstances/flatten_subset_template` must be filled in the Settings.
+Then artists just need to hit the 'Publish' button in the menu.
+
### Publish
When you are ready to share some work, you will need to publish. This is done by opening the `Pyblish` through the extensions `Publish` button.
@@ -105,3 +111,67 @@ You can switch to a previous version of the image or update to the latest.
![Loader](assets/photoshop_manage_switch.gif)
![Loader](assets/photoshop_manage_update.gif)
+
+
+### New Publisher
+
+All previous screenshots came from the regular [pyblish](https://pyblish.com/) process, but there is also a different UI available. This process extends the existing implementation and adds new functionality.
+
+To test this in Photoshop, the artist first needs to enable the experimental `New publisher` in Settings. (Tray > Settings > Experimental tools)
+![Settings](assets/experimental_tools_settings.png)
+
+A new dialog opens after clicking on the `Experimental tools` button in the Openpype extension menu.
+![Menu](assets/experimental_tools_menu.png)
+
+After you click on this button, this dialog will show up.
+
+![Menu](assets/artist_photoshop_new_publisher_workfile.png)
+
+You can see the first instance, called `workfileYourTaskName`. (The name depends on the studio naming convention for Photoshop's workfiles.) This instance is so-called "automatic":
+it was created without instigation by the artist. You shouldn't delete this instance as it might hold necessary values for future publishing, but you can choose to skip it
+from publishing (by toggling the pill button inside the rectangle denoting the instance).
+
+The New publisher allows publishing into a different context: just click on a workfile instance, update `Variant`, `Asset` or `Task` in the form in the middle, and don't forget to click the 'Confirm' button.
+
+Similarly to the old publishing approach, you need to create instances for everything you want to publish. Initiate this by clicking on the '+' sign in the bottom left corner.
+
+![Instance creator](assets/artist_photoshop_new_publisher_instance.png)
+
+In this dialog you can select the family for the published layer or group. Currently only 'image' is implemented.
+
+On the right-hand side you can see creator attributes:
+- `Create only for selected` - mimics the `Use selected` option of the regular publish
+- `Create separate instance for each selected` - whether a separate instance should be created for each layer when multiple layers are selected
+
+![Instance created](assets/artist_photoshop_new_publisher_instance_created.png)
+
+Here you can see a newly created instance of the image family. (The name depends on the studio naming convention for the image family.) You can disable the instance from publishing in the same fashion as the workfile instance.
+You can also delete an instance by selecting it and clicking on the trashcan icon (next to the plus button on the bottom left).
+
+Buttons on the bottom right are for:
+- `Refresh publishing` - reset the publishing process to its starting position - useful if a previous publish failed, or you changed the configuration of a publish
+- `Stop/pause publishing` - if you would like to pause the publishing process at any time
+- `Validate` - if you would like to run only the collecting and validating phases (nothing will be published yet)
+- `Publish` - the standard way to kick off the full publishing process
+
+In the unfortunate case of some error during publishing, you would receive this kind of error dialog.
+
+![Publish failed](assets/artist_photoshop_new_publisher_publish_failed.png)
+
+In this case the issue is that you are publishing two or more instances with the same subset name ('imageMaing'). If the error is recoverable by the artist, you should
+see helpful information in a `How to repair?` section, or fix it automatically by clicking on the 'Wrench' button on the right if present.
+
+If you would like to ask an admin or support for help, you can use any of the three buttons on the bottom left:
+- `Copy report` - stash the full publishing log to the clipboard
+- `Export and save report` - save the log into a file for sending it via mail or any communication tool
+- `Show details` - switches into a more detailed list of published instances and plugins. Similar to the old pyblish list.
+
+If you are able to fix the workfile yourself, use the first button on the right to set the UI to its initial state before publishing. (Click the `Publish` button to start again.)
+
+The new publishing process should be backward compatible, e.g. if you have a workfile with instances created in the previous publishing approach, they will be translated automatically and
+can be used right away.
+
+If you create instances in the New publisher, you cannot use them in the old approach though!
+
+If you hit unexpected behaviour with old instances, contact support first, then you can try some steps to recover your publish. Delete the instances in the New publisher UI, or try `Subset manager` in the extension menu.
+The nuclear option is to purge the workfile metadata in `File > File Info > Origin > Headline`. This is only for the most determined daredevils though!
diff --git a/website/docs/assets/artist_photoshop_new_publisher_instance.png b/website/docs/assets/artist_photoshop_new_publisher_instance.png
new file mode 100644
index 0000000000..723a032c94
Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_instance.png differ
diff --git a/website/docs/assets/artist_photoshop_new_publisher_instance_created.png b/website/docs/assets/artist_photoshop_new_publisher_instance_created.png
new file mode 100644
index 0000000000..0cf6d1d18c
Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_instance_created.png differ
diff --git a/website/docs/assets/artist_photoshop_new_publisher_publish_failed.png b/website/docs/assets/artist_photoshop_new_publisher_publish_failed.png
new file mode 100644
index 0000000000..e34497b77d
Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_publish_failed.png differ
diff --git a/website/docs/assets/artist_photoshop_new_publisher_workfile.png b/website/docs/assets/artist_photoshop_new_publisher_workfile.png
new file mode 100644
index 0000000000..006206519f
Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_workfile.png differ
diff --git a/website/docs/assets/experimental_tools_menu.png b/website/docs/assets/experimental_tools_menu.png
new file mode 100644
index 0000000000..79fa8d3655
Binary files /dev/null and b/website/docs/assets/experimental_tools_menu.png differ
diff --git a/website/docs/assets/experimental_tools_settings.png b/website/docs/assets/experimental_tools_settings.png
new file mode 100644
index 0000000000..4d514e8a8f
Binary files /dev/null and b/website/docs/assets/experimental_tools_settings.png differ
diff --git a/website/docs/assets/publisher_card_view.png b/website/docs/assets/publisher_card_view.png
new file mode 100644
index 0000000000..57b012cb6d
Binary files /dev/null and b/website/docs/assets/publisher_card_view.png differ
diff --git a/website/docs/assets/publisher_create_dialog.png b/website/docs/assets/publisher_create_dialog.png
new file mode 100644
index 0000000000..6e9275062d
Binary files /dev/null and b/website/docs/assets/publisher_create_dialog.png differ
diff --git a/website/docs/assets/publisher_list_view.png b/website/docs/assets/publisher_list_view.png
new file mode 100644
index 0000000000..e9dc8a607a
Binary files /dev/null and b/website/docs/assets/publisher_list_view.png differ
diff --git a/website/docs/assets/settings/anatomy_tasks.png b/website/docs/assets/settings/anatomy_tasks.png
new file mode 100644
index 0000000000..16265cf8eb
Binary files /dev/null and b/website/docs/assets/settings/anatomy_tasks.png differ
diff --git a/website/docs/dev_publishing.md b/website/docs/dev_publishing.md
new file mode 100644
index 0000000000..8ee3b7e85f
--- /dev/null
+++ b/website/docs/dev_publishing.md
@@ -0,0 +1,548 @@
+---
+id: dev_publishing
+title: Publishing
+sidebar_label: Publishing
+toc_max_heading_level: 4
+---
+
+The publishing workflow consists of 2 parts:
+- Creating - Mark what will be published and how.
+- Publishing - Use data from Creating to go through the pyblish process.
+
+OpenPype is using [pyblish](https://pyblish.com/) for the publishing process. OpenPype extends and modifies a few of its functions, mainly for report and UI purposes. The main differences are that OpenPype's publish UI allows enabling/disabling instances or plugins during the Creating part instead of the publishing part, and limits plugin actions to failed validation plugins only.
+
+## **Creating**
+
+The concept of Creating does not have to "create" anything yet, but rather prepares and stores metadata about an "instance" (which becomes a subset after the publish process). A created instance always has a `family` which defines what kind of data will be published; the best example is the `workfile` family. Storing of metadata is host specific and may even be Creator plugin specific. Most hosts store metadata into a workfile (Maya scene, Nuke script, etc.) to an item or a node the same way as regular Pyblish instances, so consistency of the host implementation is kept, but some features may require a different approach, which is why it is the creator plugin's responsibility. Storing the metadata to the workfile persists values, so the artist does not have to create and set what should be published and how over and over.
+
+### Created instance
+
+Object representation of created instance metadata, defined by the class **CreatedInstance**. It has access to the **CreateContext** and **BaseCreator** that initialized the object. It is a dictionary-like object with a few immutable keys (marked with a star `*` in the table). The immutable keys are set by the creator plugin or create context on initialization and their values can't change. An instance can have more arbitrary data, for example ids of nodes in the scene, but keep in mind that some keys are reserved.
+
+| Key | Type | Description |
+|---|---|---|
+| *id | str | Identifier of metadata type. At this moment the constant **"pyblish.avalon.instance"** |
+| *instance_id | str | Unique ID of instance. Set automatically on instance creation using `str(uuid.uuid4())` |
+| *family | str | Instance's family representing the type defined by the creator plugin. |
+| *creator_identifier | str | Identifier of the creator that collected/created the instance. |
+| *creator_attributes | dict | Dictionary of attributes that are defined by the creator plugin (`get_instance_attr_defs`). |
+| *publish_attributes | dict | Dictionary of attributes that are defined by publish plugins. |
+| variant | str | Variant is entered by the artist on creation and may affect **subset**. |
+| subset | str | Name of the instance. This name will be used as the subset name during publishing. Can be changed on context change or variant change. |
+| active | bool | Whether the instance is active and will be published or not. |
+| asset | str | Name of the asset in whose context the instance was created. |
+| task | str | Name of the task in whose context the instance was created. Can be set to `None`. |
+
+:::note
+Task should not be required unless the subset name template expects it.
+:::
+
+An object of **CreatedInstance** has a method **data_to_store** which returns a dictionary that can be serialized to a json string. This method returns all data related to the instance, so it can be re-created using `CreatedInstance.from_existing(data)` (see the sketch below).
+
+#### *Create context* {#category-doc-link}
+
+The controller and wrapper around Creating is `CreateContext`, which takes care of loading the plugins needed for Creating and validates required functions in the host implementation.
+
+The context discovers creator and publish plugins, triggers collection of existing instances on creators and triggers Creating itself. It also keeps track of instance objects by their ids.
+
+Creator plugins can call **creator_adds_instance** or **creator_removed_instance** to add/remove instances, but these methods are not meant to be called directly from outside the creator. The reason is that it is the creator's responsibility to remove metadata or to decide whether the instance should be removed.
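A minimal sketch of the **data_to_store** / **from_existing** round trip mentioned above; the `creator` argument stands for an already initialized creator plugin, and the context values are made-up examples, not part of the source:

```python
import json

from openpype.pipeline import CreatedInstance


def roundtrip_example(creator):
    # Create an instance the same way a creator plugin would
    instance = CreatedInstance(
        "workfile",             # family
        "workfileCompositing",  # subset name
        {"asset": "sq01_sh0010", "task": "compositing", "variant": "Main"},
        creator,
    )

    # 'data_to_store' returns a plain dictionary that can be dumped
    # to a json string and persisted in the workfile...
    serialized = json.dumps(instance.data_to_store())

    # ...and the stored data can be turned back into an instance object
    return CreatedInstance.from_existing(json.loads(serialized), creator)
```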
+
+#### Required functions in host implementation
+A host implementation **must** implement **get_context_data** and **update_context_data**. These two functions are needed to store metadata that are not related to any instance but are needed for the Creating and publishing process. Right now only data about enabled/disabled optional publish plugins is stored there. When the data is not stored and loaded properly, a reset of publishing will cause it to be set back to default values. Context data is also serialized to a json string, similarly to instance data.
+
+There are also a few optional functions. For UI purposes it is possible to implement **get_context_title**, which can return a string shown in the UI as a title. The output string may contain html tags. It is recommended to return the context path (a helper function for this purpose will be added) in this order: `"{project name}/{asset hierarchy}/{asset name}/{task name}"`.
+
+Another optional function is **get_current_context**. This function is handy in hosts where it is possible to open multiple workfiles in one process, so using global context variables is not reliable because artists can switch between opened workfiles without the pipeline being notified. When the function is not implemented or doesn't return the right keys, the global context is used. Expected keys in the output:
+```json
+{
+    "project_name": "MyProject",
+    "asset_name": "sq01_sh0010",
+    "task_name": "Modeling"
+}
+```
+
+### Create plugin
+The main responsibility of a create plugin is to create, update, collect and remove instance metadata and propagate changes to the create context. It has access to the **CreateContext** (`self.create_context`) that discovered the plugin, so it also has access to other creators and instances. Create plugins have a lot of responsibility, so it is recommended to implement common code per host.
+
+#### *BaseCreator*
+Base implementation of a creator plugin. It is not recommended to use this class as a base for production plugins but rather to use one of the **AutoCreator** and **Creator** variants.
+
+**Abstractions**
+- **`family`** (class attr) - Tells what kind of instance will be created.
+```python
+class WorkfileCreator(Creator):
+    family = "workfile"
+```
+
+- **`collect_instances`** (method) - Collect already existing instances from the workfile and add them to the create context. This method is called on initialization or reset of **CreateContext**. Each creator is responsible for finding its instance metadata, converting it to **CreatedInstance** objects and adding them to the create context (`self._add_instance_to_context(instance_obj)`).
+```python
+def collect_instances(self):
+    # Using 'pipeline.list_instances' is just an example of how to get existing instances from the scene
+    # - getting existing instances is different per host implementation
+    for instance_data in pipeline.list_instances():
+        # Process only instances that were created by this creator
+        creator_id = instance_data.get("creator_identifier")
+        if creator_id == self.identifier:
+            # Create instance object from existing data
+            instance = CreatedInstance.from_existing(
+                instance_data, self
+            )
+            # Add instance to create context
+            self._add_instance_to_context(instance)
+```
+
+- **`create`** (method) - Create a new **CreatedInstance** object, store its metadata to the workfile and add the instance into the create context. If an error occurs that artists can fix, Creating should raise **CreatorError** to give them some useful information. Triggers and implementation differ for **Creator** and **AutoCreator**, as sketched below.
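For illustration, a minimal **Creator**-style `create` implementation might look like the sketch below; the `pipeline` calls are hypothetical host functions in line with the examples above, and a fuller example appears in the *Creator* section:

```python
def create(self, subset_name, instance_data, pre_create_data):
    # Validate anything the artist can fix up front and raise
    # CreatorError with an actionable message
    if not pipeline.get_selection():
        raise CreatorError("Nothing to create. Select at least one item.")

    # Create the instance object with the precalculated subset name
    # and context data
    new_instance = CreatedInstance(
        self.family, subset_name, instance_data, self
    )
    # Persist the metadata to the workfile so the instance survives
    # closing and reopening of the scene
    pipeline.imprint(new_instance.id, new_instance.data_to_store())
    # Make the instance known to the create context
    self._add_instance_to_context(new_instance)
```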
+
+- **`update_instances`** (method) - Update data of instances. Receives a list of tuples of **instance** and **changes**.
+```python
+def update_instances(self, update_list):
+    # Loop over changed instances
+    for instance, changes in update_list:
+        # Example possible usage of 'changes' to use a different node on change
+        # of node id in instance data (MADE UP)
+        node = None
+        if "node_id" in changes:
+            old_value, new_value = changes["node_id"]
+            if new_value is not None:
+                node = pipeline.get_node_by_id(new_value)
+
+        if node is None:
+            # Get node in scene that represents the instance
+            node = pipeline.get_node_by_instance_id(instance.id)
+        # Imprint data to the node
+        pipeline.imprint(node, instance.data_to_store())
+
+
+# Most implementations will probably ignore 'changes' completely
+def update_instances(self, update_list):
+    for instance, _ in update_list:
+        # Get node from scene
+        node = pipeline.get_node_by_instance_id(instance.id)
+        # Imprint data to node
+        pipeline.imprint(node, instance.data_to_store())
+```
+
+- **`remove_instances`** (method) - Remove instance metadata from the workfile and from the create context.
+```python
+# Possible way how to remove an instance
+def remove_instances(self, instances):
+    for instance in instances:
+        # Remove instance metadata from workfile
+        pipeline.remove_instance(instance.id)
+        # Remove instance from create context
+        self._remove_instance_from_context(instance)
+
+
+# Default implementation of `AutoCreator`
+def remove_instances(self, instances):
+    pass
+```
+
+:::note
+When a host implementation uses a universal way to store and load instances, you should implement a host specific creator plugin base class with **collect_instances**, **update_instances** and **remove_instances** implemented.
+:::
+
+**Optional implementations**
+
+- **`enabled`** (attr) - Boolean whether the creator plugin is enabled and used.
+- **`identifier`** (class attr) - Consistent unique string identifier of the creator plugin. It is used to identify the source plugin of existing instances. There can't be 2 creator plugins with the same identifier. The default implementation returns the `family` attribute.
+```python
+class RenderLayerCreator(Creator):
+    family = "render"
+    identifier = "render_layer"
+
+
+class RenderPassCreator(Creator):
+    family = "render"
+    identifier = "render_pass"
+```
+
+- **`label`** (attr) - String label of the creator plugin which will show up in the UI; `identifier` is used when not set. It should be possible to use html tags.
+```python
+class RenderLayerCreator(Creator):
+    label = "Render Layer"
+```
+
+- **`get_icon`** (method) - Icon of the creator and its instances. Value can be a path to an image file, the full name of a qtawesome icon, a `QPixmap` or a `QIcon`. For complex cases, or cases when `Qt` objects are returned, it is recommended to override the `get_icon` method and handle the logic, or import `Qt` inside the method to not break headless usage of the creator plugin. For a list of qtawesome icons check the qtawesome github repository (look for the used version in pyproject.toml). The default implementation returns the **icon** attribute.
+- **`icon`** (attr) - Attribute for the default implementation of **get_icon**.
+```python
+class RenderLayerCreator(Creator):
+    # Use font awesome 5 icon
+    icon = "fa5.building"
+```
+
+- **`get_instance_attr_defs`** (method) - Attribute definitions of the instance. The creator can define attributes with default values for each instance. These attributes may affect how instances will be processed during publishing.
+Attribute definitions can be used from `openpype.pipeline.lib.attribute_definitions` (NOTE: Will be moved to `openpype.lib.attribute_definitions` soon). Attribute definitions define basic types of values for different cases, e.g. boolean, number, string, enumerator, etc. The default implementation returns **instance_attr_defs**.
+- **`instance_attr_defs`** (attr) - Attribute for the default implementation of **get_instance_attr_defs**.
+
+```python
+from openpype.pipeline import attribute_definitions
+
+
+class RenderLayerCreator(Creator):
+    def get_instance_attr_defs(self):
+        # Return empty list if '_allow_farm_render' is not enabled (can be set during initialization)
+        if not self._allow_farm_render:
+            return []
+        # Give artist option to change if should be rendered on farm or locally
+        return [
+            attribute_definitions.BoolDef(
+                "render_farm",
+                default=False,
+                label="Render on Farm"
+            )
+        ]
+```
+
+- **`get_subset_name`** (method) - Calculate the subset name based on passed data. The data can be extended using the `get_dynamic_data` method. The default implementation uses `get_subset_name` from `openpype.lib`, which is recommended.
+
+- **`get_dynamic_data`** (method) - Can be used to extend data for subset templates, which may be required in some cases.
+
+
+#### *AutoCreator*
+A creator that is triggered on reset of the create context. Can be used for families that are expected to be created automatically without artist interaction (e.g. **workfile**). The method `create` is triggered after collecting all creators.
+
+:::important
+**AutoCreator** implements **remove_instances** to do nothing, as removing auto-created instances would lead to creating a new instance immediately or on refresh.
+:::
+
+```python
+def __init__(
+    self, create_context, system_settings, project_settings, *args, **kwargs
+):
+    super(MyCreator, self).__init__(
+        create_context, system_settings, project_settings, *args, **kwargs
+    )
+    # Get variant value from settings
+    variant_name = (
+        project_settings["my_host"][self.identifier]["variant"]
+    ).strip()
+    if not variant_name:
+        variant_name = "Main"
+    self._variant_name = variant_name
+
+
+# Create does not expect any arguments
+def create(self):
+    # Look for existing instance in create context
+    existing_instance = None
+    for instance in self.create_context.instances:
+        if instance.creator_identifier == self.identifier:
+            existing_instance = instance
+            break
+
+    # Collect current context information
+    # - variant can be filled from settings
+    variant = self._variant_name
+    # Only place where we can look for current context
+    project_name = io.Session["AVALON_PROJECT"]
+    asset_name = io.Session["AVALON_ASSET"]
+    task_name = io.Session["AVALON_TASK"]
+    host_name = io.Session["AVALON_APP"]
+
+    # Create new instance if it does not exist yet
+    if existing_instance is None:
+        asset_doc = io.find_one({"type": "asset", "name": asset_name})
+        subset_name = self.get_subset_name(
+            variant, task_name, asset_doc, project_name, host_name
+        )
+        data = {
+            "asset": asset_name,
+            "task": task_name,
+            "variant": variant
+        }
+        data.update(self.get_dynamic_data(
+            variant, task_name, asset_doc, project_name, host_name
+        ))
+
+        new_instance = CreatedInstance(
+            self.family, subset_name, data, self
+        )
+        self._add_instance_to_context(new_instance)
+
+    # Update instance context if it is not the same
+    elif (
+        existing_instance["asset"] != asset_name
+        or existing_instance["task"] != task_name
+    ):
+        asset_doc = io.find_one({"type": "asset", "name": asset_name})
+        subset_name = self.get_subset_name(
+            variant, task_name, asset_doc, project_name, host_name
+        )
+        existing_instance["asset"] = asset_name
+        existing_instance["task"] = task_name
+```
+
+#### *Creator*
+Implementation of a creator plugin that is triggered manually by the artist in the UI (or by code). It has more extended options for UI purposes than **AutoCreator**, and its **create** method expects more arguments.
+
+**Optional implementations**
+- **`create_allow_context_change`** (class attr) - Allow setting the context in the UI before Creating. Some creators may not allow it, or their logic would not use the context selection (e.g. bulk creators). Is set to `True` by default.
+```python
+class BulkRenderCreator(Creator):
+    create_allow_context_change = False
+```
+- **`get_default_variants`** (method) - Returns a list of default variants that are listed in the create dialog for the user. Returns the **default_variants** attribute by default.
+- **`default_variants`** (attr) - Attribute for the default implementation of **get_default_variants**.
+
+- **`get_default_variant`** (method) - Returns the default variant that is prefilled in the UI (the value does not have to be in default variants). By default returns the **default_variant** attribute. If it returns `None`, the UI logic takes the first item from **get_default_variants** if there is any, otherwise **"Main"** is used.
+- **`default_variant`** (attr) - Attribute for the default implementation of **get_default_variant**.
+
+- **`get_description`** (method) - Returns a short string description of the creator. Returns the **description** attribute by default.
+- **`description`** (attr) - Attribute for the default implementation of **get_description**.
+
+- **`get_detailed_description`** (method) - Returns a detailed string description of the creator. Can contain markdown. Returns the **detailed_description** attribute by default.
+- **`detailed_description`** (attr) - Attribute for the default implementation of **get_detailed_description**.
+
+- **`get_pre_create_attr_defs`** (method) - Similar to **get_instance_attr_defs**, returns attribute definitions, but they are filled before creation. When creation is called from the UI, the values are passed to the **create** method. Returns the **pre_create_attr_defs** attribute by default.
+- **`pre_create_attr_defs`** (attr) - Attribute for the default implementation of **get_pre_create_attr_defs**.
+
+```python
+from openpype.pipeline import (
+    CreatedInstance,
+    Creator,
+    CreatorError,
+    attribute_definitions,
+)
+
+
+class CreateRender(Creator):
+    family = "render"
+    label = "Render"
+    icon = "fa.eye"
+    description = "Render scene viewport"
+
+    def __init__(
+        self, context, system_settings, project_settings, *args, **kwargs
+    ):
+        super(CreateRender, self).__init__(
+            context, system_settings, project_settings, *args, **kwargs
+        )
+        plugin_settings = (
+            project_settings["my_host"]["create"][self.__class__.__name__]
+        )
+        # Get information if studio has enabled farm publishing
+        self._allow_farm_render = plugin_settings["allow_farm_render"]
+        # Get default variants from settings
+        self.default_variants = plugin_settings["variants"]
+
+    def get_instance_attr_defs(self):
+        # Return empty list if '_allow_farm_render' is not enabled (can be set during initialization)
+        if not self._allow_farm_render:
+            return []
+        # Give artist option to change if should be rendered on farm or locally
+        return [
+            attribute_definitions.BoolDef(
+                "render_farm",
+                default=False,
+                label="Render on Farm"
+            )
+        ]
+
+    def get_pre_create_attr_defs(self):
+        # Give user option to use selection or not
+        attrs = [
+            attribute_definitions.BoolDef(
+                "use_selection",
+                default=False,
+                label="Use selection"
+            )
+        ]
+        if self._allow_farm_render:
+            # Set to render on farm in creator dialog
+            # - this value is not automatically passed to instance attributes
+            #   so the creator must do that during creation
+            attrs.append(
+                attribute_definitions.BoolDef(
+                    "render_farm",
+                    default=False,
+                    label="Render on Farm"
+                )
+            )
+        return attrs
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        # ARGS:
+        # - 'subset_name' - precalculated subset name
+        # - 'instance_data' - context data
+        #   - 'asset' - asset name
+        #   - 'task' - task name
+        #   - 'variant' - variant
+        #   - 'family' - instance family
+
+        # Check if should use selection or not
+        if pre_create_data.get("use_selection"):
+            items = pipeline.get_selection()
+        else:
+            items = [pipeline.create_write()]
+
+        # Validations related to selection
+        if len(items) > 1:
+            raise CreatorError("Please select only a single item at a time.")
+
+        elif not items:
+            raise CreatorError("Nothing to create. Select at least one item.")
+
+        # Create instance object
+        new_instance = CreatedInstance(
+            self.family, subset_name, instance_data, self
+        )
+        # Pass value from pre-create attributes to instance attributes
+        # - use them only when pre-create data contains the key
+        if "render_farm" in pre_create_data:
+            use_farm = pre_create_data["render_farm"]
+            new_instance.creator_attributes["render_farm"] = use_farm
+
+        # Store metadata to workfile
+        pipeline.imprint(new_instance.id, new_instance.data_to_store())
+
+        # Add instance to context
+        self._add_instance_to_context(new_instance)
+```
+
+## **Publish**
+### Exceptions
+OpenPype defines a few specific exceptions that should be used in publish plugins.
+
+#### *Validation exception*
+Validation plugins should raise `PublishValidationError` to show the artist what's wrong and give them actions to fix it. The exception says that errors in the plugin can be fixed by the artists themselves (with or without an action on the plugin). Any other errors will stop publishing immediately. A `PublishValidationError` raised after the validation order has the same effect as any other exception.
+
+The exception `PublishValidationError` expects 4 arguments:
+- **message** Not used in the UI, but used for headless publishing.
+- **title** Short description of the error (2-5 words). The title is used for grouping of exceptions per plugin.
+- **description** Detailed description of the issue, where markdown and html can be used.
+- **detail** Optional, gives even more detailed information for advanced users. At this moment the detail is shown directly under the description, but the plan is to show the detail in a collapsible widget.
+
+An extended version is `PublishXmlValidationError`, which uses xml files with stored descriptions. This helps to avoid having huge markdown texts inside code. The exception has 4 arguments:
+- **plugin** The plugin object which raises the exception, used to find its related xml file.
+- **message** Exception message for publishing without UI or with a different pyblish UI.
+- **key** Optional argument saying which error from the xml is used, as a validation plugin may raise errors with different messages based on the current errors. Default is **"main"**.
+- **formatting_data** Optional dictionary to format data in the error. This is used to fill the detailed description with data from the publishing so the artist can get more precise information.
+
+**Where and how to create an xml file**
+
+Xml files for `PublishXmlValidationError` must be located in the **./help** subfolder next to the plugin, and the filename must match the filename of the plugin.
+```
+# File location relative to the plugin file
+└ publish
+  ├ help
+  │ ├ validate_scene.xml
+  │ └ ...
+  ├ validate_scene.py
+  └ ...
+```
+
+The xml file content has a **<root>** node which may contain any number of **<error>** nodes, but each of them must have an **id** attribute with a unique value. That is then used for the **key**. Each error must have a **<title>**, a **<description>** and a **<detail>**. The text content may contain python formatting keys that can be filled when an exception is raised.
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<root>
+<error id="main">
+<title>Subset context</title>
+<description>
+## Invalid subset context
+
+Context of the given subset doesn't match your current scene.
+
+### How to repair?
+
+You can fix this with the "Repair" button on the right. This will use '{expected_asset}' asset name and overwrite '{found_asset}' asset name in scene metadata.
+
+After that restart publishing with Reload button.
+</description>
+<detail>
+### How could this happen?
+
+The subset was created in different scene with different context
+or the scene file was copy pasted from different context.
+</detail>
+</error>
+</root>
+```
+
+#### *Known errors*
+When there is a known error that can't be fixed by the user (e.g. can't connect to the deadline service, etc.), `KnownPublishError` should be raised. The only difference from other exceptions is that its message is shown in the UI to the artist; otherwise a neutral message without context is shown.
+
+### Plugin extension
+Publish plugins can be extended by additional logic when inheriting from `OpenPypePyblishPluginMixin`, which can be used as a mixin (additional inheritance of the class). Publish plugins that inherit from this mixin can define attributes that will be shown in the **CreatedInstance**. One of the most important usages is being able to turn optional plugins on/off.
+
+Attributes are defined by the return value of the `get_attribute_defs` method. Attribute definitions apply to the families defined in the plugin's `families` attribute if it's an instance plugin, or to the whole context if it's a context plugin. `convert_attribute_values` can be re-implemented to convert existing values (or to remove legacy values); the default implementation just converts the values to the right types.
+
+:::important
+Values of publish attributes from a created instance are never removed automatically, so implementing this method is the best way to remove legacy data or convert it to a new data structure.
+::: + +Possible attribute definitions can be found in `openpype/pipeline/lib/attribute_definitions.py`. + +
+<details>
+<summary>Example plugin</summary>
+
+```python
+import pyblish.api
+from openpype.pipeline import (
+    OpenPypePyblishPluginMixin,
+    attribute_definitions,
+)
+
+
+# Example context plugin
+class MyExtendedPlugin(
+    pyblish.api.ContextPlugin, OpenPypePyblishPluginMixin
+):
+    optional = True
+    active = True
+
+    @classmethod
+    def get_attribute_defs(cls):
+        return [
+            attribute_definitions.BoolDef(
+                # Key under which it will be stored
+                "process",
+                # Use 'active' as default value
+                default=cls.active,
+                # Use plugin label as label for attribute
+                label=cls.label
+            )
+        ]
+
+    def process_plugin(self, context):
+        # First check if plugin is optional
+        if not self.optional:
+            return True
+
+        # Attribute values are stored by class names; the helper
+        # 'get_attr_values_from_data' was implemented to help with
+        # accessing them
+        attribute_values = self.get_attr_values_from_data(context.data)
+        # Get 'process' key
+        process_value = attribute_values.get("process")
+        if process_value is None or process_value:
+            return True
+        return False
+
+    def process(self, context):
+        if not self.process_plugin(context):
+            return
+        # Do plugin logic
+        ...
+```
+
+</details>
+
+## **UI examples**
+### Main publish window
+The main window of the publisher shows instances and their values, collected by creators.
+
+**Card view**
+![Publisher UI - Card view](assets/publisher_card_view.png)
+**List view**
+![Publisher UI - List view](assets/publisher_list_view.png)
+
+#### *Instances views*
+The list of instances always contains an `Options` item which is used to show attributes of context plugins. Values from the item are saved and loaded using the [host implementation](#required-functions-in-host-implementation) functions **get_context_data** and **update_context_data**. Instances are grouped by family and can be shown in a card view (single selection) or a list view (multi selection).
+
+The instance view has 3 buttons at the bottom. The plus sign opens the [create dialog](#create-dialog), the bin removes selected instances and the stripes swap between card and list view.
+
+#### *Context options*
+It is possible to change the variant or the asset and task context of instances in the top part, but all changes there must be confirmed. Confirmation triggers recalculation of subset names, and all new data is stored to the instances.
+
+#### *Create attributes*
+Instance attributes display all created attributes of all selected instances. All attributes that have the same definition are grouped into one input and are visually indicated if the values are not the same for the selected instances. In most cases they have a **< Multiselection >** placeholder.
+
+#### *Publish attributes*
+Publish attributes work the same way as create attributes, but the source of attribute definitions is pyblish plugins. Attributes are filtered based on the families of selected instances and the families defined in the pyblish plugin.
+
+### Create dialog
+![Publisher UI - Create dialog](assets/publisher_create_dialog.png)
+The create dialog is used by the artist to create new instances in a context. The context selection can be enabled/disabled by changing `create_allow_context_change` on the [creator plugin](#creator). In the middle part the artist selects what will be created and what variant it is. On the right side is information about the selected creator and its pre-create attributes. There is also a question mark button which extends the window and displays more detailed information about the creator.
\ No newline at end of file
diff --git a/website/docs/dev_requirements.md b/website/docs/dev_requirements.md
index bbf3b1fb5b..a10aea7865 100644
--- a/website/docs/dev_requirements.md
+++ b/website/docs/dev_requirements.md
@@ -14,7 +14,7 @@ The main things you will need to run and build pype are:
- **Terminal** in your OS
- PowerShell 5.0+ (Windows)
- Bash (Linux)
-- [**Python 3.7.8**](#python) or higher
+- [**Python 3.7.9**](#python) or higher
- [**MongoDB**](#database)
@@ -33,6 +33,8 @@ It can be built and ran on all common platforms. We develop and test on the foll
## Database
+Database version should be at least **MongoDB 4.4**.
+
Pype needs site-wide installation of **MongoDB**. It should be installed on a reliable server that all workstations (and possibly render nodes) can connect to.
This server holds the **Avalon** database that is at the core of everything
diff --git a/website/docs/hosts-maya.md b/website/docs/hosts-maya.md
deleted file mode 100644
index 0ee0c2d86b..0000000000
--- a/website/docs/hosts-maya.md
+++ /dev/null
@@ -1,33 +0,0 @@
-### Tools
-Creator
-Publisher
-Loader
-Scene Inventory
-Look assigner
-Workfiles
-
-### Plugins
-Deadline
-Muster
-Yeti
-Arnold
-Vray
-Redshift
-
-### Families
-Model
-Look
-Rig
-Animation
-Cache
-Camera
-Assembly
-MayaAscii (generic scene)
-Setdress
-RenderSetup
-Review
-arnoldStandin
-vrayProxy
-vrayScene
-yetiCache
-yetiRig
diff --git a/website/docs/manager_ftrack.md b/website/docs/manager_ftrack.md
index defbb4b48f..b5ca167838 100644
--- a/website/docs/manager_ftrack.md
+++ b/website/docs/manager_ftrack.md
@@ -4,7 +4,7 @@ title: Ftrack
sidebar_label: Project Manager
---
-Ftrack is currently the main project management option for OpenPype. This documentation assumes that you are familiar with Ftrack and it's basic principles. If you're new to Ftrack, we recommend having a thorough look at [Ftrack Official Documentation](http://ftrack.rtd.ftrack.com/en/stable/).
+Ftrack is currently the main project management option for OpenPype. This documentation assumes that you are familiar with Ftrack and its basic principles. If you're new to Ftrack, we recommend having a thorough look at the [Ftrack Official Documentation](https://help.ftrack.com/en/).
## Project management
Setting project attributes is the key to a properly working pipeline.
@@ -31,7 +31,7 @@ This process describes how data from Ftrack will get into Avalon database.
### How to synchronize
You can trigger synchronization manually using the [Sync To Avalon](manager_ftrack_actions.md#sync-to-avalon) action.
-Synchronization can also be automated with OpenPype's [event server](#event-server) and synchronization events. If your Ftrack is [prepared for OpenPype](#prepare-ftrack-for-openpype), the project should have custom attribute `Avalon auto-sync`. Check the custom attribute to allow auto-updates with event server.
+Synchronization can also be automated with OpenPype's [event server](#event-server) and synchronization events. If your Ftrack is [prepared for OpenPype](module_ftrack.md#prepare-ftrack-for-openpype), the project should have the custom attribute `Avalon auto-sync`. Check the custom attribute to allow auto-updates with the event server.
:::tip
Always use `Sync To Avalon` action before you enable `Avalon auto-sync`!
diff --git a/website/docs/manager_naming.md b/website/docs/manager_naming.md
deleted file mode 100644
index bf822fbeb4..0000000000
--- a/website/docs/manager_naming.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-id: manager_naming
-title: Naming Conventions
-sidebar_label: Naming Conventions
----
-
-:::note
-This naming convention holds true for most of our pipeline. Please match it as close as possible even for projects and files that might be outside of pipeline scope at this point. Small errors count! The reason for given formatting is to allow people to understand the file at glance and that a script or a program can easily get meaningful information from your files without errors.
-:::
-
-## General rules
-
-For more detailed rules and different file types, have a look at naming conventions for scenes and assets
-
-- Every file starts with file code based on a project it belongs to e.g. ‘tst_’, ‘drm_’
-- Optional subversion and comment always comes after the major version. v##.subversion_comment.
-- File names can only be composed of letters, numbers, underscores `_` and dots “.”
-You can use snakeCase or CamelCase if you need more words in a section.  thisIsLongerSentenceInComment
-No spaces in filenames. Ever!
-Frame numbers are always separated by a period ”.”
-If you're not sure use this template:
-
-## Work files
-
-**`{code}_{shot}_{task}_v001.ext`**
-
-**`{code}_{asset}_{task}_v001.ext`**
-
-**Examples:**
-
-    prj_sh010_enviro_v001.ma
-    prj_sh010_animation_v001.ma
-    prj_sh010_comp_v001.nk
-
-    prj_bob_modelling_v001.ma
-    prj_bob_rigging_v001.ma
-    prj_bob_lookdev_v001.ma
-
-:::info
-In all of the examples anything enclosed in curly brackets  { } is compulsory in the name.
-Anything in square brackets [ ] is optional.
-:::
-
-## Published Assets
-
-**`{code}_{asset}_{family}_{subset}_{version}_[comment].ext`**
-
-**Examples:**
-
-    prj_bob_model_main_v01.ma
-    prj_bob_model_hires_v01.ma
-    prj_bob_model_main_v01_clothes.ma
-    prj_bob_model_main_v01_body.ma
-    prj_bob_rig_main_v01.ma
-    Prj_bob_look_main_v01.ma
-    Prj_bob_look_wet_v01.ma
diff --git a/website/docs/module_site_sync.md b/website/docs/module_site_sync.md
index 78f482352e..2e9cf01102 100644
--- a/website/docs/module_site_sync.md
+++ b/website/docs/module_site_sync.md
@@ -123,6 +123,10 @@ To get working connection to Google Drive there are some necessary steps:
- add new site back in OpenPype Settings, name as you want, provider needs to be 'gdrive'
- distribute credentials file via shared mounted disk location
+:::note
+If you are using a regular personal GDrive for testing, don't forget to add `/My Drive` as the prefix in the root configuration. Business accounts and shared drives don't need this.
+:::
+
### SFTP
SFTP provider is used to connect to an SFTP server. Currently authentication with `user:password` or `user:ssh key` is implemented.
diff --git a/website/sidebars.js b/website/sidebars.js index 16af1e1151..105afc30eb 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -136,6 +136,13 @@ module.exports = { "dev_requirements", "dev_build", "dev_testing", - "dev_contribute" + "dev_contribute", + { + type: "category", + label: "Hosts integrations", + items: [ + "dev_publishing" + ] + } ] }; diff --git a/website/src/pages/index.js b/website/src/pages/index.js index 791b309bbc..d9bbc3eaa0 100644 --- a/website/src/pages/index.js +++ b/website/src/pages/index.js @@ -144,6 +144,11 @@ const studios = [ title: "Ember Light", image: "/img/EmberLight_black.png", infoLink: "https://emberlight.se/", + }, + { + title: "IGG Canada", + image: "/img/igg-logo.png", + infoLink: "https://www.igg.com/", } ]; diff --git a/website/static/img/igg-logo.png b/website/static/img/igg-logo.png new file mode 100644 index 0000000000..3c7f7718f7 Binary files /dev/null and b/website/static/img/igg-logo.png differ diff --git a/website/yarn.lock b/website/yarn.lock index 7f677aaed7..04b9dd658b 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -2311,9 +2311,9 @@ asap@~2.0.3: integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= async@^2.6.2: - version "2.6.3" - resolved "https://registry.yarnpkg.com/async/-/async-2.6.3.tgz#d72625e2344a3656e3a3ad4fa749fa83299d82ff" - integrity sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg== + version "2.6.4" + resolved "https://registry.yarnpkg.com/async/-/async-2.6.4.tgz#706b7ff6084664cd7eae713f6f965433b5504221" + integrity sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA== dependencies: lodash "^4.17.14" @@ -5125,9 +5125,9 @@ minimatch@^3.0.4: brace-expansion "^1.1.7" minimist@^1.2.0, minimist@^1.2.5: - version "1.2.5" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602" - integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw== + version "1.2.6" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" + integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== mkdirp@^0.5.5: version "0.5.5" @@ -5207,9 +5207,9 @@ node-fetch@2.6.7: whatwg-url "^5.0.0" node-forge@^1.2.0: - version "1.2.1" - resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-1.2.1.tgz#82794919071ef2eb5c509293325cec8afd0fd53c" - integrity sha512-Fcvtbb+zBcZXbTTVwqGA5W+MKBj56UjVRevvchv5XrcyXbmNdesfZL37nlcWOfpgHhgmxApw3tQbTr4CqNmX4w== + version "1.3.0" + resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-1.3.0.tgz#37a874ea723855f37db091e6c186e5b67a01d4b2" + integrity sha512-08ARB91bUi6zNKzVmaj3QO7cr397uiDT2nJ63cHjyNtCTWIgvS47j3eT0WfzUwS9+6Z5YshRaoasFkXCKrIYbA== node-releases@^2.0.1: version "2.0.2"