diff --git a/.all-contributorsrc b/.all-contributorsrc new file mode 100644 index 0000000000..a3b85cae68 --- /dev/null +++ b/.all-contributorsrc @@ -0,0 +1,315 @@ +{ + "projectName": "OpenPype", + "projectOwner": "pypeclub", + "repoType": "github", + "repoHost": "https://github.com", + "files": [ + "README.md" + ], + "imageSize": 100, + "commit": true, + "commitConvention": "none", + "contributors": [ + { + "login": "mkolar", + "name": "Milan Kolar", + "avatar_url": "https://avatars.githubusercontent.com/u/3333008?v=4", + "profile": "http://pype.club/", + "contributions": [ + "code", + "doc", + "infra", + "business", + "content", + "fundingFinding", + "maintenance", + "projectManagement", + "review", + "mentoring", + "question" + ] + }, + { + "login": "jakubjezek001", + "name": "Jakub Ježek", + "avatar_url": "https://avatars.githubusercontent.com/u/40640033?v=4", + "profile": "https://www.linkedin.com/in/jakubjezek79", + "contributions": [ + "code", + "doc", + "infra", + "content", + "review", + "maintenance", + "mentoring", + "projectManagement", + "question" + ] + }, + { + "login": "antirotor", + "name": "Ondřej Samohel", + "avatar_url": "https://avatars.githubusercontent.com/u/33513211?v=4", + "profile": "https://github.com/antirotor", + "contributions": [ + "code", + "doc", + "infra", + "content", + "review", + "maintenance", + "mentoring", + "projectManagement", + "question" + ] + }, + { + "login": "iLLiCiTiT", + "name": "Jakub Trllo", + "avatar_url": "https://avatars.githubusercontent.com/u/43494761?v=4", + "profile": "https://github.com/iLLiCiTiT", + "contributions": [ + "code", + "doc", + "infra", + "review", + "maintenance", + "question" + ] + }, + { + "login": "kalisp", + "name": "Petr Kalis", + "avatar_url": "https://avatars.githubusercontent.com/u/4457962?v=4", + "profile": "https://github.com/kalisp", + "contributions": [ + "code", + "doc", + "infra", + "review", + "maintenance", + "question" + ] + }, + { + "login": "64qam", + "name": "64qam", + "avatar_url": "https://avatars.githubusercontent.com/u/26925793?v=4", + "profile": "https://github.com/64qam", + "contributions": [ + "code", + "review", + "doc", + "infra", + "projectManagement", + "maintenance", + "content", + "userTesting" + ] + }, + { + "login": "BigRoy", + "name": "Roy Nieterau", + "avatar_url": "https://avatars.githubusercontent.com/u/2439881?v=4", + "profile": "http://www.colorbleed.nl/", + "contributions": [ + "code", + "doc", + "review", + "mentoring", + "question" + ] + }, + { + "login": "tokejepsen", + "name": "Toke Jepsen", + "avatar_url": "https://avatars.githubusercontent.com/u/1860085?v=4", + "profile": "https://github.com/tokejepsen", + "contributions": [ + "code", + "doc", + "review", + "mentoring", + "question" + ] + }, + { + "login": "jrsndl", + "name": "Jiri Sindelar", + "avatar_url": "https://avatars.githubusercontent.com/u/45896205?v=4", + "profile": "https://github.com/jrsndl", + "contributions": [ + "code", + "review", + "doc", + "content", + "tutorial", + "userTesting" + ] + }, + { + "login": "simonebarbieri", + "name": "Simone Barbieri", + "avatar_url": "https://avatars.githubusercontent.com/u/1087869?v=4", + "profile": "https://barbierisimone.com/", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "karimmozilla", + "name": "karimmozilla", + "avatar_url": "https://avatars.githubusercontent.com/u/82811760?v=4", + "profile": "http://karimmozilla.xyz/", + "contributions": [ + "code" + ] + }, + { + "login": "Allan-I", + "name": "Allan I. 
A.", + "avatar_url": "https://avatars.githubusercontent.com/u/76656700?v=4", + "profile": "https://github.com/Allan-I", + "contributions": [ + "code" + ] + }, + { + "login": "m-u-r-p-h-y", + "name": "murphy", + "avatar_url": "https://avatars.githubusercontent.com/u/352795?v=4", + "profile": "https://www.linkedin.com/in/mmuurrpphhyy/", + "contributions": [ + "code", + "review", + "userTesting", + "doc", + "projectManagement" + ] + }, + { + "login": "aardschok", + "name": "Wijnand Koreman", + "avatar_url": "https://avatars.githubusercontent.com/u/26920875?v=4", + "profile": "https://github.com/aardschok", + "contributions": [ + "code" + ] + }, + { + "login": "zhoub", + "name": "Bo Zhou", + "avatar_url": "https://avatars.githubusercontent.com/u/1798206?v=4", + "profile": "http://jedimaster.cnblogs.com/", + "contributions": [ + "code" + ] + }, + { + "login": "ClementHector", + "name": "Clément Hector", + "avatar_url": "https://avatars.githubusercontent.com/u/7068597?v=4", + "profile": "https://www.linkedin.com/in/clementhector/", + "contributions": [ + "code", + "review" + ] + }, + { + "login": "davidlatwe", + "name": "David Lai", + "avatar_url": "https://avatars.githubusercontent.com/u/3357009?v=4", + "profile": "https://twitter.com/davidlatwe", + "contributions": [ + "code", + "review" + ] + }, + { + "login": "2-REC", + "name": "Derek ", + "avatar_url": "https://avatars.githubusercontent.com/u/42170307?v=4", + "profile": "https://github.com/2-REC", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "gabormarinov", + "name": "Gábor Marinov", + "avatar_url": "https://avatars.githubusercontent.com/u/8620515?v=4", + "profile": "https://github.com/gabormarinov", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "icyvapor", + "name": "icyvapor", + "avatar_url": "https://avatars.githubusercontent.com/u/1195278?v=4", + "profile": "https://github.com/icyvapor", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "jlorrain", + "name": "Jérôme LORRAIN", + "avatar_url": "https://avatars.githubusercontent.com/u/7955673?v=4", + "profile": "https://github.com/jlorrain", + "contributions": [ + "code" + ] + }, + { + "login": "dmo-j-cube", + "name": "David Morris-Oliveros", + "avatar_url": "https://avatars.githubusercontent.com/u/89823400?v=4", + "profile": "https://github.com/dmo-j-cube", + "contributions": [ + "code" + ] + }, + { + "login": "BenoitConnan", + "name": "BenoitConnan", + "avatar_url": "https://avatars.githubusercontent.com/u/82808268?v=4", + "profile": "https://github.com/BenoitConnan", + "contributions": [ + "code" + ] + }, + { + "login": "Malthaldar", + "name": "Malthaldar", + "avatar_url": "https://avatars.githubusercontent.com/u/33671694?v=4", + "profile": "https://github.com/Malthaldar", + "contributions": [ + "code" + ] + }, + { + "login": "svenneve", + "name": "Sven Neve", + "avatar_url": "https://avatars.githubusercontent.com/u/2472863?v=4", + "profile": "http://www.svenneve.com/", + "contributions": [ + "code" + ] + }, + { + "login": "zafrs", + "name": "zafrs", + "avatar_url": "https://avatars.githubusercontent.com/u/26890002?v=4", + "profile": "https://github.com/zafrs", + "contributions": [ + "code" + ] + } + ], + "contributorsPerLine": 7 +} \ No newline at end of file diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index d9b4d8089c..5acd20007c 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -80,7 +80,7 @@ jobs: git tag -a $tag_name -m "nightly build" - name: Push to protected 
main branch - uses: CasperWA/push-protected@v2 + uses: CasperWA/push-protected@v2.10.0 with: token: ${{ secrets.ADMIN_TOKEN }} branch: main diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 917e6c884c..85864b4442 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -68,7 +68,7 @@ jobs: - name: 🔏 Push to protected main branch if: steps.version.outputs.release_tag != 'skip' - uses: CasperWA/push-protected@v2 + uses: CasperWA/push-protected@v2.10.0 with: token: ${{ secrets.ADMIN_TOKEN }} branch: main diff --git a/.gitignore b/.gitignore index fa3fae1ad2..28cfb4b1e9 100644 --- a/.gitignore +++ b/.gitignore @@ -70,6 +70,8 @@ coverage.xml ################## node_modules package-lock.json +package.json +yarn.lock openpype/premiere/ppro/js/debug.log diff --git a/CHANGELOG.md b/CHANGELOG.md index f20276cbd7..a48e9ee806 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,118 +1,177 @@ # Changelog -## [3.9.2-nightly.1](https://github.com/pypeclub/OpenPype/tree/HEAD) +## [3.10.0-nightly.1](https://github.com/pypeclub/OpenPype/tree/HEAD) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.1...HEAD) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...HEAD) + +### 📖 Documentation + +- Nuke docs with videos [\#3052](https://github.com/pypeclub/OpenPype/pull/3052) **🚀 Enhancements** -- CI: change the version bump logic [\#2919](https://github.com/pypeclub/OpenPype/pull/2919) -- Deadline: Add headless argument [\#2916](https://github.com/pypeclub/OpenPype/pull/2916) -- Ftrack: Fill workfile in custom attribute [\#2906](https://github.com/pypeclub/OpenPype/pull/2906) -- Settings UI: Add simple tooltips for settings entities [\#2901](https://github.com/pypeclub/OpenPype/pull/2901) +- Update collect\_render.py [\#3055](https://github.com/pypeclub/OpenPype/pull/3055) **🐛 Bug fixes** -- Ftrack: Missing Ftrack id after editorial publish [\#2905](https://github.com/pypeclub/OpenPype/pull/2905) -- AfterEffects: Fix rendering for single frame in DL [\#2875](https://github.com/pypeclub/OpenPype/pull/2875) +- Nuke: Add aov matching even for remainder and prerender [\#3060](https://github.com/pypeclub/OpenPype/pull/3060) **🔀 Refactored code** -- General: Move formatting and workfile functions [\#2914](https://github.com/pypeclub/OpenPype/pull/2914) +- General: Move host install [\#3009](https://github.com/pypeclub/OpenPype/pull/3009) + +## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.4-nightly.2...3.9.4) + +### 📖 Documentation + +- Documentation: more info about Tasks [\#3062](https://github.com/pypeclub/OpenPype/pull/3062) +- Documentation: Python requirements to 3.7.9 [\#3035](https://github.com/pypeclub/OpenPype/pull/3035) +- Website Docs: Remove unused pages [\#2974](https://github.com/pypeclub/OpenPype/pull/2974) + +**🆕 New features** + +- General: Local overrides for environment variables [\#3045](https://github.com/pypeclub/OpenPype/pull/3045) + +**🚀 Enhancements** + +- TVPaint: Added init file for worker to triggers missing sound file dialog [\#3053](https://github.com/pypeclub/OpenPype/pull/3053) +- Ftrack: Custom attributes can be filled in slate values [\#3036](https://github.com/pypeclub/OpenPype/pull/3036) +- Resolve environment variable in google drive credential path [\#3008](https://github.com/pypeclub/OpenPype/pull/3008) + +**🐛 Bug fixes** + +- GitHub: Updated push-protected action in github workflow 
[\#3064](https://github.com/pypeclub/OpenPype/pull/3064) +- Nuke: Typos in imports from Nuke implementation [\#3061](https://github.com/pypeclub/OpenPype/pull/3061) +- Hotfix: fixing deadline job publishing [\#3059](https://github.com/pypeclub/OpenPype/pull/3059) +- General: Extract Review handle invalid characters for ffmpeg [\#3050](https://github.com/pypeclub/OpenPype/pull/3050) +- Slate Review: Support to keep format on slate concatenation [\#3049](https://github.com/pypeclub/OpenPype/pull/3049) +- Webpublisher: fix processing of workfile [\#3048](https://github.com/pypeclub/OpenPype/pull/3048) +- Ftrack: Integrate ftrack api fix [\#3044](https://github.com/pypeclub/OpenPype/pull/3044) +- Webpublisher - removed wrong hardcoded family [\#3043](https://github.com/pypeclub/OpenPype/pull/3043) +- LibraryLoader: Use current project for asset query in families filter [\#3042](https://github.com/pypeclub/OpenPype/pull/3042) +- SiteSync: Providers ignore that site is disabled [\#3041](https://github.com/pypeclub/OpenPype/pull/3041) +- Unreal: Creator import fixes [\#3040](https://github.com/pypeclub/OpenPype/pull/3040) +- SiteSync: fix transitive alternate sites, fix dropdown in Local Settings [\#3018](https://github.com/pypeclub/OpenPype/pull/3018) + +**Merged pull requests:** + +- Deadline: reworked pools assignment [\#3051](https://github.com/pypeclub/OpenPype/pull/3051) +- Houdini: Avoid ImportError on `hdefereval` when Houdini runs without UI [\#2987](https://github.com/pypeclub/OpenPype/pull/2987) + +## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.3-nightly.2...3.9.3) + +### 📖 Documentation + +- Website Docs: Manager Ftrack fix broken links [\#2979](https://github.com/pypeclub/OpenPype/pull/2979) + +**🆕 New features** + +- Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027) +- Publishing textures for Unreal [\#2988](https://github.com/pypeclub/OpenPype/pull/2988) +- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978) + +**🚀 Enhancements** + +- Ftrack: Add more options for note text of integrate ftrack note [\#3025](https://github.com/pypeclub/OpenPype/pull/3025) +- Console Interpreter: Changed how console splitter size are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016) +- Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015) +- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011) +- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005) +- Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995) +- Workfiles tool: Save as published workfiles [\#2937](https://github.com/pypeclub/OpenPype/pull/2937) + +**🐛 Bug fixes** + +- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033) +- Settings UI: Version column can be extended so version are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032) +- General: Fix validate asset docs plug-in filename and class name [\#3029](https://github.com/pypeclub/OpenPype/pull/3029) +- General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028) +- Harmony: Added creating subset name for workfile from template 
[\#3024](https://github.com/pypeclub/OpenPype/pull/3024) +- AfterEffects: Added creating subset name for workfile from template [\#3023](https://github.com/pypeclub/OpenPype/pull/3023) +- General: Add example addons to ignored [\#3022](https://github.com/pypeclub/OpenPype/pull/3022) +- Maya: Remove missing import [\#3017](https://github.com/pypeclub/OpenPype/pull/3017) +- Ftrack: multiple reviewable componets [\#3012](https://github.com/pypeclub/OpenPype/pull/3012) +- Tray publisher: Fixes after code movement [\#3010](https://github.com/pypeclub/OpenPype/pull/3010) +- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002) +- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996) + +**🔀 Refactored code** + +- General: Move plugins register and discover [\#2935](https://github.com/pypeclub/OpenPype/pull/2935) + +**Merged pull requests:** + +- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030) +- General: adding limitations for pyright [\#2994](https://github.com/pypeclub/OpenPype/pull/2994) + +## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.2-nightly.4...3.9.2) + +### 📖 Documentation + +- Documentation: Added mention of adding My Drive as a root [\#2999](https://github.com/pypeclub/OpenPype/pull/2999) +- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951) + +**🆕 New features** + +- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992) + +**🚀 Enhancements** + +- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001) +- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000) +- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985) +- SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983) +- General: `METADATA\_KEYS` constant as `frozenset` for optimal immutable lookup [\#2980](https://github.com/pypeclub/OpenPype/pull/2980) +- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975) +- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967) +- Slack: Added configurable maximum file size of review upload to Slack [\#2945](https://github.com/pypeclub/OpenPype/pull/2945) +- NewPublisher: Prepared implementation of optional pyblish plugin [\#2943](https://github.com/pypeclub/OpenPype/pull/2943) +- TVPaint: Extractor to convert PNG into EXR [\#2942](https://github.com/pypeclub/OpenPype/pull/2942) + +**🐛 Bug fixes** + +- Hosts: Remove path existence checks in 'add\_implementation\_envs' [\#3004](https://github.com/pypeclub/OpenPype/pull/3004) +- Fix - remove doubled dot in workfile created from template [\#2998](https://github.com/pypeclub/OpenPype/pull/2998) +- PS: fix renaming subset incorrectly in PS [\#2991](https://github.com/pypeclub/OpenPype/pull/2991) +- Fix: Disable setuptools auto discovery [\#2990](https://github.com/pypeclub/OpenPype/pull/2990) +- AEL: fix opening existing workfile if no scene opened [\#2989](https://github.com/pypeclub/OpenPype/pull/2989) +- Maya: Don't do hardlinks on windows for look publishing [\#2986](https://github.com/pypeclub/OpenPype/pull/2986) +- Settings UI: Fix 
version completer on linux [\#2981](https://github.com/pypeclub/OpenPype/pull/2981) +- Photoshop: Fix creation of subset names in PS review and workfile [\#2969](https://github.com/pypeclub/OpenPype/pull/2969) +- Slack: Added default for review\_upload\_limit for Slack [\#2965](https://github.com/pypeclub/OpenPype/pull/2965) +- General: OIIO conversion for ffmeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958) +- Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956) +- General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950) +- LogViewer: Don't refresh on initialization [\#2949](https://github.com/pypeclub/OpenPype/pull/2949) +- nuke: python3 compatibility issue with `iteritems` [\#2948](https://github.com/pypeclub/OpenPype/pull/2948) +- General: anatomy data with correct task short key [\#2947](https://github.com/pypeclub/OpenPype/pull/2947) +- SceneInventory: Fix imports in UI [\#2944](https://github.com/pypeclub/OpenPype/pull/2944) +- Slack: add generic exception [\#2941](https://github.com/pypeclub/OpenPype/pull/2941) +- General: Python specific vendor paths on env injection [\#2939](https://github.com/pypeclub/OpenPype/pull/2939) +- General: More fail safe delete old versions [\#2936](https://github.com/pypeclub/OpenPype/pull/2936) +- Settings UI: Collapsed of collapsible wrapper works as expected [\#2934](https://github.com/pypeclub/OpenPype/pull/2934) + +**Merged pull requests:** + +- Bump paramiko from 2.9.2 to 2.10.1 [\#2973](https://github.com/pypeclub/OpenPype/pull/2973) +- Bump minimist from 1.2.5 to 1.2.6 in /website [\#2954](https://github.com/pypeclub/OpenPype/pull/2954) +- Bump node-forge from 1.2.1 to 1.3.0 in /website [\#2953](https://github.com/pypeclub/OpenPype/pull/2953) +- Maya - added transparency into review creator [\#2952](https://github.com/pypeclub/OpenPype/pull/2952) ## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.1-nightly.3...3.9.1) -**🚀 Enhancements** - -- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907) -- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897) -- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892) -- Nuke: ExtractReviewSlate can handle more codes and profiles [\#2879](https://github.com/pypeclub/OpenPype/pull/2879) -- Flame: sequence used for reference video [\#2869](https://github.com/pypeclub/OpenPype/pull/2869) - -**🐛 Bug fixes** - -- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904) -- Fixing gap detection in extract review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902) -- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899) -- SceneInventory: Fix import of load function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894) -- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891) -- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885) -- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884) -- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874) - -**🔀 Refactored 
code** - -- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889) -- General: Move loader logic from avalon to openpype [\#2886](https://github.com/pypeclub/OpenPype/pull/2886) - ## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.0-nightly.9...3.9.0) -**Deprecated:** - -- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845) - -### 📖 Documentation - -- Documentation: Change Photoshop & AfterEffects plugin path [\#2878](https://github.com/pypeclub/OpenPype/pull/2878) - -**🚀 Enhancements** - -- General: Subset name filtering in ExtractReview outpus [\#2872](https://github.com/pypeclub/OpenPype/pull/2872) -- NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867) -- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863) -- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859) -- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841) -- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837) -- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836) -- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822) -- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817) -- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812) -- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811) -- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805) - -**🐛 Bug fixes** - -- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877) -- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868) -- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866) -- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864) -- General: ffmpeg was crashing on slate merge [\#2860](https://github.com/pypeclub/OpenPype/pull/2860) -- WebPublisher: Video file was published with one too many frame [\#2858](https://github.com/pypeclub/OpenPype/pull/2858) -- New Publisher: Error dialog got right styles [\#2857](https://github.com/pypeclub/OpenPype/pull/2857) -- General: Fix getattr clalback on dynamic modules [\#2855](https://github.com/pypeclub/OpenPype/pull/2855) -- Nuke: slate resolution to input video resolution [\#2853](https://github.com/pypeclub/OpenPype/pull/2853) -- WebPublisher: Fix username stored in DB [\#2852](https://github.com/pypeclub/OpenPype/pull/2852) -- WebPublisher: Fix wrong number of frames for video file [\#2851](https://github.com/pypeclub/OpenPype/pull/2851) -- Nuke: Fix family test in validate\_write\_legacy to work with stillImage [\#2847](https://github.com/pypeclub/OpenPype/pull/2847) -- Nuke: fix multiple baking profile farm publishing [\#2842](https://github.com/pypeclub/OpenPype/pull/2842) -- Blender: Fixed parameters for FBX export of the camera [\#2840](https://github.com/pypeclub/OpenPype/pull/2840) -- Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832) 
-- Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828) -- Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827) -- Maya: Deformer node ids validation plugin [\#2826](https://github.com/pypeclub/OpenPype/pull/2826) -- Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825) -- Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824) -- General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821) -- Settings UI: Fix "Apply from" action [\#2820](https://github.com/pypeclub/OpenPype/pull/2820) -- Ftrack: Job killer with missing user [\#2819](https://github.com/pypeclub/OpenPype/pull/2819) -- Nuke: Use AVALON\_APP to get value for "app" key [\#2818](https://github.com/pypeclub/OpenPype/pull/2818) -- StandalonePublisher: use dynamic groups in subset names [\#2816](https://github.com/pypeclub/OpenPype/pull/2816) - -**🔀 Refactored code** - -- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876) -- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854) -- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848) -- General: Basic event system [\#2846](https://github.com/pypeclub/OpenPype/pull/2846) -- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839) -- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829) -- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823) - ## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07) [Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.2-nightly.3...3.8.2) diff --git a/README.md b/README.md index 0e450fc48d..b6966adbc4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,7 @@ + +[![All Contributors](https://img.shields.io/badge/all_contributors-26-orange.svg?style=flat-square)](#contributors-) + OpenPype ==== @@ -283,3 +286,54 @@ Running tests To run tests, execute `.\tools\run_tests(.ps1|.sh)`. **Note that it needs existing virtual environment.** + +## Contributors ✨ + +Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Contributor | Contributions |
| :--- | :--- |
| [Milan Kolar](http://pype.club/) | 💻 📖 🚇 💼 🖋 🔍 🚧 📆 👀 🧑‍🏫 💬 |
| [Jakub Ježek](https://www.linkedin.com/in/jakubjezek79) | 💻 📖 🚇 🖋 👀 🚧 🧑‍🏫 📆 💬 |
| [Ondřej Samohel](https://github.com/antirotor) | 💻 📖 🚇 🖋 👀 🚧 🧑‍🏫 📆 💬 |
| [Jakub Trllo](https://github.com/iLLiCiTiT) | 💻 📖 🚇 👀 🚧 💬 |
| [Petr Kalis](https://github.com/kalisp) | 💻 📖 🚇 👀 🚧 💬 |
| [64qam](https://github.com/64qam) | 💻 👀 📖 🚇 📆 🚧 🖋 📓 |
| [Roy Nieterau](http://www.colorbleed.nl/) | 💻 📖 👀 🧑‍🏫 💬 |
| [Toke Jepsen](https://github.com/tokejepsen) | 💻 📖 👀 🧑‍🏫 💬 |
| [Jiri Sindelar](https://github.com/jrsndl) | 💻 👀 📖 🖋 📓 |
| [Simone Barbieri](https://barbierisimone.com/) | 💻 📖 |
| [karimmozilla](http://karimmozilla.xyz/) | 💻 |
| [Allan I. A.](https://github.com/Allan-I) | 💻 |
| [murphy](https://www.linkedin.com/in/mmuurrpphhyy/) | 💻 👀 📓 📖 📆 |
| [Wijnand Koreman](https://github.com/aardschok) | 💻 |
| [Bo Zhou](http://jedimaster.cnblogs.com/) | 💻 |
| [Clément Hector](https://www.linkedin.com/in/clementhector/) | 💻 👀 |
| [David Lai](https://twitter.com/davidlatwe) | 💻 👀 |
| [Derek](https://github.com/2-REC) | 💻 📖 |
| [Gábor Marinov](https://github.com/gabormarinov) | 💻 📖 |
| [icyvapor](https://github.com/icyvapor) | 💻 📖 |
| [Jérôme LORRAIN](https://github.com/jlorrain) | 💻 |
| [David Morris-Oliveros](https://github.com/dmo-j-cube) | 💻 |
| [BenoitConnan](https://github.com/BenoitConnan) | 💻 |
| [Malthaldar](https://github.com/Malthaldar) | 💻 |
| [Sven Neve](http://www.svenneve.com/) | 💻 |
| [zafrs](https://github.com/zafrs) | 💻 |
+ + + + + + +This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! \ No newline at end of file diff --git a/openpype/__init__.py b/openpype/__init__.py index 8b94b2dc3f..810664707a 100644 --- a/openpype/__init__.py +++ b/openpype/__init__.py @@ -1,155 +1,5 @@ -# -*- coding: utf-8 -*- -"""Pype module.""" import os -import platform -import functools -import logging - -from .settings import get_project_settings -from .lib import ( - Anatomy, - filter_pyblish_plugins, - set_plugin_attributes_from_settings, - change_timer_to_current_context, - register_event_callback, -) - -pyblish = avalon = _original_discover = None - -log = logging.getLogger(__name__) PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__)) PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") - -# Global plugin paths -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") - - -def import_wrapper(func): - """Wrap module imports to specific functions.""" - @functools.wraps(func) - def decorated(*args, **kwargs): - global pyblish - global avalon - global _original_discover - if pyblish is None: - from pyblish import api as pyblish - from avalon import api as avalon - - # we are monkey patching `avalon.api.discover()` to allow us to - # load plugin presets on plugins being discovered by avalon. - # Little bit of hacking, but it allows us to add out own features - # without need to modify upstream code. - - _original_discover = avalon.discover - - return func(*args, **kwargs) - - return decorated - - -@import_wrapper -def patched_discover(superclass): - """Patch `avalon.api.discover()`. - - Monkey patched version of :func:`avalon.api.discover()`. It allows - us to load presets on plugins being discovered. 
- """ - # run original discover and get plugins - plugins = _original_discover(superclass) - filtered_plugins = [ - plugin - for plugin in plugins - if issubclass(plugin, superclass) - ] - - set_plugin_attributes_from_settings(filtered_plugins, superclass) - - return filtered_plugins - - -@import_wrapper -def install(): - """Install Pype to Avalon.""" - from pyblish.lib import MessageHandler - from openpype.modules import load_modules - from openpype.pipeline import ( - LegacyCreator, - register_loader_plugin_path, - register_inventory_action, - ) - from avalon import pipeline - - # Make sure modules are loaded - load_modules() - - def modified_emit(obj, record): - """Method replacing `emit` in Pyblish's MessageHandler.""" - record.msg = record.getMessage() - obj.records.append(record) - - MessageHandler.emit = modified_emit - - log.info("Registering global plug-ins..") - pyblish.register_plugin_path(PUBLISH_PATH) - pyblish.register_discovery_filter(filter_pyblish_plugins) - register_loader_plugin_path(LOAD_PATH) - - project_name = os.environ.get("AVALON_PROJECT") - - # Register studio specific plugins - if project_name: - anatomy = Anatomy(project_name) - anatomy.set_root_environments() - avalon.register_root(anatomy.roots) - - project_settings = get_project_settings(project_name) - platform_name = platform.system().lower() - project_plugins = ( - project_settings - .get("global", {}) - .get("project_plugins", {}) - .get(platform_name) - ) or [] - for path in project_plugins: - try: - path = str(path.format(**os.environ)) - except KeyError: - pass - - if not path or not os.path.exists(path): - continue - - pyblish.register_plugin_path(path) - register_loader_plugin_path(path) - avalon.register_plugin_path(LegacyCreator, path) - register_inventory_action(path) - - # apply monkey patched discover to original one - log.info("Patching discovery") - - avalon.discover = patched_discover - pipeline.discover = patched_discover - - register_event_callback("taskChanged", _on_task_change) - - -def _on_task_change(): - change_timer_to_current_context() - - -@import_wrapper -def uninstall(): - """Uninstall Pype from Avalon.""" - from openpype.pipeline import deregister_loader_plugin_path - - log.info("Deregistering global plug-ins..") - pyblish.deregister_plugin_path(PUBLISH_PATH) - pyblish.deregister_discovery_filter(filter_pyblish_plugins) - deregister_loader_plugin_path(LOAD_PATH) - log.info("Global plug-ins unregistred") - - # restore original discover - avalon.discover = _original_discover diff --git a/openpype/hooks/pre_python_2_prelaunch.py b/openpype/hooks/pre_python_2_prelaunch.py deleted file mode 100644 index 84272d2e5d..0000000000 --- a/openpype/hooks/pre_python_2_prelaunch.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -from openpype.lib import PreLaunchHook - - -class PrePython2Vendor(PreLaunchHook): - """Prepend python 2 dependencies for py2 hosts.""" - order = 10 - - def execute(self): - if not self.application.use_python_2: - return - - # Prepare vendor dir path - self.log.info("adding global python 2 vendor") - pype_root = os.getenv("OPENPYPE_REPOS_ROOT") - python_2_vendor = os.path.join( - pype_root, - "openpype", - "vendor", - "python", - "python_2" - ) - - # Add Python 2 modules - python_paths = [ - python_2_vendor - ] - - # Load PYTHONPATH from current launch context - python_path = self.launch_context.env.get("PYTHONPATH") - if python_path: - python_paths.append(python_path) - - # Set new PYTHONPATH to launch context environments - self.launch_context.env["PYTHONPATH"] = 
os.pathsep.join(python_paths) diff --git a/openpype/hosts/aftereffects/api/extension.zxp b/openpype/hosts/aftereffects/api/extension.zxp index 389d74505d..0ed799991e 100644 Binary files a/openpype/hosts/aftereffects/api/extension.zxp and b/openpype/hosts/aftereffects/api/extension.zxp differ diff --git a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml index 668cb3fc24..a39f5781bb 100644 --- a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml +++ b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml @@ -1,5 +1,5 @@ - diff --git a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx index 8f82c9709d..91df433908 100644 --- a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx +++ b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx @@ -417,7 +417,9 @@ function getRenderInfo(){ var file_url = item.file.toString(); return JSON.stringify({ - "file_name": file_url + "file_name": file_url, + "width": render_item.comp.width, + "height": render_item.comp.height }) } diff --git a/openpype/hosts/aftereffects/api/lib.py b/openpype/hosts/aftereffects/api/lib.py index dac6b5d28f..ce4cbf09af 100644 --- a/openpype/hosts/aftereffects/api/lib.py +++ b/openpype/hosts/aftereffects/api/lib.py @@ -6,6 +6,7 @@ import logging from Qt import QtWidgets +from openpype.pipeline import install_host from openpype.lib.remote_publish import headless_publish from openpype.tools.utils import host_tools @@ -22,10 +23,9 @@ def safe_excepthook(*args): def main(*subprocess_args): sys.excepthook = safe_excepthook - import avalon.api from openpype.hosts.aftereffects import api - avalon.api.install(api) + install_host(api) os.environ["OPENPYPE_LOG_NO_COLORS"] = "False" app = QtWidgets.QApplication([]) diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py index e14b8adc8c..b26244f131 100644 --- a/openpype/hosts/aftereffects/api/pipeline.py +++ b/openpype/hosts/aftereffects/api/pipeline.py @@ -4,17 +4,17 @@ import sys from Qt import QtWidgets import pyblish.api -import avalon.api from avalon import io from openpype import lib from openpype.api import Logger from openpype.pipeline import ( - LegacyCreator, - BaseCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, + registered_host, ) import openpype.hosts.aftereffects from openpype.lib import register_event_callback @@ -40,8 +40,7 @@ def install(): pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.register_plugin_path(BaseCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) log.info(PUBLISH_PATH) pyblish.api.register_callback( @@ -54,7 +53,7 @@ def install(): def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) def application_launch(): diff --git a/openpype/hosts/aftereffects/api/ws_stub.py b/openpype/hosts/aftereffects/api/ws_stub.py index 9a6462fcd4..8719a8f46e 100644 --- a/openpype/hosts/aftereffects/api/ws_stub.py +++ b/openpype/hosts/aftereffects/api/ws_stub.py @@ -29,6 +29,8 @@ class AEItem(object): frameRate = attr.ib(default=None) file_name = attr.ib(default=None) 
instance_id = attr.ib(default=None) # New Publisher + width = attr.ib(default=None) + height = attr.ib(default=None) class AfterEffectsServerStub(): @@ -609,7 +611,9 @@ class AfterEffectsServerStub(): d.get('workAreaDuration'), d.get('frameRate'), d.get('file_name'), - d.get("instance_id")) + d.get("instance_id"), + d.get("width"), + d.get("height")) ret.append(item) return ret diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index aee660673b..78d43d259a 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -16,7 +16,7 @@ class RenderCreator(Creator): family = "render" description = "Render creator" - create_allow_context_change = False + create_allow_context_change = True def __init__( self, create_context, system_settings, project_settings, headless=False diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index d64e7abc5f..fa23bf92b0 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -2,7 +2,6 @@ import os import re import tempfile import attr -from copy import deepcopy import pyblish.api @@ -23,11 +22,12 @@ class AERenderInstance(RenderInstance): stagingDir = attr.ib(default=None) app_version = attr.ib(default=None) publish_attributes = attr.ib(default=None) + file_name = attr.ib(default=None) class CollectAERender(abstract_collect_render.AbstractCollectRender): - order = pyblish.api.CollectorOrder + 0.498 + order = pyblish.api.CollectorOrder + 0.405 label = "Collect After Effects Render Layers" hosts = ["aftereffects"] @@ -64,8 +64,6 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): if family not in ["render", "renderLocal"]: # legacy continue - asset_entity = inst.data["assetEntity"] - item_id = inst.data["members"][0] work_area_info = CollectAERender.get_stub().get_work_area( @@ -84,8 +82,11 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): fps = work_area_info.frameRate # TODO add resolution when supported by extension - task_name = (inst.data.get("task") or - list(asset_entity["data"]["tasks"].keys())[0]) # lega + task_name = inst.data.get("task") # legacy + + render_q = CollectAERender.get_stub().get_render_info() + if not render_q: + raise ValueError("No file extension set in Render Queue") subset_name = inst.data["subset"] instance = AERenderInstance( @@ -103,12 +104,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): publish=True, renderer='aerender', name=subset_name, - resolutionWidth=asset_entity["data"].get( - "resolutionWidth", - project_entity["data"]["resolutionWidth"]), - resolutionHeight=asset_entity["data"].get( - "resolutionHeight", - project_entity["data"]["resolutionHeight"]), + resolutionWidth=render_q.width, + resolutionHeight=render_q.height, pixelAspect=1, tileRendering=False, tilesX=0, @@ -119,8 +116,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): toBeRenderedOn='deadline', fps=fps, app_version=app_version, - anatomyData=deepcopy(inst.data["anatomyData"]), - publish_attributes=inst.data.get("publish_attributes") + publish_attributes=inst.data.get("publish_attributes"), + file_name=render_q.file_name ) comp = compositions_by_id.get(int(item_id)) @@ -165,15 +162,11 @@ class 
CollectAERender(abstract_collect_render.AbstractCollectRender): start = render_instance.frameStart end = render_instance.frameEnd - # pull file name from Render Queue Output module - render_q = CollectAERender.get_stub().get_render_info() - if not render_q: - raise ValueError("No file extension set in Render Queue") - _, ext = os.path.splitext(os.path.basename(render_q.file_name)) + _, ext = os.path.splitext(os.path.basename(render_instance.file_name)) base_dir = self._get_output_dir(render_instance) expected_files = [] - if "#" not in render_q.file_name: # single frame (mov)W + if "#" not in render_instance.file_name: # single frame (mov)W path = os.path.join(base_dir, "{}_{}_{}.{}".format( render_instance.asset, render_instance.subset, @@ -216,8 +209,6 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): def _update_for_local(self, instance, project_entity): """Update old saved instances to current publishing format""" - instance.anatomyData["version"] = instance.version - instance.anatomyData["subset"] = instance.subset instance.stagingDir = tempfile.mkdtemp() instance.projectEntity = project_entity fam = "render.local" diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py index 93c7a448c6..64a81b58eb 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py @@ -1,6 +1,7 @@ import os from avalon import api import pyblish.api +from openpype.lib import get_subset_name_with_asset_doc class CollectWorkfile(pyblish.api.ContextPlugin): @@ -33,19 +34,20 @@ class CollectWorkfile(pyblish.api.ContextPlugin): "stagingDir": staging_dir, } + if not instance.data.get("representations"): + instance.data["representations"] = [] instance.data["representations"].append(representation) + instance.data["publish"] = instance.data["active"] # for DL + def _get_new_instance(self, context, scene_file): task = api.Session["AVALON_TASK"] version = context.data["version"] asset_entity = context.data["assetEntity"] project_entity = context.data["projectEntity"] - # workfile instance - family = "workfile" - subset = family + task.capitalize() # TOOD use method - instance_data = { + "active": True, "asset": asset_entity["name"], "task": task, "frameStart": asset_entity["data"]["frameStart"], @@ -61,16 +63,31 @@ class CollectWorkfile(pyblish.api.ContextPlugin): project_entity["data"]["resolutionHeight"]), "pixelAspect": 1, "step": 1, - "version": version, + "version": version + } + + # workfile instance + family = "workfile" + subset = get_subset_name_with_asset_doc( + family, + "", + context.data["anatomyData"]["task"]["name"], + context.data["assetEntity"], + context.data["anatomyData"]["project"]["name"], + host_name=context.data["hostName"] + ) + # Create instance + instance = context.create_instance(subset) + + # creating instance data + instance.data.update({ "subset": subset, "label": scene_file, "family": family, "families": [family], "representations": list() - } + }) - # Create instance - instance = context.create_instance(subset) instance.data.update(instance_data) return instance diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml index 36fa90456e..0591020ed3 100644 --- a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml +++ 
b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml @@ -12,6 +12,8 @@ One of the settings in a scene doesn't match to asset settings in database. ### How to repair? Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there. + + In the scene, right-click the published composition and choose `Composition Settings`. ### __Detailed Info__ (optional) diff --git a/openpype/hosts/blender/__init__.py b/openpype/hosts/blender/__init__.py index 3081d3c9ba..0f27882c7e 100644 --- a/openpype/hosts/blender/__init__.py +++ b/openpype/hosts/blender/__init__.py @@ -29,12 +29,12 @@ def add_implementation_envs(env, _app): env.get("OPENPYPE_BLENDER_USER_SCRIPTS") or "" ) for path in openpype_blender_user_scripts.split(os.pathsep): - if path and os.path.exists(path): + if path: previous_user_scripts.add(os.path.normpath(path)) blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or "" for path in blender_user_scripts.split(os.pathsep): - if path and os.path.exists(path): + if path: previous_user_scripts.add(os.path.normpath(path)) # Remove implementation path from user script paths as is set to diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py index 8c580cf214..0ea579970e 100644 --- a/openpype/hosts/blender/api/pipeline.py +++ b/openpype/hosts/blender/api/pipeline.py @@ -14,10 +14,12 @@ import avalon.api from avalon import io, schema from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, + uninstall_host, ) from openpype.api import Logger from openpype.lib import ( @@ -54,7 +56,7 @@ def install(): pyblish.api.register_plugin_path(str(PUBLISH_PATH)) register_loader_plugin_path(str(LOAD_PATH)) - avalon.api.register_plugin_path(LegacyCreator, str(CREATE_PATH)) + register_creator_plugin_path(str(CREATE_PATH)) lib.append_user_scripts() @@ -76,7 +78,7 @@ def uninstall(): pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) deregister_loader_plugin_path(str(LOAD_PATH)) - avalon.api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH)) + deregister_creator_plugin_path(str(CREATE_PATH)) if not IS_HEADLESS: ops.unregister() @@ -208,11 +210,10 @@ def reload_pipeline(*args): """ - avalon.api.uninstall() + uninstall_host() for module in ( "avalon.io", - "avalon.lib", "avalon.pipeline", "avalon.api", ): diff --git a/openpype/hosts/blender/blender_addon/startup/init.py b/openpype/hosts/blender/blender_addon/startup/init.py index e43373bc6c..13a4b8a7a1 100644 --- a/openpype/hosts/blender/blender_addon/startup/init.py +++ b/openpype/hosts/blender/blender_addon/startup/init.py @@ -1,4 +1,4 @@ -from avalon import pipeline +from openpype.pipeline import install_host from openpype.hosts.blender import api -pipeline.install(api) +install_host(api) diff --git a/openpype/hosts/celaction/api/cli.py b/openpype/hosts/celaction/api/cli.py index bc1e3eaf89..8c7b3a2e74 100644 --- a/openpype/hosts/celaction/api/cli.py +++ b/openpype/hosts/celaction/api/cli.py @@ -3,8 +3,6 @@ import sys import copy import argparse -from avalon import io - import pyblish.api import pyblish.util @@ -13,6 +11,8 @@ import openpype import openpype.hosts.celaction from openpype.hosts.celaction import api as celaction from openpype.tools.utils import host_tools +from openpype.pipeline import install_openpype_plugins + log = Logger().get_logger("Celaction_cli_publisher") @@ -21,9 +21,6 @@ publish_host
= "celaction" HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.celaction.__file__)) PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") def cli(): @@ -74,7 +71,7 @@ def main(): _prepare_publish_environments() # Registers pype's Global pyblish plugins - openpype.install() + install_openpype_plugins() if os.path.exists(PUBLISH_PATH): log.info(f"Registering path: {PUBLISH_PATH}") diff --git a/openpype/hosts/flame/api/__init__.py b/openpype/hosts/flame/api/__init__.py index f210c27f87..2c461e5f16 100644 --- a/openpype/hosts/flame/api/__init__.py +++ b/openpype/hosts/flame/api/__init__.py @@ -11,10 +11,8 @@ from .constants import ( from .lib import ( CTX, FlameAppFramework, - get_project_manager, get_current_project, get_current_sequence, - create_bin, create_segment_data_marker, get_segment_data_marker, set_segment_data_marker, @@ -29,7 +27,10 @@ from .lib import ( get_frame_from_filename, get_padding_from_filename, maintained_object_duplication, - get_clip_segment + maintained_temp_file_path, + get_clip_segment, + get_batch_group_from_desktop, + MediaInfoFile ) from .utils import ( setup, @@ -56,7 +57,6 @@ from .plugin import ( PublishableClip, ClipLoader, OpenClipSolver - ) from .workio import ( open_file, @@ -71,6 +71,10 @@ from .render_utils import ( get_preset_path_by_xml_name, modify_preset_file ) +from .batch_utils import ( + create_batch_group, + create_batch_group_conent +) __all__ = [ # constants @@ -83,10 +87,8 @@ __all__ = [ # lib "CTX", "FlameAppFramework", - "get_project_manager", "get_current_project", "get_current_sequence", - "create_bin", "create_segment_data_marker", "get_segment_data_marker", "set_segment_data_marker", @@ -101,7 +103,10 @@ __all__ = [ "get_frame_from_filename", "get_padding_from_filename", "maintained_object_duplication", + "maintained_temp_file_path", "get_clip_segment", + "get_batch_group_from_desktop", + "MediaInfoFile", # pipeline "install", @@ -142,5 +147,9 @@ __all__ = [ # render utils "export_clip", "get_preset_path_by_xml_name", - "modify_preset_file" + "modify_preset_file", + + # batch utils + "create_batch_group", + "create_batch_group_conent" ] diff --git a/openpype/hosts/flame/api/batch_utils.py b/openpype/hosts/flame/api/batch_utils.py new file mode 100644 index 0000000000..9d419a4a90 --- /dev/null +++ b/openpype/hosts/flame/api/batch_utils.py @@ -0,0 +1,151 @@ +import flame + + +def create_batch_group( + name, + frame_start, + frame_duration, + update_batch_group=None, + **kwargs +): + """Create Batch Group in active project's Desktop + + Args: + name (str): name of batch group to be created + frame_start (int): start frame of batch + frame_end (int): end frame of batch + update_batch_group (PyBatch)[optional]: batch group to update + + Return: + PyBatch: active flame batch group + """ + # make sure some batch obj is present + batch_group = update_batch_group or flame.batch + + schematic_reels = kwargs.get("shematic_reels") or ['LoadedReel1'] + shelf_reels = kwargs.get("shelf_reels") or ['ShelfReel1'] + + handle_start = kwargs.get("handleStart") or 0 + handle_end = kwargs.get("handleEnd") or 0 + + frame_start -= handle_start + frame_duration += handle_start + handle_end + + if not update_batch_group: + # Create batch group with name, start_frame value, duration value, + # set of schematic reel names, set of shelf reel names + 
batch_group = batch_group.create_batch_group( + name, + start_frame=frame_start, + duration=frame_duration, + reels=schematic_reels, + shelf_reels=shelf_reels + ) + else: + batch_group.name = name + batch_group.start_frame = frame_start + batch_group.duration = frame_duration + + # add reels to batch group + _add_reels_to_batch_group( + batch_group, schematic_reels, shelf_reels) + + # TODO: also update write node if there is any + # TODO: also update loaders to start from correct frameStart + + if kwargs.get("switch_batch_tab"): + # use this command to switch to the batch tab + batch_group.go_to() + + return batch_group + + +def _add_reels_to_batch_group(batch_group, reels, shelf_reels): + # update or create defined reels + # helper variables + reel_names = [ + r.name.get_value() + for r in batch_group.reels + ] + shelf_reel_names = [ + r.name.get_value() + for r in batch_group.shelf_reels + ] + # add schematic reels + for _r in reels: + if _r in reel_names: + continue + batch_group.create_reel(_r) + + # add shelf reels + for _sr in shelf_reels: + if _sr in shelf_reel_names: + continue + batch_group.create_shelf_reel(_sr) + + +def create_batch_group_conent(batch_nodes, batch_links, batch_group=None): + """Creating batch group with links + + Args: + batch_nodes (list of dict): each dict is node definition + batch_links (list of dict): each dict is link definition + batch_group (PyBatch, optional): batch group. Defaults to None. + + Return: + dict: all batch nodes {name or id: PyNode} + """ + # make sure some batch obj is present + batch_group = batch_group or flame.batch + all_batch_nodes = { + b.name.get_value(): b + for b in batch_group.nodes + } + for node in batch_nodes: + # NOTE: node_props needs to be ideally OrederDict type + node_id, node_type, node_props = ( + node["id"], node["type"], node["properties"]) + + # get node name for checking if exists + node_name = node_props.pop("name", None) or node_id + + if all_batch_nodes.get(node_name): + # update existing batch node + batch_node = all_batch_nodes[node_name] + else: + # create new batch node + batch_node = batch_group.create_node(node_type) + + # set name + batch_node.name.set_value(node_name) + + # set attributes found in node props + for key, value in node_props.items(): + if not hasattr(batch_node, key): + continue + setattr(batch_node, key, value) + + # add created node for possible linking + all_batch_nodes[node_id] = batch_node + + # link nodes to each other + for link in batch_links: + _from_n, _to_n = link["from_node"], link["to_node"] + + # check if all linking nodes are available + if not all([ + all_batch_nodes.get(_from_n["id"]), + all_batch_nodes.get(_to_n["id"]) + ]): + continue + + # link nodes in defined link + batch_group.connect_nodes( + all_batch_nodes[_from_n["id"]], _from_n["connector"], + all_batch_nodes[_to_n["id"]], _to_n["connector"] + ) + + # sort batch nodes + batch_group.organize() + + return all_batch_nodes diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py index 74d9e7607a..c7c444c1fb 100644 --- a/openpype/hosts/flame/api/lib.py +++ b/openpype/hosts/flame/api/lib.py @@ -3,7 +3,12 @@ import os import re import json import pickle +import tempfile +import itertools import contextlib +import xml.etree.cElementTree as cET +from copy import deepcopy +from xml.etree import ElementTree as ET from pprint import pformat from .constants import ( MARKER_COLOR, @@ -12,12 +17,14 @@ from .constants import ( COLOR_MAP, MARKER_PUBLISH_DEFAULT ) -from openpype.api import Logger -log = 
Logger.get_logger(__name__) +import openpype.api as openpype + +log = openpype.Logger.get_logger(__name__) FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]") + class CTX: # singleton used for passing data between api modules app_framework = None @@ -226,16 +233,6 @@ class FlameAppFramework(object): return True -def get_project_manager(): - # TODO: get_project_manager - return - - -def get_media_storage(): - # TODO: get_media_storage - return - - def get_current_project(): import flame return flame.project.current_project @@ -265,11 +262,6 @@ def get_current_sequence(selection): return process_timeline -def create_bin(name, root=None): - # TODO: create_bin - return - - def rescan_hooks(): import flame try: @@ -279,6 +271,7 @@ def rescan_hooks(): def get_metadata(project_name, _log=None): + # TODO: can be replaced by MediaInfoFile class method from adsk.libwiretapPythonClientAPI import ( WireTapClient, WireTapServerHandle, @@ -538,9 +531,17 @@ def get_segment_attributes(segment): # head and tail with forward compatibility if segment.head: - clip_data["segment_head"] = int(segment.head) + # `infinite` can be also returned + if isinstance(segment.head, str): + clip_data["segment_head"] = 0 + else: + clip_data["segment_head"] = int(segment.head) if segment.tail: - clip_data["segment_tail"] = int(segment.tail) + # `infinite` can be also returned + if isinstance(segment.tail, str): + clip_data["segment_tail"] = 0 + else: + clip_data["segment_tail"] = int(segment.tail) # add all available shot tokens shot_tokens = _get_shot_tokens_values(segment, [ @@ -695,6 +696,25 @@ def maintained_object_duplication(item): flame.delete(duplicate) +@contextlib.contextmanager +def maintained_temp_file_path(suffix=None): + _suffix = suffix or "" + + try: + # Store dumped json to temporary file + temporary_file = tempfile.mktemp( + suffix=_suffix, prefix="flame_maintained_") + yield temporary_file.replace("\\", "/") + + except IOError as _error: + raise IOError( + "Not able to create temp json file: {}".format(_error)) + + finally: + # Remove the temporary json + os.remove(temporary_file) + + def get_clip_segment(flame_clip): name = flame_clip.name.get_value() version = flame_clip.versions[0] @@ -708,3 +728,213 @@ def get_clip_segment(flame_clip): raise ValueError("Clip `{}` has too many segments!".format(name)) return segments[0] + + +def get_batch_group_from_desktop(name): + project = get_current_project() + project_desktop = project.current_workspace.desktop + + for bgroup in project_desktop.batch_groups: + if bgroup.name.get_value() in name: + return bgroup + + +class MediaInfoFile(object): + """Class to get media info file clip data + + Raises: + IOError: MEDIA_SCRIPT_PATH path doesn't exists + TypeError: Not able to generate clip xml data file + ET.ParseError: Missing clip in xml clip data + IOError: Not able to save xml clip data to file + + Attributes: + str: `MEDIA_SCRIPT_PATH` path to flame binary + logging.Logger: `log` logger + + TODO: add method for getting metadata to dict + """ + MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info" + + log = log + + _clip_data = None + _start_frame = None + _fps = None + _drop_mode = None + + def __init__(self, path, **kwargs): + + # replace log if any + if kwargs.get("logger"): + self.log = kwargs["logger"] + + # test if `dl_get_media_info` paht exists + self._validate_media_script_path() + + # derivate other feed variables + self.feed_basename = os.path.basename(path) + self.feed_dir = os.path.dirname(path) + self.feed_ext = 
os.path.splitext(self.feed_basename)[1][1:].lower() + + with maintained_temp_file_path(".clip") as tmp_path: + self.log.info("Temp File: {}".format(tmp_path)) + self._generate_media_info_file(tmp_path) + + # get clip data and reduce it to a single clip + # if the xml contains multiple clips + xml_data = self._make_single_clip_media_info(tmp_path) + self.log.debug("xml_data: {}".format(xml_data)) + self.log.debug("type: {}".format(type(xml_data))) + + # get all time related data and assign them + self._get_time_info_from_origin(xml_data) + self.log.debug("start_frame: {}".format(self.start_frame)) + self.log.debug("fps: {}".format(self.fps)) + self.log.debug("drop frame: {}".format(self.drop_mode)) + self.clip_data = xml_data + + @property + def clip_data(self): + """Clip's xml clip data + + Returns: + xml.etree.ElementTree: xml data + """ + return self._clip_data + + @clip_data.setter + def clip_data(self, data): + self._clip_data = data + + @property + def start_frame(self): + """ Clip's starting frame found in timecode + + Returns: + int: number of frames + """ + return self._start_frame + + @start_frame.setter + def start_frame(self, number): + self._start_frame = int(number) + + @property + def fps(self): + """ Clip's frame rate + + Returns: + float: frame rate + """ + return self._fps + + @fps.setter + def fps(self, fl_number): + self._fps = float(fl_number) + + @property + def drop_mode(self): + """ Clip's drop frame mode + + Returns: + str: drop frame flag + """ + return self._drop_mode + + @drop_mode.setter + def drop_mode(self, text): + self._drop_mode = str(text)
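A usage sketch for the accessors above, in line with how create_otio_clip consumes this class later in the PR; the path and returned values are illustrative only, and the class assumes Flame's dl_get_media_info binary exists:

# media_info = MediaInfoFile("/path/to/plate.0001.exr")
# media_info.start_frame  -> e.g. 86400 (from startTimecode/nbTicks)
# media_info.fps          -> e.g. 24.0 (from startTimecode/rate)
# media_info.drop_mode    -> e.g. "NDF" (from startTimecode/dropMode)

+ + def _validate_media_script_path(self): + if not os.path.isfile(self.MEDIA_SCRIPT_PATH): + raise IOError("Media script does not exist: `{}`".format( + self.MEDIA_SCRIPT_PATH)) + + def _generate_media_info_file(self, fpath): + # Create cmd arguments for getting the xml media info file + cmd_args = [ + self.MEDIA_SCRIPT_PATH, + "-e", self.feed_ext, + "-o", fpath, + self.feed_dir + ] + + try: + # execute creation of clip xml template data + openpype.run_subprocess(cmd_args) + except TypeError as error: + raise TypeError( + "Error creating `{}` due: {}".format(fpath, error)) + + def _make_single_clip_media_info(self, fpath): + with open(fpath) as f: + lines = f.readlines() + _added_root = itertools.chain( + "<root>", deepcopy(lines)[1:], "</root>") + new_root = ET.fromstringlist(_added_root) + + # find the clip which is matching to my input name + xml_clips = new_root.findall("clip") + matching_clip = None + for xml_clip in xml_clips: + if xml_clip.find("name").text in self.feed_basename: + matching_clip = xml_clip + + if matching_clip is None: + # return warning there is missing clip + raise ET.ParseError( + "Missing clip in `{}`. 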
Available clips {}".format( + self.feed_basename, [ + xml_clip.find("name").text + for xml_clip in xml_clips + ] + )) + + return matching_clip + + def _get_time_info_from_origin(self, xml_data): + try: + for out_track in xml_data.iter('track'): + for out_feed in out_track.iter('feed'): + # start frame + out_feed_nb_ticks_obj = out_feed.find( + 'startTimecode/nbTicks') + self.start_frame = out_feed_nb_ticks_obj.text + + # fps + out_feed_fps_obj = out_feed.find( + 'startTimecode/rate') + self.fps = out_feed_fps_obj.text + + # drop frame mode + out_feed_drop_mode_obj = out_feed.find( + 'startTimecode/dropMode') + self.drop_mode = out_feed_drop_mode_obj.text + break + else: + continue + except Exception as msg: + self.log.warning(msg) + + @staticmethod + def write_clip_data_to_file(fpath, xml_element_data): + """ Write xml element of clip data to file + + Args: + fpath (string): file path + xml_element_data (xml.etree.ElementTree.Element): xml data + + Raises: + IOError: If data could not be written to file + """ + try: + # save it as new file + tree = cET.ElementTree(xml_element_data) + tree.write( + fpath, xml_declaration=True, + method='xml', encoding='UTF-8' + ) + except IOError as error: + raise IOError( + "Not able to write data to file: {}".format(error)) diff --git a/openpype/hosts/flame/api/pipeline.py b/openpype/hosts/flame/api/pipeline.py index ca3f38c1bc..da44be1b15 100644 --- a/openpype/hosts/flame/api/pipeline.py +++ b/openpype/hosts/flame/api/pipeline.py @@ -3,14 +3,14 @@ Basic avalon integration """ import os import contextlib -from avalon import api as avalon from pyblish import api as pyblish from openpype.api import Logger from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) from .lib import ( @@ -37,7 +37,7 @@ def install(): pyblish.register_host("flame") pyblish.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) log.info("OpenPype Flame plug-ins registred ...") # register callback for switching publishable @@ -52,7 +52,7 @@ def uninstall(): log.info("Deregistering Flame plug-ins..") pyblish.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) # register callback for switching publishable pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py index 4c9d3c5383..c87445fdd3 100644 --- a/openpype/hosts/flame/api/plugin.py +++ b/openpype/hosts/flame/api/plugin.py @@ -1,24 +1,19 @@ import os import re import shutil -import sys -from xml.etree import ElementTree as ET -import six -import qargparse -from Qt import QtWidgets, QtCore -import openpype.api as openpype -from openpype.pipeline import ( - LegacyCreator, - LoaderPlugin, -) -from openpype import style -from . import ( - lib as flib, - pipeline as fpipeline, - constants -) - from copy import deepcopy +from xml.etree import ElementTree as ET + +from Qt import QtCore, QtWidgets + +import openpype.api as openpype +import qargparse +from openpype import style +from openpype.pipeline import LegacyCreator, LoaderPlugin + +from . import constants +from . import lib as flib +from . 
import pipeline as fpipeline log = openpype.Logger.get_logger(__name__) @@ -660,8 +655,8 @@ class PublishableClip: # Publishing plugin functions -# Loader plugin functions +# Loader plugin functions class ClipLoader(LoaderPlugin): """A basic clip loader for Flame @@ -681,50 +676,52 @@ class ClipLoader(LoaderPlugin): ] -class OpenClipSolver: - media_script_path = "/opt/Autodesk/mio/current/dl_get_media_info" - tmp_name = "_tmp.clip" - tmp_file = None +class OpenClipSolver(flib.MediaInfoFile): create_new_clip = False - out_feed_nb_ticks = None - out_feed_fps = None - out_feed_drop_mode = None - log = log def __init__(self, openclip_file_path, feed_data): - # test if media script paht exists - self._validate_media_script_path() + self.out_file = openclip_file_path # new feed variables: - feed_path = feed_data["path"] + feed_path = feed_data.pop("path") + + # initialize parent class + super(OpenClipSolver, self).__init__( + feed_path, + **feed_data + ) + + # get other metadata self.feed_version_name = feed_data["version"] self.feed_colorspace = feed_data.get("colorspace") - - if feed_data.get("logger"): - self.log = feed_data["logger"] + self.log.debug("feed_version_name: {}".format(self.feed_version_name)) # derivate other feed variables self.feed_basename = os.path.basename(feed_path) self.feed_dir = os.path.dirname(feed_path) self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower() + self.log.debug("feed_ext: {}".format(self.feed_ext)) + self.log.debug("out_file: {}".format(self.out_file)) + if not self._is_valid_tmp_file(self.out_file): self.create_new_clip = True + def _is_valid_tmp_file(self, file): + # check if file exists + if os.path.isfile(file): + # test also if file is not empty + with open(file) as f: + lines = f.readlines() - self.log.info("Temp File: {}".format(self.tmp_file)) + if len(lines) > 2: + return True + + # file is probably corrupted + os.remove(file) + return False def make(self): - self._generate_media_info_file() if self.create_new_clip: # New openClip @@ -732,42 +729,17 @@ class OpenClipSolver: else: self._update_open_clip() - def _validate_media_script_path(self): - if not os.path.isfile(self.media_script_path): - raise IOError("Media Scirpt does not exist: `{}`".format( - self.media_script_path)) - - def _generate_media_info_file(self): - # Create cmd arguments for gettig xml file info file - cmd_args = [ - self.media_script_path, - "-e", self.feed_ext, - "-o", self.tmp_file, - self.feed_dir - ] - - # execute creation of clip xml template data - try: - openpype.run_subprocess(cmd_args) - except TypeError: - self.log.error("Error creating self.tmp_file") - six.reraise(*sys.exc_info()) - - def _clear_tmp_file(self): - if os.path.isfile(self.tmp_file): - os.remove(self.tmp_file) - def _clear_handler(self, xml_object): for handler in xml_object.findall("./handler"): - self.log.debug("Handler found") + self.log.info("Handler found") xml_object.remove(handler) def _create_new_open_clip(self): self.log.info("Building new openClip") + self.log.debug(">> self.clip_data: {}".format(self.clip_data)) - tmp_xml = ET.parse(self.tmp_file) - - tmp_xml_feeds = tmp_xml.find('tracks/track/feeds') + # clip data coming from MediaInfoFile + tmp_xml_feeds =
self.clip_data.find('tracks/track/feeds') tmp_xml_feeds.set('currentVersion', self.feed_version_name) for tmp_feed in tmp_xml_feeds: tmp_feed.set('vuid', self.feed_version_name) @@ -778,46 +750,48 @@ class OpenClipSolver: self._clear_handler(tmp_feed) - tmp_xml_versions_obj = tmp_xml.find('versions') + tmp_xml_versions_obj = self.clip_data.find('versions') tmp_xml_versions_obj.set('currentVersion', self.feed_version_name) for xml_new_version in tmp_xml_versions_obj: xml_new_version.set('uid', self.feed_version_name) xml_new_version.set('type', 'version') - xml_data = self._fix_xml_data(tmp_xml) + self._clear_handler(self.clip_data) self.log.info("Adding feed version: {}".format(self.feed_basename)) - self._write_result_xml_to_file(xml_data) - - self.log.info("openClip Updated: {}".format(self.tmp_file)) + self.write_clip_data_to_file(self.out_file, self.clip_data) def _update_open_clip(self): self.log.info("Updating openClip ..") out_xml = ET.parse(self.out_file) - tmp_xml = ET.parse(self.tmp_file) + out_xml = out_xml.getroot() self.log.debug(">> out_xml: {}".format(out_xml)) - self.log.debug(">> tmp_xml: {}".format(tmp_xml)) + self.log.debug(">> self.clip_data: {}".format(self.clip_data)) # Get new feed from tmp file - tmp_xml_feed = tmp_xml.find('tracks/track/feeds/feed') + tmp_xml_feed = self.clip_data.find('tracks/track/feeds/feed') self._clear_handler(tmp_xml_feed) - self._get_time_info_from_origin(out_xml) - if self.out_feed_fps: + # update fps from MediaInfoFile class + if self.fps: tmp_feed_fps_obj = tmp_xml_feed.find( "startTimecode/rate") - tmp_feed_fps_obj.text = self.out_feed_fps - if self.out_feed_nb_ticks: + tmp_feed_fps_obj.text = str(self.fps) + + # update start_frame from MediaInfoFile class + if self.start_frame: tmp_feed_nb_ticks_obj = tmp_xml_feed.find( "startTimecode/nbTicks") - tmp_feed_nb_ticks_obj.text = self.out_feed_nb_ticks - if self.out_feed_drop_mode: + tmp_feed_nb_ticks_obj.text = str(self.start_frame) + + # update drop_mode from MediaInfoFile class + if self.drop_mode: tmp_feed_drop_mode_obj = tmp_xml_feed.find( "startTimecode/dropMode") - tmp_feed_drop_mode_obj.text = self.out_feed_drop_mode + tmp_feed_drop_mode_obj.text = str(self.drop_mode) new_path_obj = tmp_xml_feed.find( "spans/span/path") @@ -850,7 +824,7 @@ class OpenClipSolver: "version", {"type": "version", "uid": self.feed_version_name}) out_xml_versions_obj.insert(0, new_version_obj) - xml_data = self._fix_xml_data(out_xml) + self._clear_handler(out_xml) # fist create backup self._create_openclip_backup_file(self.out_file) @@ -858,30 +832,9 @@ class OpenClipSolver: self.log.info("Adding feed version: {}".format( self.feed_version_name)) - self._write_result_xml_to_file(xml_data) + self.write_clip_data_to_file(self.out_file, out_xml) - self.log.info("openClip Updated: {}".format(self.out_file)) - - self._clear_tmp_file() - - def _get_time_info_from_origin(self, xml_data): - try: - for out_track in xml_data.iter('track'): - for out_feed in out_track.iter('feed'): - out_feed_nb_ticks_obj = out_feed.find( - 'startTimecode/nbTicks') - self.out_feed_nb_ticks = out_feed_nb_ticks_obj.text - out_feed_fps_obj = out_feed.find( - 'startTimecode/rate') - self.out_feed_fps = out_feed_fps_obj.text - out_feed_drop_mode_obj = out_feed.find( - 'startTimecode/dropMode') - self.out_feed_drop_mode = out_feed_drop_mode_obj.text - break - else: - continue - except Exception as msg: - self.log.warning(msg) + self.log.debug("OpenClip Updated: {}".format(self.out_file)) def _feed_exists(self, xml_data, path): # loop all 
available feed paths and check if @@ -892,15 +845,6 @@ class OpenClipSolver: "Not appending file as it already is in .clip file") return True - def _fix_xml_data(self, xml_data): - xml_root = xml_data.getroot() - self._clear_handler(xml_root) - return ET.tostring(xml_root).decode('utf-8') - - def _write_result_xml_to_file(self, xml_data): - with open(self.out_file, "w") as f: - f.write(xml_data) - def _create_openclip_backup_file(self, file): bck_file = "{}.bak".format(file) # if backup does not exist diff --git a/openpype/hosts/flame/api/scripts/wiretap_com.py b/openpype/hosts/flame/api/scripts/wiretap_com.py index ee906c2608..4825ff4386 100644 --- a/openpype/hosts/flame/api/scripts/wiretap_com.py +++ b/openpype/hosts/flame/api/scripts/wiretap_com.py @@ -185,7 +185,9 @@ class WireTapCom(object): exit_code = subprocess.call( project_create_cmd, - cwd=os.path.expanduser('~')) + cwd=os.path.expanduser('~'), + preexec_fn=_subprocess_preexec_fn + ) if exit_code != 0: RuntimeError("Cannot create project in flame db") @@ -254,7 +256,7 @@ class WireTapCom(object): filtered_users = [user for user in used_names if user_name in user] if filtered_users: - # todo: need to find lastly created following regex pattern for + # TODO: need to find the last created one following regex pattern for # date used in name return filtered_users.pop() @@ -422,7 +424,13 @@ class WireTapCom(object): color_policy = color_policy or "Legacy" # check if the colour policy in custom dir - if not os.path.exists(color_policy): + if "/" in color_policy: + # if a full path was used (unlikely), strip the prefix first + color_policy = color_policy.replace("/syncolor/policies/", "") + # expecting input is `Shared/NameOfPolicy` + color_policy = "/syncolor/policies/{}".format( + color_policy) + else: color_policy = "/syncolor/policies/Autodesk/{}".format( color_policy) @@ -442,7 +450,9 @@ class WireTapCom(object): exit_code = subprocess.call( project_colorspace_cmd, - cwd=os.path.expanduser('~')) + cwd=os.path.expanduser('~'), + preexec_fn=_subprocess_preexec_fn + ) if exit_code != 0: RuntimeError("Cannot set colorspace {} on project {}".format( @@ -450,6 +460,15 @@ )) +def _subprocess_preexec_fn(): + """ Helper function for subprocess calls + + Starts a new process group and clears the umask so that + created files get 0777 permissions + """ + os.setpgrp() + os.umask(0o000) + +
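For clarity, a few illustrative inputs for the colour policy normalization above, with the Wiretap paths they resolve to (policy names are made up):

# "Shared/NameOfPolicy"                     -> "/syncolor/policies/Shared/NameOfPolicy"
# "/syncolor/policies/Shared/NameOfPolicy"  -> "/syncolor/policies/Shared/NameOfPolicy"
# "ACES 1.1" (no slash)                     -> "/syncolor/policies/Autodesk/ACES 1.1"

if __name__ == "__main__": # get json exchange data json_path = sys.argv[-1] diff --git a/openpype/hosts/flame/otio/flame_export.py b/openpype/hosts/flame/otio/flame_export.py index 8c240fc9d5..4fe05ec1d8 100644 --- a/openpype/hosts/flame/otio/flame_export.py +++ b/openpype/hosts/flame/otio/flame_export.py @@ -11,8 +11,6 @@ from . 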
import utils import flame from pprint import pformat -reload(utils) # noqa - log = logging.getLogger(__name__) @@ -260,24 +258,15 @@ def create_otio_markers(otio_item, item): otio_item.markers.append(otio_marker) -def create_otio_reference(clip_data): +def create_otio_reference(clip_data, fps=None): metadata = _get_metadata(clip_data) # get file info for path and start frame frame_start = 0 - fps = CTX.get_fps() + fps = fps or CTX.get_fps() path = clip_data["fpath"] - reel_clip = None - match_reel_clip = [ - clip for clip in CTX.clips - if clip["fpath"] == path - ] - if match_reel_clip: - reel_clip = match_reel_clip.pop() - fps = reel_clip["fps"] - file_name = os.path.basename(path) file_head, extension = os.path.splitext(file_name) @@ -339,13 +328,22 @@ def create_otio_reference(clip_data): def create_otio_clip(clip_data): + from openpype.hosts.flame.api import MediaInfoFile + segment = clip_data["PySegment"] - # create media reference - media_reference = create_otio_reference(clip_data) - # calculate source in - first_frame = utils.get_frame_from_filename(clip_data["fpath"]) or 0 + media_info = MediaInfoFile(clip_data["fpath"]) + media_timecode_start = media_info.start_frame + media_fps = media_info.fps + + # create media reference + media_reference = create_otio_reference(clip_data, media_fps) + + # define first frame + first_frame = media_timecode_start or utils.get_frame_from_filename( + clip_data["fpath"]) or 0 + source_in = int(clip_data["source_in"]) - int(first_frame) # creatae source range @@ -378,38 +376,6 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps): ) -def get_clips_in_reels(project): - output_clips = [] - project_desktop = project.current_workspace.desktop - - for reel_group in project_desktop.reel_groups: - for reel in reel_group.reels: - for clip in reel.clips: - clip_data = { - "PyClip": clip, - "fps": float(str(clip.frame_rate)[:-4]) - } - - attrs = [ - "name", "width", "height", - "ratio", "sample_rate", "bit_depth" - ] - - for attr in attrs: - val = getattr(clip, attr) - clip_data[attr] = val - - version = clip.versions[-1] - track = version.tracks[-1] - for segment in track.segments: - segment_data = _get_segment_attributes(segment) - clip_data.update(segment_data) - - output_clips.append(clip_data) - - return output_clips - - def _get_colourspace_policy(): output = {} @@ -493,9 +459,6 @@ def _get_shot_tokens_values(clip, tokens): old_value = None output = {} - if not clip.shot_name: - return output - old_value = clip.shot_name.get_value() for token in tokens: @@ -513,15 +476,21 @@ def _get_shot_tokens_values(clip, tokens): def _get_segment_attributes(segment): - # log.debug(dir(segment)) - if str(segment.name)[1:-1] == "": + log.debug("Segment name|hidden: {}|{}".format( + segment.name.get_value(), segment.hidden + )) + if ( + segment.name.get_value() == "" + or segment.hidden.get_value() + ): return None # Add timeline segment to tree clip_data = { "segment_name": segment.name.get_value(), "segment_comment": segment.comment.get_value(), + "shot_name": segment.shot_name.get_value(), "tape_name": segment.tape_name, "source_name": segment.source_name, "fpath": segment.file_path, @@ -529,9 +498,10 @@ def _get_segment_attributes(segment): } # add all available shot tokens - shot_tokens = _get_shot_tokens_values(segment, [ - "", "", "", "", - ]) + shot_tokens = _get_shot_tokens_values( + segment, + ["", "", "", ""] + ) clip_data.update(shot_tokens) # populate shot source metadata @@ -561,11 +531,6 @@ def create_otio_timeline(sequence): 
log.info(sequence.attributes) CTX.project = get_current_flame_project() - CTX.clips = get_clips_in_reels(CTX.project) - - log.debug(pformat( - CTX.clips - )) # get current timeline CTX.set_fps( @@ -583,8 +548,13 @@ # create otio tracks and clips for ver in sequence.versions: for track in ver.tracks: - if len(track.segments) == 0 and track.hidden: - return None + # skip all empty tracks + # or hidden tracks + if ( + len(track.segments) == 0 + or track.hidden.get_value() + ): + continue # convert track to otio otio_track = create_otio_track( @@ -597,11 +567,7 @@ continue all_segments.append(clip_data) - segments_ordered = { - itemindex: clip_data - for itemindex, clip_data in enumerate( - all_segments) - } + segments_ordered = dict(enumerate(all_segments)) log.debug("_ segments_ordered: {}".format( pformat(segments_ordered) )) @@ -612,15 +578,11 @@ log.debug("_ itemindex: {}".format(itemindex)) # Add Gap if needed - if itemindex == 0: - # if it is first track item at track then add - # it to previous item - prev_item = segment_data - - else: - # get previous item - prev_item = segments_ordered[itemindex - 1] - + prev_item = ( + segment_data + if itemindex == 0 + else segments_ordered[itemindex - 1] + ) log.debug("_ segment_data: {}".format(segment_data)) # calculate clip frame range difference from each other diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py index 8980f72cb8..e0a7297381 100644 --- a/openpype/hosts/flame/plugins/load/load_clip.py +++ b/openpype/hosts/flame/plugins/load/load_clip.py @@ -22,7 +22,7 @@ class LoadClip(opfapi.ClipLoader): # settings reel_group_name = "OpenPype_Reels" reel_name = "Loaded" - clip_name_template = "{asset}_{subset}_{representation}" + clip_name_template = "{asset}_{subset}_{output}" def load(self, context, name, namespace, options): @@ -39,7 +39,7 @@ class LoadClip(opfapi.ClipLoader): clip_name = self.clip_name_template.format( **context["representation"]["context"]) - # todo: settings in imageio + # TODO: settings in imageio # convert colorspace with ocio to flame mapping # in imageio flame section colorspace = colorspace diff --git a/openpype/hosts/flame/plugins/load/load_clip_batch.py b/openpype/hosts/flame/plugins/load/load_clip_batch.py new file mode 100644 index 0000000000..5de3226035 --- /dev/null +++ b/openpype/hosts/flame/plugins/load/load_clip_batch.py @@ -0,0 +1,139 @@ +import os +import flame +from pprint import pformat +import openpype.hosts.flame.api as opfapi + + +class LoadClipBatch(opfapi.ClipLoader): + """Load a subset as a clip into the current batch group + + Place the clip on its asset origin timings collected + during conforming to project + """ + + families = ["render2d", "source", "plate", "render", "review"] + representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"] + + label = "Load as clip to current batch" + order = -10 + icon = "code-fork" + color = "orange" + + # settings + reel_name = "OP_LoadedReel" + clip_name_template = "{asset}_{subset}_{output}" + + def load(self, context, name, namespace, options): + + # get flame objects + self.batch = options.get("batch") or flame.batch + + # load clip to timeline and get main variables + namespace = namespace + version = context['version'] + version_data = version.get("data", {}) + version_name = version.get("name", None) + colorspace = version_data.get("colorspace", None) + + # if `output` is missing from the representation context,
+ # replace it with `representation` in the template + if not context["representation"]["context"].get("output"): + self.clip_name_template = self.clip_name_template.replace( + "output", "representation") + + clip_name = self.clip_name_template.format( + **context["representation"]["context"]) + + # TODO: settings in imageio + # convert colorspace with ocio to flame mapping + # in imageio flame section + colorspace = colorspace + + # create workfile path + workfile_dir = options.get("workdir") or os.environ["AVALON_WORKDIR"] + openclip_dir = os.path.join( + workfile_dir, clip_name + ) + openclip_path = os.path.join( + openclip_dir, clip_name + ".clip" + ) + if not os.path.exists(openclip_dir): + os.makedirs(openclip_dir) + + # prepare clip data from context and send it to OpenClipSolver + loading_context = { + "path": self.fname.replace("\\", "/"), + "colorspace": colorspace, + "version": "v{:0>3}".format(version_name), + "logger": self.log + + } + self.log.debug(pformat( + loading_context + )) + self.log.debug(openclip_path) + + # make openpype clip file + opfapi.OpenClipSolver(openclip_path, loading_context).make() + + # get existing clip from the loader reel or import a new one + opc = self._get_clip( + clip_name, + openclip_path + ) + + # add additional metadata from the version to imprint Avalon knob + add_keys = [ + "frameStart", "frameEnd", "source", "author", + "fps", "handleStart", "handleEnd" + ] + + # move all version data keys to tag data + data_imprint = { + key: version_data.get(key, str(None)) + for key in add_keys + } + # add variables related to version context + data_imprint.update({ + "version": version_name, + "colorspace": colorspace, + "objectName": clip_name + }) + + # TODO: finish the containerisation + # opc_segment = opfapi.get_clip_segment(opc) + + # return opfapi.containerise( + # opc_segment, + # name, namespace, context, + # self.__class__.__name__, + # data_imprint) + + return opc + + def _get_clip(self, name, clip_path): + reel = self._get_reel() + + # look for an already imported clip with the same name + matching_clip = None + for cl in reel.clips: + if cl.name.get_value() != name: + continue + matching_clip = cl + + if not matching_clip: + created_clips = flame.import_clips(str(clip_path), reel) + return created_clips.pop() + + return matching_clip + + def _get_reel(self): + + matching_reel = [ + rg for rg in self.batch.reels + if rg.name.get_value() == self.reel_name + ] + + return ( + matching_reel.pop() + if matching_reel + else self.batch.create_reel(str(self.reel_name)) + )
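As a sketch, the naming above would resolve like this for a typical representation context; all values are illustrative, and the directory comes from the `workdir` option or AVALON_WORKDIR:

# context["representation"]["context"] -> {"asset": "sh010",
#                                           "subset": "plateMain",
#                                           "output": "exr", ...}
# clip_name     -> "sh010_plateMain_exr"
# openclip_path -> "{workfile_dir}/sh010_plateMain_exr/sh010_plateMain_exr.clip"

diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py index 70340ad7a2..95c2002bd9 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -21,132 +21,135 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): audio_track_items = [] - # TODO: add to settings # settings - xml_preset_attrs_from_comments = { - "width": "number", - "height": "number", - "pixelRatio": "float", - "resizeType": "string", - "resizeFilter": "string" - } + xml_preset_attrs_from_comments = [] + add_tasks = [] def process(self, context): project = context.data["flameProject"] - sequence = context.data["flameSequence"] + selected_segments = context.data["flameSelectedSegments"] + self.log.debug("__ selected_segments: {}".format(selected_segments)) + self.otio_timeline = context.data["otioTimeline"] self.clips_in_reels = opfapi.get_clips_in_reels(project) self.fps = context.data["fps"] # process all sellected - 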
with opfapi.maintained_segment_selection(sequence) as segments: - for segment in segments: - comment_attributes = self._get_comment_attributes(segment) - self.log.debug("_ comment_attributes: {}".format( - pformat(comment_attributes))) + for segment in selected_segments: + # get openpype tag data + marker_data = opfapi.get_segment_data_marker(segment) + self.log.debug("__ marker_data: {}".format( + pformat(marker_data))) - clip_data = opfapi.get_segment_attributes(segment) - clip_name = clip_data["segment_name"] - self.log.debug("clip_name: {}".format(clip_name)) + if not marker_data: + continue - # get openpype tag data - marker_data = opfapi.get_segment_data_marker(segment) - self.log.debug("__ marker_data: {}".format( - pformat(marker_data))) + if marker_data.get("id") != "pyblish.avalon.instance": + continue - if not marker_data: - continue + self.log.debug("__ segment.name: {}".format( + segment.name + )) - if marker_data.get("id") != "pyblish.avalon.instance": - continue + comment_attributes = self._get_comment_attributes(segment) - # get file path - file_path = clip_data["fpath"] + self.log.debug("_ comment_attributes: {}".format( + pformat(comment_attributes))) - # get source clip - source_clip = self._get_reel_clip(file_path) + clip_data = opfapi.get_segment_attributes(segment) + clip_name = clip_data["segment_name"] + self.log.debug("clip_name: {}".format(clip_name)) - first_frame = opfapi.get_frame_from_filename(file_path) or 0 + # get file path + file_path = clip_data["fpath"] - head, tail = self._get_head_tail(clip_data, first_frame) + # get source clip + source_clip = self._get_reel_clip(file_path) - # solve handles length - marker_data["handleStart"] = min( - marker_data["handleStart"], head) - marker_data["handleEnd"] = min( - marker_data["handleEnd"], tail) + first_frame = opfapi.get_frame_from_filename(file_path) or 0 - with_audio = bool(marker_data.pop("audio")) + head, tail = self._get_head_tail(clip_data, first_frame) - # add marker data to instance data - inst_data = dict(marker_data.items()) + # solve handles length + marker_data["handleStart"] = min( + marker_data["handleStart"], abs(head)) + marker_data["handleEnd"] = min( + marker_data["handleEnd"], abs(tail)) - asset = marker_data["asset"] - subset = marker_data["subset"] + with_audio = bool(marker_data.pop("audio")) - # insert family into families - family = marker_data["family"] - families = [str(f) for f in marker_data["families"]] - families.insert(0, str(family)) + # add marker data to instance data + inst_data = dict(marker_data.items()) - # form label - label = asset - if asset != clip_name: - label += " ({})".format(clip_name) - label += " {}".format(subset) - label += " {}".format("[" + ", ".join(families) + "]") + asset = marker_data["asset"] + subset = marker_data["subset"] - inst_data.update({ - "name": "{}_{}".format(asset, subset), - "label": label, - "asset": asset, - "item": segment, - "families": families, - "publish": marker_data["publish"], - "fps": self.fps, - "flameSourceClip": source_clip, - "sourceFirstFrame": int(first_frame), - "path": file_path - }) + # insert family into families + family = marker_data["family"] + families = [str(f) for f in marker_data["families"]] + families.insert(0, str(family)) - # get otio clip data - otio_data = self._get_otio_clip_instance_data(clip_data) or {} - self.log.debug("__ otio_data: {}".format(pformat(otio_data))) + # form label + label = asset + if asset != clip_name: + label += " ({})".format(clip_name) + label += " {} [{}]".format(subset, ", 
".join(families)) - # add to instance data - inst_data.update(otio_data) - self.log.debug("__ inst_data: {}".format(pformat(inst_data))) + inst_data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "asset": asset, + "item": segment, + "families": families, + "publish": marker_data["publish"], + "fps": self.fps, + "flameSourceClip": source_clip, + "sourceFirstFrame": int(first_frame), + "path": file_path, + "flameAddTasks": self.add_tasks, + "tasks": { + task["name"]: {"type": task["type"]} + for task in self.add_tasks} + }) - # add resolution - self._get_resolution_to_data(inst_data, context) + # get otio clip data + otio_data = self._get_otio_clip_instance_data(clip_data) or {} + self.log.debug("__ otio_data: {}".format(pformat(otio_data))) - # add comment attributes if any - inst_data.update(comment_attributes) + # add to instance data + inst_data.update(otio_data) + self.log.debug("__ inst_data: {}".format(pformat(inst_data))) - # create instance - instance = context.create_instance(**inst_data) + # add resolution + self._get_resolution_to_data(inst_data, context) - # add colorspace data - instance.data.update({ - "versionData": { - "colorspace": clip_data["colour_space"], - } - }) + # add comment attributes if any + inst_data.update(comment_attributes) - # create shot instance for shot attributes create/update - self._create_shot_instance(context, clip_name, **inst_data) + # create instance + instance = context.create_instance(**inst_data) - self.log.info("Creating instance: {}".format(instance)) - self.log.info( - "_ instance.data: {}".format(pformat(instance.data))) + # add colorspace data + instance.data.update({ + "versionData": { + "colorspace": clip_data["colour_space"], + } + }) - if not with_audio: - continue + # create shot instance for shot attributes create/update + self._create_shot_instance(context, clip_name, **inst_data) - # add audioReview attribute to plate instance data - # if reviewTrack is on - if marker_data.get("reviewTrack") is not None: - instance.data["reviewAudio"] = True + self.log.info("Creating instance: {}".format(instance)) + self.log.info( + "_ instance.data: {}".format(pformat(instance.data))) + + if not with_audio: + continue + + # add audioReview attribute to plate instance data + # if reviewTrack is on + if marker_data.get("reviewTrack") is not None: + instance.data["reviewAudio"] = True def _get_comment_attributes(self, segment): comment = segment.comment.get_value() @@ -181,14 +184,17 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): # split to key and value key, value = split.split(":") - for a_name, a_type in self.xml_preset_attrs_from_comments.items(): + for attr_data in self.xml_preset_attrs_from_comments: + a_name = attr_data["name"] + a_type = attr_data["type"] + # exclude all not related attributes if a_name.lower() not in key.lower(): continue # get pattern defined by type pattern = TXT_PATERN - if a_type in ("number" , "float"): + if a_type in ("number", "float"): pattern = NUM_PATERN res_goup = pattern.findall(value) @@ -241,6 +247,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): head = clip_data.get("segment_head") tail = clip_data.get("segment_tail") + # HACK: it is here to serve for versions bellow 2021.1 if not head: head = int(clip_data["source_in"]) - int(first_frame) if not tail: diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py index faa5be9d68..c6aeae7730 100644 --- 
a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py @@ -31,27 +31,28 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin): ) # adding otio timeline to context - with opfapi.maintained_segment_selection(sequence): + with opfapi.maintained_segment_selection(sequence) as selected_seg: otio_timeline = flame_export.create_otio_timeline(sequence) - instance_data = { - "name": subset_name, - "asset": asset_doc["name"], - "subset": subset_name, - "family": "workfile" - } + instance_data = { + "name": subset_name, + "asset": asset_doc["name"], + "subset": subset_name, + "family": "workfile" + } - # create instance with workfile - instance = context.create_instance(**instance_data) - self.log.info("Creating instance: {}".format(instance)) + # create instance with workfile + instance = context.create_instance(**instance_data) + self.log.info("Creating instance: {}".format(instance)) - # update context with main project attributes - context.data.update({ - "flameProject": project, - "flameSequence": sequence, - "otioTimeline": otio_timeline, - "currentFile": "Flame/{}/{}".format( - project.name, sequence.name - ), - "fps": float(str(sequence.frame_rate)[:-4]) - }) + # update context with main project attributes + context.data.update({ + "flameProject": project, + "flameSequence": sequence, + "otioTimeline": otio_timeline, + "currentFile": "Flame/{}/{}".format( + project.name, sequence.name + ), + "flameSelectedSegments": selected_seg, + "fps": float(str(sequence.frame_rate)[:-4]) + }) diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py index 32f6b9508f..a780f8c9e5 100644 --- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py +++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py @@ -61,9 +61,13 @@ class ExtractSubsetResources(openpype.api.Extractor): # flame objects segment = instance.data["item"] + segment_name = segment.name.get_value() sequence_clip = instance.context.data["flameSequence"] clip_data = instance.data["flameSourceClip"] - clip = clip_data["PyClip"] + + reel_clip = None + if clip_data: + reel_clip = clip_data["PyClip"] # segment's parent track name s_track_name = segment.parent.name.get_value() @@ -108,6 +112,16 @@ class ExtractSubsetResources(openpype.api.Extractor): ignore_comment_attrs = preset_config["ignore_comment_attrs"] color_out = preset_config["colorspace_out"] + # get attributes related to loading in integrate_batch_group + load_to_batch_group = preset_config.get( + "load_to_batch_group") + batch_group_loader_name = preset_config.get( + "batch_group_loader_name") + + # convert to None if empty string + if batch_group_loader_name == "": + batch_group_loader_name = None + # get frame range with handles for representation range frame_start_handle = frame_start - handle_start source_duration_handles = ( @@ -117,8 +131,20 @@ class ExtractSubsetResources(openpype.api.Extractor): in_mark = (source_start_handles - source_first_frame) + 1 out_mark = in_mark + source_duration_handles + # skip non-sequence presets when no reel clip is available + if ( + not reel_clip + and export_type != "Sequence Publish" + ): + self.log.warning(( + "Skipping preset {}. 
No reel clip " + "available for {}").format( + preset_file, segment_name + )) + continue + # by default export source clips - exporting_clip = clip + exporting_clip = reel_clip if export_type == "Sequence Publish": # change export clip to sequence @@ -150,7 +176,7 @@ class ExtractSubsetResources(openpype.api.Extractor): if export_type == "Sequence Publish": # only keep visible layer where instance segment is child - self.hide_other_tracks(duplclip, s_track_name) + self.hide_others(duplclip, segment_name, s_track_name) # validate xml preset file is filled if preset_file == "": @@ -211,7 +237,9 @@ class ExtractSubsetResources(openpype.api.Extractor): "tags": repre_tags, "data": { "colorspace": color_out - } + }, + "load_to_batch_group": load_to_batch_group, + "batch_group_loader_name": batch_group_loader_name } # collect all available content of export dir @@ -322,18 +350,26 @@ class ExtractSubsetResources(openpype.api.Extractor): return new_stage_dir, new_files_list - def hide_other_tracks(self, sequence_clip, track_name): + def hide_others(self, sequence_clip, segment_name, track_name): """Helper method used only if sequence clip is used Args: sequence_clip (flame.Clip): sequence clip + segment_name (str): segment name track_name (str): track name """ # create otio tracks and clips for ver in sequence_clip.versions: for track in ver.tracks: - if len(track.segments) == 0 and track.hidden: + if len(track.segments) == 0 and track.hidden.get_value(): continue + # hide tracks which are not parent track if track.name.get_value() != track_name: track.hidden = True + continue + + # hide all other segments + for segment in track.segments: + if segment.name.get_value() != segment_name: + segment.hidden = True diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py new file mode 100644 index 0000000000..da9553cc2a --- /dev/null +++ b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py @@ -0,0 +1,328 @@ +import os +import copy +from collections import OrderedDict +from pprint import pformat +import pyblish +from openpype.lib import get_workdir +import openpype.hosts.flame.api as opfapi +import openpype.pipeline as op_pipeline + + +class IntegrateBatchGroup(pyblish.api.InstancePlugin): + """Integrate published shot to batch group""" + + order = pyblish.api.IntegratorOrder + 0.45 + label = "Integrate Batch Groups" + hosts = ["flame"] + families = ["clip"] + + # settings + default_loader = "LoadClip" + + def process(self, instance): + add_tasks = instance.data["flameAddTasks"] + + # iterate all tasks from settings + for task_data in add_tasks: + # skip tasks that are not set to create a batch group + if not task_data["create_batch_group"]: + continue + + # create or get already created batch group + bgroup = self._get_batch_group(instance, task_data) + + # add batch group content + all_batch_nodes = self._add_nodes_to_batch_with_links( + instance, task_data, bgroup) + + for name, node in all_batch_nodes.items(): + self.log.debug("name: {}, dir: {}".format( + name, dir(node) + )) + self.log.debug("__ node.attributes: {}".format( + node.attributes + )) + + # load plate to batch group + self.log.info("Loading subset `{}` into batch `{}`".format( + instance.data["subset"], bgroup.name.get_value() + )) + self._load_clip_to_context(instance, bgroup) + + def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group): + # get Write File node properties > OrderedDict because order does matter + write_pref_data =
self._get_write_prefs(instance, task_data) + + batch_nodes = [ + { + "type": "comp", + "properties": {}, + "id": "comp_node01" + }, + { + "type": "Write File", + "properties": write_pref_data, + "id": "write_file_node01" + } + ] + batch_links = [ + { + "from_node": { + "id": "comp_node01", + "connector": "Result" + }, + "to_node": { + "id": "write_file_node01", + "connector": "Front" + } + } + ] + + # add nodes into batch group + return opfapi.create_batch_group_conent( + batch_nodes, batch_links, batch_group) + + def _load_clip_to_context(self, instance, bgroup): + # get all loaders for host + loaders_by_name = { + loader.__name__: loader + for loader in op_pipeline.discover_loader_plugins() + } + + # get all published representations + published_representations = instance.data["published_representations"] + repres_db_id_by_name = { + repre_info["representation"]["name"]: repre_id + for repre_id, repre_info in published_representations.items() + } + + # get all loadable representations + repres_by_name = { + repre["name"]: repre for repre in instance.data["representations"] + } + + # get repre_id for the loadable representations + loader_name_by_repre_id = { + repres_db_id_by_name[repr_name]: { + "loader": repr_data["batch_group_loader_name"], + # add repre data for exception logging + "_repre_data": repr_data + } + for repr_name, repr_data in repres_by_name.items() + if repr_data.get("load_to_batch_group") + } + + self.log.debug("__ loader_name_by_repre_id: {}".format(pformat( + loader_name_by_repre_id))) + + # get representation context from the repre_id + repre_contexts = op_pipeline.load.get_repres_contexts( + loader_name_by_repre_id.keys()) + + self.log.debug("__ repre_contexts: {}".format(pformat( + repre_contexts))) + + # loop all returned repres from repre_context dict + for repre_id, repre_context in repre_contexts.items(): + self.log.debug("__ repre_id: {}".format(repre_id)) + # get loader name by representation id + loader_name = ( + loader_name_by_repre_id[repre_id]["loader"] + # if nothing was added to settings fallback to default + or self.default_loader + ) + + # get loader plugin + loader_plugin = loaders_by_name.get(loader_name) + if loader_plugin: + # load to flame by representation context + try: + op_pipeline.load.load_with_repre_context( + loader_plugin, repre_context, **{ + "data": { + "workdir": self.task_workdir, + "batch": bgroup + } + }) + except op_pipeline.load.IncompatibleLoaderError as msg: + self.log.error( + "Check allowed representations for Loader `{}` " + "in settings > error: {}".format( + loader_plugin.__name__, msg)) + self.log.error( + "Representation context >>{}<< is not compatible " + "with loader `{}`".format( + pformat(repre_context), loader_plugin.__name__ + ) + ) + else: + self.log.warning( + "Something went wrong and no Loader was found for " + "following data: {}".format( + pformat(loader_name_by_repre_id)) + )
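For reference, a sketch of a representation entry this method filters on; the two keys are attached by extract_subset_resources.py earlier in this PR, and `LoadClipBatch` (the batch loader added in this PR) is a plausible settings value (all values illustrative):

# instance.data["representations"] item:
# {
#     "name": "exr",
#     "tags": [...],
#     "data": {"colorspace": "ACES - ACEScg"},
#     "load_to_batch_group": True,
#     "batch_group_loader_name": "LoadClipBatch"
# }

+ + def _get_batch_group(self, instance, task_data): + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + frame_duration = (frame_end - frame_start) + 1 + asset_name = instance.data["asset"] + + task_name = task_data["name"] + batchgroup_name = "{}_{}".format(asset_name, task_name) + + batch_data = { + "shematic_reels": [ + "OP_LoadedReel" + ], + "handleStart": handle_start, + "handleEnd": handle_end + } + self.log.debug( + "__ batch_data: {}".format(pformat(batch_data))) + + # check if the batch group already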
exists + bgroup = opfapi.get_batch_group_from_desktop(batchgroup_name) + + if not bgroup: + self.log.info( + "Creating new batch group: {}".format(batchgroup_name)) + # create batch with utils + bgroup = opfapi.create_batch_group( + batchgroup_name, + frame_start, + frame_duration, + **batch_data + ) + + else: + self.log.info( + "Updating batch group: {}".format(batchgroup_name)) + # update already created batch group + bgroup = opfapi.create_batch_group( + batchgroup_name, + frame_start, + frame_duration, + update_batch_group=bgroup, + **batch_data + ) + + return bgroup + + def _get_anatomy_data_with_current_task(self, instance, task_data): + anatomy_data = copy.deepcopy(instance.data["anatomyData"]) + task_name = task_data["name"] + task_type = task_data["type"] + anatomy_obj = instance.context.data["anatomy"] + + # update task data in anatomy data + project_task_types = anatomy_obj["tasks"] + task_code = project_task_types.get(task_type, {}).get("short_name") + anatomy_data.update({ + "task": { + "name": task_name, + "type": task_type, + "short": task_code + } + }) + return anatomy_data + + def _get_write_prefs(self, instance, task_data): + # update task in anatomy data + anatomy_data = self._get_anatomy_data_with_current_task( + instance, task_data) + + self.task_workdir = self._get_shot_task_dir_path( + instance, task_data) + self.log.debug("__ task_workdir: {}".format( + self.task_workdir)) + + # TODO: this might be done with template in settings + render_dir_path = os.path.join( + self.task_workdir, "render", "flame") + + if not os.path.exists(render_dir_path): + os.makedirs(render_dir_path, mode=0o777) + + # TODO: add most of these to `imageio/flame/batch/write_node` + name = "{project[code]}_{asset}_{task[name]}".format( + **anatomy_data + ) + + # The path attribute where the rendered clip is exported + # /path/to/file.[0001-0010].exr + media_path = render_dir_path + # name of file represented by tokens + media_path_pattern = ( + "_v/_v.") + # The Create Open Clip attribute of the Write File node. + # Determines if an Open Clip is created by the Write File node. + create_clip = True + # The Include Setup attribute of the Write File node. + # Determines if a Batch Setup file is created by the Write File node. + include_setup = True + # The path attribute where the Open Clip file is exported by + # the Write File node. + create_clip_path = "" + # The path attribute where the Batch setup file + # is exported by the Write File node. + include_setup_path = "./_v" + # The file type for the files written by the Write File node. + # Setting this attribute also overwrites format_extension, + # bit_depth and compress_mode to match the defaults for + # this file type. + file_type = "OpenEXR" + # The file extension for the files written by the Write File node. + # This attribute resets to match file_type whenever file_type + # is set. If you require a specific extension, you must + # set format_extension after setting file_type. + format_extension = "exr" + # The bit depth for the files written by the Write File node. + # This attribute resets to match file_type whenever file_type is set. + bit_depth = "16" + # The compressing attribute for the files exported by the Write + # File node. Only relevant when file_type in 'OpenEXR', 'Sgi', 'Tiff' + compress = True + # The compression format attribute for the specific File Types + # export by the Write File node. You must set compress_mode + # after setting file_type. 
+ compress_mode = "DWAB" + # The frame index mode attribute of the Write File node. + # Value range: `Use Timecode` or `Use Start Frame` + frame_index_mode = "Use Start Frame" + frame_padding = 6 + # The versioning mode of the Open Clip exported by the Write File node. + # Only available if create_clip = True. + version_mode = "Follow Iteration" + version_name = "v" + version_padding = 3 + + # need to make sure the order of keys is correct + return OrderedDict(( + ("name", name), + ("media_path", media_path), + ("media_path_pattern", media_path_pattern), + ("create_clip", create_clip), + ("include_setup", include_setup), + ("create_clip_path", create_clip_path), + ("include_setup_path", include_setup_path), + ("file_type", file_type), + ("format_extension", format_extension), + ("bit_depth", bit_depth), + ("compress", compress), + ("compress_mode", compress_mode), + ("frame_index_mode", frame_index_mode), + ("frame_padding", frame_padding), + ("version_mode", version_mode), + ("version_name", version_name), + ("version_padding", version_padding) + )) + + def _get_shot_task_dir_path(self, instance, task_data): + project_doc = instance.data["projectEntity"] + asset_entity = instance.data["assetEntity"] + + return get_workdir( + project_doc, asset_entity, task_data["name"], "flame") diff --git a/openpype/hosts/flame/plugins/publish/validate_source_clip.py b/openpype/hosts/flame/plugins/publish/validate_source_clip.py index 9ff015f628..345c00e05a 100644 --- a/openpype/hosts/flame/plugins/publish/validate_source_clip.py +++ b/openpype/hosts/flame/plugins/publish/validate_source_clip.py @@ -9,6 +9,8 @@ class ValidateSourceClip(pyblish.api.InstancePlugin): label = "Validate Source Clip" hosts = ["flame"] families = ["clip"] + optional = True + active = False def process(self, instance): flame_source_clip = instance.data["flameSourceClip"] diff --git a/openpype/hosts/flame/startup/openpype_in_flame.py b/openpype/hosts/flame/startup/openpype_in_flame.py index 931c5a1b79..f2ac23b19e 100644 --- a/openpype/hosts/flame/startup/openpype_in_flame.py +++ b/openpype/hosts/flame/startup/openpype_in_flame.py @@ -3,18 +3,19 @@ import sys from Qt import QtWidgets from pprint import pformat import atexit -import openpype -import avalon + import openpype.hosts.flame.api as opfapi +from openpype.pipeline import ( + install_host, + registered_host, +) def openpype_install(): """Registering OpenPype in context """ - openpype.install() - avalon.api.install(opfapi) - print("Avalon registered hosts: {}".format( - avalon.api.registered_host())) + install_host(opfapi) + print("Registered host: {}".format(registered_host())) # Exception handler diff --git a/openpype/hosts/fusion/api/pipeline.py b/openpype/hosts/fusion/api/pipeline.py index c9cd76770a..0867b464d5 100644 --- a/openpype/hosts/fusion/api/pipeline.py +++ b/openpype/hosts/fusion/api/pipeline.py @@ -7,14 +7,14 @@ import logging import contextlib import pyblish.api -import avalon.api from openpype.api import Logger from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, - deregister_loader_plugin_path, + register_creator_plugin_path, register_inventory_action_path, + deregister_loader_plugin_path, + deregister_creator_plugin_path, deregister_inventory_action_path, AVALON_CONTAINER_ID, ) @@ -70,7 +70,7 @@ def install(): log.info("Registering Fusion plug-ins..") register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) 
register_inventory_action_path(INVENTORY_PATH) pyblish.api.register_callback( @@ -94,7 +94,7 @@ def uninstall(): log.info("Deregistering Fusion plug-ins..") deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) deregister_inventory_action_path(INVENTORY_PATH) pyblish.api.deregister_callback( diff --git a/openpype/hosts/fusion/plugins/create/create_exr_saver.py b/openpype/hosts/fusion/plugins/create/create_exr_saver.py index ff8bdb21ef..8bab5ee9b1 100644 --- a/openpype/hosts/fusion/plugins/create/create_exr_saver.py +++ b/openpype/hosts/fusion/plugins/create/create_exr_saver.py @@ -1,13 +1,13 @@ import os -from openpype.pipeline import create +from openpype.pipeline import LegacyCreator from openpype.hosts.fusion.api import ( get_current_comp, comp_lock_and_undo_chunk ) -class CreateOpenEXRSaver(create.LegacyCreator): +class CreateOpenEXRSaver(LegacyCreator): name = "openexrDefault" label = "Create OpenEXR Saver" diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py index ca7efb9136..ca8e5c9e37 100644 --- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py +++ b/openpype/hosts/fusion/scripts/fusion_switch_shot.py @@ -7,6 +7,10 @@ import logging import avalon.api from avalon import io +from openpype.pipeline import ( + install_host, + registered_host, +) from openpype.lib import version_up from openpype.hosts.fusion import api from openpype.hosts.fusion.api import lib @@ -218,7 +222,7 @@ def switch(asset_name, filepath=None, new=True): assert current_comp is not None, ( "Fusion could not load '{}'").format(filepath) - host = avalon.api.registered_host() + host = registered_host() containers = list(host.ls()) assert containers, "Nothing to update" @@ -279,7 +283,7 @@ if __name__ == '__main__': args, unknown = parser.parse_args() - avalon.api.install(api) + install_host(api) switch(args.asset_name, args.file_path) sys.exit(0) diff --git a/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py b/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py index 4b5e8f91a0..de8fc4b3b4 100644 --- a/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py +++ b/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py @@ -1,24 +1,23 @@ import os import sys -import openpype from openpype.api import Logger +from openpype.pipeline import ( + install_host, + registered_host, +) log = Logger().get_logger(__name__) def main(env): - import avalon.api from openpype.hosts.fusion import api from openpype.hosts.fusion.api import menu - # Registers pype's Global pyblish plugins - openpype.install() - # activate resolve from pype - avalon.api.install(api) + install_host(api) - log.info(f"Avalon registered hosts: {avalon.api.registered_host()}") + log.info(f"Registered host: {registered_host()}") menu.launch_openpype_menu() diff --git a/openpype/hosts/fusion/utility_scripts/switch_ui.py b/openpype/hosts/fusion/utility_scripts/switch_ui.py index d9eeae25ea..37306c7a2a 100644 --- a/openpype/hosts/fusion/utility_scripts/switch_ui.py +++ b/openpype/hosts/fusion/utility_scripts/switch_ui.py @@ -1,14 +1,15 @@ import os +import sys import glob import logging from Qt import QtWidgets, QtCore -import avalon.api from avalon import io import qtawesome as qta from openpype import style +from openpype.pipeline import install_host from openpype.hosts.fusion import api from openpype.lib.avalon_context import get_workdir_from_session @@ -181,8 +182,7 @@ class 
App(QtWidgets.QWidget): if __name__ == '__main__': - import sys - avalon.api.install(api) + install_host(api) app = QtWidgets.QApplication(sys.argv) window = App() diff --git a/openpype/hosts/harmony/api/lib.py b/openpype/hosts/harmony/api/lib.py index 66eeac1e3a..53fd0f07dd 100644 --- a/openpype/hosts/harmony/api/lib.py +++ b/openpype/hosts/harmony/api/lib.py @@ -183,10 +183,10 @@ def launch(application_path, *args): application_path (str): Path to Harmony. """ - from avalon import api + from openpype.pipeline import install_host from openpype.hosts.harmony import api as harmony - api.install(harmony) + install_host(harmony) ProcessContext.port = random.randrange(49152, 65535) os.environ["AVALON_HARMONY_PORT"] = str(ProcessContext.port) diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index 420e9720db..88f11dd16f 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -6,14 +6,14 @@ from bson.objectid import ObjectId import pyblish.api from avalon import io -import avalon.api from openpype import lib from openpype.lib import register_event_callback from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) import openpype.hosts.harmony @@ -108,9 +108,8 @@ def check_inventory(): if not lib.any_outdated(): return - host = avalon.api.registered_host() outdated_containers = [] - for container in host.ls(): + for container in ls(): representation = container['representation'] representation_doc = io.find_one( { @@ -186,7 +185,7 @@ def install(): pyblish.api.register_host("harmony") pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) log.info(PUBLISH_PATH) # Register callbacks. 
@@ -200,7 +199,7 @@ def install(): def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) def on_pyblish_instance_toggled(instance, old_value, new_value): diff --git a/openpype/hosts/harmony/plugins/publish/collect_workfile.py b/openpype/hosts/harmony/plugins/publish/collect_workfile.py index 63bfd5929b..c0493315a4 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_workfile.py +++ b/openpype/hosts/harmony/plugins/publish/collect_workfile.py @@ -3,6 +3,8 @@ import pyblish.api import os +from openpype.lib import get_subset_name_with_asset_doc + class CollectWorkfile(pyblish.api.ContextPlugin): """Collect current script for publish.""" @@ -14,10 +16,15 @@ class CollectWorkfile(pyblish.api.ContextPlugin): def process(self, context): """Plugin entry point.""" family = "workfile" - task = os.getenv("AVALON_TASK", None) - sanitized_task_name = task[0].upper() + task[1:] basename = os.path.basename(context.data["currentFile"]) - subset = "{}{}".format(family, sanitized_task_name) + subset = get_subset_name_with_asset_doc( + family, + "", + context.data["anatomyData"]["task"]["name"], + context.data["assetEntity"], + context.data["anatomyData"]["project"]["name"], + host_name=context.data["hostName"] + ) # Create instance instance = context.create_instance(subset) diff --git a/openpype/hosts/hiero/__init__.py b/openpype/hosts/hiero/__init__.py index 2d674b3fa7..d2ac82391b 100644 --- a/openpype/hosts/hiero/__init__.py +++ b/openpype/hosts/hiero/__init__.py @@ -10,7 +10,7 @@ def add_implementation_envs(env, _app): ] old_hiero_path = env.get("HIERO_PLUGIN_PATH") or "" for path in old_hiero_path.split(os.pathsep): - if not path or not os.path.exists(path): + if not path: continue norm_path = os.path.normpath(path) diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index 0d3c8914ce..616ff53fd8 100644 --- a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -5,13 +5,13 @@ import os import contextlib from collections import OrderedDict -from avalon import api as avalon from avalon import schema from pyblish import api as pyblish from openpype.api import Logger from openpype.pipeline import ( - LegacyCreator, + register_creator_plugin_path, register_loader_plugin_path, + deregister_creator_plugin_path, deregister_loader_plugin_path, AVALON_CONTAINER_ID, ) @@ -34,14 +34,7 @@ AVALON_CONTAINERS = ":AVALON_CONTAINERS" def install(): - """ - Installing Hiero integration for avalon - - Args: - config (obj): avalon config module `pype` in our case, it is not - used but required by avalon.api.install() - - """ + """Installing Hiero integration.""" # adding all events events.register_events() @@ -50,7 +43,7 @@ def install(): pyblish.register_host("hiero") pyblish.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) # register callback for switching publishable pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) @@ -71,7 +64,7 @@ def uninstall(): pyblish.deregister_host("hiero") pyblish.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) # register callback for switching publishable 
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py b/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py index 21c21cd7c3..2e638c2088 100644 --- a/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py +++ b/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py @@ -1,9 +1,9 @@ import traceback # activate hiero from pype -import avalon.api +from openpype.pipeline import install_host import openpype.hosts.hiero.api as phiero -avalon.api.install(phiero) +install_host(phiero) try: __import__("openpype.hosts.hiero.api") diff --git a/openpype/hosts/houdini/__init__.py b/openpype/hosts/houdini/__init__.py index 8c12d13c81..a3ee38db8d 100644 --- a/openpype/hosts/houdini/__init__.py +++ b/openpype/hosts/houdini/__init__.py @@ -15,7 +15,7 @@ def add_implementation_envs(env, _app): old_houdini_menu_path = env.get("HOUDINI_MENU_PATH") or "" for path in old_houdini_path.split(os.pathsep): - if not path or not os.path.exists(path): + if not path: continue norm_path = os.path.normpath(path) @@ -23,7 +23,7 @@ def add_implementation_envs(env, _app): new_houdini_path.append(norm_path) for path in old_houdini_menu_path.split(os.pathsep): - if not path or not os.path.exists(path): + if not path: continue norm_path = os.path.normpath(path) diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py index d079c9ea81..7048accceb 100644 --- a/openpype/hosts/houdini/api/pipeline.py +++ b/openpype/hosts/houdini/api/pipeline.py @@ -4,14 +4,11 @@ import logging import contextlib import hou -import hdefereval import pyblish.api -import avalon.api -from avalon.lib import find_submodule from openpype.pipeline import ( - LegacyCreator, + register_creator_plugin_path, register_loader_plugin_path, AVALON_CONTAINER_ID, ) @@ -54,7 +51,7 @@ def install(): pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) log.info("Installing callbacks ... 
") # register_event_callback("init", on_init) @@ -215,24 +212,12 @@ def ls(): "pyblish.mindbender.container"): containers += lib.lsattr("id", identifier) - has_metadata_collector = False - config_host = find_submodule(avalon.api.registered_config(), "houdini") - if hasattr(config_host, "collect_container_metadata"): - has_metadata_collector = True - for container in sorted(containers, # Hou 19+ Python 3 hou.ObjNode are not # sortable due to not supporting greater # than comparisons key=lambda node: node.path()): - data = parse_container(container) - - # Collect custom data if attribute is present - if has_metadata_collector: - metadata = config_host.collect_container_metadata(container) - data.update(metadata) - - yield data + yield parse_container(container) def before_save(): @@ -305,7 +290,13 @@ def on_new(): start = hou.playbar.playbackRange()[0] hou.setFrame(start) - hdefereval.executeDeferred(_enforce_start_frame) + if hou.isUIAvailable(): + import hdefereval + hdefereval.executeDeferred(_enforce_start_frame) + else: + # Run without execute deferred when no UI is available because + # without UI `hdefereval` is not available to import + _enforce_start_frame() def _set_context_settings(): diff --git a/openpype/hosts/houdini/plugins/publish/collect_inputs.py b/openpype/hosts/houdini/plugins/publish/collect_inputs.py index 39e2737e8c..8c7098c710 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_inputs.py +++ b/openpype/hosts/houdini/plugins/publish/collect_inputs.py @@ -1,6 +1,7 @@ -import avalon.api as api import pyblish.api +from openpype.pipeline import registered_host + def collect_input_containers(nodes): """Collect containers that contain any of the node in `nodes`. @@ -18,7 +19,7 @@ def collect_input_containers(nodes): lookup = frozenset(nodes) containers = [] - host = api.registered_host() + host = registered_host() for container in host.ls(): node = container["node"] diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py index 31c2954ee7..c5cacd1880 100644 --- a/openpype/hosts/houdini/plugins/publish/increment_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py @@ -1,8 +1,8 @@ import pyblish.api -import avalon.api from openpype.api import version_up from openpype.action import get_errored_plugins_from_data +from openpype.pipeline import registered_host class IncrementCurrentFile(pyblish.api.InstancePlugin): @@ -41,7 +41,7 @@ class IncrementCurrentFile(pyblish.api.InstancePlugin): ) # Filename must not have changed since collecting - host = avalon.api.registered_host() + host = registered_host() current_file = host.current_file() assert ( context.data["currentFile"] == current_file diff --git a/openpype/hosts/houdini/plugins/publish/save_scene.py b/openpype/hosts/houdini/plugins/publish/save_scene.py index fe5962fbd3..6128c7af77 100644 --- a/openpype/hosts/houdini/plugins/publish/save_scene.py +++ b/openpype/hosts/houdini/plugins/publish/save_scene.py @@ -1,5 +1,6 @@ import pyblish.api -import avalon.api + +from openpype.pipeline import registered_host class SaveCurrentScene(pyblish.api.ContextPlugin): @@ -12,7 +13,7 @@ class SaveCurrentScene(pyblish.api.ContextPlugin): def process(self, context): # Filename must not have changed since collecting - host = avalon.api.registered_host() + host = registered_host() current_file = host.current_file() assert context.data['currentFile'] == current_file, ( "Collected filename from current scene name." 
diff --git a/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py b/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py index eb33b49759..afadbffd3e 100644 --- a/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py +++ b/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py @@ -1,10 +1,10 @@ -import avalon.api +from openpype.pipeline import install_host from openpype.hosts.houdini import api def main(): print("Installing OpenPype ...") - avalon.api.install(api) + install_host(api) main() diff --git a/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py b/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py index eb33b49759..afadbffd3e 100644 --- a/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py +++ b/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py @@ -1,10 +1,10 @@ -import avalon.api +from openpype.pipeline import install_host from openpype.hosts.houdini import api def main(): print("Installing OpenPype ...") - avalon.api.install(api) + install_host(api) main() diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py index 499b733570..8cd51e6641 100644 --- a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py +++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py @@ -134,6 +134,7 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase): """ from avalon import api, io + from openpype.pipeline import registered_root PROJECT = api.Session["AVALON_PROJECT"] asset_doc = io.find_one({"name": asset, @@ -141,7 +142,7 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase): if not asset_doc: raise RuntimeError("Invalid asset name: '%s'" % asset) - root = api.registered_root() + root = registered_root() path = self._template.format(**{ "root": root, "project": PROJECT, diff --git a/openpype/hosts/maya/__init__.py b/openpype/hosts/maya/__init__.py index b7d26a7818..c1c82c62e5 100644 --- a/openpype/hosts/maya/__init__.py +++ b/openpype/hosts/maya/__init__.py @@ -9,7 +9,7 @@ def add_implementation_envs(env, _app): ] old_python_path = env.get("PYTHONPATH") or "" for path in old_python_path.split(os.pathsep): - if not path or not os.path.exists(path): + if not path: continue norm_path = os.path.normpath(path) diff --git a/openpype/hosts/maya/api/fbx.py b/openpype/hosts/maya/api/fbx.py new file mode 100644 index 0000000000..260241f5fc --- /dev/null +++ b/openpype/hosts/maya/api/fbx.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +"""Tools to work with FBX.""" +import logging + +from pyblish.api import Instance + +from maya import cmds # noqa +import maya.mel as mel # noqa + + +class FBXExtractor: + """Extract FBX from Maya. + + This extracts reproducible FBX exports ignoring any of the settings set + on the local machine in the FBX export options window. + + All export settings are applied with the `FBXExport*` commands prior + to the `FBXExport` call itself. The options can be overridden with + their + nice names as seen in the "options" property on this class. 
+ + For more information on FBX exports see: + - https://knowledge.autodesk.com/support/maya/learn-explore/caas + /CloudHelp/cloudhelp/2016/ENU/Maya/files/GUID-6CCE943A-2ED4-4CEE-96D4 + -9CB19C28F4E0-htm.html + - http://forums.cgsociety.org/archive/index.php?t-1032853.html + - https://groups.google.com/forum/#!msg/python_inside_maya/cLkaSo361oE + /LKs9hakE28kJ + + """ + @property + def options(self): + """Overridable options for FBX Export + + Given in the following format + - {NAME: EXPECTED TYPE} + + If the overridden option's type does not match, + the option is not included and a warning is logged. + + """ + + return { + "cameras": bool, + "smoothingGroups": bool, + "hardEdges": bool, + "tangents": bool, + "smoothMesh": bool, + "instances": bool, + # "referencedContainersContent": bool, # deprecated in Maya 2016+ + "bakeComplexAnimation": int, + "bakeComplexStart": int, + "bakeComplexEnd": int, + "bakeComplexStep": int, + "bakeResampleAnimation": bool, + "animationOnly": bool, + "useSceneName": bool, + "quaternion": str, # "euler" + "shapes": bool, + "skins": bool, + "constraints": bool, + "lights": bool, + "embeddedTextures": bool, + "inputConnections": bool, + "upAxis": str, # x, y or z, + "triangulate": bool + } + + @property + def default_options(self): + """The default options for FBX extraction. + + This includes shapes, skins, constraints, lights and incoming + connections and exports with the Y-axis as up-axis. + + By default this uses the time sliders start and end time. + + """ + + start_frame = int(cmds.playbackOptions(query=True, + animationStartTime=True)) + end_frame = int(cmds.playbackOptions(query=True, + animationEndTime=True)) + + return { + "cameras": False, + "smoothingGroups": True, + "hardEdges": False, + "tangents": False, + "smoothMesh": True, + "instances": False, + "bakeComplexAnimation": True, + "bakeComplexStart": start_frame, + "bakeComplexEnd": end_frame, + "bakeComplexStep": 1, + "bakeResampleAnimation": True, + "animationOnly": False, + "useSceneName": False, + "quaternion": "euler", + "shapes": True, + "skins": True, + "constraints": False, + "lights": True, + "embeddedTextures": False, + "inputConnections": True, + "upAxis": "y", + "triangulate": False + } + + def __init__(self, log=None): + # Ensure FBX plug-in is loaded + self.log = log or logging.getLogger(self.__class__.__name__) + cmds.loadPlugin("fbxmaya", quiet=True) + + def parse_overrides(self, instance, options): + """Inspect data of instance to determine overridden options + + An instance may supply any of the overridable options + as data, the option is then added to the extraction. + + """ + + for key in instance.data: + if key not in self.options: + continue + + # Ensure the data is of correct type + value = instance.data[key] + if not isinstance(value, self.options[key]): + self.log.warning( + "Overridden attribute {key} was of " + "the wrong type: {invalid_type} " + "- should have been {valid_type}".format( + key=key, + invalid_type=type(value).__name__, + valid_type=self.options[key].__name__)) + continue + + options[key] = value + + return options + + def set_options_from_instance(self, instance): + # type: (Instance) -> None + """Sets FBX export options from data in the instance. + + Args: + instance (Instance): Instance data. 
+ + """ + # Parse export options + options = self.default_options + options = self.parse_overrides(instance, options) + self.log.info("Export options: {0}".format(options)) + + # Collect the start and end including handles + start = instance.data.get("frameStartHandle") or \ + instance.context.data.get("frameStartHandle") + end = instance.data.get("frameEndHandle") or \ + instance.context.data.get("frameEndHandle") + + options['bakeComplexStart'] = start + options['bakeComplexEnd'] = end + + # First apply the default export settings to be fully consistent + # each time for successive publishes + mel.eval("FBXResetExport") + + # Apply the FBX overrides through MEL since the commands + # only work correctly in MEL according to online + # available discussions on the topic + _iteritems = getattr(options, "iteritems", options.items) + for option, value in _iteritems(): + key = option[0].upper() + option[1:] # uppercase first letter + + # Boolean must be passed as lower-case strings + # as to MEL standards + if isinstance(value, bool): + value = str(value).lower() + + template = "FBXExport{0} {1}" if key == "UpAxis" else \ + "FBXExport{0} -v {1}" # noqa + cmd = template.format(key, value) + self.log.info(cmd) + mel.eval(cmd) + + # Never show the UI or generate a log + mel.eval("FBXExportShowUI -v false") + mel.eval("FBXExportGenerateLog -v false") + + @staticmethod + def export(members, path): + # type: (list, str) -> None + """Export members as FBX with given path. + + Args: + members (list): List of members to export. + path (str): Path to use for export. + + """ + cmds.select(members, r=True, noExpand=True) + mel.eval('FBXExport -f "{}" -s'.format(path)) diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 92fc5133a9..9e99b96477 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -26,6 +26,7 @@ from openpype.pipeline import ( loaders_from_representation, get_representation_path, load_container, + registered_host, ) from .commands import reset_frame_range @@ -1574,7 +1575,7 @@ def assign_look_by_version(nodes, version_id): "name": "json"}) # See if representation is already loaded, if so reuse it. 
- host = api.registered_host() + host = registered_host() representation_id = str(look_representation['_id']) for container in host.ls(): if (container['loader'] == "LookLoader" and @@ -2612,7 +2613,7 @@ def get_attr_in_layer(attr, layer): def fix_incompatible_containers(): """Backwards compatibility: old containers to use new ReferenceLoader""" - host = api.registered_host() + host = registered_host() for container in host.ls(): loader = container['loader'] @@ -3138,11 +3139,20 @@ def set_colorspace(): @contextlib.contextmanager -def root_parent(nodes): - # type: (list) -> list +def parent_nodes(nodes, parent=None): + # type: (list, str) -> list """Context manager to un-parent provided nodes and return them back.""" import pymel.core as pm # noqa + parent_node = None + delete_parent = False + + if parent: + if not cmds.objExists(parent): + parent_node = pm.createNode("transform", n=parent, ss=False) + delete_parent = True + else: + parent_node = pm.PyNode(parent) node_parents = [] for node in nodes: n = pm.PyNode(node) @@ -3153,9 +3163,14 @@ def root_parent(nodes): node_parents.append((n, root)) try: for node in node_parents: - node[0].setParent(world=True) + if not parent: + node[0].setParent(world=True) + else: + node[0].setParent(parent_node) yield finally: for node in node_parents: if node[1]: node[0].setParent(node[1]) + if delete_parent: + pm.delete(parent_node) diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index bb61128178..f6f3472eef 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -9,8 +9,6 @@ import maya.api.OpenMaya as om import pyblish.api import avalon.api -from avalon.lib import find_submodule - import openpype.hosts.maya from openpype.tools.utils import host_tools from openpype.lib import ( @@ -20,11 +18,12 @@ from openpype.lib import ( ) from openpype.lib.path_tools import HostDirmap from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, register_inventory_action_path, + register_creator_plugin_path, deregister_loader_plugin_path, deregister_inventory_action_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) from openpype.hosts.maya.lib import copy_workspace_mel @@ -60,7 +59,7 @@ def install(): pyblish.api.register_host("maya") register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) register_inventory_action_path(INVENTORY_PATH) log.info(PUBLISH_PATH) @@ -189,7 +188,7 @@ def uninstall(): pyblish.api.deregister_host("maya") deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) deregister_inventory_action_path(INVENTORY_PATH) menu.uninstall() @@ -268,21 +267,8 @@ def ls(): """ container_names = _ls() - - has_metadata_collector = False - config_host = find_submodule(avalon.api.registered_config(), "maya") - if hasattr(config_host, "collect_container_metadata"): - has_metadata_collector = True - for container in sorted(container_names): - data = parse_container(container) - - # Collect custom data if attribute is present - if has_metadata_collector: - metadata = config_host.collect_container_metadata(container) - data.update(metadata) - - yield data + yield parse_container(container) def containerise(name, diff --git a/openpype/hosts/maya/plugins/create/create_look.py b/openpype/hosts/maya/plugins/create/create_look.py index 56e2640919..44e439fe1f 100644 --- 
a/openpype/hosts/maya/plugins/create/create_look.py +++ b/openpype/hosts/maya/plugins/create/create_look.py @@ -22,4 +22,6 @@ class CreateLook(plugin.Creator): self.data["maketx"] = self.make_tx # Enable users to force a copy. + # - on Windows, "forceCopy" is always changed to `True` because of + # the Windows implementation of hardlinks self.data["forceCopy"] = False diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py new file mode 100644 index 0000000000..b2266e5a57 --- /dev/null +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py @@ -0,0 +1,51 @@ +from openpype.hosts.maya.api import plugin, lib + + +class CreateMultiverseUsd(plugin.Creator): + """Multiverse USD data""" + + name = "usdMain" + label = "Multiverse USD" + family = "usd" + icon = "cubes" + + def __init__(self, *args, **kwargs): + super(CreateMultiverseUsd, self).__init__(*args, **kwargs) + + # Add animation data first, since it maintains order. + self.data.update(lib.collect_animation_data(True)) + + self.data["stripNamespaces"] = False + self.data["mergeTransformAndShape"] = False + self.data["writeAncestors"] = True + self.data["flattenParentXforms"] = False + self.data["writeSparseOverrides"] = False + self.data["useMetaPrimPath"] = False + self.data["customRootPath"] = '' + self.data["customAttributes"] = '' + self.data["nodeTypesToIgnore"] = '' + self.data["writeMeshes"] = True + self.data["writeCurves"] = True + self.data["writeParticles"] = True + self.data["writeCameras"] = False + self.data["writeLights"] = False + self.data["writeJoints"] = False + self.data["writeCollections"] = False + self.data["writePositions"] = True + self.data["writeNormals"] = True + self.data["writeUVs"] = True + self.data["writeColorSets"] = False + self.data["writeTangents"] = False + self.data["writeRefPositions"] = False + self.data["writeBlendShapes"] = False + self.data["writeDisplayColor"] = False + self.data["writeSkinWeights"] = False + self.data["writeMaterialAssignment"] = False + self.data["writeHardwareShader"] = False + self.data["writeShadingNetworks"] = False + self.data["writeTransformMatrix"] = True + self.data["writeUsdAttributes"] = False + self.data["timeVaryingTopology"] = False + self.data["customMaterialNamespace"] = '' + self.data["numTimeSamples"] = 1 + self.data["timeSamplesSpan"] = 0.0 diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py new file mode 100644 index 0000000000..77b808c459 --- /dev/null +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py @@ -0,0 +1,23 @@ +from openpype.hosts.maya.api import plugin, lib + + +class CreateMultiverseUsdComp(plugin.Creator): + """Create Multiverse USD Composition""" + + name = "usdCompositionMain" + label = "Multiverse USD Composition" + family = "usdComposition" + icon = "cubes" + + def __init__(self, *args, **kwargs): + super(CreateMultiverseUsdComp, self).__init__(*args, **kwargs) + + # Add animation data first, since it maintains order.
+ self.data.update(lib.collect_animation_data(True)) + + self.data["stripNamespaces"] = False + self.data["mergeTransformAndShape"] = False + self.data["flattenContent"] = False + self.data["writePendingOverrides"] = False + self.data["numTimeSamples"] = 1 + self.data["timeSamplesSpan"] = 0.0 diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py new file mode 100644 index 0000000000..bb82ab2039 --- /dev/null +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py @@ -0,0 +1,28 @@ +from openpype.hosts.maya.api import plugin, lib + + +class CreateMultiverseUsdOver(plugin.Creator): + """Multiverse USD data""" + + name = "usdOverrideMain" + label = "Multiverse USD Override" + family = "usdOverride" + icon = "cubes" + + def __init__(self, *args, **kwargs): + super(CreateMultiverseUsdOver, self).__init__(*args, **kwargs) + + # Add animation data first, since it maintains order. + self.data.update(lib.collect_animation_data(True)) + + self.data["writeAll"] = False + self.data["writeTransforms"] = True + self.data["writeVisibility"] = True + self.data["writeAttributes"] = True + self.data["writeMaterials"] = True + self.data["writeVariants"] = True + self.data["writeVariantsDefinition"] = True + self.data["writeActiveState"] = True + self.data["writeNamespaces"] = False + self.data["numTimeSamples"] = 1 + self.data["timeSamplesSpan"] = 0.0 diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 9002ae3876..15230519d2 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -4,8 +4,6 @@ import os import json import appdirs import requests -import six -import sys from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup @@ -14,6 +12,7 @@ from openpype.hosts.maya.api import ( lib, plugin ) +from openpype.lib import requests_get from openpype.api import ( get_system_settings, get_project_settings, @@ -117,6 +116,8 @@ class CreateRender(plugin.Creator): except KeyError: self.aov_separator = "_" + manager = ModulesManager() + self.deadline_module = manager.modules_by_name["deadline"] try: default_servers = deadline_settings["deadline_urls"] project_servers = ( @@ -133,10 +134,8 @@ class CreateRender(plugin.Creator): except AttributeError: # Handle situation where we had only one url for deadline.
- manager = ModulesManager() - deadline_module = manager.modules_by_name["deadline"] # get default deadline webservice url from deadline module - self.deadline_servers = deadline_module.deadline_urls + self.deadline_servers = self.deadline_module.deadline_urls def process(self): """Entry point.""" @@ -205,53 +204,37 @@ class CreateRender(plugin.Creator): def _deadline_webservice_changed(self): """Refresh Deadline server dependent options.""" # get selected server - from maya import cmds webservice = self.deadline_servers[ self.server_aliases[ cmds.getAttr("{}.deadlineServers".format(self.instance)) ] ] - pools = self._get_deadline_pools(webservice) + pools = self.deadline_module.get_deadline_pools(webservice, self.log) cmds.deleteAttr("{}.primaryPool".format(self.instance)) cmds.deleteAttr("{}.secondaryPool".format(self.instance)) + + pool_setting = (self._project_settings["deadline"] + ["publish"] + ["CollectDeadlinePools"]) + + primary_pool = pool_setting["primary_pool"] + sorted_pools = self._set_default_pool(list(pools), primary_pool) cmds.addAttr(self.instance, longName="primaryPool", attributeType="enum", - enumName=":".join(pools)) - cmds.addAttr(self.instance, longName="secondaryPool", + enumName=":".join(sorted_pools)) + + pools = ["-"] + pools + secondary_pool = pool_setting["secondary_pool"] + sorted_pools = self._set_default_pool(list(pools), secondary_pool) + cmds.addAttr("{}.secondaryPool".format(self.instance), attributeType="enum", - enumName=":".join(["-"] + pools)) - - def _get_deadline_pools(self, webservice): - # type: (str) -> list - """Get pools from Deadline. - Args: - webservice (str): Server url. - Returns: - list: Pools. - Throws: - RuntimeError: If deadline webservice is unreachable. - - """ - argument = "{}/api/pools?NamesOnly=true".format(webservice) - try: - response = self._requests_get(argument) - except requests.exceptions.ConnectionError as exc: - msg = 'Cannot connect to deadline web service' - self.log.error(msg) - six.reraise( - RuntimeError, - RuntimeError('{} - {}'.format(msg, exc)), - sys.exc_info()[2]) - if not response.ok: - self.log.warning("No pools retrieved") - return [] - - return response.json() + enumName=":".join(sorted_pools)) def _create_render_settings(self): """Create instance settings.""" # get pools pool_names = [] + default_priority = 50 self.server_aliases = list(self.deadline_servers.keys()) self.data["deadlineServers"] = self.server_aliases @@ -260,7 +243,8 @@ class CreateRender(plugin.Creator): self.data["extendFrames"] = False self.data["overrideExistingFrame"] = True # self.data["useLegacyRenderLayers"] = True - self.data["priority"] = 50 + self.data["priority"] = default_priority + self.data["tile_priority"] = default_priority self.data["framesPerTask"] = 1 self.data["whitelist"] = False self.data["machineList"] = "" @@ -293,7 +277,18 @@ class CreateRender(plugin.Creator): # use first one for initial list of pools. 
deadline_url = next(iter(self.deadline_servers.values())) - pool_names = self._get_deadline_pools(deadline_url) + pool_names = self.deadline_module.get_deadline_pools(deadline_url, + self.log) + maya_submit_dl = self._project_settings.get( + "deadline", {}).get( + "publish", {}).get( + "MayaSubmitDeadline", {}) + priority = maya_submit_dl.get("priority", default_priority) + self.data["priority"] = priority + + tile_priority = maya_submit_dl.get("tile_priority", + default_priority) + self.data["tile_priority"] = tile_priority if muster_enabled: self.log.info(">>> Loading Muster credentials ...") @@ -314,12 +309,27 @@ class CreateRender(plugin.Creator): self.log.info(" - pool: {}".format(pool["name"])) pool_names.append(pool["name"]) - self.data["primaryPool"] = pool_names + pool_setting = (self._project_settings["deadline"] + ["publish"] + ["CollectDeadlinePools"]) + primary_pool = pool_setting["primary_pool"] + self.data["primaryPool"] = self._set_default_pool(pool_names, + primary_pool) # We add a string "-" to allow the user to not # set any secondary pools - self.data["secondaryPool"] = ["-"] + pool_names + pool_names = ["-"] + pool_names + secondary_pool = pool_setting["secondary_pool"] + self.data["secondaryPool"] = self._set_default_pool(pool_names, + secondary_pool) self.options = {"useSelection": False} # Force no content + def _set_default_pool(self, pool_names, pool_value): + """Reorder pool names, default should come first""" + if pool_value and pool_value in pool_names: + pool_names.remove(pool_value) + pool_names = [pool_value] + pool_names + return pool_names + def _load_credentials(self): """Load Muster credentials. @@ -354,7 +364,7 @@ class CreateRender(plugin.Creator): """ params = {"authToken": self._token} api_entry = "/api/pools/list" - response = self._requests_get(self.MUSTER_REST_URL + api_entry, + response = requests_get(self.MUSTER_REST_URL + api_entry, params=params) if response.status_code != 200: if response.status_code == 401: @@ -380,45 +390,11 @@ class CreateRender(plugin.Creator): api_url = "{}/muster/show_login".format( os.environ["OPENPYPE_WEBSERVER_URL"]) self.log.debug(api_url) - login_response = self._requests_get(api_url, timeout=1) + login_response = requests_get(api_url, timeout=1) if login_response.status_code != 200: self.log.error("Cannot show login form to Muster") raise Exception("Cannot show login form to Muster") - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if "verify" not in kwargs: - kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """Wrap request get method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. 
- - """ - if "verify" not in kwargs: - kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - return requests.get(*args, **kwargs) - def _set_default_renderer_settings(self, renderer): """Set basic settings based on renderer. diff --git a/openpype/hosts/maya/plugins/create/create_review.py b/openpype/hosts/maya/plugins/create/create_review.py index 14a21d28ca..fbf3399f61 100644 --- a/openpype/hosts/maya/plugins/create/create_review.py +++ b/openpype/hosts/maya/plugins/create/create_review.py @@ -15,6 +15,14 @@ class CreateReview(plugin.Creator): keepImages = False isolate = False imagePlane = True + transparency = [ + "preset", + "simple", + "object sorting", + "weighted average", + "depth peeling", + "alpha cut" + ] def __init__(self, *args, **kwargs): super(CreateReview, self).__init__(*args, **kwargs) @@ -28,5 +36,6 @@ class CreateReview(plugin.Creator): data["isolate"] = self.isolate data["keepImages"] = self.keepImages data["imagePlane"] = self.imagePlane + data["transparency"] = self.transparency self.data = data diff --git a/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py b/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py new file mode 100644 index 0000000000..a6deeeee2e --- /dev/null +++ b/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +"""Creator for Unreal Skeletal Meshes.""" +from openpype.hosts.maya.api import plugin, lib +from avalon.api import Session +from maya import cmds # noqa + + +class CreateUnrealSkeletalMesh(plugin.Creator): + """Unreal Static Meshes with collisions.""" + name = "staticMeshMain" + label = "Unreal - Skeletal Mesh" + family = "skeletalMesh" + icon = "thumbs-up" + dynamic_subset_keys = ["asset"] + + joint_hints = [] + + def __init__(self, *args, **kwargs): + """Constructor.""" + super(CreateUnrealSkeletalMesh, self).__init__(*args, **kwargs) + + @classmethod + def get_dynamic_data( + cls, variant, task_name, asset_id, project_name, host_name + ): + dynamic_data = super(CreateUnrealSkeletalMesh, cls).get_dynamic_data( + variant, task_name, asset_id, project_name, host_name + ) + dynamic_data["asset"] = Session.get("AVALON_ASSET") + return dynamic_data + + def process(self): + self.name = "{}_{}".format(self.family, self.name) + with lib.undo_chunk(): + instance = super(CreateUnrealSkeletalMesh, self).process() + content = cmds.sets(instance, query=True) + + # empty set and process its former content + cmds.sets(content, rm=instance) + geometry_set = cmds.sets(name="geometry_SET", empty=True) + joints_set = cmds.sets(name="joints_SET", empty=True) + + cmds.sets([geometry_set, joints_set], forceElement=instance) + members = cmds.ls(content) or [] + + for node in members: + if node in self.joint_hints: + cmds.sets(node, forceElement=joints_set) + else: + cmds.sets(node, forceElement=geometry_set) diff --git a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py b/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py index 9ad560ab7c..f62d15fe62 100644 --- a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py +++ b/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py @@ -10,7 +10,7 @@ class CreateUnrealStaticMesh(plugin.Creator): """Unreal Static Meshes with collisions.""" name = "staticMeshMain" label = "Unreal - Static Mesh" - family = "unrealStaticMesh" + family = "staticMesh" icon = "cube" dynamic_subset_keys = ["asset"] @@ -28,10 +28,10 @@ class CreateUnrealStaticMesh(plugin.Creator): variant, task_name, 
asset_id, project_name, host_name ) dynamic_data["asset"] = Session.get("AVALON_ASSET") return dynamic_data def process(self): + self.name = "{}_{}".format(self.family, self.name) with lib.undo_chunk(): instance = super(CreateUnrealStaticMesh, self).process() content = cmds.sets(instance, query=True) diff --git a/openpype/hosts/maya/plugins/create/create_vrayscene.py b/openpype/hosts/maya/plugins/create/create_vrayscene.py index fa9c59e016..98dfabbbcb 100644 --- a/openpype/hosts/maya/plugins/create/create_vrayscene.py +++ b/openpype/hosts/maya/plugins/create/create_vrayscene.py @@ -4,8 +4,6 @@ import os import json import appdirs import requests -import six -import sys from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup @@ -19,6 +17,7 @@ from openpype.api import ( get_project_settings ) +from openpype.lib import requests_get from openpype.pipeline import CreatorError from openpype.modules import ModulesManager @@ -40,6 +39,10 @@ class CreateVRayScene(plugin.Creator): self._rs = renderSetup.instance() self.data["exportOnFarm"] = False deadline_settings = get_system_settings()["modules"]["deadline"] + + manager = ModulesManager() + self.deadline_module = manager.modules_by_name["deadline"] + if not deadline_settings["enabled"]: self.deadline_servers = {} return @@ -62,10 +65,8 @@ class CreateVRayScene(plugin.Creator): except AttributeError: # Handle situation where we had only one url for deadline. - manager = ModulesManager() - deadline_module = manager.modules_by_name["deadline"] # get default deadline webservice url from deadline module - self.deadline_servers = deadline_module.deadline_urls + self.deadline_servers = self.deadline_module.deadline_urls def process(self): """Entry point.""" @@ -128,7 +129,7 @@ class CreateVRayScene(plugin.Creator): cmds.getAttr("{}.deadlineServers".format(self.instance)) ] ] - pools = self._get_deadline_pools(webservice) + pools = self.deadline_module.get_deadline_pools(webservice) cmds.deleteAttr("{}.primaryPool".format(self.instance)) cmds.deleteAttr("{}.secondaryPool".format(self.instance)) cmds.addAttr(self.instance, longName="primaryPool", @@ -138,33 +139,6 @@ class CreateVRayScene(plugin.Creator): attributeType="enum", enumName=":".join(["-"] + pools)) - def _get_deadline_pools(self, webservice): - # type: (str) -> list - """Get pools from Deadline. - Args: - webservice (str): Server url. - Returns: - list: Pools. - Throws: - RuntimeError: If deadline webservice is unreachable.
- - """ - argument = "{}/api/pools?NamesOnly=true".format(webservice) - try: - response = self._requests_get(argument) - except requests.exceptions.ConnectionError as exc: - msg = 'Cannot connect to deadline web service' - self.log.error(msg) - six.reraise( - CreatorError, - CreatorError('{} - {}'.format(msg, exc)), - sys.exc_info()[2]) - if not response.ok: - self.log.warning("No pools retrieved") - return [] - - return response.json() - def _create_vray_instance_settings(self): # get pools pools = [] @@ -195,7 +169,7 @@ class CreateVRayScene(plugin.Creator): for k in self.deadline_servers.keys() ][0] - pool_names = self._get_deadline_pools(deadline_url) + pool_names = self.deadline_module.get_deadline_pools(deadline_url) if muster_enabled: self.log.info(">>> Loading Muster credentials ...") @@ -259,8 +233,8 @@ class CreateVRayScene(plugin.Creator): """ params = {"authToken": self._token} api_entry = "/api/pools/list" - response = self._requests_get(self.MUSTER_REST_URL + api_entry, - params=params) + response = requests_get(self.MUSTER_REST_URL + api_entry, + params=params) if response.status_code != 200: if response.status_code == 401: self.log.warning("Authentication token expired.") @@ -285,45 +259,7 @@ class CreateVRayScene(plugin.Creator): api_url = "{}/muster/show_login".format( os.environ["OPENPYPE_WEBSERVER_URL"]) self.log.debug(api_url) - login_response = self._requests_get(api_url, timeout=1) + login_response = requests_get(api_url, timeout=1) if login_response.status_code != 200: self.log.error("Cannot show login form to Muster") raise CreatorError("Cannot show login form to Muster") - - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if "verify" not in kwargs: - kwargs["verify"] = ( - False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True - ) # noqa - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """Wrap request get method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. 
- - """ - if "verify" not in kwargs: - kwargs["verify"] = ( - False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True - ) # noqa - return requests.get(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/inventory/import_modelrender.py b/openpype/hosts/maya/plugins/inventory/import_modelrender.py index d9bb256fac..c2e43f196f 100644 --- a/openpype/hosts/maya/plugins/inventory/import_modelrender.py +++ b/openpype/hosts/maya/plugins/inventory/import_modelrender.py @@ -4,7 +4,6 @@ from bson.objectid import ObjectId from openpype.pipeline import ( InventoryAction, get_representation_context, - get_representation_path_from_context, ) from openpype.hosts.maya.api.lib import ( maintained_selection, @@ -80,10 +79,10 @@ class ImportModelRender(InventoryAction): }) context = get_representation_context(look_repr["_id"]) - maya_file = get_representation_path_from_context(context) + maya_file = self.filepath_from_context(context) context = get_representation_context(json_repr["_id"]) - json_file = get_representation_path_from_context(context) + json_file = self.filepath_from_context(context) # Import the look file with maintained_selection(): diff --git a/openpype/hosts/maya/plugins/load/load_multiverse_usd.py b/openpype/hosts/maya/plugins/load/load_multiverse_usd.py new file mode 100644 index 0000000000..c03f2c5d92 --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_multiverse_usd.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +import maya.cmds as cmds + +from openpype.pipeline import ( + load, + get_representation_path +) +from openpype.hosts.maya.api.lib import ( + maintained_selection, + namespaced, + unique_namespace +) +from openpype.hosts.maya.api.pipeline import containerise + + +class MultiverseUsdLoader(load.LoaderPlugin): + """Load the USD by Multiverse""" + + families = ["model", "usd", "usdComposition", "usdOverride", + "pointcache", "animation"] + representations = ["usd", "usda", "usdc", "usdz", "abc"] + + label = "Read USD by Multiverse" + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, options=None): + + asset = context['asset']['name'] + namespace = namespace or unique_namespace( + asset + "_", + prefix="_" if asset[0].isdigit() else "", + suffix="_", + ) + + # Create the shape + cmds.loadPlugin("MultiverseForMaya", quiet=True) + + shape = None + transform = None + with maintained_selection(): + cmds.namespace(addNamespace=namespace) + with namespaced(namespace, new=False): + import multiverse + shape = multiverse.CreateUsdCompound(self.fname) + transform = cmds.listRelatives( + shape, parent=True, fullPath=True)[0] + + # Lock the shape node so the user cannot delete it. 
+ cmds.lockNode(shape, lock=True) + + nodes = [transform, shape] + self[:] = nodes + + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__) + + def update(self, container, representation): + # type: (dict, dict) -> None + """Update container with specified representation.""" + node = container['objectName'] + assert cmds.objExists(node), "Missing container" + + members = cmds.sets(node, query=True) or [] + shapes = cmds.ls(members, type="mvUsdCompoundShape") + assert shapes, "Cannot find mvUsdCompoundShape in container" + + path = get_representation_path(representation) + + import multiverse + for shape in shapes: + multiverse.SetUsdCompoundAssetPaths(shape, [path]) + + cmds.setAttr("{}.representation".format(node), + str(representation["_id"]), + type="string") + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + # type: (dict) -> None + """Remove loaded container.""" + # Delete container and its contents + if cmds.objExists(container['objectName']): + members = cmds.sets(container['objectName'], query=True) or [] + cmds.delete([container['objectName']] + members) + + # Remove the namespace, if empty + namespace = container['namespace'] + if cmds.namespace(exists=namespace): + members = cmds.namespaceInfo(namespace, listNamespace=True) + if not members: + cmds.namespace(removeNamespace=namespace) + else: + self.log.warning("Namespace not deleted because it " + "still has members: %s", namespace) diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index 04a25f6493..a7222edfd4 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ b/openpype/hosts/maya/plugins/load/load_reference.py @@ -22,7 +22,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): "camera", "rig", "camerarig", - "xgen"] + "xgen", + "staticMesh"] representations = ["ma", "abc", "fbx", "mb"] label = "Reference" diff --git a/openpype/hosts/maya/plugins/publish/clean_nodes.py b/openpype/hosts/maya/plugins/publish/clean_nodes.py deleted file mode 100644 index 03995cdabe..0000000000 --- a/openpype/hosts/maya/plugins/publish/clean_nodes.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -"""Cleanup leftover nodes.""" -from maya import cmds # noqa -import pyblish.api - - -class CleanNodesUp(pyblish.api.InstancePlugin): - """Cleans up the staging directory after a successful publish. - - This will also clean published renders and delete their parent directories. 
- - """ - - order = pyblish.api.IntegratorOrder + 10 - label = "Clean Nodes" - optional = True - active = True - - def process(self, instance): - if not instance.data.get("cleanNodes"): - self.log.info("Nothing to clean.") - return - - nodes_to_clean = instance.data.pop("cleanNodes", []) - self.log.info("Removing {} nodes".format(len(nodes_to_clean))) - for node in nodes_to_clean: - try: - cmds.delete(node) - except ValueError: - # object might be already deleted, don't complain about it - pass diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index a525b562f3..14b9157005 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -194,11 +194,13 @@ class CollectMayaRender(pyblish.api.ContextPlugin): assert render_products, "no render products generated" exp_files = [] multipart = False + render_cameras = [] for product in render_products: if product.multipart: multipart = True product_name = product.productName if product.camera and layer_render_products.has_camera_token(): + render_cameras.append(product.camera) product_name = "{}{}".format( product.camera, "_" + product_name if product_name else "") @@ -208,6 +210,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin): product) }) + assert render_cameras, "No render cameras found." + self.log.info("multipart: {}".format( multipart)) assert exp_files, "no file names were generated, this is bug" @@ -386,6 +390,12 @@ class CollectMayaRender(pyblish.api.ContextPlugin): overrides = self.parse_options(str(render_globals)) data.update(**overrides) + # get string values for pools + primary_pool = overrides["renderGlobals"]["Pool"] + secondary_pool = overrides["renderGlobals"].get("SecondaryPool") + data["primaryPool"] = primary_pool + data["secondaryPool"] = secondary_pool + # Define nice label label = "{0} ({1})".format(expected_layer_name, data["asset"]) label += " [{0}-{1}]".format( diff --git a/openpype/hosts/maya/plugins/publish/collect_unreal_skeletalmesh.py b/openpype/hosts/maya/plugins/publish/collect_unreal_skeletalmesh.py new file mode 100644 index 0000000000..79693bb35e --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_unreal_skeletalmesh.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +from maya import cmds # noqa +import pyblish.api + + +class CollectUnrealSkeletalMesh(pyblish.api.InstancePlugin): + """Collect Unreal Skeletal Mesh.""" + + order = pyblish.api.CollectorOrder + 0.2 + label = "Collect Unreal Skeletal Meshes" + families = ["skeletalMesh"] + + def process(self, instance): + frame = cmds.currentTime(query=True) + instance.data["frameStart"] = frame + instance.data["frameEnd"] = frame + + geo_sets = [ + i for i in instance[:] + if i.lower().startswith("geometry_set") + ] + + joint_sets = [ + i for i in instance[:] + if i.lower().startswith("joints_set") + ] + + instance.data["geometry"] = [] + instance.data["joints"] = [] + + for geo_set in geo_sets: + geo_content = cmds.ls(cmds.sets(geo_set, query=True), long=True) + if geo_content: + instance.data["geometry"] += geo_content + + for join_set in joint_sets: + join_content = cmds.ls(cmds.sets(join_set, query=True), long=True) + if join_content: + instance.data["joints"] += join_content diff --git a/openpype/hosts/maya/plugins/publish/collect_unreal_staticmesh.py b/openpype/hosts/maya/plugins/publish/collect_unreal_staticmesh.py index b1fb0542f2..79d0856fa0 100644 --- 
a/openpype/hosts/maya/plugins/publish/collect_unreal_staticmesh.py +++ b/openpype/hosts/maya/plugins/publish/collect_unreal_staticmesh.py @@ -1,38 +1,36 @@ # -*- coding: utf-8 -*- -from maya import cmds +from maya import cmds # noqa import pyblish.api +from pprint import pformat class CollectUnrealStaticMesh(pyblish.api.InstancePlugin): - """Collect Unreal Static Mesh - - Ensures always only a single frame is extracted (current frame). This - also sets correct FBX options for later extraction. - - """ + """Collect Unreal Static Mesh.""" order = pyblish.api.CollectorOrder + 0.2 label = "Collect Unreal Static Meshes" - families = ["unrealStaticMesh"] + families = ["staticMesh"] def process(self, instance): - # add fbx family to trigger fbx extractor - instance.data["families"].append("fbx") - # take the name from instance (without the `S_` prefix) - instance.data["staticMeshCombinedName"] = instance.name[2:] - - geometry_set = [i for i in instance if i == "geometry_SET"] - instance.data["membersToCombine"] = cmds.sets( + geometry_set = [ + i for i in instance + if i.startswith("geometry_SET") + ] + instance.data["geometryMembers"] = cmds.sets( geometry_set, query=True) - collision_set = [i for i in instance if i == "collisions_SET"] + self.log.info("geometry: {}".format( + pformat(instance.data.get("geometryMembers")))) + + collision_set = [ + i for i in instance + if i.startswith("collisions_SET") + ] instance.data["collisionMembers"] = cmds.sets( collision_set, query=True) - # set fbx overrides on instance - instance.data["smoothingGroups"] = True - instance.data["smoothMesh"] = True - instance.data["triangulate"] = True + self.log.info("collisions: {}".format( + pformat(instance.data.get("collisionMembers")))) frame = cmds.currentTime(query=True) instance.data["frameStart"] = frame diff --git a/openpype/hosts/maya/plugins/publish/extract_fbx.py b/openpype/hosts/maya/plugins/publish/extract_fbx.py index a2adcb3091..fbbe8e06b0 100644 --- a/openpype/hosts/maya/plugins/publish/extract_fbx.py +++ b/openpype/hosts/maya/plugins/publish/extract_fbx.py @@ -5,152 +5,29 @@ from maya import cmds # noqa import maya.mel as mel # noqa import pyblish.api import openpype.api -from openpype.hosts.maya.api.lib import ( - root_parent, - maintained_selection -) +from openpype.hosts.maya.api.lib import maintained_selection + +from openpype.hosts.maya.api import fbx class ExtractFBX(openpype.api.Extractor): """Extract FBX from Maya. - This extracts reproducible FBX exports ignoring any of the settings set - on the local machine in the FBX export options window. - - All export settings are applied with the `FBXExport*` commands prior - to the `FBXExport` call itself. The options can be overridden with their - nice names as seen in the "options" property on this class. - - For more information on FBX exports see: - - https://knowledge.autodesk.com/support/maya/learn-explore/caas - /CloudHelp/cloudhelp/2016/ENU/Maya/files/GUID-6CCE943A-2ED4-4CEE-96D4 - -9CB19C28F4E0-htm.html - - http://forums.cgsociety.org/archive/index.php?t-1032853.html - - https://groups.google.com/forum/#!msg/python_inside_maya/cLkaSo361oE - /LKs9hakE28kJ + This extracts reproducible FBX exports ignoring any of the + settings set on the local machine in the FBX export options window. 
""" - order = pyblish.api.ExtractorOrder label = "Extract FBX" families = ["fbx"] - @property - def options(self): - """Overridable options for FBX Export - - Given in the following format - - {NAME: EXPECTED TYPE} - - If the overridden option's type does not match, - the option is not included and a warning is logged. - - """ - - return { - "cameras": bool, - "smoothingGroups": bool, - "hardEdges": bool, - "tangents": bool, - "smoothMesh": bool, - "instances": bool, - # "referencedContainersContent": bool, # deprecated in Maya 2016+ - "bakeComplexAnimation": int, - "bakeComplexStart": int, - "bakeComplexEnd": int, - "bakeComplexStep": int, - "bakeResampleAnimation": bool, - "animationOnly": bool, - "useSceneName": bool, - "quaternion": str, # "euler" - "shapes": bool, - "skins": bool, - "constraints": bool, - "lights": bool, - "embeddedTextures": bool, - "inputConnections": bool, - "upAxis": str, # x, y or z, - "triangulate": bool - } - - @property - def default_options(self): - """The default options for FBX extraction. - - This includes shapes, skins, constraints, lights and incoming - connections and exports with the Y-axis as up-axis. - - By default this uses the time sliders start and end time. - - """ - - start_frame = int(cmds.playbackOptions(query=True, - animationStartTime=True)) - end_frame = int(cmds.playbackOptions(query=True, - animationEndTime=True)) - - return { - "cameras": False, - "smoothingGroups": False, - "hardEdges": False, - "tangents": False, - "smoothMesh": False, - "instances": False, - "bakeComplexAnimation": True, - "bakeComplexStart": start_frame, - "bakeComplexEnd": end_frame, - "bakeComplexStep": 1, - "bakeResampleAnimation": True, - "animationOnly": False, - "useSceneName": False, - "quaternion": "euler", - "shapes": True, - "skins": True, - "constraints": False, - "lights": True, - "embeddedTextures": True, - "inputConnections": True, - "upAxis": "y", - "triangulate": False - } - - def parse_overrides(self, instance, options): - """Inspect data of instance to determine overridden options - - An instance may supply any of the overridable options - as data, the option is then added to the extraction. 
- - """ - - for key in instance.data: - if key not in self.options: - continue - - # Ensure the data is of correct type - value = instance.data[key] - if not isinstance(value, self.options[key]): - self.log.warning( - "Overridden attribute {key} was of " - "the wrong type: {invalid_type} " - "- should have been {valid_type}".format( - key=key, - invalid_type=type(value).__name__, - valid_type=self.options[key].__name__)) - continue - - options[key] = value - - return options - def process(self, instance): - - # Ensure FBX plug-in is loaded - cmds.loadPlugin("fbxmaya", quiet=True) + fbx_exporter = fbx.FBXExtractor(log=self.log) # Define output path - stagingDir = self.staging_dir(instance) + staging_dir = self.staging_dir(instance) filename = "{0}.fbx".format(instance.name) - path = os.path.join(stagingDir, filename) + path = os.path.join(staging_dir, filename) # The export requires forward slashes because we need # to format it into a string in a mel expression @@ -162,54 +39,13 @@ class ExtractFBX(openpype.api.Extractor): self.log.info("Members: {0}".format(members)) self.log.info("Instance: {0}".format(instance[:])) - # Parse export options - options = self.default_options - options = self.parse_overrides(instance, options) - self.log.info("Export options: {0}".format(options)) - - # Collect the start and end including handles - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - options['bakeComplexStart'] = start - options['bakeComplexEnd'] = end - - # First apply the default export settings to be fully consistent - # each time for successive publishes - mel.eval("FBXResetExport") - - # Apply the FBX overrides through MEL since the commands - # only work correctly in MEL according to online - # available discussions on the topic - _iteritems = getattr(options, "iteritems", options.items) - for option, value in _iteritems(): - key = option[0].upper() + option[1:] # uppercase first letter - - # Boolean must be passed as lower-case strings - # as to MEL standards - if isinstance(value, bool): - value = str(value).lower() - - template = "FBXExport{0} {1}" if key == "UpAxis" else "FBXExport{0} -v {1}" # noqa - cmd = template.format(key, value) - self.log.info(cmd) - mel.eval(cmd) - - # Never show the UI or generate a log - mel.eval("FBXExportShowUI -v false") - mel.eval("FBXExportGenerateLog -v false") + fbx_exporter.set_options_from_instance(instance) # Export - if "unrealStaticMesh" in instance.data["families"]: - with maintained_selection(): - with root_parent(members): - self.log.info("Un-parenting: {}".format(members)) - cmds.select(members, r=1, noExpand=True) - mel.eval('FBXExport -f "{}" -s'.format(path)) - else: - with maintained_selection(): - cmds.select(members, r=1, noExpand=True) - mel.eval('FBXExport -f "{}" -s'.format(path)) + with maintained_selection(): + fbx_exporter.export(members, path) + cmds.select(members, r=1, noExpand=True) + mel.eval('FBXExport -f "{}" -s'.format(path)) if "representations" not in instance.data: instance.data["representations"] = [] @@ -218,7 +54,7 @@ class ExtractFBX(openpype.api.Extractor): 'name': 'fbx', 'ext': 'fbx', 'files': filename, - "stagingDir": stagingDir, + "stagingDir": staging_dir, } instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index a8893072d0..6fcc308f78 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ 
b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -4,6 +4,7 @@ import os import sys import json import tempfile +import platform import contextlib import subprocess from collections import OrderedDict @@ -334,7 +335,14 @@ class ExtractLook(openpype.api.Extractor): transfers = [] hardlinks = [] hashes = {} - force_copy = instance.data.get("forceCopy", False) + # Temporary fix to NOT create hardlinks on windows machines + if platform.system().lower() == "windows": + self.log.info( + "Forcing copy instead of hardlink due to issues on Windows..." + ) + force_copy = True + else: + force_copy = instance.data.get("forceCopy", False) for filepath in files_metadata: diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py new file mode 100644 index 0000000000..4e4efdc32c --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py @@ -0,0 +1,210 @@ +import os +import six + +from maya import cmds + +import openpype.api +from openpype.hosts.maya.api.lib import maintained_selection + + +class ExtractMultiverseUsd(openpype.api.Extractor): + """Extractor for USD by Multiverse.""" + + label = "Extract Multiverse USD" + hosts = ["maya"] + families = ["usd"] + + @property + def options(self): + """Overridable options for Multiverse USD Export + + Given in the following format + - {NAME: EXPECTED TYPE} + + If the overridden option's type does not match, + the option is not included and a warning is logged. + + """ + + return { + "stripNamespaces": bool, + "mergeTransformAndShape": bool, + "writeAncestors": bool, + "flattenParentXforms": bool, + "writeSparseOverrides": bool, + "useMetaPrimPath": bool, + "customRootPath": str, + "customAttributes": str, + "nodeTypesToIgnore": str, + "writeMeshes": bool, + "writeCurves": bool, + "writeParticles": bool, + "writeCameras": bool, + "writeLights": bool, + "writeJoints": bool, + "writeCollections": bool, + "writePositions": bool, + "writeNormals": bool, + "writeUVs": bool, + "writeColorSets": bool, + "writeTangents": bool, + "writeRefPositions": bool, + "writeBlendShapes": bool, + "writeDisplayColor": bool, + "writeSkinWeights": bool, + "writeMaterialAssignment": bool, + "writeHardwareShader": bool, + "writeShadingNetworks": bool, + "writeTransformMatrix": bool, + "writeUsdAttributes": bool, + "timeVaryingTopology": bool, + "customMaterialNamespace": str, + "numTimeSamples": int, + "timeSamplesSpan": float + } + + @property + def default_options(self): + """The default options for Multiverse USD extraction.""" + + return { + "stripNamespaces": False, + "mergeTransformAndShape": False, + "writeAncestors": True, + "flattenParentXforms": False, + "writeSparseOverrides": False, + "useMetaPrimPath": False, + "customRootPath": str(), + "customAttributes": str(), + "nodeTypesToIgnore": str(), + "writeMeshes": True, + "writeCurves": True, + "writeParticles": True, + "writeCameras": False, + "writeLights": False, + "writeJoints": False, + "writeCollections": False, + "writePositions": True, + "writeNormals": True, + "writeUVs": True, + "writeColorSets": False, + "writeTangents": False, + "writeRefPositions": False, + "writeBlendShapes": False, + "writeDisplayColor": False, + "writeSkinWeights": False, + "writeMaterialAssignment": False, + "writeHardwareShader": False, + "writeShadingNetworks": False, + "writeTransformMatrix": True, + "writeUsdAttributes": False, + "timeVaryingTopology": False, + "customMaterialNamespace": str(), + "numTimeSamples": 1, + 
"timeSamplesSpan": 0.0 + } + + def parse_overrides(self, instance, options): + """Inspect data of instance to determine overridden options""" + + for key in instance.data: + if key not in self.options: + continue + + # Ensure the data is of correct type + value = instance.data[key] + if isinstance(value, six.text_type): + value = str(value) + if not isinstance(value, self.options[key]): + self.log.warning( + "Overridden attribute {key} was of " + "the wrong type: {invalid_type} " + "- should have been {valid_type}".format( + key=key, + invalid_type=type(value).__name__, + valid_type=self.options[key].__name__)) + continue + + options[key] = value + + return options + + def process(self, instance): + # Load plugin firstly + cmds.loadPlugin("MultiverseForMaya", quiet=True) + + # Define output file path + staging_dir = self.staging_dir(instance) + file_name = "{}.usd".format(instance.name) + file_path = os.path.join(staging_dir, file_name) + file_path = file_path.replace('\\', '/') + + # Parse export options + options = self.default_options + options = self.parse_overrides(instance, options) + self.log.info("Export options: {0}".format(options)) + + # Perform extraction + self.log.info("Performing extraction ...") + + with maintained_selection(): + members = instance.data("setMembers") + members = cmds.ls(members, + dag=True, + shapes=True, + type=("mesh"), + noIntermediate=True, + long=True) + self.log.info('Collected object {}'.format(members)) + + import multiverse + + time_opts = None + frame_start = instance.data['frameStart'] + frame_end = instance.data['frameEnd'] + handle_start = instance.data['handleStart'] + handle_end = instance.data['handleEnd'] + step = instance.data['step'] + fps = instance.data['fps'] + if frame_end != frame_start: + time_opts = multiverse.TimeOptions() + + time_opts.writeTimeRange = True + time_opts.frameRange = ( + frame_start - handle_start, frame_end + handle_end) + time_opts.frameIncrement = step + time_opts.numTimeSamples = instance.data["numTimeSamples"] + time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"] + time_opts.framePerSecond = fps + + asset_write_opts = multiverse.AssetWriteOptions(time_opts) + options_discard_keys = { + 'numTimeSamples', + 'timeSamplesSpan', + 'frameStart', + 'frameEnd', + 'handleStart', + 'handleEnd', + 'step', + 'fps' + } + for key, value in options.items(): + if key in options_discard_keys: + continue + setattr(asset_write_opts, key, value) + + multiverse.WriteAsset(file_path, members, asset_write_opts) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'usd', + 'ext': 'usd', + 'files': file_name, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation) + + self.log.info("Extracted instance {} to {}".format( + instance.name, file_path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py new file mode 100644 index 0000000000..8fccc412e6 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py @@ -0,0 +1,151 @@ +import os + +from maya import cmds + +import openpype.api +from openpype.hosts.maya.api.lib import maintained_selection + + +class ExtractMultiverseUsdComposition(openpype.api.Extractor): + """Extractor of Multiverse USD Composition.""" + + label = "Extract Multiverse USD Composition" + hosts = ["maya"] + families = ["usdComposition"] + + @property + def options(self): + 
"""Overridable options for Multiverse USD Export + + Given in the following format + - {NAME: EXPECTED TYPE} + + If the overridden option's type does not match, + the option is not included and a warning is logged. + + """ + + return { + "stripNamespaces": bool, + "mergeTransformAndShape": bool, + "flattenContent": bool, + "writePendingOverrides": bool, + "numTimeSamples": int, + "timeSamplesSpan": float + } + + @property + def default_options(self): + """The default options for Multiverse USD extraction.""" + + return { + "stripNamespaces": True, + "mergeTransformAndShape": False, + "flattenContent": False, + "writePendingOverrides": False, + "numTimeSamples": 1, + "timeSamplesSpan": 0.0 + } + + def parse_overrides(self, instance, options): + """Inspect data of instance to determine overridden options""" + + for key in instance.data: + if key not in self.options: + continue + + # Ensure the data is of correct type + value = instance.data[key] + if not isinstance(value, self.options[key]): + self.log.warning( + "Overridden attribute {key} was of " + "the wrong type: {invalid_type} " + "- should have been {valid_type}".format( + key=key, + invalid_type=type(value).__name__, + valid_type=self.options[key].__name__)) + continue + + options[key] = value + + return options + + def process(self, instance): + # Load plugin firstly + cmds.loadPlugin("MultiverseForMaya", quiet=True) + + # Define output file path + staging_dir = self.staging_dir(instance) + file_name = "{}.usd".format(instance.name) + file_path = os.path.join(staging_dir, file_name) + file_path = file_path.replace('\\', '/') + + # Parse export options + options = self.default_options + options = self.parse_overrides(instance, options) + self.log.info("Export options: {0}".format(options)) + + # Perform extraction + self.log.info("Performing extraction ...") + + with maintained_selection(): + members = instance.data("setMembers") + members = cmds.ls(members, + dag=True, + shapes=True, + type="mvUsdCompoundShape", + noIntermediate=True, + long=True) + self.log.info('Collected object {}'.format(members)) + + import multiverse + + time_opts = None + frame_start = instance.data['frameStart'] + frame_end = instance.data['frameEnd'] + handle_start = instance.data['handleStart'] + handle_end = instance.data['handleEnd'] + step = instance.data['step'] + fps = instance.data['fps'] + if frame_end != frame_start: + time_opts = multiverse.TimeOptions() + + time_opts.writeTimeRange = True + time_opts.frameRange = ( + frame_start - handle_start, frame_end + handle_end) + time_opts.frameIncrement = step + time_opts.numTimeSamples = instance.data["numTimeSamples"] + time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"] + time_opts.framePerSecond = fps + + comp_write_opts = multiverse.CompositionWriteOptions() + options_discard_keys = { + 'numTimeSamples', + 'timeSamplesSpan', + 'frameStart', + 'frameEnd', + 'handleStart', + 'handleEnd', + 'step', + 'fps' + } + for key, value in options.items(): + if key in options_discard_keys: + continue + setattr(comp_write_opts, key, value) + + multiverse.WriteComposition(file_path, members, comp_write_opts) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'usd', + 'ext': 'usd', + 'files': file_name, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation) + + self.log.info("Extracted instance {} to {}".format( + instance.name, file_path)) diff --git 
a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py new file mode 100644 index 0000000000..ce0e8a392a --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py @@ -0,0 +1,139 @@ +import os + +import openpype.api +from openpype.hosts.maya.api.lib import maintained_selection + +from maya import cmds + + +class ExtractMultiverseUsdOverride(openpype.api.Extractor): + """Extractor for USD Override by Multiverse.""" + + label = "Extract Multiverse USD Override" + hosts = ["maya"] + families = ["usdOverride"] + + @property + def options(self): + """Overridable options for Multiverse USD Export + + Given in the following format + - {NAME: EXPECTED TYPE} + + If the overridden option's type does not match, + the option is not included and a warning is logged. + + """ + + return { + "writeAll": bool, + "writeTransforms": bool, + "writeVisibility": bool, + "writeAttributes": bool, + "writeMaterials": bool, + "writeVariants": bool, + "writeVariantsDefinition": bool, + "writeActiveState": bool, + "writeNamespaces": bool, + "numTimeSamples": int, + "timeSamplesSpan": float + } + + @property + def default_options(self): + """The default options for Multiverse USD extraction.""" + + return { + "writeAll": False, + "writeTransforms": True, + "writeVisibility": True, + "writeAttributes": True, + "writeMaterials": True, + "writeVariants": True, + "writeVariantsDefinition": True, + "writeActiveState": True, + "writeNamespaces": False, + "numTimeSamples": 1, + "timeSamplesSpan": 0.0 + } + + def process(self, instance): + # Load plugin firstly + cmds.loadPlugin("MultiverseForMaya", quiet=True) + + # Define output file path + staging_dir = self.staging_dir(instance) + file_name = "{}.usda".format(instance.name) + file_path = os.path.join(staging_dir, file_name) + file_path = file_path.replace("\\", "/") + + # Parse export options + options = self.default_options + self.log.info("Export options: {0}".format(options)) + + # Perform extraction + self.log.info("Performing extraction ...") + + with maintained_selection(): + members = instance.data("setMembers") + members = cmds.ls(members, + dag=True, + shapes=True, + type="mvUsdCompoundShape", + noIntermediate=True, + long=True) + self.log.info("Collected object {}".format(members)) + + # TODO: Deal with asset, composition, overide with options. 
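The frame-range handling that follows is repeated verbatim across all three Multiverse extractors in this patch (asset, composition, override). A minimal sketch of how it could be shared, assuming a hypothetical helper name build_time_options; multiverse.TimeOptions and the instance-data keys are taken from the code itself:

import multiverse


def build_time_options(data):
    # Single-frame exports carry no time options at all.
    if data["frameEnd"] == data["frameStart"]:
        return None

    # Cover the full range including handles, as the extractors do.
    time_opts = multiverse.TimeOptions()
    time_opts.writeTimeRange = True
    time_opts.frameRange = (
        data["frameStart"] - data["handleStart"],
        data["frameEnd"] + data["handleEnd"])
    time_opts.frameIncrement = data["step"]
    time_opts.numTimeSamples = data["numTimeSamples"]
    time_opts.timeSamplesSpan = data["timeSamplesSpan"]
    time_opts.framePerSecond = data["fps"]
    return time_opts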
+ import multiverse + + time_opts = None + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + step = instance.data["step"] + fps = instance.data["fps"] + if frame_end != frame_start: + time_opts = multiverse.TimeOptions() + + time_opts.writeTimeRange = True + time_opts.frameRange = ( + frame_start - handle_start, frame_end + handle_end) + time_opts.frameIncrement = step + time_opts.numTimeSamples = instance.data["numTimeSamples"] + time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"] + time_opts.framePerSecond = fps + + over_write_opts = multiverse.OverridesWriteOptions(time_opts) + options_discard_keys = { + "numTimeSamples", + "timeSamplesSpan", + "frameStart", + "frameEnd", + "handleStart", + "handleEnd", + "step", + "fps" + } + for key, value in options.items(): + if key in options_discard_keys: + continue + setattr(over_write_opts, key, value) + + for member in members: + multiverse.WriteOverrides(file_path, member, over_write_opts) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "usd", + "ext": "usd", + "files": file_name, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation) + + self.log.info("Extracted instance {} to {}".format( + instance.name, file_path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py index b233a57453..bb1ecf279d 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -73,6 +73,11 @@ class ExtractPlayblast(openpype.api.Extractor): pm.currentTime(refreshFrameInt - 1, edit=True) pm.currentTime(refreshFrameInt, edit=True) + # Override transparency if requested. + transparency = instance.data.get("transparency", 0) + if transparency != 0: + preset["viewport2_options"]["transparencyAlgorithm"] = transparency + # Isolate view is requested by having objects in the set besides a # camera. if preset.pop("isolate_view", False) and instance.data.get("isolate"): diff --git a/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh.py b/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh.py new file mode 100644 index 0000000000..7ef7f2f181 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +"""Create Unreal Skeletal Mesh data to be extracted as FBX.""" +import os +from contextlib import contextmanager + +from maya import cmds # noqa + +import pyblish.api +import openpype.api +from openpype.hosts.maya.api import fbx + + +@contextmanager +def renamed(original_name, renamed_name): + # type: (str, str) -> None + try: + cmds.rename(original_name, renamed_name) + yield + finally: + cmds.rename(renamed_name, original_name) + + +class ExtractUnrealSkeletalMesh(openpype.api.Extractor): + """Extract Unreal Skeletal Mesh as FBX from Maya. 
""" + + order = pyblish.api.ExtractorOrder - 0.1 + label = "Extract Unreal Skeletal Mesh" + families = ["skeletalMesh"] + + def process(self, instance): + fbx_exporter = fbx.FBXExtractor(log=self.log) + + # Define output path + staging_dir = self.staging_dir(instance) + filename = "{0}.fbx".format(instance.name) + path = os.path.join(staging_dir, filename) + + geo = instance.data.get("geometry") + joints = instance.data.get("joints") + + to_extract = geo + joints + + # The export requires forward slashes because we need + # to format it into a string in a mel expression + path = path.replace('\\', '/') + + self.log.info("Extracting FBX to: {0}".format(path)) + self.log.info("Members: {0}".format(to_extract)) + self.log.info("Instance: {0}".format(instance[:])) + + fbx_exporter.set_options_from_instance(instance) + + # This magic is done for variants. To let Unreal merge correctly + # existing data, top node must have the same name. So for every + # variant we extract we need to rename top node of the rig correctly. + # It is finally done in context manager so it won't affect current + # scene. + + # we rely on hierarchy under one root. + original_parent = to_extract[0].split("|")[1] + + parent_node = instance.data.get("asset") + + renamed_to_extract = [] + for node in to_extract: + node_path = node.split("|") + node_path[1] = parent_node + renamed_to_extract.append("|".join(node_path)) + + with renamed(original_parent, parent_node): + self.log.info("Extracting: {}".format(renamed_to_extract, path)) + fbx_exporter.export(renamed_to_extract, path) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'fbx', + 'ext': 'fbx', + 'files': filename, + "stagingDir": staging_dir, + } + instance.data["representations"].append(representation) + + self.log.info("Extract FBX successful to: {0}".format(path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py b/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py index 32dc9d1d1c..69d51f9ff1 100644 --- a/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py +++ b/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py @@ -1,33 +1,61 @@ # -*- coding: utf-8 -*- """Create Unreal Static Mesh data to be extracted as FBX.""" -import openpype.api -import pyblish.api +import os + from maya import cmds # noqa +import pyblish.api +import openpype.api +from openpype.hosts.maya.api.lib import ( + parent_nodes, + maintained_selection +) +from openpype.hosts.maya.api import fbx + class ExtractUnrealStaticMesh(openpype.api.Extractor): - """Extract FBX from Maya. """ + """Extract Unreal Static Mesh as FBX from Maya. 
""" order = pyblish.api.ExtractorOrder - 0.1 label = "Extract Unreal Static Mesh" - families = ["unrealStaticMesh"] + families = ["staticMesh"] def process(self, instance): - to_combine = instance.data.get("membersToCombine") - static_mesh_name = instance.data.get("staticMeshCombinedName") - self.log.info( - "merging {} into {}".format( - " + ".join(to_combine), static_mesh_name)) - duplicates = cmds.duplicate(to_combine, ic=True) - cmds.polyUnite( - *duplicates, - n=static_mesh_name, ch=False) + members = instance.data.get("geometryMembers", []) + if instance.data.get("collisionMembers"): + members = members + instance.data.get("collisionMembers") - if not instance.data.get("cleanNodes"): - instance.data["cleanNodes"] = [] + fbx_exporter = fbx.FBXExtractor(log=self.log) - instance.data["cleanNodes"].append(static_mesh_name) - instance.data["cleanNodes"] += duplicates + # Define output path + staging_dir = self.staging_dir(instance) + filename = "{0}.fbx".format(instance.name) + path = os.path.join(staging_dir, filename) - instance.data["setMembers"] = [static_mesh_name] - instance.data["setMembers"] += instance.data["collisionMembers"] + # The export requires forward slashes because we need + # to format it into a string in a mel expression + path = path.replace('\\', '/') + + self.log.info("Extracting FBX to: {0}".format(path)) + self.log.info("Members: {0}".format(members)) + self.log.info("Instance: {0}".format(instance[:])) + + fbx_exporter.set_options_from_instance(instance) + + with maintained_selection(): + with parent_nodes(members): + self.log.info("Un-parenting: {}".format(members)) + fbx_exporter.export(members, path) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'fbx', + 'ext': 'fbx', + 'files': filename, + "stagingDir": staging_dir, + } + instance.data["representations"].append(representation) + + self.log.info("Extract FBX successful to: {0}".format(path)) diff --git a/openpype/hosts/maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml b/openpype/hosts/maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml new file mode 100644 index 0000000000..d30c4cb69d --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml @@ -0,0 +1,14 @@ + + + +Skeletal Mesh Top Node +## Skeletal meshes needs common root + +Skeletal meshes and their joints must be under one common root. + +### How to repair? + +Make sure all geometry and joints resides under same root. 
+ + + diff --git a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py index f852904580..255ed96901 100644 --- a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py +++ b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py @@ -4,13 +4,13 @@ import getpass import platform import appdirs -import requests from maya import cmds from avalon import api import pyblish.api +from openpype.lib import requests_post from openpype.hosts.maya.api import lib from openpype.api import get_system_settings @@ -184,7 +184,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "select": "name" } api_entry = '/api/templates/list' - response = self._requests_post( + response = requests_post( self.MUSTER_REST_URL + api_entry, params=params) if response.status_code != 200: self.log.error( @@ -235,7 +235,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): "name": "submit" } api_entry = '/api/queue/actions' - response = self._requests_post( + response = requests_post( self.MUSTER_REST_URL + api_entry, params=params, json=payload) if response.status_code != 200: @@ -549,16 +549,3 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): % (value, int(value)) ) - def _requests_post(self, *args, **kwargs): - """ Wrapper for requests, disabling SSL certificate validation if - DONT_VERIFY_SSL environment variable is found. This is useful when - Deadline or Muster server are running with self-signed certificates - and their certificate is not added to trusted certificates on - client machines. - - WARNING: disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa - return requests.post(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/publish/validate_camera_contents.py b/openpype/hosts/maya/plugins/publish/validate_camera_contents.py index d9e88edaac..20af8d2315 100644 --- a/openpype/hosts/maya/plugins/publish/validate_camera_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_camera_contents.py @@ -40,7 +40,14 @@ class ValidateCameraContents(pyblish.api.InstancePlugin): # list when there are no actual cameras results in # still an empty 'invalid' list if len(cameras) < 1: - raise RuntimeError("No cameras in instance.") + if members: + # If there are members in the instance return all of + # them as 'invalid' so the user can still select invalid + cls.log.error("No cameras found in instance " + "members: {}".format(members)) + return members + + raise RuntimeError("No cameras found in empty instance.") # non-camera shapes valid_shapes = cmds.ls(shapes, type=('camera', 'locator'), long=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py b/openpype/hosts/maya/plugins/publish/validate_muster_connection.py index af32c82f97..6dc7bd3bc4 100644 --- a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py +++ b/openpype/hosts/maya/plugins/publish/validate_muster_connection.py @@ -2,9 +2,9 @@ import os import json import appdirs -import requests import pyblish.api +from openpype.lib import requests_get from openpype.plugin import contextplugin_should_run import openpype.hosts.maya.api.action @@ -51,7 +51,7 @@ class ValidateMusterConnection(pyblish.api.ContextPlugin): 'authToken': self._token } api_entry = '/api/pools/list' - response = self._requests_get( + response = requests_get( MUSTER_REST_URL + 
api_entry, params=params)
        assert response.status_code == 200, "invalid response from server"
        assert response.json()['ResponseData'], "invalid data in response"
@@ -88,35 +88,7 @@ class ValidateMusterConnection(pyblish.api.ContextPlugin):
         api_url = "{}/muster/show_login".format(
             os.environ["OPENPYPE_WEBSERVER_URL"])
         cls.log.debug(api_url)
-        response = cls._requests_get(api_url, timeout=1)
+        response = requests_get(api_url, timeout=1)
         if response.status_code != 200:
             cls.log.error('Cannot show login form to Muster')
             raise Exception('Cannot show login form to Muster')
-
-    def _requests_post(self, *args, **kwargs):
-        """ Wrapper for requests, disabling SSL certificate validation if
-        DONT_VERIFY_SSL environment variable is found. This is useful when
-        Deadline or Muster server are running with self-signed certificates
-        and their certificate is not added to trusted certificates on
-        client machines.
-
-        WARNING: disabling SSL certificate validation is defeating one line
-        of defense SSL is providing and it is not recommended.
-        """
-        if 'verify' not in kwargs:
-            kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True  # noqa
-        return requests.post(*args, **kwargs)
-
-    def _requests_get(self, *args, **kwargs):
-        """ Wrapper for requests, disabling SSL certificate validation if
-        DONT_VERIFY_SSL environment variable is found. This is useful when
-        Deadline or Muster server are running with self-signed certificates
-        and their certificate is not added to trusted certificates on
-        client machines.
-
-        WARNING: disabling SSL certificate validation is defeating one line
-        of defense SSL is providing and it is not recommended.
-        """
-        if 'verify' not in kwargs:
-            kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True  # noqa
-        return requests.get(*args, **kwargs)
diff --git a/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py b/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py
new file mode 100644
index 0000000000..54a86d27cf
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+import pyblish.api
+import openpype.api
+from openpype.pipeline import PublishXmlValidationError
+
+from maya import cmds
+
+
+class ValidateSkeletalMeshHierarchy(pyblish.api.InstancePlugin):
+    """Validate that nodes have a common root."""
+
+    order = openpype.api.ValidateContentsOrder
+    hosts = ["maya"]
+    families = ["skeletalMesh"]
+    label = "Skeletal Mesh Top Node"
+
+    def process(self, instance):
+        geo = instance.data.get("geometry")
+        joints = instance.data.get("joints")
+
+        joints_parents = cmds.ls(joints, long=True)
+        geo_parents = cmds.ls(geo, long=True)
+
+        parents_set = {
+            parent.split("|")[1] for parent in (joints_parents + geo_parents)
+        }
+
+        if len(parents_set) != 1:
+            raise PublishXmlValidationError(
+                self,
+                "Multiple roots on geometry or joints."
+ ) diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py b/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py index b2ef174374..c05121a1b0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py @@ -10,10 +10,11 @@ class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin): order = openpype.api.ValidateMeshOrder hosts = ["maya"] - families = ["unrealStaticMesh"] + families = ["staticMesh"] category = "geometry" label = "Mesh is Triangulated" actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + active = False @classmethod def get_invalid(cls, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py index 901a2ec75e..43f6c85827 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- - +"""Validator for correct naming of Static Meshes.""" from maya import cmds # noqa import pyblish.api import openpype.api @@ -52,8 +52,8 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): optional = True order = openpype.api.ValidateContentsOrder hosts = ["maya"] - families = ["unrealStaticMesh"] - label = "Unreal StaticMesh Name" + families = ["staticMesh"] + label = "Unreal Static Mesh Name" actions = [openpype.hosts.maya.api.action.SelectInvalidAction] regex_mesh = r"(?P.*))" regex_collision = r"(?P.*)" @@ -72,15 +72,13 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): ["collision_prefixes"] ) - combined_geometry_name = instance.data.get( - "staticMeshCombinedName", None) if cls.validate_mesh: # compile regex for testing names regex_mesh = "{}{}".format( ("_" + cls.static_mesh_prefix) or "", cls.regex_mesh ) sm_r = re.compile(regex_mesh) - if not sm_r.match(combined_geometry_name): + if not sm_r.match(instance.data.get("subset")): cls.log.error("Mesh doesn't comply with name validation.") return True @@ -91,7 +89,7 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): cls.log.warning("No collision objects to validate.") return False - regex_collision = "{}{}".format( + regex_collision = "{}{}_(\\d+)".format( "(?P({}))_".format( "|".join("{0}".format(p) for p in collision_prefixes) ) or "", cls.regex_collision @@ -99,6 +97,9 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): cl_r = re.compile(regex_collision) + mesh_name = "{}{}".format(instance.data["asset"], + instance.data.get("variant", [])) + for obj in collision_set: cl_m = cl_r.match(obj) if not cl_m: @@ -107,7 +108,7 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): else: expected_collision = "{}_{}".format( cl_m.group("prefix"), - combined_geometry_name + mesh_name ) if not obj.startswith(expected_collision): @@ -116,11 +117,11 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): "Collision object name doesn't match " "static mesh name" ) - cls.log.error("{}_{} != {}_{}".format( + cls.log.error("{}_{} != {}_{}*".format( cl_m.group("prefix"), cl_m.group("renderName"), cl_m.group("prefix"), - combined_geometry_name, + mesh_name, )) invalid.append(obj) diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py b/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py index 5a8c29c22d..5e1b04889f 
100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py @@ -9,9 +9,10 @@ class ValidateUnrealUpAxis(pyblish.api.ContextPlugin): """Validate if Z is set as up axis in Maya""" optional = True + active = False order = openpype.api.ValidateContentsOrder hosts = ["maya"] - families = ["unrealStaticMesh"] + families = ["staticMesh"] label = "Unreal Up-Axis check" actions = [openpype.api.RepairAction] diff --git a/openpype/hosts/maya/startup/userSetup.py b/openpype/hosts/maya/startup/userSetup.py index b89244817a..a3ab483add 100644 --- a/openpype/hosts/maya/startup/userSetup.py +++ b/openpype/hosts/maya/startup/userSetup.py @@ -1,11 +1,10 @@ import os -import avalon.api from openpype.api import get_project_settings +from openpype.pipeline import install_host from openpype.hosts.maya import api -import openpype.hosts.maya.api.lib as mlib from maya import cmds -avalon.api.install(api) +install_host(api) print("starting OpenPype usersetup") diff --git a/openpype/hosts/nuke/__init__.py b/openpype/hosts/nuke/__init__.py index 60b37ce1dd..134a6621c4 100644 --- a/openpype/hosts/nuke/__init__.py +++ b/openpype/hosts/nuke/__init__.py @@ -10,7 +10,7 @@ def add_implementation_envs(env, _app): ] old_nuke_path = env.get("NUKE_PATH") or "" for path in old_nuke_path.split(os.pathsep): - if not path or not os.path.exists(path): + if not path: continue norm_path = os.path.normpath(path) diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index 3c8ba3e77c..b859454e8f 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -26,6 +26,7 @@ from openpype.tools.utils import host_tools from openpype.lib.path_tools import HostDirmap from openpype.settings import get_project_settings from openpype.modules import ModulesManager +from openpype.pipeline import discover_legacy_creator_plugins from .workio import ( save_file, @@ -1047,17 +1048,36 @@ def add_review_knob(node): def add_deadline_tab(node): node.addKnob(nuke.Tab_Knob("Deadline")) - knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size") - knob.setValue(0) - node.addKnob(knob) - knob = nuke.Int_Knob("deadlinePriority", "Priority") knob.setValue(50) node.addKnob(knob) + knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size") + knob.setValue(0) + node.addKnob(knob) + + knob = nuke.Int_Knob("deadlineConcurrentTasks", "Concurrent tasks") + # zero as default will get value from Settings during collection + # instead of being an explicit user override, see precollect_write.py + knob.setValue(0) + node.addKnob(knob) + + knob = nuke.Text_Knob("divd", '') + knob.setValue('') + node.addKnob(knob) + + knob = nuke.Boolean_Knob("suspend_publish", "Suspend publish") + knob.setValue(False) + node.addKnob(knob) + def get_deadline_knob_names(): - return ["Deadline", "deadlineChunkSize", "deadlinePriority"] + return [ + "Deadline", + "deadlineChunkSize", + "deadlinePriority", + "deadlineConcurrentTasks" + ] def create_backdrop(label="", color=None, layer=0, @@ -1902,7 +1922,7 @@ def recreate_instance(origin_node, avalon_data=None): # create new node # get appropriate plugin class creator_plugin = None - for Creator in api.discover(api.Creator): + for Creator in discover_legacy_creator_plugins(): if Creator.__name__ == data["creator"]: creator_plugin = Creator break diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py index 1d110cb94a..6ee3d2ce05 100644 --- a/openpype/hosts/nuke/api/pipeline.py +++ 
b/openpype/hosts/nuke/api/pipeline.py @@ -5,7 +5,6 @@ from collections import OrderedDict import nuke import pyblish.api -import avalon.api import openpype from openpype.api import ( @@ -15,10 +14,11 @@ from openpype.api import ( ) from openpype.lib import register_event_callback from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, register_inventory_action_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, deregister_inventory_action_path, AVALON_CONTAINER_ID, ) @@ -106,7 +106,7 @@ def install(): log.info("Registering Nuke plug-ins..") pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) register_inventory_action_path(INVENTORY_PATH) # Register Avalon event for workfiles loading. @@ -132,7 +132,7 @@ def uninstall(): pyblish.deregister_host("nuke") pyblish.api.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) deregister_inventory_action_path(INVENTORY_PATH) pyblish.api.deregister_callback( diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index d0bb45a05d..eaf0ab6911 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -1,6 +1,8 @@ import os import random import string +from collections import OrderedDict +from abc import abstractmethod import nuke @@ -450,6 +452,7 @@ class ExporterReviewMov(ExporterReview): def generate_mov(self, farm=False, **kwargs): self.publish_on_farm = farm + read_raw = kwargs["read_raw"] reformat_node_add = kwargs["reformat_node_add"] reformat_node_config = kwargs["reformat_node_config"] bake_viewer_process = kwargs["bake_viewer_process"] @@ -484,6 +487,9 @@ class ExporterReviewMov(ExporterReview): r_node["origlast"].setValue(self.last_frame) r_node["colorspace"].setValue(self.write_colorspace) + if read_raw: + r_node["raw"].setValue(1) + # connect self._temp_nodes[subset].append(r_node) self.previous_node = r_node @@ -590,3 +596,139 @@ class ExporterReviewMov(ExporterReview): nuke.scriptSave() return self.data + + +class AbstractWriteRender(OpenPypeCreator): + """Abstract creator to gather similar implementation for Write creators""" + name = "" + label = "" + hosts = ["nuke"] + n_class = "Write" + family = "render" + icon = "sign-out" + defaults = ["Main", "Mask"] + + def __init__(self, *args, **kwargs): + super(AbstractWriteRender, self).__init__(*args, **kwargs) + + data = OrderedDict() + + data["family"] = self.family + data["families"] = self.n_class + + for k, v in self.data.items(): + if k not in data.keys(): + data.update({k: v}) + + self.data = data + self.nodes = nuke.selectedNodes() + self.log.debug("_ self.data: '{}'".format(self.data)) + + def process(self): + + inputs = [] + outputs = [] + instance = nuke.toNode(self.data["subset"]) + selected_node = None + + # use selection + if (self.options or {}).get("useSelection"): + nodes = self.nodes + + if not (len(nodes) < 2): + msg = ("Select only one node. " + "The node you want to connect to, " + "or tick off `Use selection`") + self.log.error(msg) + nuke.message(msg) + return + + if len(nodes) == 0: + msg = ( + "No nodes selected. 
Please select a single node to connect" + " to or tick off `Use selection`" + ) + self.log.error(msg) + nuke.message(msg) + return + + selected_node = nodes[0] + inputs = [selected_node] + outputs = selected_node.dependent() + + if instance: + if (instance.name() in selected_node.name()): + selected_node = instance.dependencies()[0] + + # if node already exist + if instance: + # collect input / outputs + inputs = instance.dependencies() + outputs = instance.dependent() + selected_node = inputs[0] + # remove old one + nuke.delete(instance) + + # recreate new + write_data = { + "nodeclass": self.n_class, + "families": [self.family], + "avalon": self.data + } + + # add creator data + creator_data = {"creator": self.__class__.__name__} + self.data.update(creator_data) + write_data.update(creator_data) + + if self.presets.get('fpath_template'): + self.log.info("Adding template path from preset") + write_data.update( + {"fpath_template": self.presets["fpath_template"]} + ) + else: + self.log.info("Adding template path from plugin") + write_data.update({ + "fpath_template": + ("{work}/" + self.family + "s/nuke/{subset}" + "/{subset}.{frame}.{ext}")}) + + write_node = self._create_write_node(selected_node, + inputs, outputs, + write_data) + + # relinking to collected connections + for i, input in enumerate(inputs): + write_node.setInput(i, input) + + write_node.autoplace() + + for output in outputs: + output.setInput(0, write_node) + + write_node = self._modify_write_node(write_node) + + return write_node + + @abstractmethod + def _create_write_node(self, selected_node, inputs, outputs, write_data): + """Family dependent implementation of Write node creation + + Args: + selected_node (nuke.Node) + inputs (list of nuke.Node) - input dependencies (what is connected) + outputs (list of nuke.Node) - output dependencies + write_data (dict) - values used to fill Knobs + Returns: + node (nuke.Node): group node with data as Knobs + """ + pass + + @abstractmethod + def _modify_write_node(self, write_node): + """Family dependent modification of created 'write_node' + + Returns: + node (nuke.Node): group node with data as Knobs + """ + pass diff --git a/openpype/hosts/nuke/plugins/__init__.py b/openpype/hosts/nuke/plugins/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/plugins/create/__init__.py b/openpype/hosts/nuke/plugins/create/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/openpype/hosts/nuke/plugins/create/create_write_prerender.py index 761439fdb2..7297f74c13 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py +++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py @@ -1,12 +1,10 @@ -from collections import OrderedDict - import nuke from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api.lib import create_write_node -class CreateWritePrerender(plugin.OpenPypeCreator): +class CreateWritePrerender(plugin.AbstractWriteRender): # change this to template preset name = "WritePrerender" label = "Create Write Prerender" @@ -19,85 +17,7 @@ class CreateWritePrerender(plugin.OpenPypeCreator): def __init__(self, *args, **kwargs): super(CreateWritePrerender, self).__init__(*args, **kwargs) - data = OrderedDict() - - data["family"] = self.family - data["families"] = self.n_class - - for k, v in self.data.items(): - if k not in data.keys(): - data.update({k: v}) - - self.data = data - self.nodes = nuke.selectedNodes() - 
self.log.debug("_ self.data: '{}'".format(self.data)) - - def process(self): - inputs = [] - outputs = [] - instance = nuke.toNode(self.data["subset"]) - selected_node = None - - # use selection - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if not (len(nodes) < 2): - msg = ("Select only one node. The node " - "you want to connect to, " - "or tick off `Use selection`") - self.log.error(msg) - nuke.message(msg) - - if len(nodes) == 0: - msg = ( - "No nodes selected. Please select a single node to connect" - " to or tick off `Use selection`" - ) - self.log.error(msg) - nuke.message(msg) - - selected_node = nodes[0] - inputs = [selected_node] - outputs = selected_node.dependent() - - if instance: - if (instance.name() in selected_node.name()): - selected_node = instance.dependencies()[0] - - # if node already exist - if instance: - # collect input / outputs - inputs = instance.dependencies() - outputs = instance.dependent() - selected_node = inputs[0] - # remove old one - nuke.delete(instance) - - # recreate new - write_data = { - "nodeclass": self.n_class, - "families": [self.family], - "avalon": self.data - } - - # add creator data - creator_data = {"creator": self.__class__.__name__} - self.data.update(creator_data) - write_data.update(creator_data) - - if self.presets.get('fpath_template'): - self.log.info("Adding template path from preset") - write_data.update( - {"fpath_template": self.presets["fpath_template"]} - ) - else: - self.log.info("Adding template path from plugin") - write_data.update({ - "fpath_template": ("{work}/prerenders/nuke/{subset}" - "/{subset}.{frame}.{ext}")}) - - self.log.info("write_data: {}".format(write_data)) + def _create_write_node(self, selected_node, inputs, outputs, write_data): reviewable = self.presets.get("reviewable") write_node = create_write_node( self.data["subset"], @@ -107,15 +27,9 @@ class CreateWritePrerender(plugin.OpenPypeCreator): review=reviewable, linked_knobs=["channels", "___", "first", "last", "use_limit"]) - # relinking to collected connections - for i, input in enumerate(inputs): - write_node.setInput(i, input) - - write_node.autoplace() - - for output in outputs: - output.setInput(0, write_node) + return write_node + def _modify_write_node(self, write_node): # open group node write_node.begin() for n in nuke.allNodes(): diff --git a/openpype/hosts/nuke/plugins/create/create_write_render.py b/openpype/hosts/nuke/plugins/create/create_write_render.py index a9c4b5341e..18a101546f 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_render.py +++ b/openpype/hosts/nuke/plugins/create/create_write_render.py @@ -1,12 +1,10 @@ -from collections import OrderedDict - import nuke from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api.lib import create_write_node -class CreateWriteRender(plugin.OpenPypeCreator): +class CreateWriteRender(plugin.AbstractWriteRender): # change this to template preset name = "WriteRender" label = "Create Write Render" @@ -19,87 +17,7 @@ class CreateWriteRender(plugin.OpenPypeCreator): def __init__(self, *args, **kwargs): super(CreateWriteRender, self).__init__(*args, **kwargs) - data = OrderedDict() - - data["family"] = self.family - data["families"] = self.n_class - - for k, v in self.data.items(): - if k not in data.keys(): - data.update({k: v}) - - self.data = data - self.nodes = nuke.selectedNodes() - self.log.debug("_ self.data: '{}'".format(self.data)) - - def process(self): - - inputs = [] - outputs = [] - instance = nuke.toNode(self.data["subset"]) - 
selected_node = None - - # use selection - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if not (len(nodes) < 2): - msg = ("Select only one node. " - "The node you want to connect to, " - "or tick off `Use selection`") - self.log.error(msg) - nuke.message(msg) - return - - if len(nodes) == 0: - msg = ( - "No nodes selected. Please select a single node to connect" - " to or tick off `Use selection`" - ) - self.log.error(msg) - nuke.message(msg) - return - - selected_node = nodes[0] - inputs = [selected_node] - outputs = selected_node.dependent() - - if instance: - if (instance.name() in selected_node.name()): - selected_node = instance.dependencies()[0] - - # if node already exist - if instance: - # collect input / outputs - inputs = instance.dependencies() - outputs = instance.dependent() - selected_node = inputs[0] - # remove old one - nuke.delete(instance) - - # recreate new - write_data = { - "nodeclass": self.n_class, - "families": [self.family], - "avalon": self.data - } - - # add creator data - creator_data = {"creator": self.__class__.__name__} - self.data.update(creator_data) - write_data.update(creator_data) - - if self.presets.get('fpath_template'): - self.log.info("Adding template path from preset") - write_data.update( - {"fpath_template": self.presets["fpath_template"]} - ) - else: - self.log.info("Adding template path from plugin") - write_data.update({ - "fpath_template": ("{work}/renders/nuke/{subset}" - "/{subset}.{frame}.{ext}")}) - + def _create_write_node(self, selected_node, inputs, outputs, write_data): # add reformat node to cut off all outside of format bounding box # get width and height try: @@ -126,13 +44,7 @@ class CreateWriteRender(plugin.OpenPypeCreator): input=selected_node, prenodes=_prenodes) - # relinking to collected connections - for i, input in enumerate(inputs): - write_node.setInput(i, input) - - write_node.autoplace() - - for output in outputs: - output.setInput(0, write_node) - + return write_node + + def _modify_write_node(self, write_node): return write_node diff --git a/openpype/hosts/nuke/plugins/create/create_write_still.py b/openpype/hosts/nuke/plugins/create/create_write_still.py index 0037b64ce3..d22b5eab3f 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_still.py +++ b/openpype/hosts/nuke/plugins/create/create_write_still.py @@ -1,12 +1,10 @@ -from collections import OrderedDict - import nuke from openpype.hosts.nuke.api import plugin from openpype.hosts.nuke.api.lib import create_write_node -class CreateWriteStill(plugin.OpenPypeCreator): +class CreateWriteStill(plugin.AbstractWriteRender): # change this to template preset name = "WriteStillFrame" label = "Create Write Still Image" @@ -23,77 +21,8 @@ class CreateWriteStill(plugin.OpenPypeCreator): def __init__(self, *args, **kwargs): super(CreateWriteStill, self).__init__(*args, **kwargs) - data = OrderedDict() - - data["family"] = self.family - data["families"] = self.n_class - - for k, v in self.data.items(): - if k not in data.keys(): - data.update({k: v}) - - self.data = data - self.nodes = nuke.selectedNodes() - self.log.debug("_ self.data: '{}'".format(self.data)) - - def process(self): - - inputs = [] - outputs = [] - instance = nuke.toNode(self.data["subset"]) - selected_node = None - - # use selection - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if not (len(nodes) < 2): - msg = ("Select only one node. 
" - "The node you want to connect to, " - "or tick off `Use selection`") - self.log.error(msg) - nuke.message(msg) - return - - if len(nodes) == 0: - msg = ( - "No nodes selected. Please select a single node to connect" - " to or tick off `Use selection`" - ) - self.log.error(msg) - nuke.message(msg) - return - - selected_node = nodes[0] - inputs = [selected_node] - outputs = selected_node.dependent() - - if instance: - if (instance.name() in selected_node.name()): - selected_node = instance.dependencies()[0] - - # if node already exist - if instance: - # collect input / outputs - inputs = instance.dependencies() - outputs = instance.dependent() - selected_node = inputs[0] - # remove old one - nuke.delete(instance) - - # recreate new - write_data = { - "nodeclass": self.n_class, - "families": [self.family], - "avalon": self.data - } - - # add creator data - creator_data = {"creator": self.__class__.__name__} - self.data.update(creator_data) - write_data.update(creator_data) - - self.log.info("Adding template path from plugin") + def _create_write_node(self, selected_node, inputs, outputs, write_data): + # explicitly reset template to 'renders', not same as other 2 writes write_data.update({ "fpath_template": ( "{work}/renders/nuke/{subset}/{subset}.{ext}")}) @@ -118,16 +47,9 @@ class CreateWriteStill(plugin.OpenPypeCreator): farm=False, linked_knobs=["channels", "___", "first", "last", "use_limit"]) - # relinking to collected connections - for i, input in enumerate(inputs): - write_node.setInput(i, input) + return write_node - write_node.autoplace() - - for output in outputs: - output.setInput(0, write_node) - - # link frame hold to group node + def _modify_write_node(self, write_node): write_node.begin() for n in nuke.allNodes(): # get write node diff --git a/openpype/hosts/nuke/plugins/inventory/select_containers.py b/openpype/hosts/nuke/plugins/inventory/select_containers.py index d7d5f00b87..4e7a20fb26 100644 --- a/openpype/hosts/nuke/plugins/inventory/select_containers.py +++ b/openpype/hosts/nuke/plugins/inventory/select_containers.py @@ -1,5 +1,5 @@ from openpype.pipeline import InventoryAction -from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop +from openpype.hosts.nuke.api.command import viewer_update_and_undo_stop class SelectContainers(InventoryAction): diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index 36cec6f4c5..d55dd4cf71 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -14,7 +14,7 @@ from openpype.hosts.nuke.api.lib import ( get_avalon_knob_data, set_avalon_knob_data ) -from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop +from openpype.hosts.nuke.api.command import viewer_update_and_undo_stop from openpype.hosts.nuke.api import containerise, update_container diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py index 68c3952942..56c5acbb0a 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects.py +++ b/openpype/hosts/nuke/plugins/load/load_effects.py @@ -1,6 +1,7 @@ import json from collections import OrderedDict import nuke +import six from avalon import io @@ -72,7 +73,7 @@ class LoadEffects(load.LoaderPlugin): # getting data from json file with unicode conversion with open(file, "r") as f: json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).iteritems()} + for key, value in json.load(f).items()} # 
get correct order of nodes by positions on track and subtrack nodes_order = self.reorder_nodes(json_f) @@ -188,7 +189,7 @@ class LoadEffects(load.LoaderPlugin): # getting data from json file with unicode conversion with open(file, "r") as f: json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).iteritems()} + for key, value in json.load(f).items()} # get correct order of nodes by positions on track and subtrack nodes_order = self.reorder_nodes(json_f) @@ -330,11 +331,11 @@ class LoadEffects(load.LoaderPlugin): if isinstance(input, dict): return {self.byteify(key): self.byteify(value) - for key, value in input.iteritems()} + for key, value in input.items()} elif isinstance(input, list): return [self.byteify(element) for element in input] - elif isinstance(input, unicode): - return input.encode('utf-8') + elif isinstance(input, six.text_type): + return str(input) else: return input diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py index 9c4fd4c2c6..0bc5f5a514 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py @@ -1,6 +1,6 @@ import json from collections import OrderedDict - +import six import nuke from avalon import io @@ -74,7 +74,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin): # getting data from json file with unicode conversion with open(file, "r") as f: json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).iteritems()} + for key, value in json.load(f).items()} # get correct order of nodes by positions on track and subtrack nodes_order = self.reorder_nodes(json_f) @@ -194,7 +194,7 @@ class LoadEffectsInputProcess(load.LoaderPlugin): # getting data from json file with unicode conversion with open(file, "r") as f: json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).iteritems()} + for key, value in json.load(f).items()} # get correct order of nodes by positions on track and subtrack nodes_order = self.reorder_nodes(json_f) @@ -350,11 +350,11 @@ class LoadEffectsInputProcess(load.LoaderPlugin): if isinstance(input, dict): return {self.byteify(key): self.byteify(value) - for key, value in input.iteritems()} + for key, value in input.items()} elif isinstance(input, list): return [self.byteify(element) for element in input] - elif isinstance(input, unicode): - return input.encode('utf-8') + elif isinstance(input, six.text_type): + return str(input) else: return input diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py index 87bebce15b..46134afcf0 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -1,5 +1,5 @@ import nuke - +import six from avalon import io from openpype.pipeline import ( @@ -240,11 +240,11 @@ class LoadGizmoInputProcess(load.LoaderPlugin): if isinstance(input, dict): return {self.byteify(key): self.byteify(value) - for key, value in input.iteritems()} + for key, value in input.items()} elif isinstance(input, list): return [self.byteify(element) for element in input] - elif isinstance(input, unicode): - return input.encode('utf-8') + elif isinstance(input, six.text_type): + return str(input) else: return input diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data.py b/openpype/hosts/nuke/plugins/publish/extract_review_data.py new file mode 100644 index 0000000000..38a8140cff --- /dev/null +++ 
b/openpype/hosts/nuke/plugins/publish/extract_review_data.py @@ -0,0 +1,47 @@ +import os +import pyblish.api +import openpype +from pprint import pformat + + +class ExtractReviewData(openpype.api.Extractor): + """Extracts review tag into available representation + """ + + order = pyblish.api.ExtractorOrder + 0.01 + # order = pyblish.api.CollectorOrder + 0.499 + label = "Extract Review Data" + + families = ["review"] + hosts = ["nuke"] + + def process(self, instance): + fpath = instance.data["path"] + ext = os.path.splitext(fpath)[-1][1:] + + representations = instance.data.get("representations", []) + + # review can be removed since `ProcessSubmittedJobOnFarm` will create + # reviable representation if needed + if ( + "render.farm" in instance.data["families"] + and "review" in instance.data["families"] + ): + instance.data["families"].remove("review") + + # iterate representations and add `review` tag + for repre in representations: + if ext != repre["ext"]: + continue + + if not repre.get("tags"): + repre["tags"] = [] + + if "review" not in repre["tags"]: + repre["tags"].append("review") + + self.log.debug("Matching representation: {}".format( + pformat(repre) + )) + + instance.data["representations"] = representations diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py index 31a8ff18ee..2e8843d2e0 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py @@ -123,6 +123,7 @@ class ExtractReviewDataMov(openpype.api.Extractor): if generated_repres: # assign to representations instance.data["representations"] += generated_repres + instance.data["useSequenceForReview"] = False else: instance.data["families"].remove("review") self.log.info(( diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py index e917a28046..fb52fc18b4 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py +++ b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py @@ -1,6 +1,9 @@ import os import nuke +import copy + import pyblish.api + import openpype from openpype.hosts.nuke.api.lib import maintained_selection @@ -18,6 +21,13 @@ class ExtractSlateFrame(openpype.api.Extractor): families = ["slate"] hosts = ["nuke"] + # Settings values + # - can be extended by other attributes from node in the future + key_value_mapping = { + "f_submission_note": [True, "{comment}"], + "f_submitting_for": [True, "{intent[value]}"], + "f_vfx_scope_of_work": [False, ""] + } def process(self, instance): if hasattr(self, "viewer_lut_raw"): @@ -129,9 +139,7 @@ class ExtractSlateFrame(openpype.api.Extractor): for node in temporary_nodes: nuke.delete(node) - def get_view_process_node(self): - # Select only the target node if nuke.selectedNodes(): [n.setSelected(False) for n in nuke.selectedNodes()] @@ -162,13 +170,56 @@ class ExtractSlateFrame(openpype.api.Extractor): return comment = instance.context.data.get("comment") - intent_value = instance.context.data.get("intent") - if intent_value and isinstance(intent_value, dict): - intent_value = intent_value.get("value") + intent = instance.context.data.get("intent") + if not isinstance(intent, dict): + intent = { + "label": intent, + "value": intent + } - try: - node["f_submission_note"].setValue(comment) - node["f_submitting_for"].setValue(intent_value or "") - except NameError: - return - instance.data.pop("slateNode") + 
fill_data = copy.deepcopy(instance.data["anatomyData"]) + fill_data.update({ + "custom": copy.deepcopy( + instance.data.get("customData") or {} + ), + "comment": comment, + "intent": intent + }) + + for key, value in self.key_value_mapping.items(): + enabled, template = value + if not enabled: + self.log.debug("Key \"{}\" is disabled".format(key)) + continue + + try: + value = template.format(**fill_data) + + except ValueError: + self.log.warning( + "Couldn't fill template \"{}\" with data: {}".format( + template, fill_data + ), + exc_info=True + ) + continue + + except KeyError: + self.log.warning( + ( + "Template contains unknown key." + " Template \"{}\" Data: {}" + ).format(template, fill_data), + exc_info=True + ) + continue + + try: + node[key].setValue(value) + self.log.info("Change key \"{}\" to value \"{}\"".format( + key, value + )) + except NameError: + self.log.warning(( + "Failed to set value \"{}\" on node attribute \"{}\"" + ).format(value)) diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py index 29c706f302..76d402164c 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py @@ -69,6 +69,11 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): instance = context.create_instance(subset) instance.append(node) + suspend_publish = False + if "suspend_publish" in node.knobs(): + suspend_publish = node["suspend_publish"].value() + instance.data["suspend_publish"] = suspend_publish + # get review knob value review = False if "review" in node.knobs(): diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py index 85e98db7ed..4826b2788f 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py @@ -128,13 +128,17 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): } group_node = [x for x in instance if x.Class() == "Group"][0] - deadlineChunkSize = 1 + dl_chunk_size = 1 if "deadlineChunkSize" in group_node.knobs(): - deadlineChunkSize = group_node["deadlineChunkSize"].value() + dl_chunk_size = group_node["deadlineChunkSize"].value() - deadlinePriority = 50 + dl_priority = 50 if "deadlinePriority" in group_node.knobs(): - deadlinePriority = group_node["deadlinePriority"].value() + dl_priority = group_node["deadlinePriority"].value() + + dl_concurrent_tasks = 0 + if "deadlineConcurrentTasks" in group_node.knobs(): + dl_concurrent_tasks = group_node["deadlineConcurrentTasks"].value() instance.data.update({ "versionData": version_data, @@ -144,8 +148,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "label": label, "outputType": output_type, "colorspace": colorspace, - "deadlineChunkSize": deadlineChunkSize, - "deadlinePriority": deadlinePriority + "deadlineChunkSize": dl_chunk_size, + "deadlinePriority": dl_priority, + "deadlineConcurrentTasks": dl_concurrent_tasks }) if self.is_prerender(_families_test): diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py b/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py index 5ee93403d0..907577a97d 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py @@ -25,7 +25,7 @@ class RepairNukeWriteDeadlineTab(pyblish.api.Action): # Remove existing knobs. 
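The knobs() loop below is one of several dict-iteration fixes in this patch: Python 3 removed dict.iteritems(), and the removed unicode type is replaced by six.text_type throughout. A compact sketch of the shared pattern, mirroring the byteify helpers updated in the Nuke load_effects and load_gizmo_ip plugins above:

import six


def byteify(value):
    # Recursively normalize JSON-loaded values to plain str, as the
    # updated load plugins do after dropping iteritems()/unicode.
    if isinstance(value, dict):
        return {byteify(key): byteify(val) for key, val in value.items()}
    if isinstance(value, list):
        return [byteify(element) for element in value]
    if isinstance(value, six.text_type):
        return str(value)
    return value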
knob_names = openpype.hosts.nuke.lib.get_deadline_knob_names() - for name, knob in group_node.knobs().iteritems(): + for name, knob in group_node.knobs().items(): if name in knob_names: group_node.removeKnob(knob) diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py index 08f09f8097..9fb57c1698 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py @@ -1,11 +1,10 @@ -import os import toml import nuke -from avalon import api import pyblish.api import openpype.api +from openpype.pipeline import discover_creator_plugins from openpype.hosts.nuke.api.lib import get_avalon_knob_data @@ -79,7 +78,7 @@ class ValidateWriteLegacy(pyblish.api.InstancePlugin): # get appropriate plugin class creator_plugin = None - for Creator in api.discover(api.Creator): + for Creator in discover_creator_plugins(): if Creator.__name__ != Create_name: continue diff --git a/openpype/hosts/nuke/startup/menu.py b/openpype/hosts/nuke/startup/menu.py index 2cac6d09e7..9ed43b2110 100644 --- a/openpype/hosts/nuke/startup/menu.py +++ b/openpype/hosts/nuke/startup/menu.py @@ -1,7 +1,7 @@ import nuke -import avalon.api from openpype.api import Logger +from openpype.pipeline import install_host from openpype.hosts.nuke import api from openpype.hosts.nuke.api.lib import ( on_script_load, @@ -13,7 +13,7 @@ from openpype.hosts.nuke.api.lib import ( log = Logger.get_logger(__name__) -avalon.api.install(api) +install_host(api) # fix ffmpeg settings on script nuke.addOnScriptLoad(on_script_load) diff --git a/openpype/hosts/photoshop/api/lib.py b/openpype/hosts/photoshop/api/lib.py index 6d2a493a94..2f57d64464 100644 --- a/openpype/hosts/photoshop/api/lib.py +++ b/openpype/hosts/photoshop/api/lib.py @@ -5,9 +5,8 @@ import traceback from Qt import QtWidgets -import avalon.api - from openpype.api import Logger +from openpype.pipeline import install_host from openpype.tools.utils import host_tools from openpype.lib.remote_publish import headless_publish from openpype.lib import env_value_to_bool @@ -24,7 +23,7 @@ def safe_excepthook(*args): def main(*subprocess_args): from openpype.hosts.photoshop import api - avalon.api.install(api) + install_host(api) sys.excepthook = safe_excepthook # coloring in StdOutBroker diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py index c2ad0ac7b0..1f069c2636 100644 --- a/openpype/hosts/photoshop/api/pipeline.py +++ b/openpype/hosts/photoshop/api/pipeline.py @@ -3,16 +3,17 @@ from Qt import QtWidgets from bson.objectid import ObjectId import pyblish.api -import avalon.api from avalon import io from openpype.api import Logger from openpype.lib import register_event_callback from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, + registered_host, ) import openpype.hosts.photoshop @@ -32,7 +33,7 @@ def check_inventory(): if not lib.any_outdated(): return - host = avalon.api.registered_host() + host = registered_host() outdated_containers = [] for container in host.ls(): representation = container['representation'] @@ -75,7 +76,7 @@ def install(): pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) log.info(PUBLISH_PATH) 
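# Migration note: this changeset consistently swaps `avalon.api.install(...)`
# for `openpype.pipeline.install_host(...)` and replaces
# `avalon.api.register_plugin_path(LegacyCreator, path)` with
# `register_creator_plugin_path(path)`. A minimal sketch of the new-style
# bootstrap, as the hosts in this diff use it:
from openpype.pipeline import install_host
from openpype.hosts.photoshop import api as photoshop

# Runs the host's install(): registers the pyblish host and the
# publish/load/create plugin paths shown in the hunks above.
install_host(photoshop)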
pyblish.api.register_callback( @@ -88,7 +89,7 @@ def install(): def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) def ls(): diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index a001b5f171..5078cbb587 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -1,9 +1,9 @@ from Qt import QtWidgets -from openpype.pipeline import create +from openpype.pipeline import LegacyCreator from openpype.hosts.photoshop import api as photoshop -class CreateImage(create.LegacyCreator): +class CreateImage(LegacyCreator): """Image folder for publish.""" name = "imageDefault" diff --git a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py index 5f39121ae1..c25c5a8f2c 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py +++ b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py @@ -2,7 +2,6 @@ import os import qargparse -from openpype.pipeline import get_representation_path_from_context from openpype.hosts.photoshop import api as photoshop from openpype.hosts.photoshop.api import get_unique_layer_name @@ -63,7 +62,7 @@ class ImageFromSequenceLoader(photoshop.PhotoshopLoader): """ files = [] for context in repre_contexts: - fname = get_representation_path_from_context(context) + fname = cls.filepath_from_context(context) _, file_extension = os.path.splitext(fname) for file_name in os.listdir(os.path.dirname(fname)): diff --git a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py new file mode 100644 index 0000000000..5e6e916611 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py @@ -0,0 +1,73 @@ +"""Parses batch context from json and continues in publish process. + +Provides: + context -> Loaded batch file. + - asset + - task (task name) + - taskType + - project_name + - variant + +Code is practically copy of `openype/hosts/webpublish/collect_batch_data` as +webpublisher should be eventually ejected as an addon, eg. mentioned plugin +shouldn't be pushed into general publish plugins. +""" + +import os + +import pyblish.api +from avalon import io +from openpype.lib.plugin_tools import ( + parse_json, + get_batch_asset_task_info +) + + +class CollectBatchData(pyblish.api.ContextPlugin): + """Collect batch data from json stored in 'OPENPYPE_PUBLISH_DATA' env dir. + + The directory must contain 'manifest.json' file where batch data should be + stored. + """ + # must be really early, context values are only in json file + order = pyblish.api.CollectorOrder - 0.495 + label = "Collect batch data" + hosts = ["photoshop"] + targets = ["remotepublish"] + + def process(self, context): + self.log.info("CollectBatchData") + batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") + + assert batch_dir, ( + "Missing `OPENPYPE_PUBLISH_DATA`") + + assert os.path.exists(batch_dir), \ + "Folder {} doesn't exist".format(batch_dir) + + project_name = os.environ.get("AVALON_PROJECT") + if project_name is None: + raise AssertionError( + "Environment `AVALON_PROJECT` was not found." + "Could not set project `root` which may cause issues." 
+ ) + + batch_data = parse_json(os.path.join(batch_dir, "manifest.json")) + + context.data["batchDir"] = batch_dir + context.data["batchData"] = batch_data + + asset_name, task_name, task_type = get_batch_asset_task_info( + batch_data["context"] + ) + + os.environ["AVALON_ASSET"] = asset_name + io.Session["AVALON_ASSET"] = asset_name + os.environ["AVALON_TASK"] = task_name + io.Session["AVALON_TASK"] = task_name + + context.data["asset"] = asset_name + context.data["task"] = task_name + context.data["taskType"] = task_type + context.data["project_name"] = project_name + context.data["variant"] = batch_data["variant"] diff --git a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py index 7d44d55a80..122428eea0 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py @@ -4,7 +4,6 @@ import re import pyblish.api from openpype.lib import prepare_template_data -from openpype.lib.plugin_tools import parse_json, get_batch_asset_task_info from openpype.hosts.photoshop import api as photoshop @@ -46,7 +45,10 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): existing_subset_names = self._get_existing_subset_names(context) - asset_name, task_name, variant = self._parse_batch(batch_dir) + # from CollectBatchData + asset_name = context.data["asset"] + task_name = context.data["task"] + variant = context.data["variant"] stub = photoshop.stub() layers = stub.get_layers() @@ -130,25 +132,6 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): return existing_subset_names - def _parse_batch(self, batch_dir): - """Parses asset_name, task_name, variant from batch manifest.""" - task_data = None - if batch_dir and os.path.exists(batch_dir): - task_data = parse_json(os.path.join(batch_dir, - "manifest.json")) - if not task_data: - raise ValueError( - "Cannot parse batch meta in {} folder".format(batch_dir)) - variant = task_data["variant"] - - asset, task_name, task_type = get_batch_asset_task_info( - task_data["context"]) - - if not task_name: - task_name = task_type - - return asset, task_name, variant - def _create_instance(self, context, layer, family, asset, subset, task_name): instance = context.create_instance(layer.name) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_instances.py index c3e27e9646..9f95441e6f 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_instances.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_instances.py @@ -1,6 +1,9 @@ +from avalon import api import pyblish.api +from openpype.settings import get_project_settings from openpype.hosts.photoshop import api as photoshop +from openpype.lib import prepare_template_data class CollectInstances(pyblish.api.ContextPlugin): @@ -9,6 +12,10 @@ class CollectInstances(pyblish.api.ContextPlugin): This collector takes into account assets that are associated with an LayerSet and marked with a unique identifier; + If no image instances are explicitly created, it looks if there is value + in `flatten_subset_template` (configurable in Settings), in that case it + produces flatten image with all visible layers. 
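# A sketch of how the flatten fallback described above composes its subset
# name, assuming prepare_template_data() expands each fill key into
# lower/Capitalized/UPPER variants (the template value is illustrative, not
# the shipped default):
from openpype.lib import prepare_template_data

flatten_subset_template = "{family}{Task}{Variant}"
fill_pairs = {"variant": "Main", "family": "image", "task": "art"}
fill_data = prepare_template_data(fill_pairs)
subset = flatten_subset_template.format(**fill_data)
# -> "imageArtMain"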
+ Identifier: id (str): "pyblish.avalon.instance" """ @@ -19,13 +26,17 @@ class CollectInstances(pyblish.api.ContextPlugin): families_mapping = { "image": [] } + # configurable in Settings + flatten_subset_template = "" def process(self, context): stub = photoshop.stub() layers = stub.get_layers() layers_meta = stub.get_layers_metadata() instance_names = [] + all_layer_ids = [] for layer in layers: + all_layer_ids.append(layer.id) layer_data = stub.read(layer, layers_meta) # Skip layers without metadata. @@ -59,3 +70,34 @@ class CollectInstances(pyblish.api.ContextPlugin): if len(instance_names) != len(set(instance_names)): self.log.warning("Duplicate instances found. " + "Remove unwanted via SubsetManager") + + if len(instance_names) == 0 and self.flatten_subset_template: + project_name = context.data["projectEntity"]["name"] + variants = get_project_settings(project_name).get( + "photoshop", {}).get( + "create", {}).get( + "CreateImage", {}).get( + "defaults", ['']) + family = "image" + task_name = api.Session["AVALON_TASK"] + asset_name = context.data["assetEntity"]["name"] + + variant = context.data.get("variant") or variants[0] + fill_pairs = { + "variant": variant, + "family": family, + "task": task_name + } + + subset = self.flatten_subset_template.format( + **prepare_template_data(fill_pairs)) + + instance = context.create_instance(subset) + instance.data["family"] = family + instance.data["asset"] = asset_name + instance.data["subset"] = subset + instance.data["ids"] = all_layer_ids + instance.data["families"] = self.families_mapping[family] + instance.data["publish"] = True + + self.log.info("flatten instance: {} ".format(instance.data)) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_review.py b/openpype/hosts/photoshop/plugins/publish/collect_review.py index 5ab48b76da..89432553c5 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_review.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_review.py @@ -2,18 +2,26 @@ import os import pyblish.api +from openpype.lib import get_subset_name_with_asset_doc + class CollectReview(pyblish.api.ContextPlugin): """Gather the active document as review instance.""" label = "Review" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder + 0.1 hosts = ["photoshop"] def process(self, context): family = "review" - task = os.getenv("AVALON_TASK", None) - subset = family + task.capitalize() + subset = get_subset_name_with_asset_doc( + family, + context.data.get("variant", ''), + context.data["anatomyData"]["task"]["name"], + context.data["assetEntity"], + context.data["anatomyData"]["project"]["name"], + host_name=context.data["hostName"] + ) file_path = context.data["currentFile"] base_name = os.path.basename(file_path) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py index db1ede14d5..0dbe2c6609 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py @@ -1,6 +1,8 @@ import os import pyblish.api +from openpype.lib import get_subset_name_with_asset_doc + class CollectWorkfile(pyblish.api.ContextPlugin): """Collect current script for publish.""" @@ -11,8 +13,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin): def process(self, context): family = "workfile" - task = os.getenv("AVALON_TASK", None) - subset = family + task.capitalize() + subset = get_subset_name_with_asset_doc( + family, + "", + 
context.data["anatomyData"]["task"]["name"], + context.data["assetEntity"], + context.data["anatomyData"]["project"]["name"], + host_name=context.data["hostName"] + ) file_path = context.data["currentFile"] staging_dir = os.path.dirname(file_path) diff --git a/openpype/hosts/photoshop/plugins/publish/extract_image.py b/openpype/hosts/photoshop/plugins/publish/extract_image.py index 04ce77ee34..b07d0740c1 100644 --- a/openpype/hosts/photoshop/plugins/publish/extract_image.py +++ b/openpype/hosts/photoshop/plugins/publish/extract_image.py @@ -26,8 +26,10 @@ class ExtractImage(openpype.api.Extractor): with photoshop.maintained_selection(): self.log.info("Extracting %s" % str(list(instance))) with photoshop.maintained_visibility(): + ids = set() layer = instance.data.get("layer") - ids = set([layer.id]) + if layer: + ids.add(layer.id) add_ids = instance.data.pop("ids", None) if add_ids: ids.update(set(add_ids)) diff --git a/openpype/hosts/photoshop/plugins/publish/extract_review.py b/openpype/hosts/photoshop/plugins/publish/extract_review.py index b8f4470c7b..d076610ead 100644 --- a/openpype/hosts/photoshop/plugins/publish/extract_review.py +++ b/openpype/hosts/photoshop/plugins/publish/extract_review.py @@ -155,6 +155,9 @@ class ExtractReview(openpype.api.Extractor): for image_instance in instance.context: if image_instance.data["family"] != "image": continue + if not image_instance.data.get("layer"): + # dummy instance for flatten image + continue layers.append(image_instance.data.get("layer")) return sorted(layers) diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/openpype/hosts/photoshop/plugins/publish/validate_naming.py index b40e44d016..583e9c7a4e 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_naming.py @@ -29,7 +29,8 @@ class ValidateNamingRepair(pyblish.api.Action): stub = photoshop.stub() for instance in instances: self.log.info("validate_naming instance {}".format(instance)) - metadata = stub.read(instance[0]) + layer_item = instance.data["layer"] + metadata = stub.read(layer_item) self.log.info("metadata instance {}".format(metadata)) layer_name = None if metadata.get("uuid"): @@ -43,11 +44,11 @@ class ValidateNamingRepair(pyblish.api.Action): stub.rename_layer(instance.data["uuid"], layer_name) subset_name = re.sub(invalid_chars, replace_char, - instance.data["name"]) + instance.data["subset"]) - instance[0].Name = layer_name or subset_name + layer_item.name = layer_name or subset_name metadata["subset"] = subset_name - stub.imprint(instance[0], metadata) + stub.imprint(layer_item, metadata) return True diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py index e8b017ead5..636c826a11 100644 --- a/openpype/hosts/resolve/api/pipeline.py +++ b/openpype/hosts/resolve/api/pipeline.py @@ -4,14 +4,17 @@ Basic avalon integration import os import contextlib from collections import OrderedDict -from avalon import api as avalon -from avalon import schema + from pyblish import api as pyblish + +from avalon import schema + from openpype.api import Logger from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) from . 
import lib @@ -46,7 +49,7 @@ def install(): log.info("Registering DaVinci Resovle plug-ins..") register_loader_plugin_path(LOAD_PATH) - avalon.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) # register callback for switching publishable pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) @@ -70,7 +73,7 @@ def uninstall(): log.info("Deregistering DaVinci Resovle plug-ins..") deregister_loader_plugin_path(LOAD_PATH) - avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) # register callback for switching publishable pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) diff --git a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py b/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py index ac66916b91..3a16b9c966 100644 --- a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py +++ b/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py @@ -1,13 +1,14 @@ #!/usr/bin/env python import os import sys -import openpype + +from openpype.pipeline import install_host def main(env): import openpype.hosts.resolve as bmdvr # Registers openpype's Global pyblish plugins - openpype.install() + install_host(bmdvr) bmdvr.setup(env) diff --git a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py index b0cef1838a..89ade9238b 100644 --- a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py +++ b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py @@ -1,8 +1,7 @@ import os import sys -import avalon.api as avalon -import openpype +from openpype.pipeline import install_host from openpype.api import Logger log = Logger().get_logger(__name__) @@ -10,13 +9,9 @@ log = Logger().get_logger(__name__) def main(env): import openpype.hosts.resolve as bmdvr - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) - - log.info(f"Avalon registered hosts: {avalon.registered_host()}") + install_host(bmdvr) bmdvr.launch_pype_menu() diff --git a/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py b/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py index 5430ad32df..8433bd9172 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py +++ b/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py @@ -1,9 +1,11 @@ #! 
python3 import os import sys -import avalon.api as avalon -import openpype + import opentimelineio as otio + +from openpype.pipeline import install_host + from openpype.hosts.resolve import TestGUI import openpype.hosts.resolve as bmdvr from openpype.hosts.resolve.otio import davinci_export as otio_export @@ -14,10 +16,8 @@ class ThisTestGUI(TestGUI): def __init__(self): super(ThisTestGUI, self).__init__() - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) def _open_dir_button_pressed(self, event): # selected_path = self.fu.RequestFile(os.path.expanduser("~")) diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py b/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py index afa311e0b8..477955d527 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py @@ -1,8 +1,8 @@ #! python3 import os import sys -import avalon.api as avalon -import openpype + +from openpype.pipeline import install_host from openpype.hosts.resolve import TestGUI import openpype.hosts.resolve as bmdvr import clique @@ -13,10 +13,8 @@ class ThisTestGUI(TestGUI): def __init__(self): super(ThisTestGUI, self).__init__() - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) def _open_dir_button_pressed(self, event): # selected_path = self.fu.RequestFile(os.path.expanduser("~")) diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py index cfdbe890e5..872d620162 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py @@ -1,6 +1,5 @@ #! 
python3 -import avalon.api as avalon -import openpype +from openpype.pipeline import install_host import openpype.hosts.resolve as bmdvr @@ -15,8 +14,7 @@ def file_processing(fpath): if __name__ == "__main__": path = "C:/CODE/__openpype_projects/jtest03dev/shots/sq01/mainsq01sh030/publish/plate/plateMain/v006/jt3d_mainsq01sh030_plateMain_v006.0996.exr" - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) - file_processing(path) \ No newline at end of file + file_processing(path) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py new file mode 100644 index 0000000000..857f3dca20 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py @@ -0,0 +1,13 @@ +import pyblish.api + + +class CollectSAAppName(pyblish.api.ContextPlugin): + """Collect app name and label.""" + + label = "Collect App Name/Label" + order = pyblish.api.CollectorOrder - 0.5 + hosts = ["standalonepublisher"] + + def process(self, context): + context.data["appName"] = "standalone publisher" + context.data["appLabel"] = "Standalone publisher" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py index 6913e0836d..aabccc0328 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -247,7 +247,8 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): self.log.debug("collecting sequence: {}".format(collections)) instance.data["frameStart"] = int(component["frameStart"]) instance.data["frameEnd"] = int(component["frameEnd"]) - instance.data["fps"] = int(component["fps"]) + if component.get("fps"): + instance.data["fps"] = int(component["fps"]) ext = component["ext"] if ext.startswith("."): diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_original_basename.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_original_basename.py new file mode 100644 index 0000000000..b83a924d33 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_original_basename.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +"""Collect original base name for use in templates.""" +from pathlib import Path + +import pyblish.api + + +class CollectOriginalBasename(pyblish.api.InstancePlugin): + """Collect original file base name.""" + + order = pyblish.api.CollectorOrder + 0.498 + label = "Collect Base Name" + hosts = ["standalonepublisher"] + families = ["simpleUnrealTexture"] + + def process(self, instance): + file_name = Path(instance.data["representations"][0]["files"]) + instance.data["originalBasename"] = file_name.stem diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml new file mode 100644 index 0000000000..b65d274fe5 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml @@ -0,0 +1,17 @@ + + + +Invalid texture name + +## Invalid file name + +Submitted file has invalid name: +'{invalid_file}' + +### How to repair? 
+ + Texture file must adhere to naming conventions for Unreal: + T_{asset}_*.ext + + + \ No newline at end of file diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py new file mode 100644 index 0000000000..ef8da9f280 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +"""Validator for correct file naming.""" +import pyblish.api +import openpype.api +import re +from openpype.pipeline import PublishXmlValidationError + + +class ValidateSimpleUnrealTextureNaming(pyblish.api.InstancePlugin): + label = "Validate Unreal Texture Names" + hosts = ["standalonepublisher"] + families = ["simpleUnrealTexture"] + order = openpype.api.ValidateContentsOrder + regex = "^T_{asset}.*" + + def process(self, instance): + file_name = instance.data.get("originalBasename") + self.log.info(file_name) + pattern = self.regex.format(asset=instance.data.get("asset")) + if not re.match(pattern, file_name): + msg = f"Invalid file name {file_name}" + raise PublishXmlValidationError( + self, msg, formatting_data={ + "invalid_file": file_name, + "asset": instance.data.get("asset") + }) diff --git a/openpype/hosts/testhost/plugins/create/auto_creator.py b/openpype/hosts/testhost/plugins/create/auto_creator.py index d5935602a0..4c22eea9dd 100644 --- a/openpype/hosts/testhost/plugins/create/auto_creator.py +++ b/openpype/hosts/testhost/plugins/create/auto_creator.py @@ -30,7 +30,7 @@ class MyAutoCreator(AutoCreator): def update_instances(self, update_list): pipeline.update_instances(update_list) - def create(self, options=None): + def create(self): existing_instance = None for instance in self.create_context.instances: if instance.family == self.family: diff --git a/openpype/hosts/testhost/run_publish.py b/openpype/hosts/testhost/run_publish.py index 44860a30e4..cc80bdc604 100644 --- a/openpype/hosts/testhost/run_publish.py +++ b/openpype/hosts/testhost/run_publish.py @@ -48,8 +48,8 @@ from openpype.tools.publisher.window import PublisherWindow def main(): """Main function for testing purposes.""" - import avalon.api import pyblish.api + from openpype.pipeline import install_host from openpype.modules import ModulesManager from openpype.hosts.testhost import api as testhost @@ -57,7 +57,7 @@ def main(): for plugin_path in manager.collect_plugin_paths()["publish"]: pyblish.api.register_plugin_path(plugin_path) - avalon.api.install(testhost) + install_host(testhost) QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling) app = QtWidgets.QApplication([]) diff --git a/openpype/hosts/traypublisher/api/pipeline.py b/openpype/hosts/traypublisher/api/pipeline.py index a39e5641ae..24175883d9 100644 --- a/openpype/hosts/traypublisher/api/pipeline.py +++ b/openpype/hosts/traypublisher/api/pipeline.py @@ -7,7 +7,7 @@ from avalon import io import avalon.api import pyblish.api -from openpype.pipeline import BaseCreator +from openpype.pipeline import register_creator_plugin_path ROOT_DIR = os.path.dirname(os.path.dirname( os.path.abspath(__file__) @@ -169,7 +169,7 @@ def install(): pyblish.api.register_host("traypublisher") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(BaseCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) def set_project_name(project_name): diff --git 
a/openpype/hosts/traypublisher/plugins/create/create_workfile.py b/openpype/hosts/traypublisher/plugins/create/create_workfile.py index 2db4770bbc..5e0af350f0 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_workfile.py +++ b/openpype/hosts/traypublisher/plugins/create/create_workfile.py @@ -1,8 +1,8 @@ from openpype.hosts.traypublisher.api import pipeline +from openpype.lib import FileDef from openpype.pipeline import ( Creator, - CreatedInstance, - lib + CreatedInstance ) @@ -80,7 +80,7 @@ class WorkfileCreator(Creator): def get_instance_attr_defs(self): output = [ - lib.FileDef( + FileDef( "filepath", folders=False, extensions=self.extensions, diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_app_name.py b/openpype/hosts/traypublisher/plugins/publish/collect_app_name.py new file mode 100644 index 0000000000..e38d10e70f --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_app_name.py @@ -0,0 +1,13 @@ +import pyblish.api + + +class CollectTrayPublisherAppName(pyblish.api.ContextPlugin): + """Collect app name and label.""" + + label = "Collect App Name/Label" + order = pyblish.api.CollectorOrder - 0.5 + hosts = ["traypublisher"] + + def process(self, context): + context.data["appName"] = "tray publisher" + context.data["appLabel"] = "Tray publisher" diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py b/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py index 88339d2aac..7501051669 100644 --- a/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py +++ b/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py @@ -6,7 +6,7 @@ from openpype.pipeline import PublishValidationError class ValidateWorkfilePath(pyblish.api.InstancePlugin): """Validate existence of workfile instance existence.""" - label = "Collect Workfile" + label = "Validate Workfile" order = pyblish.api.ValidatorOrder - 0.49 families = ["workfile"] hosts = ["traypublisher"] @@ -14,11 +14,22 @@ class ValidateWorkfilePath(pyblish.api.InstancePlugin): def process(self, instance): filepath = instance.data["sourceFilepath"] if not filepath: - raise PublishValidationError(( - "Filepath of 'workfile' instance \"{}\" is not set" - ).format(instance.data["name"])) + raise PublishValidationError( + ( + "Filepath of 'workfile' instance \"{}\" is not set" + ).format(instance.data["name"]), + "File not filled", + "## Missing file\nYou are supposed to fill the path." + ) if not os.path.exists(filepath): - raise PublishValidationError(( - "Filepath of 'workfile' instance \"{}\" does not exist: {}" - ).format(instance.data["name"], filepath)) + raise PublishValidationError( + ( + "Filepath of 'workfile' instance \"{}\" does not exist: {}" + ).format(instance.data["name"], filepath), + "File not found", + ( + "## File was not found\nFile \"{}\" was not found." + " Check if the path is still available." 
+ ).format(filepath) + ) diff --git a/openpype/hosts/tvpaint/api/launch_script.py b/openpype/hosts/tvpaint/api/launch_script.py index e66bf61df6..0b25027fc6 100644 --- a/openpype/hosts/tvpaint/api/launch_script.py +++ b/openpype/hosts/tvpaint/api/launch_script.py @@ -8,8 +8,8 @@ import logging from Qt import QtWidgets, QtCore, QtGui -from avalon import api from openpype import style +from openpype.pipeline import install_host from openpype.hosts.tvpaint.api.communication_server import ( CommunicationWrapper ) @@ -31,7 +31,7 @@ def main(launch_args): qt_app = QtWidgets.QApplication([]) # Execute pipeline installation - api.install(tvpaint_host) + install_host(tvpaint_host) # Create Communicator object and trigger launch # - this must be done before anything is processed diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index ec880a1abc..78c10c3dae 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -15,9 +15,10 @@ from openpype.hosts import tvpaint from openpype.api import get_current_project_settings from openpype.lib import register_event_callback from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) @@ -66,11 +67,8 @@ instances=2 def install(): - """Install Maya-specific functionality of avalon-core. + """Install TVPaint-specific functionality.""" - This function is called automatically on calling `api.install(maya)`. - - """ log.info("OpenPype - Installing TVPaint integration") io.install() @@ -82,7 +80,7 @@ def install(): pyblish.api.register_host("tvpaint") pyblish.api.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) registered_callbacks = ( pyblish.api.registered_callbacks().get("instanceToggled") or [] @@ -95,16 +93,16 @@ def install(): def uninstall(): - """Uninstall TVPaint-specific functionality of avalon-core. - - This function is called automatically on calling `api.uninstall()`. + """Uninstall TVPaint-specific functionality. + This function is called automatically on calling `uninstall_host()`. 
""" + log.info("OpenPype - Uninstalling TVPaint integration") pyblish.api.deregister_host("tvpaint") pyblish.api.deregister_plugin_path(PUBLISH_PATH) deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) def containerise( diff --git a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py index 5e4e3965d2..af1a4a9b6b 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py +++ b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py @@ -1,6 +1,6 @@ import collections import qargparse -from avalon.pipeline import get_representation_context +from openpype.pipeline import get_representation_context from openpype.hosts.tvpaint.api import lib, pipeline, plugin diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py index d224cfc390..1ce5449065 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_workfile.py +++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py @@ -1,12 +1,13 @@ import os -from avalon import api, io +from avalon import io from openpype.lib import ( StringTemplate, get_workfile_template_key_from_context, get_workdir_data, get_last_workfile_with_version, ) +from openpype.pipeline import registered_host from openpype.api import Anatomy from openpype.hosts.tvpaint.api import lib, pipeline, plugin @@ -22,7 +23,7 @@ class LoadWorkfile(plugin.Loader): def load(self, context, name, namespace, options): # Load context of current workfile as first thing # - which context and extension has - host = api.registered_host() + host = registered_host() current_file = host.current_file() context = pipeline.get_current_workfile_context() diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py index 9cbfb61550..5e8d13592c 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py @@ -20,21 +20,30 @@ class CollectInstances(pyblish.api.ContextPlugin): json.dumps(workfile_instances, indent=4) )) + filtered_instance_data = [] # Backwards compatibility for workfiles that already have review # instance in metadata. review_instance_exist = False for instance_data in workfile_instances: - if instance_data["family"] == "review": + family = instance_data["family"] + if family == "review": review_instance_exist = True - break + + elif family not in ("renderPass", "renderLayer"): + self.log.info("Unknown family \"{}\". 
Skipping {}".format( + family, json.dumps(instance_data, indent=4) + )) + continue + + filtered_instance_data.append(instance_data) # Fake review instance if review was not found in metadata families if not review_instance_exist: - workfile_instances.append( + filtered_instance_data.append( self._create_review_instance_data(context) ) - for instance_data in workfile_instances: + for instance_data in filtered_instance_data: instance_data["fps"] = context.data["sceneFps"] # Store workfile instance data to instance data @@ -42,8 +51,11 @@ class CollectInstances(pyblish.api.ContextPlugin): # Global instance data modifications # Fill families family = instance_data["family"] + families = [family] + if family != "review": + families.append("review") # Add `review` family for thumbnail integration - instance_data["families"] = [family, "review"] + instance_data["families"] = families # Instance name subset_name = instance_data["subset"] @@ -78,7 +90,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # Project name from workfile context project_name = context.data["workfile_context"]["project"] # Host name from environment variable - host_name = os.environ["AVALON_APP"] + host_name = context.data["hostName"] # Use empty variant value variant = "" task_name = io.Session["AVALON_TASK"] @@ -106,12 +118,6 @@ class CollectInstances(pyblish.api.ContextPlugin): instance = self.create_render_pass_instance( context, instance_data ) - else: - raise AssertionError( - "Instance with unknown family \"{}\": {}".format( - family, instance_data - ) - ) if instance is None: continue diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py new file mode 100644 index 0000000000..0af9a9a400 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py @@ -0,0 +1,110 @@ +import json +import copy +import pyblish.api +from avalon import io + +from openpype.lib import get_subset_name_with_asset_doc + + +class CollectRenderScene(pyblish.api.ContextPlugin): + """Collect instance which renders whole scene in PNG. + + Creates instance with family 'renderScene' which will have all layers + to render which will be composite into one result. The instance is not + collected from scene. + + Scene will be rendered with all visible layers similar way like review is. + + Instance is disabled if there are any created instances of 'renderLayer' + or 'renderPass'. That is because it is expected that this instance is + used as lazy publish of TVPaint file. + + Subset name is created similar way like 'renderLayer' family. It can use + `renderPass` and `renderLayer` keys which can be set using settings and + `variant` is filled using `renderPass` value. 
+ """ + label = "Collect Render Scene" + order = pyblish.api.CollectorOrder - 0.39 + hosts = ["tvpaint"] + + # Value of 'render_pass' in subset name template + render_pass = "beauty" + + # Settings attributes + enabled = False + # Value of 'render_layer' and 'variant' in subset name template + render_layer = "Main" + + def process(self, context): + # Check if there are created instances of renderPass and renderLayer + # - that will define if renderScene instance is enabled after + # collection + any_created_instance = False + for instance in context: + family = instance.data["family"] + if family in ("renderPass", "renderLayer"): + any_created_instance = True + break + + # Global instance data modifications + # Fill families + family = "renderScene" + # Add `review` family for thumbnail integration + families = [family, "review"] + + # Collect asset doc to get asset id + # - not sure if it's good idea to require asset id in + # get_subset_name? + workfile_context = context.data["workfile_context"] + asset_name = workfile_context["asset"] + asset_doc = io.find_one({ + "type": "asset", + "name": asset_name + }) + + # Project name from workfile context + project_name = context.data["workfile_context"]["project"] + # Host name from environment variable + host_name = context.data["hostName"] + # Variant is using render pass name + variant = self.render_layer + dynamic_data = { + "render_layer": self.render_layer, + "render_pass": self.render_pass + } + + task_name = workfile_context["task"] + subset_name = get_subset_name_with_asset_doc( + "render", + variant, + task_name, + asset_doc, + project_name, + host_name, + dynamic_data=dynamic_data + ) + + instance_data = { + "family": family, + "families": families, + "fps": context.data["sceneFps"], + "subset": subset_name, + "name": subset_name, + "label": "{} [{}-{}]".format( + subset_name, + context.data["sceneMarkIn"] + 1, + context.data["sceneMarkOut"] + 1 + ), + "active": not any_created_instance, + "publish": not any_created_instance, + "representations": [], + "layers": copy.deepcopy(context.data["layersData"]), + "asset": asset_name, + "task": task_name + } + + instance = context.create_instance(**instance_data) + + self.log.debug("Created instance: {}\n{}".format( + instance, json.dumps(instance.data, indent=4) + )) diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py b/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py new file mode 100644 index 0000000000..ab5bbc5e2c --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py @@ -0,0 +1,99 @@ +"""Plugin converting png files from ExtractSequence into exrs. + +Requires: + ExtractSequence - source of PNG + ExtractReview - review was already created so we can convert to any exr +""" +import os +import json + +import pyblish.api +from openpype.lib import ( + get_oiio_tools_path, + run_subprocess, +) +from openpype.pipeline import KnownPublishError + + +class ExtractConvertToEXR(pyblish.api.InstancePlugin): + # Offset to get after ExtractSequence plugin. 
+    order = pyblish.api.ExtractorOrder + 0.1
+    label = "Extract Sequence EXR"
+    hosts = ["tvpaint"]
+    families = ["render"]
+
+    enabled = False
+
+    # Replace source PNG files or keep them next to the new EXRs
+    replace_pngs = True
+    # EXR compression
+    exr_compression = "ZIP"
+
+    def process(self, instance):
+        repres = instance.data.get("representations")
+        if not repres:
+            return
+
+        oiio_path = get_oiio_tools_path()
+        # Raise an exception when oiiotool is not available
+        # - this can currently happen on MacOS machines
+        if not os.path.exists(oiio_path):
+            raise KnownPublishError(
+                "OpenImageIO tool is not available on this machine."
+            )
+
+        new_repres = []
+        # Iterate over a copy because the list may be modified in the loop
+        for repre in list(repres):
+            if repre["name"] != "png":
+                continue
+
+            self.log.info(
+                "Processing representation: {}".format(
+                    json.dumps(repre, sort_keys=True, indent=4)
+                )
+            )
+
+            src_filepaths = set()
+            new_filenames = []
+            for src_filename in repre["files"]:
+                dst_filename = os.path.splitext(src_filename)[0] + ".exr"
+                new_filenames.append(dst_filename)
+
+                src_filepath = os.path.join(repre["stagingDir"], src_filename)
+                dst_filepath = os.path.join(repre["stagingDir"], dst_filename)
+
+                src_filepaths.add(src_filepath)
+
+                args = [
+                    oiio_path, src_filepath,
+                    "--compression", self.exr_compression,
+                    # TODO how to define color conversion?
+                    "--colorconvert", "sRGB", "linear",
+                    "-o", dst_filepath
+                ]
+                run_subprocess(args)
+
+            new_repres.append(
+                {
+                    "name": "exr",
+                    "ext": "exr",
+                    "files": new_filenames,
+                    "stagingDir": repre["stagingDir"],
+                    "tags": list(repre["tags"])
+                }
+            )
+
+            if self.replace_pngs:
+                instance.data["representations"].remove(repre)
+
+                for filepath in src_filepaths:
+                    instance.context.data["cleanupFullPaths"].append(filepath)
+
+        instance.data["representations"].extend(new_repres)
+        self.log.info(
+            "Representations: {}".format(
+                json.dumps(
+                    instance.data["representations"], sort_keys=True, indent=4
+                )
+            )
+        )
diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
index 729c545545..d4fd1dff4b 100644
--- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
+++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
@@ -12,14 +12,13 @@ from openpype.hosts.tvpaint.lib import (
     fill_reference_frames,
     composite_rendered_layers,
     rename_filepaths_by_frame_start,
-    composite_images
 )
 
 
 class ExtractSequence(pyblish.api.Extractor):
     label = "Extract Sequence"
     hosts = ["tvpaint"]
-    families = ["review", "renderPass", "renderLayer"]
+    families = ["review", "renderPass", "renderLayer", "renderScene"]
 
     # Modifiable with settings
     review_bg = [255, 255, 255, 255]
@@ -160,7 +159,7 @@ class ExtractSequence(pyblish.api.Extractor):
 
         # Fill tags and new families
         tags = []
-        if family_lowered in ("review", "renderlayer"):
+        if family_lowered in ("review", "renderlayer", "renderscene"):
             tags.append("review")
 
         # Sequence of one frame
@@ -186,7 +185,7 @@ class ExtractSequence(pyblish.api.Extractor):
 
         instance.data["representations"].append(new_repre)
 
-        if family_lowered in ("renderpass", "renderlayer"):
+        if family_lowered in ("renderpass", "renderlayer", "renderscene"):
             # Change family to render
             instance.data["family"] = "render"
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
index 7ea0587b8f..d3a04cc69f 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
@@ -8,7 +8,7
@@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin): label = "Validate Layers Visibility" order = pyblish.api.ValidatorOrder - families = ["review", "renderPass", "renderLayer"] + families = ["review", "renderPass", "renderLayer", "renderScene"] def process(self, instance): layer_names = set() diff --git a/openpype/hosts/tvpaint/worker/init_file.tvpp b/openpype/hosts/tvpaint/worker/init_file.tvpp new file mode 100644 index 0000000000..572d278fdb Binary files /dev/null and b/openpype/hosts/tvpaint/worker/init_file.tvpp differ diff --git a/openpype/hosts/tvpaint/worker/worker.py b/openpype/hosts/tvpaint/worker/worker.py index cfd40bc7ba..9295c8afb4 100644 --- a/openpype/hosts/tvpaint/worker/worker.py +++ b/openpype/hosts/tvpaint/worker/worker.py @@ -1,5 +1,8 @@ +import os import signal import time +import tempfile +import shutil import asyncio from openpype.hosts.tvpaint.api.communication_server import ( @@ -36,8 +39,28 @@ class TVPaintWorkerCommunicator(BaseCommunicator): super()._start_webserver() + def _open_init_file(self): + """Open init TVPaint file. + + File triggers dialog missing path to audio file which must be closed + once and is ignored for rest of running process. + """ + current_dir = os.path.dirname(os.path.abspath(__file__)) + init_filepath = os.path.join(current_dir, "init_file.tvpp") + with tempfile.NamedTemporaryFile( + mode="w", prefix="a_tvp_", suffix=".tvpp" + ) as tmp_file: + tmp_filepath = tmp_file.name.replace("\\", "/") + + shutil.copy(init_filepath, tmp_filepath) + george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(tmp_filepath) + self.execute_george_through_file(george_script) + self.execute_george("tv_projectclose") + os.remove(tmp_filepath) + def _on_client_connect(self, *args, **kwargs): super()._on_client_connect(*args, **kwargs) + self._open_init_file() # Register as "ready to work" worker self._worker_connection.register_as_worker() diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py index 713c588976..f2c264e5a4 100644 --- a/openpype/hosts/unreal/api/pipeline.py +++ b/openpype/hosts/unreal/api/pipeline.py @@ -4,12 +4,12 @@ import logging from typing import List import pyblish.api -from avalon import api from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) from openpype.tools.utils import host_tools @@ -49,7 +49,7 @@ def install(): logger.info("installing OpenPype for Unreal") pyblish.api.register_plugin_path(str(PUBLISH_PATH)) register_loader_plugin_path(str(LOAD_PATH)) - api.register_plugin_path(LegacyCreator, str(CREATE_PATH)) + register_creator_plugin_path(str(CREATE_PATH)) _register_callbacks() _register_events() @@ -58,7 +58,7 @@ def uninstall(): """Uninstall Unreal configuration for Avalon.""" pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) deregister_loader_plugin_path(str(LOAD_PATH)) - api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH)) + deregister_creator_plugin_path(str(CREATE_PATH)) def _register_callbacks(): @@ -75,30 +75,6 @@ def _register_events(): pass -class Creator(LegacyCreator): - hosts = ["unreal"] - asset_types = [] - - def process(self): - nodes = list() - - with unreal.ScopedEditorTransaction("OpenPype Creating Instance"): - if (self.options or {}).get("useSelection"): - self.log.info("setting ...") - print("settings ...") - nodes = unreal.EditorUtilityLibrary.get_selected_assets() - - asset_paths = [a.get_path_name() for a in 
nodes] - self.name = move_assets_to_path( - "/Game", self.name, asset_paths - ) - - instance = create_publish_instance("/Game", self.name) - imprint(instance, self.data) - - return instance - - def ls(): """List all containers. diff --git a/openpype/hosts/unreal/api/plugin.py b/openpype/hosts/unreal/api/plugin.py index b24bab831d..d8d2f2420d 100644 --- a/openpype/hosts/unreal/api/plugin.py +++ b/openpype/hosts/unreal/api/plugin.py @@ -10,6 +10,7 @@ from openpype.pipeline import ( class Creator(LegacyCreator): """This serves as skeleton for future OpenPype specific functionality""" defaults = ['Main'] + maintain_selection = False class Loader(LoaderPlugin, ABC): diff --git a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py b/openpype/hosts/unreal/integration/Content/Python/init_unreal.py index 2ecd301c25..4bb03b07ed 100644 --- a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py +++ b/openpype/hosts/unreal/integration/Content/Python/init_unreal.py @@ -2,13 +2,7 @@ import unreal openpype_detected = True try: - from avalon import api -except ImportError as exc: - api = None - openpype_detected = False - unreal.log_error("Avalon: cannot load Avalon [ {} ]".format(exc)) - -try: + from openpype.pipeline import install_host from openpype.hosts.unreal import api as openpype_host except ImportError as exc: openpype_host = None @@ -16,7 +10,7 @@ except ImportError as exc: unreal.log_error("OpenPype: cannot load OpenPype [ {} ]".format(exc)) if openpype_detected: - api.install(openpype_host) + install_host(openpype_host) @unreal.uclass() diff --git a/openpype/hosts/unreal/plugins/create/create_camera.py b/openpype/hosts/unreal/plugins/create/create_camera.py index c2905fb6dd..2842900834 100644 --- a/openpype/hosts/unreal/plugins/create/create_camera.py +++ b/openpype/hosts/unreal/plugins/create/create_camera.py @@ -2,13 +2,11 @@ import unreal from unreal import EditorAssetLibrary as eal from unreal import EditorLevelLibrary as ell -from openpype.hosts.unreal.api.plugin import Creator -from avalon.unreal import ( - instantiate, -) +from openpype.hosts.unreal.api import plugin +from openpype.hosts.unreal.api.pipeline import instantiate -class CreateCamera(Creator): +class CreateCamera(plugin.Creator): """Layout output for character rigs""" name = "layoutMain" diff --git a/openpype/hosts/unreal/plugins/create/create_layout.py b/openpype/hosts/unreal/plugins/create/create_layout.py index 00e83cf433..751bece167 100644 --- a/openpype/hosts/unreal/plugins/create/create_layout.py +++ b/openpype/hosts/unreal/plugins/create/create_layout.py @@ -1,12 +1,10 @@ # -*- coding: utf-8 -*- from unreal import EditorLevelLibrary as ell -from openpype.hosts.unreal.api.plugin import Creator -from avalon.unreal import ( - instantiate, -) +from openpype.hosts.unreal.api import plugin +from openpype.hosts.unreal.api.pipeline import instantiate -class CreateLayout(Creator): +class CreateLayout(plugin.Creator): """Layout output for character rigs.""" name = "layoutMain" diff --git a/openpype/hosts/unreal/plugins/create/create_look.py b/openpype/hosts/unreal/plugins/create/create_look.py index 59c40d3e74..12f6b70ae6 100644 --- a/openpype/hosts/unreal/plugins/create/create_look.py +++ b/openpype/hosts/unreal/plugins/create/create_look.py @@ -1,11 +1,10 @@ # -*- coding: utf-8 -*- """Create look in Unreal.""" import unreal # noqa -from openpype.hosts.unreal.api.plugin import Creator -from openpype.hosts.unreal.api import pipeline +from openpype.hosts.unreal.api import pipeline, plugin -class 
CreateLook(Creator): +class CreateLook(plugin.Creator): """Shader connections defining shape look.""" name = "unrealLook" diff --git a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py index 700eac7366..601c2fae06 100644 --- a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- """Create Static Meshes as FBX geometry.""" import unreal # noqa -from openpype.hosts.unreal.api.plugin import Creator +from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api.pipeline import ( instantiate, ) -class CreateStaticMeshFBX(Creator): +class CreateStaticMeshFBX(plugin.Creator): """Static FBX geometry.""" name = "unrealStaticMeshMain" diff --git a/openpype/hosts/webpublisher/api/__init__.py b/openpype/hosts/webpublisher/api/__init__.py index dbeb628073..72bbffd099 100644 --- a/openpype/hosts/webpublisher/api/__init__.py +++ b/openpype/hosts/webpublisher/api/__init__.py @@ -1,7 +1,6 @@ import os import logging -from avalon import api as avalon from avalon import io from pyblish import api as pyblish import openpype.hosts.webpublisher diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py index ca14538d7d..c9ba903007 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py @@ -1,7 +1,12 @@ -"""Loads batch context from json and continues in publish process. +"""Parses batch context from json and continues in publish process. Provides: context -> Loaded batch file. + - asset + - task (task name) + - taskType + - project_name + - variant """ import os @@ -24,7 +29,7 @@ class CollectBatchData(pyblish.api.ContextPlugin): # must be really early, context values are only in json file order = pyblish.api.CollectorOrder - 0.495 label = "Collect batch data" - host = ["webpublisher"] + hosts = ["webpublisher"] def process(self, context): batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") @@ -60,6 +65,7 @@ class CollectBatchData(pyblish.api.ContextPlugin): context.data["task"] = task_name context.data["taskType"] = task_type context.data["project_name"] = project_name + context.data["variant"] = batch_data["variant"] self._set_ctx_path(batch_data) diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py index 65cef14703..65db9d7e2e 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py @@ -40,7 +40,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): # must be really early, context values are only in json file order = pyblish.api.CollectorOrder - 0.490 label = "Collect rendered frames" - host = ["webpublisher"] + hosts = ["webpublisher"] targets = ["filespublish"] # from Settings @@ -61,6 +61,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): task_name = context.data["task"] task_type = context.data["taskType"] project_name = context.data["project_name"] + variant = context.data["variant"] for task_dir in task_subfolders: task_data = parse_json(os.path.join(task_dir, "manifest.json")) @@ -76,7 +77,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): extension.replace(".", '')) subset_name = 
get_subset_name_with_asset_doc( - family, task_data["variant"], task_name, asset_doc, + family, variant, task_name, asset_doc, project_name=project_name, host_name="webpublisher" ) version = self._get_last_version(asset_name, subset_name) + 1 @@ -108,15 +109,18 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): instance.data["representations"] = self._get_single_repre( task_dir, task_data["files"], tags ) - file_url = os.path.join(task_dir, task_data["files"][0]) - no_of_frames = self._get_number_of_frames(file_url) - if no_of_frames: + if family != 'workfile': + file_url = os.path.join(task_dir, task_data["files"][0]) try: - frame_end = int(frame_start) + math.ceil(no_of_frames) - instance.data["frameEnd"] = math.ceil(frame_end) - 1 - self.log.debug("frameEnd:: {}".format( - instance.data["frameEnd"])) - except ValueError: + no_of_frames = self._get_number_of_frames(file_url) + if no_of_frames: + frame_end = int(frame_start) + \ + math.ceil(no_of_frames) + frame_end = math.ceil(frame_end) - 1 + instance.data["frameEnd"] = frame_end + self.log.debug("frameEnd:: {}".format( + instance.data["frameEnd"])) + except Exception: self.log.warning("Unable to count frames " "duration {}".format(no_of_frames)) @@ -209,7 +213,6 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): msg = "No family found for combination of " +\ "task_type: {}, is_sequence:{}, extension: {}".format( task_type, is_sequence, extension) - found_family = "render" assert found_family, msg return (found_family, diff --git a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py index cb6ed8481c..a56521891b 100644 --- a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py @@ -8,7 +8,7 @@ from openpype.lib import ( run_subprocess, get_transcode_temp_directory, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, should_convert_for_ffmpeg ) @@ -59,11 +59,9 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): if do_convert: convert_dir = get_transcode_temp_directory() filename = os.path.basename(full_input_path) - convert_for_ffmpeg( - full_input_path, + convert_input_paths_for_ffmpeg( + [full_input_path], convert_dir, - None, - None, self.log ) full_input_path = os.path.join(convert_dir, filename) diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index e8b6d18f4e..29719b63bd 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -105,6 +105,7 @@ from .transcoding import ( get_transcode_temp_directory, should_convert_for_ffmpeg, convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, get_ffprobe_data, get_ffprobe_streams, get_ffmpeg_codec_args, @@ -221,6 +222,12 @@ from .openpype_version import ( is_current_version_higher_than_expected ) + +from .connections import ( + requests_get, + requests_post +) + terminal = Terminal __all__ = [ @@ -270,6 +277,7 @@ __all__ = [ "get_transcode_temp_directory", "should_convert_for_ffmpeg", "convert_for_ffmpeg", + "convert_input_paths_for_ffmpeg", "get_ffprobe_data", "get_ffprobe_streams", "get_ffmpeg_codec_args", @@ -390,4 +398,7 @@ __all__ = [ "is_running_from_build", "is_running_staging", "is_current_version_studio_latest", + + "requests_get", + "requests_post" ] diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index e72585c75a..07b91dda03 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -13,7 +13,8 @@ import six from 
openpype.settings import ( get_system_settings, - get_project_settings + get_project_settings, + get_local_settings ) from openpype.settings.constants import ( METADATA_KEYS, @@ -211,6 +212,7 @@ class ApplicationGroup: data (dict): Group defying data loaded from settings. manager (ApplicationManager): Manager that created the group. """ + def __init__(self, name, data, manager): self.name = name self.manager = manager @@ -374,6 +376,7 @@ class ApplicationManager: will always use these values. Gives ability to create manager using different settings. """ + def __init__(self, system_settings=None): self.log = PypeLogger.get_logger(self.__class__.__name__) @@ -530,13 +533,13 @@ class EnvironmentToolGroup: variants = data.get("variants") or {} label_by_key = variants.pop(M_DYNAMIC_KEY_LABEL, {}) variants_by_name = {} - for variant_name, variant_env in variants.items(): + for variant_name, variant_data in variants.items(): if variant_name in METADATA_KEYS: continue variant_label = label_by_key.get(variant_name) or variant_name tool = EnvironmentTool( - variant_name, variant_label, variant_env, self + variant_name, variant_label, variant_data, self ) variants_by_name[variant_name] = tool self.variants = variants_by_name @@ -560,15 +563,30 @@ class EnvironmentTool: Args: name (str): Name of the tool. - environment (dict): Variant environments. + variant_data (dict): Variant data with environments and + host and app variant filters. group (str): Name of group which wraps tool. """ - def __init__(self, name, label, environment, group): + def __init__(self, name, label, variant_data, group): + # Backwards compatibility 3.9.1 - 3.9.2 + # - 'variant_data' used to contain only environments but now contains + # also host and application variant filters + host_names = variant_data.get("host_names", []) + app_variants = variant_data.get("app_variants", []) + + if "environment" in variant_data: + environment = variant_data["environment"] + else: + environment = variant_data + + self.host_names = host_names + self.app_variants = app_variants self.name = name self.variant_label = label self.label = " ".join((group.label, label)) self.group = group + self._environment = environment self.full_name = "/".join((group.name, name)) @@ -579,6 +597,19 @@ def environment(self): return copy.deepcopy(self._environment) + def is_valid_for_app(self, app): + """Is tool valid for application. + + Args: + app (Application): Application for which environments are prepared.
+ """ + if self.app_variants and app.full_name not in self.app_variants: + return False + + if self.host_names and app.host_name not in self.host_names: + return False + return True + class ApplicationExecutable: """Representation of executable loaded from settings.""" @@ -1242,6 +1273,9 @@ class EnvironmentPrepData(dict): if data.get("env") is None: data["env"] = os.environ.copy() + if "system_settings" not in data: + data["system_settings"] = get_system_settings() + super(EnvironmentPrepData, self).__init__(data) @@ -1319,6 +1353,41 @@ def _merge_env(env, current_env): return result +def _add_python_version_paths(app, env, logger): + """Add vendor packages specific for a Python version.""" + + # Skip adding if host name is not set + if not app.host_name: + return + + # Add Python 2/3 modules + openpype_root = os.getenv("OPENPYPE_REPOS_ROOT") + python_vendor_dir = os.path.join( + openpype_root, + "openpype", + "vendor", + "python" + ) + if app.use_python_2: + pythonpath = os.path.join(python_vendor_dir, "python_2") + else: + pythonpath = os.path.join(python_vendor_dir, "python_3") + + if not os.path.exists(pythonpath): + return + + logger.debug("Adding Python version specific paths to PYTHONPATH") + python_paths = [pythonpath] + + # Load PYTHONPATH from current launch context + python_path = env.get("PYTHONPATH") + if python_path: + python_paths.append(python_path) + + # Set new PYTHONPATH to launch context environments + env["PYTHONPATH"] = os.pathsep.join(python_paths) + + def prepare_app_environments(data, env_group=None, implementation_envs=True): """Modify launch environments based on launched app and context. @@ -1330,6 +1399,27 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): app = data["app"] log = data["log"] + source_env = data["env"].copy() + + _add_python_version_paths(app, source_env, log) + + # Use environments from local settings + filtered_local_envs = {} + system_settings = data["system_settings"] + whitelist_envs = system_settings["general"].get("local_env_white_list") + if whitelist_envs: + local_settings = get_local_settings() + local_envs = local_settings.get("environments") or {} + filtered_local_envs = { + key: value + for key, value in local_envs.items() + if key in whitelist_envs + } + + # Apply local environment variables for already existing values + for key, value in filtered_local_envs.items(): + if key in source_env: + source_env[key] = value # `added_env_keys` has debug purpose added_env_keys = {app.group.name, app.name} @@ -1347,7 +1437,7 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): # Make sure each tool group can be added only once for key in asset_doc["data"].get("tools_env") or []: tool = app.manager.tools.get(key) - if not tool: + if not tool or not tool.is_valid_for_app(app): continue groups_by_name[tool.group.name] = tool.group tool_by_group_name[tool.group.name][tool.name] = tool @@ -1374,10 +1464,19 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): # Choose right platform tool_env = parse_environments(_env_values, env_group) + + # Apply local environment variables + # - must happen between all values because they may be used during + # merge + for key, value in filtered_local_envs.items(): + if key in tool_env: + tool_env[key] = value + # Merge dictionaries env_values = _merge_env(tool_env, env_values) - merged_env = _merge_env(env_values, data["env"]) + merged_env = _merge_env(env_values, source_env) + loaded_env = acre.compute(merged_env, 
cleanup=False) final_env = None @@ -1397,7 +1496,7 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): if final_env is None: final_env = loaded_env - keys_to_remove = set(data["env"].keys()) - set(final_env.keys()) + keys_to_remove = set(source_env.keys()) - set(final_env.keys()) # Update env data["env"].update(final_env) @@ -1544,7 +1643,6 @@ def _prepare_last_workfile(data, workdir): result will be stored. workdir (str): Path to folder where workfiles should be stored. """ - import avalon.api from openpype.pipeline import HOST_WORKFILE_EXTENSIONS log = data["log"] diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index b4e6abb72d..e82dcc558f 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -161,9 +161,10 @@ def is_latest(representation): @with_avalon def any_outdated(): """Return whether the current scene has any outdated content""" + from openpype.pipeline import registered_host checked = set() - host = avalon.api.registered_host() + host = registered_host() for container in host.ls(): representation = container['representation'] if representation in checked: @@ -1604,13 +1605,13 @@ def get_creator_by_name(creator_name, case_sensitive=False): Returns: Creator: Return first matching plugin or `None`. """ - from openpype.pipeline import LegacyCreator + from openpype.pipeline import discover_legacy_creator_plugins # Lower input creator name if is not case sensitive if not case_sensitive: creator_name = creator_name.lower() - for creator_plugin in avalon.api.discover(LegacyCreator): + for creator_plugin in discover_legacy_creator_plugins(): _creator_name = creator_plugin.__name__ # Lower creator plugin name if is not case sensitive @@ -1965,6 +1966,7 @@ def get_last_workfile( data.pop("comment", None) if not data.get("ext"): data["ext"] = extensions[0] + data["ext"] = data["ext"].replace('.', '') filename = StringTemplate.format_strict_template(file_template, data) if full_path: diff --git a/openpype/lib/connections.py b/openpype/lib/connections.py new file mode 100644 index 0000000000..91b745a4c1 --- /dev/null +++ b/openpype/lib/connections.py @@ -0,0 +1,38 @@ +import requests +import os + + +def requests_post(*args, **kwargs): + """Wrap requests.post method. + + Disabling SSL certificate validation if ``OPENPYPE_DONT_VERIFY_SSL`` + environment variable is found. This is useful when Deadline or Muster + servers are running with self-signed certificates and their certificate + is not added to trusted certificates on client machines. + + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. + + """ + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", False) + return requests.post(*args, **kwargs) + + +def requests_get(*args, **kwargs): + """Wrap requests.get method. + + Disabling SSL certificate validation if ``OPENPYPE_DONT_VERIFY_SSL`` + environment variable is found. This is useful when Deadline or Muster + servers are running with self-signed certificates and their certificate + is not added to trusted certificates on client machines. + + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended.
+ + """ + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.get(*args, **kwargs) diff --git a/openpype/lib/profiles_filtering.py b/openpype/lib/profiles_filtering.py index 0bb901aff8..370703a68b 100644 --- a/openpype/lib/profiles_filtering.py +++ b/openpype/lib/profiles_filtering.py @@ -44,12 +44,6 @@ def _profile_exclusion(matching_profiles, logger): Returns: dict: Most matching profile. """ - - logger.info( - "Search for first most matching profile in match order:" - " Host name -> Task name -> Family." - ) - if not matching_profiles: return None @@ -168,6 +162,15 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): _keys_order.append(key) keys_order = tuple(_keys_order) + log_parts = " | ".join([ + "{}: \"{}\"".format(*item) + for item in key_values.items() + ]) + + logger.info( + "Looking for matching profile for: {}".format(log_parts) + ) + matching_profiles = None highest_profile_points = -1 # Each profile get 1 point for each matching filter. Profile with most @@ -205,11 +208,6 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): if profile_points == highest_profile_points: matching_profiles.append((profile, profile_scores)) - log_parts = " | ".join([ - "{}: \"{}\"".format(*item) - for item in key_values.items() - ]) - if not matching_profiles: logger.info( "None of profiles match your setup. {}".format(log_parts) @@ -221,4 +219,9 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): "More than one profile match your setup. {}".format(log_parts) ) - return _profile_exclusion(matching_profiles, logger) + profile = _profile_exclusion(matching_profiles, logger) + if profile: + logger.info( + "Profile selected: {}".format(profile) + ) + return profile diff --git a/openpype/lib/python_module_tools.py b/openpype/lib/python_module_tools.py index f62c848e4a..6fad3b547f 100644 --- a/openpype/lib/python_module_tools.py +++ b/openpype/lib/python_module_tools.py @@ -5,8 +5,9 @@ import importlib import inspect import logging +import six + log = logging.getLogger(__name__) -PY3 = sys.version_info[0] == 3 def import_filepath(filepath, module_name=None): @@ -28,7 +29,7 @@ def import_filepath(filepath, module_name=None): # Prepare module object where content of file will be parsed module = types.ModuleType(module_name) - if PY3: + if six.PY3: # Use loader so module has full specs module_loader = importlib.machinery.SourceFileLoader( module_name, filepath @@ -38,7 +39,7 @@ def import_filepath(filepath, module_name=None): # Execute module code and store content to module with open(filepath) as _stream: # Execute content and store it to module object - exec(_stream.read(), module.__dict__) + six.exec_(_stream.read(), module.__dict__) module.__file__ = filepath return module @@ -129,20 +130,12 @@ def classes_from_module(superclass, module): for name in dir(module): # It could be anything at this point obj = getattr(module, name) - if not inspect.isclass(obj): + if not inspect.isclass(obj) or obj is superclass: continue - # These are subclassed from nothing, not even `object` - if not len(obj.__bases__) > 0: - continue + if issubclass(obj, superclass): + classes.append(obj) - # Use string comparison rather than `issubclass` - # in order to support reloading of this module. 
- bases = recursive_bases_from_class(obj) - if not any(base.__name__ == superclass.__name__ for base in bases): - continue - - classes.append(obj) return classes @@ -228,7 +221,7 @@ def import_module_from_dirpath(dirpath, folder_name, dst_module_name=None): dst_module_name(str): Parent module name under which can be loaded module added. """ - if PY3: + if six.PY3: module = _import_module_from_dirpath_py3( dirpath, folder_name, dst_module_name ) diff --git a/openpype/lib/remote_publish.py b/openpype/lib/remote_publish.py index 9d97671a61..8a42daf4e9 100644 --- a/openpype/lib/remote_publish.py +++ b/openpype/lib/remote_publish.py @@ -1,13 +1,12 @@ import os from datetime import datetime -import sys -from bson.objectid import ObjectId import collections +from bson.objectid import ObjectId + import pyblish.util import pyblish.api -from openpype import uninstall from openpype.lib.mongo import OpenPypeMongoConnection from openpype.lib.plugin_tools import parse_json @@ -81,7 +80,6 @@ def publish(log, close_plugin_name=None): if result["error"]: log.error(error_format.format(**result)) - uninstall() if close_plugin: # close host app explicitly after error context = pyblish.api.Context() close_plugin().process(context) @@ -118,7 +116,6 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): if result["error"]: log.error(error_format.format(**result)) - uninstall() log_lines = [error_format.format(**result)] + log_lines dbcon.update_one( {"_id": _id}, diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index 6bab6a8160..fcec5d4216 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -17,6 +17,9 @@ from .vendor_bin_utils import ( # Max length of string that is supported by ffmpeg MAX_FFMPEG_STRING_LEN = 8196 +# Not allowed symbols in attributes for ffmpeg +NOT_ALLOWED_FFMPEG_CHARS = ("\"", ) + # OIIO known xml tags STRING_TAGS = { "format" @@ -367,14 +370,23 @@ def should_convert_for_ffmpeg(src_filepath): return None for attr_value in input_info["attribs"].values(): - if ( - isinstance(attr_value, str) - and len(attr_value) > MAX_FFMPEG_STRING_LEN - ): + if not isinstance(attr_value, str): + continue + + if len(attr_value) > MAX_FFMPEG_STRING_LEN: return True + + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + return True return False +# Deprecated since 2022-04-20 +# - Reason - Doesn't convert sequences the right way: can't handle gaps, +# reuses the first frame for all frames and changes filenames when +# the input is a sequence. +# - use 'convert_input_paths_for_ffmpeg' instead def convert_for_ffmpeg( first_input_path, output_dir, @@ -402,6 +414,12 @@ if logger is None: logger = logging.getLogger(__name__) + logger.warning(( + "DEPRECATED: 'openpype.lib.transcoding.convert_for_ffmpeg' is" + " a deprecated conversion function for FFmpeg.
Please replace usage" " with 'openpype.lib.transcoding.convert_input_paths_for_ffmpeg'" )) + ext = os.path.splitext(first_input_path)[1].lower() if ext != ".exr": raise ValueError(( @@ -422,7 +440,12 @@ compression = "none" # Prepare subprocess arguments - oiio_cmd = [get_oiio_tools_path()] + oiio_cmd = [ + get_oiio_tools_path(), + + # Don't add any additional attributes + "--nosoftwareattrib", + ] # Add input compression if available if compression: oiio_cmd.extend(["--compression", compression]) @@ -458,28 +481,44 @@ "--frames", "{}-{}".format(input_frame_start, input_frame_end) ]) - ignore_attr_changes_added = False for attr_name, attr_value in input_info["attribs"].items(): if not isinstance(attr_value, str): continue # Remove attributes that have string value longer than allowed length - # for ffmpeg + # for ffmpeg or when they contain unallowed symbols + erase_reason = "Missing reason" + erase_attribute = False if len(attr_value) > MAX_FFMPEG_STRING_LEN: - if not ignore_attr_changes_added: - # Attrite changes won't be added to attributes itself - ignore_attr_changes_added = True - oiio_cmd.append("--sansattrib") + erase_reason = "has too long value ({} chars).".format( + len(attr_value) + ) + erase_attribute = True + if not erase_attribute: + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + erase_attribute = True + erase_reason = ( + "contains unsupported character \"{}\"." + ).format(char) + break + + if erase_attribute: # Set attribute to empty string logger.info(( - "Removed attribute \"{}\" from metadata" - " because has too long value ({} chars)." - ).format(attr_name, len(attr_value))) + "Removed attribute \"{}\" from metadata because it {}" + ).format(attr_name, erase_reason)) oiio_cmd.extend(["--eraseattrib", attr_name]) # Add last argument - path to output - base_file_name = os.path.basename(first_input_path) - output_path = os.path.join(output_dir, base_file_name) + if is_sequence: + ext = os.path.splitext(first_input_path)[1] + base_filename = "tmp.%{:0>2}d{}".format( + len(str(input_frame_end)), ext + ) + else: + base_filename = os.path.basename(first_input_path) + output_path = os.path.join(output_dir, base_filename) oiio_cmd.extend([ "-o", output_path ]) @@ -488,6 +527,130 @@ run_subprocess(oiio_cmd, logger=logger) +def convert_input_paths_for_ffmpeg( + input_paths, + output_dir, + logger=None +): + """Convert source files to a format supported by ffmpeg. + + Currently can convert only EXRs. The input filepaths should be files + of the same type. Information about the input is loaded only from the + first file. + + Filenames of input files are kept, so make sure the output directory + is not the same directory as the input files'. + - This way it can handle gaps and keep input filenames without handling + a frame template + + Args: + input_paths (list): Paths that should be converted. It is expected to + contain a single file or an image sequence of the same type. + output_dir (str): Path to directory where output will be rendered. + Must not be the same as the input's directory. + logger (logging.Logger): Logger used for logging. + + Raises: + ValueError: If input filepath has extension not supported by function. + Currently only the ".exr" extension is supported.
+ """ + if logger is None: + logger = logging.getLogger(__name__) + + first_input_path = input_paths[0] + ext = os.path.splitext(first_input_path)[1].lower() + if ext != ".exr": + raise ValueError(( + "Function 'convert_for_ffmpeg' currently support only" + " \".exr\" extension. Got \"{}\"." + ).format(ext)) + + input_info = get_oiio_info_for_input(first_input_path) + + # Change compression only if source compression is "dwaa" or "dwab" + # - they're not supported in ffmpeg + compression = input_info["attribs"].get("compression") + if compression in ("dwaa", "dwab"): + compression = "none" + + # Collect channels to export + channel_names = input_info["channelnames"] + review_channels = get_convert_rgb_channels(channel_names) + if review_channels is None: + raise ValueError( + "Couldn't find channels that can be used for conversion." + ) + + red, green, blue, alpha = review_channels + input_channels = [red, green, blue] + channels_arg = "R={},G={},B={}".format(red, green, blue) + if alpha is not None: + channels_arg += ",A={}".format(alpha) + input_channels.append(alpha) + input_channels_str = ",".join(input_channels) + + for input_path in input_paths: + # Prepare subprocess arguments + oiio_cmd = [ + get_oiio_tools_path(), + + # Don't add any additional attributes + "--nosoftwareattrib", + ] + # Add input compression if available + if compression: + oiio_cmd.extend(["--compression", compression]) + + oiio_cmd.extend([ + # Tell oiiotool which channels should be loaded + # - other channels are not loaded to memory so helps to + # avoid memory leak issues + "-i:ch={}".format(input_channels_str), input_path, + # Tell oiiotool which channels should be put to top stack + # (and output) + "--ch", channels_arg + ]) + + for attr_name, attr_value in input_info["attribs"].items(): + if not isinstance(attr_value, str): + continue + + # Remove attributes that have string value longer than allowed + # length for ffmpeg or when containt unallowed symbols + erase_reason = "Missing reason" + erase_attribute = False + if len(attr_value) > MAX_FFMPEG_STRING_LEN: + erase_reason = "has too long value ({} chars).".format( + len(attr_value) + ) + + if erase_attribute: + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + erase_attribute = True + erase_reason = ( + "contains unsupported character \"{}\"." + ).format(char) + break + + if erase_attribute: + # Set attribute to empty string + logger.info(( + "Removed attribute \"{}\" from metadata because {}." + ).format(attr_name, erase_reason)) + oiio_cmd.extend(["--eraseattrib", attr_name]) + + # Add last argument - path to output + base_filename = os.path.basename(input_path) + output_path = os.path.join(output_dir, base_filename) + oiio_cmd.extend([ + "-o", output_path + ]) + + logger.debug("Conversion command: {}".format(" ".join(oiio_cmd))) + run_subprocess(oiio_cmd, logger=logger) + + # FFMPEG functions def get_ffprobe_data(path_to_file, logger=None): """Load data about entered filepath via ffprobe. 
diff --git a/openpype/lib/usdlib.py b/openpype/lib/usdlib.py index 89021156b4..7b3b7112de 100644 --- a/openpype/lib/usdlib.py +++ b/openpype/lib/usdlib.py @@ -9,6 +9,7 @@ except ImportError: from mvpxr import Usd, UsdGeom, Sdf, Kind from avalon import io, api +from openpype.pipeline import registered_root log = logging.getLogger(__name__) @@ -323,7 +324,7 @@ def get_usd_master_path(asset, subset, representation): path = template.format( **{ - "root": api.registered_root(), + "root": registered_root(), "project": api.Session["AVALON_PROJECT"], "asset": asset_doc["name"], "subset": subset, diff --git a/openpype/modules/avalon_apps/avalon_app.py b/openpype/modules/avalon_apps/avalon_app.py index 51a22323f1..1d21de129b 100644 --- a/openpype/modules/avalon_apps/avalon_app.py +++ b/openpype/modules/avalon_apps/avalon_app.py @@ -1,5 +1,5 @@ import os -import openpype + from openpype.modules import OpenPypeModule from openpype_interfaces import ITrayModule @@ -26,7 +26,8 @@ class AvalonModule(OpenPypeModule, ITrayModule): self.avalon_mongo_timeout = avalon_mongo_timeout # Tray attributes - self.libraryloader = None + self._library_loader_imported = None + self._library_loader_window = None self.rest_api_obj = None def get_global_environments(self): @@ -41,21 +42,11 @@ class AvalonModule(OpenPypeModule, ITrayModule): def tray_init(self): # Add library tool + self._library_loader_imported = False try: - from Qt import QtCore from openpype.tools.libraryloader import LibraryLoaderWindow - libraryloader = LibraryLoaderWindow( - show_projects=True, - show_libraries=True - ) - # Remove always on top flag for tray - window_flags = libraryloader.windowFlags() - if window_flags | QtCore.Qt.WindowStaysOnTopHint: - window_flags ^= QtCore.Qt.WindowStaysOnTopHint - libraryloader.setWindowFlags(window_flags) - self.libraryloader = libraryloader - + self._library_loader_imported = True except Exception: self.log.warning( "Couldn't load Library loader tool for tray.", @@ -64,7 +55,7 @@ class AvalonModule(OpenPypeModule, ITrayModule): # Definition of Tray menu def tray_menu(self, tray_menu): - if self.libraryloader is None: + if not self._library_loader_imported: return from Qt import QtWidgets @@ -84,17 +75,31 @@ class AvalonModule(OpenPypeModule, ITrayModule): return def show_library_loader(self): - if self.libraryloader is None: - return + if self._library_loader_window is None: + from Qt import QtCore + from openpype.tools.libraryloader import LibraryLoaderWindow + from openpype.pipeline import install_openpype_plugins - self.libraryloader.show() + libraryloader = LibraryLoaderWindow( + show_projects=True, + show_libraries=True + ) + # Remove always on top flag for tray + window_flags = libraryloader.windowFlags() + if window_flags & QtCore.Qt.WindowStaysOnTopHint: + window_flags ^= QtCore.Qt.WindowStaysOnTopHint + libraryloader.setWindowFlags(window_flags) + self._library_loader_window = libraryloader + + install_openpype_plugins() + + self._library_loader_window.show() # Raise and activate the window # for MacOS - self.libraryloader.raise_() + self._library_loader_window.raise_() # for Windows - self.libraryloader.activateWindow() - self.libraryloader.refresh() + self._library_loader_window.activateWindow() # Webserver module implementation def webserver_initialization(self, server_manager): diff --git a/openpype/modules/base.py b/openpype/modules/base.py index 175957ae39..23c908299f 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -28,26 +28,17 @@ from openpype.settings.lib import ( )
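Note: the base.py rewrite below drops the hard-coded DEFAULT_OPENPYPE_MODULES tuple in favor of scanning the modules directory. A simplified sketch of the discovery rule it implements (the helper name here is illustrative, not part of the PR):

import os

def _is_importable_module(dirpath, filename):
    # Directories count as modules only when they contain __init__.py;
    # plain files count only when they have a ".py" extension.
    # Ignore lists are applied by the caller before this check.
    fullpath = os.path.join(dirpath, filename)
    if os.path.isdir(fullpath):
        return os.path.exists(os.path.join(fullpath, "__init__.py"))
    return os.path.splitext(filename)[1] == ".py"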
from openpype.lib import PypeLogger - -DEFAULT_OPENPYPE_MODULES = ( - "avalon_apps", - "clockify", - "log_viewer", - "deadline", - "muster", - "royalrender", - "python_console_interpreter", - "ftrack", - "slack", - "webserver", - "launcher_action", - "project_manager_action", - "settings_action", - "standalonepublish_action", - "traypublish_action", - "job_queue", - "timers_manager", - "sync_server", +# Files that will always be ignored on modules import +IGNORED_FILENAMES = ( + "__pycache__", +) +# Files ignored on modules import from "./openpype/modules" +IGNORED_DEFAULT_FILENAMES = ( + "__init__.py", + "base.py", + "interfaces.py", + "example_addons", + "default_modules", ) @@ -146,9 +137,16 @@ class _LoadCache: def get_default_modules_dir(): """Path to default OpenPype modules.""" + current_dir = os.path.abspath(os.path.dirname(__file__)) - return os.path.join(current_dir, "default_modules") + output = [] + for folder_name in ("default_modules", ): + path = os.path.join(current_dir, folder_name) + if os.path.exists(path) and os.path.isdir(path): + output.append(path) + + return output def get_dynamic_modules_dirs(): @@ -186,7 +184,7 @@ def get_dynamic_modules_dirs(): def get_module_dirs(): """List of paths where OpenPype modules can be found.""" _dirpaths = [] - _dirpaths.append(get_default_modules_dir()) + _dirpaths.extend(get_default_modules_dir()) _dirpaths.extend(get_dynamic_modules_dirs()) dirpaths = [] @@ -292,25 +290,54 @@ def _load_modules(): log = PypeLogger.get_logger("ModulesLoader") + current_dir = os.path.abspath(os.path.dirname(__file__)) + processed_paths = set() + processed_paths.add(current_dir) # Import default modules imported from 'openpype.modules' - for default_module_name in DEFAULT_OPENPYPE_MODULES: + for filename in os.listdir(current_dir): + # Ignore filenames + if ( + filename in IGNORED_FILENAMES + or filename in IGNORED_DEFAULT_FILENAMES + ): + continue + + fullpath = os.path.join(current_dir, filename) + basename, ext = os.path.splitext(filename) + + if os.path.isdir(fullpath): + # Check existence of init file + init_path = os.path.join(fullpath, "__init__.py") + if not os.path.exists(init_path): + log.debug(( + "Module directory does not contain __init__.py file {}" + ).format(fullpath)) + continue + + elif ext not in (".py", ): + continue + try: - import_str = "openpype.modules.{}".format(default_module_name) - new_import_str = "{}.{}".format(modules_key, default_module_name) + import_str = "openpype.modules.{}".format(basename) + new_import_str = "{}.{}".format(modules_key, basename) default_module = __import__(import_str, fromlist=("", )) sys.modules[new_import_str] = default_module - setattr(openpype_modules, default_module_name, default_module) + setattr(openpype_modules, basename, default_module) except Exception: msg = ( "Failed to import default module '{}'."
- ).format(default_module_name) + ).format(basename) log.error(msg, exc_info=True) # Look for OpenPype modules in paths defined with `get_module_dirs` # - dynamically imported OpenPype modules and addons - dirpaths = get_module_dirs() - for dirpath in dirpaths: + for dirpath in get_module_dirs(): + # Skip already processed paths + if dirpath in processed_paths: + continue + processed_paths.add(dirpath) + if not os.path.exists(dirpath): log.warning(( "Could not find path when loading OpenPype modules \"{}\"" @@ -319,12 +346,24 @@ def _load_modules(): for filename in os.listdir(dirpath): # Ignore filenames - if filename in ("__pycache__", ): + if filename in IGNORED_FILENAMES: continue fullpath = os.path.join(dirpath, filename) basename, ext = os.path.splitext(filename) + if os.path.isdir(fullpath): + # Check existence of init fil + init_path = os.path.join(fullpath, "__init__.py") + if not os.path.exists(init_path): + log.debug(( + "Module directory does not contan __init__.py file {}" + ).format(fullpath)) + continue + + elif ext not in (".py", ): + continue + # TODO add more logic how to define if folder is module or not # - check manifest and content of manifest try: diff --git a/openpype/modules/clockify/launcher_actions/ClockifyStart.py b/openpype/modules/clockify/launcher_actions/ClockifyStart.py index db51964eb7..6428d5e7aa 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifyStart.py +++ b/openpype/modules/clockify/launcher_actions/ClockifyStart.py @@ -1,12 +1,14 @@ -from avalon import api, io +from avalon import io + from openpype.api import Logger +from openpype.pipeline import LauncherAction from openpype_modules.clockify.clockify_api import ClockifyAPI -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) -class ClockifyStart(api.Action): +class ClockifyStart(LauncherAction): name = "clockify_start_timer" label = "Clockify - Start Timer" diff --git a/openpype/modules/clockify/launcher_actions/ClockifySync.py b/openpype/modules/clockify/launcher_actions/ClockifySync.py index 02982d373a..3c81e2766c 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifySync.py +++ b/openpype/modules/clockify/launcher_actions/ClockifySync.py @@ -1,10 +1,13 @@ -from avalon import api, io +from avalon import io + from openpype_modules.clockify.clockify_api import ClockifyAPI from openpype.api import Logger -log = Logger().get_logger(__name__) +from openpype.pipeline import LauncherAction + +log = Logger.get_logger(__name__) -class ClockifySync(api.Action): +class ClockifySync(LauncherAction): name = "sync_to_clockify" label = "Sync to Clockify" diff --git a/openpype/modules/deadline/deadline_module.py b/openpype/modules/deadline/deadline_module.py index 1a179e9aaf..c30db75188 100644 --- a/openpype/modules/deadline/deadline_module.py +++ b/openpype/modules/deadline/deadline_module.py @@ -1,8 +1,19 @@ import os +import requests +import six +import sys + +from openpype.lib import requests_get, PypeLogger from openpype.modules import OpenPypeModule from openpype_interfaces import IPluginPaths +class DeadlineWebserviceError(Exception): + """ + Exception to throw when connection to Deadline server fails. + """ + + class DeadlineModule(OpenPypeModule, IPluginPaths): name = "deadline" @@ -32,3 +43,35 @@ class DeadlineModule(OpenPypeModule, IPluginPaths): return { "publish": [os.path.join(current_dir, "plugins", "publish")] } + + @staticmethod + def get_deadline_pools(webservice, log=None): + # type: (str) -> list + """Get pools from Deadline. 
+ Args: + webservice (str): Server url. + log (Logger): Logger used for logging. + Returns: + list: Pools. + Raises: + DeadlineWebserviceError: If Deadline webservice is unreachable. + + """ + if not log: + log = PypeLogger.get_logger(__name__) + + argument = "{}/api/pools?NamesOnly=true".format(webservice) + try: + response = requests_get(argument) + except requests.exceptions.ConnectionError as exc: + msg = 'Cannot connect to DL web service {}'.format(webservice) + log.error(msg) + six.reraise( + DeadlineWebserviceError, + DeadlineWebserviceError('{} - {}'.format(msg, exc)), + sys.exc_info()[2]) + if not response.ok: + log.warning("No pools retrieved") + return [] + + return response.json() diff --git a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py index 1bc4eaa067..a7035cd99f 100644 --- a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py +++ b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py @@ -11,7 +11,7 @@ import pyblish.api class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): """Collect Deadline Webservice URL from instance.""" - order = pyblish.api.CollectorOrder + 0.02 + order = pyblish.api.CollectorOrder + 0.415 label = "Deadline Webservice from the Instance" families = ["rendering"] diff --git a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py index fc056342a8..e6ad6a9aa1 100644 --- a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py +++ b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py @@ -6,7 +6,7 @@ import pyblish.api class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin): """Collect default Deadline Webservice URL.""" - order = pyblish.api.CollectorOrder + 0.01 + order = pyblish.api.CollectorOrder + 0.410 label = "Default Deadline Webservice" pass_mongo_url = False diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py b/openpype/modules/deadline/plugins/publish/collect_pools.py new file mode 100644 index 0000000000..48130848d5 --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/collect_pools.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +"""Collect Deadline pools. Choose the default from Settings. + +""" +import pyblish.api + + +class CollectDeadlinePools(pyblish.api.InstancePlugin): + """Collect pools from instance if present, from Settings otherwise.""" + + order = pyblish.api.CollectorOrder + 0.420 + label = "Collect Deadline Pools" + families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + + primary_pool = None + secondary_pool = None + + def process(self, instance): + if not instance.data.get("primaryPool"): + instance.data["primaryPool"] = self.primary_pool or "none" + + if not instance.data.get("secondaryPool"): + instance.data["secondaryPool"] = self.secondary_pool or "none" diff --git a/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml b/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml new file mode 100644 index 0000000000..0e7d72910e --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml @@ -0,0 +1,31 @@ + + + + Scene setting + + ## Invalid Deadline pools found + + Configured pools don't match what is set in Deadline. + + {invalid_value_str} + + ### How to repair?
+ + If your instance had Deadline pools set on creation, remove or + change them. + + In other cases, ask an admin to change them in Settings. + + Available Deadline pools: {pools_str}. + + + ### __Detailed Info__ + + This error is shown when a configured pool is not on Deadline anymore. It + can happen when republishing an old workfile which was created with + previous Deadline pools, + or when someone changed pools on the Deadline side but didn't update + OpenPype Settings. + + + \ No newline at end of file diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index c499c14d40..1295d40654 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -37,8 +37,6 @@ class AfterEffectsSubmitDeadline( priority = 50 chunk_size = 1000000 - primary_pool = None - secondary_pool = None group = None department = None multiprocess = True @@ -62,8 +60,8 @@ class AfterEffectsSubmitDeadline( dln_job_info.Frames = frame_range dln_job_info.Priority = self.priority - dln_job_info.Pool = self.primary_pool - dln_job_info.SecondaryPool = self.secondary_pool + dln_job_info.Pool = self._instance.data.get("primaryPool") + dln_job_info.SecondaryPool = self._instance.data.get("secondaryPool") dln_job_info.Group = self.group dln_job_info.Department = self.department dln_job_info.ChunkSize = self.chunk_size diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py index 918efb6630..e320b6df4b 100644 --- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py @@ -241,8 +241,6 @@ class HarmonySubmitDeadline( optional = True use_published = False - primary_pool = "" - secondary_pool = "" priority = 50 chunk_size = 1000000 group = "none" @@ -259,8 +257,8 @@ class HarmonySubmitDeadline( # for now, get those from presets. Later on it should be # configurable in Harmony UI directly. job_info.Priority = self.priority - job_info.Pool = self.primary_pool - job_info.SecondaryPool = self.secondary_pool + job_info.Pool = self._instance.data.get("primaryPool") + job_info.SecondaryPool = self._instance.data.get("secondaryPool") job_info.ChunkSize = self.chunk_size job_info.BatchName = os.path.basename(self._instance.data["source"]) job_info.Department = self.department diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index 59aeb68b79..82ff723e84 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -7,7 +7,7 @@ from avalon import api import pyblish.api -import hou +# import hou ???
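Note: the Deadline pools changes in this PR follow a collector -> submitter -> validator contract. A condensed sketch of that contract, using a plain dict in place of the pyblish instance and a hypothetical webservice URL:

from openpype.modules.deadline.deadline_module import DeadlineModule

instance_data = {}  # stand-in for pyblish instance.data

# CollectDeadlinePools: fill from Settings when the creator did not set them
if not instance_data.get("primaryPool"):
    instance_data["primaryPool"] = "none"
if not instance_data.get("secondaryPool"):
    instance_data["secondaryPool"] = "none"

# Submitters only read the collected values
job_info = {
    "Pool": instance_data.get("primaryPool"),
    "SecondaryPool": instance_data.get("secondaryPool"),
}

# ValidateDeadlinePools: compare against pools reported by the webservice
pools = DeadlineModule.get_deadline_pools("http://localhost:8082")
if job_info["Pool"] not in pools:
    raise ValueError("Configured primary pool is not on Deadline")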
class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): @@ -71,7 +71,8 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): "UserName": deadline_user, "Plugin": "Houdini", - "Pool": "houdini_redshift", # todo: remove hardcoded pool + "Pool": instance.data.get("primaryPool"), + "SecondaryPool": instance.data.get("secondaryPool"), "Frames": frames, "ChunkSize": instance.data.get("chunkSize", 10), diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 15a6f8d828..02e89edd1e 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -35,6 +35,7 @@ from maya import cmds from avalon import api import pyblish.api +from openpype.lib import requests_post from openpype.hosts.maya.api import lib # Documentation for keys available at: @@ -254,7 +255,11 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): use_published = True tile_assembler_plugin = "OpenPypeTileAssembler" asset_dependencies = False + priority = 50 + tile_priority = 50 limit_groups = [] + jobInfo = {} + pluginInfo = {} group = "none" def process(self, instance): @@ -272,37 +277,12 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): self.deadline_url = instance.data.get("deadlineUrl") assert self.deadline_url, "Requires Deadline Webservice URL" - self._job_info = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "jobInfo", {}) - ) + # just using existing names from Settings + self._job_info = self.jobInfo - self._plugin_info = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "pluginInfo", {}) - ) + self._plugin_info = self.pluginInfo - self.limit_groups = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "limit", []) - ) - - self.group = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "group", "none") - ) + self.limit_groups = self.limit context = instance.context workspace = context.data["workspaceDir"] @@ -465,7 +445,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): self.payload_skeleton["JobInfo"]["UserName"] = deadline_user # Set job priority self.payload_skeleton["JobInfo"]["Priority"] = \ - self._instance.data.get("priority", 50) + self._instance.data.get("priority", self.priority) if self.group != "none" and self.group: self.payload_skeleton["JobInfo"]["Group"] = self.group @@ -635,7 +615,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): } assembly_payload["JobInfo"].update(output_filenames) assembly_payload["JobInfo"]["Priority"] = self._instance.data.get( - "priority", 50) + "tile_priority", self.tile_priority) assembly_payload["JobInfo"]["UserName"] = deadline_user frame_payloads = [] @@ -721,7 +701,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): tiles_count = instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501 for tile_job in frame_payloads: - response = self._requests_post(url, json=tile_job) + response = requests_post(url, json=tile_job) if not response.ok: raise Exception(response.text) @@ -784,7 +764,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): job_idx, len(assembly_payloads) )) self.log.debug(json.dumps(ass_job, indent=4, sort_keys=True)) - response =
self._requests_post(url, json=ass_job) + response = requests_post(url, json=ass_job) if not response.ok: raise Exception(response.text) @@ -802,7 +782,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # E.g. http://192.168.0.1:8082/api/jobs url = "{}/api/jobs".format(self.deadline_url) - response = self._requests_post(url, json=payload) + response = requests_post(url, json=payload) if not response.ok: raise Exception(response.text) instance.data["deadlineSubmissionJob"] = response.json() @@ -1010,7 +990,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): self.log.info("Submitting ass export job.") url = "{}/api/jobs".format(self.deadline_url) - response = self._requests_post(url, json=payload) + response = requests_post(url, json=payload) if not response.ok: self.log.error("Submition failed!") self.log.error(response.status_code) @@ -1034,44 +1014,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): % (value, int(value)) ) - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if 'verify' not in kwargs: - kwargs['verify'] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - # add 10sec timeout before bailing out - kwargs['timeout'] = 10 - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """Wrap request get method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if 'verify' not in kwargs: - kwargs['verify'] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - # add 10sec timeout before bailing out - kwargs['timeout'] = 10 - return requests.get(*args, **kwargs) - def format_vray_output_filename(self, filename, template, dir=False): """Format the expected output file of the Export job. 
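Note: the removed per-plugin _requests_post/_requests_get wrappers also forced a 10 second timeout, which the shared openpype.lib helpers do not add; callers that relied on it should pass one explicitly. A minimal sketch with a hypothetical webservice URL and payload:

from openpype.lib import requests_post

# 'verify' is resolved from OPENPYPE_DONT_VERIFY_SSL unless passed explicitly
payload = {"JobInfo": {}, "PluginInfo": {}, "AuxFiles": []}
response = requests_post(
    "http://localhost:8082/api/jobs", json=payload, timeout=10
)
if not response.ok:
    raise Exception(response.text)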
diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index d6bd11620d..2980193254 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -27,8 +27,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): # presets priority = 50 chunk_size = 1 - primary_pool = "" - secondary_pool = "" + concurrent_tasks = 1 group = "" department = "" limit_groups = {} @@ -149,11 +148,16 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): pass # define chunk and priority - chunk_size = instance.data.get("deadlineChunkSize") + chunk_size = instance.data["deadlineChunkSize"] if chunk_size == 0 and self.chunk_size: chunk_size = self.chunk_size - priority = instance.data.get("deadlinePriority") + # define concurrent tasks + concurrent_tasks = instance.data["deadlineConcurrentTasks"] + if concurrent_tasks == 0 and self.concurrent_tasks: + concurrent_tasks = self.concurrent_tasks + + priority = instance.data["deadlinePriority"] if not priority: priority = self.priority @@ -177,10 +181,12 @@ "Priority": priority, "ChunkSize": chunk_size, + "ConcurrentTasks": concurrent_tasks, + "Department": self.department, - "Pool": self.primary_pool, - "SecondaryPool": self.secondary_pool, + "Pool": instance.data.get("primaryPool"), + "SecondaryPool": instance.data.get("secondaryPool"), "Group": self.group, "Plugin": "Nuke", diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index fad4d14ea0..715d9a8336 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -8,6 +8,7 @@ from copy import copy, deepcopy import requests import clique import openpype.api +from openpype.pipeline.farm.patterning import match_aov_pattern from avalon import api, io @@ -107,7 +108,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): families = ["render.farm", "prerender.farm", "renderlayer", "imagesequence", "vrayscene"] - aov_filter = {"maya": [r".*(?:[\._-])*([Bb]eauty)(?:[\.|_])*.*"], + aov_filter = {"maya": [r".*([Bb]eauty).*"], "aftereffects": [r".*"], # for everything from AE "harmony": [r".*"], # for everything from AE "celaction": [r".*"]} @@ -129,7 +130,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "OPENPYPE_PUBLISH_JOB" ] - # custom deadline atributes + # custom deadline attributes deadline_department = "" deadline_pool = "" deadline_pool_secondary = "" @@ -235,6 +236,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if mongo_url: environment["OPENPYPE_MONGO"] = mongo_url + priority = self.deadline_priority or instance.data.get("priority", 50) + args = [ "--headless", 'publish', @@ -254,11 +257,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Department": self.deadline_department, "ChunkSize": self.deadline_chunk_size, - "Priority": job["Props"]["Pri"], + "Priority": priority, "Group": self.deadline_group, - "Pool": self.deadline_pool, - "SecondaryPool": self.deadline_pool_secondary, + "Pool": instance.data.get("primaryPool"), + "SecondaryPool": instance.data.get("secondaryPool"), "OutputDirectory0": output_dir }, @@ -281,6 +284,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): else: payload["JobInfo"]["JobDependency0"] =
job["_id"] + if instance.data.get("suspend_publish"): + payload["JobInfo"]["InitialStatus"] = "Suspended" + index = 0 for key in environment: if key.upper() in self.enviro_filter: @@ -447,12 +453,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): app = os.environ.get("AVALON_APP", "") preview = False - if app in self.aov_filter.keys(): - for aov_pattern in self.aov_filter[app]: - if re.match(aov_pattern, aov): - preview = True - break + if isinstance(col, list): + render_file_name = os.path.basename(col[0]) + else: + render_file_name = os.path.basename(col) + aov_patterns = self.aov_filter + preview = match_aov_pattern(app, aov_patterns, render_file_name) + + # toggle preview on if multipart is on if instance_data.get("multipartExr"): preview = True @@ -509,8 +518,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): most cases, but if not - we create representation from each of them. Arguments: - instance (pyblish.plugin.Instance): instance for which we are - setting representations + instance (dict): instance data for which we are + setting representations exp_files (list): list of expected files Returns: @@ -518,27 +527,29 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ representations = [] + host_name = os.environ.get("AVALON_APP", "") collections, remainders = clique.assemble(exp_files) # create representation for every collected sequento ce for collection in collections: ext = collection.tail.lstrip(".") preview = False - # if filtered aov name is found in filename, toggle it for - # preview video rendering - for app in self.aov_filter.keys(): - if os.environ.get("AVALON_APP", "") == app: - for aov in self.aov_filter[app]: - if re.match( - aov, - list(collection)[0] - ): - preview = True - break - - # toggle preview on if multipart is on - if instance.get("multipartExr", False): - preview = True + # TODO 'useSequenceForReview' is temporary solution which does + # not work for 100% of cases. We must be able to tell what + # expected files contains more explicitly and from what + # should be review made. 
+ # - "review" tag is never added when is set to 'False' + if instance["useSequenceForReview"]: + # toggle preview on if multipart is on + if instance.get("multipartExr", False): + preview = True + else: + render_file_name = list(collection)[0] + # if filtered aov name is found in filename, toggle it for + # preview video rendering + preview = match_aov_pattern( + host_name, self.aov_filter, render_file_name + ) staging = os.path.dirname(list(collection)[0]) success, rootless_staging_dir = ( @@ -602,12 +613,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "files": os.path.basename(remainder), "stagingDir": os.path.dirname(remainder), } - if "render" in instance.get("families"): + + preview = match_aov_pattern( + host_name, self.aov_filter, remainder + ) + if preview: rep.update({ "fps": instance.get("fps"), "tags": ["review"] }) - self._solve_families(instance, True) + self._solve_families(instance, preview) already_there = False for repre in instance.get("representations", []): @@ -724,7 +739,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "resolutionWidth": data.get("resolutionWidth", 1920), "resolutionHeight": data.get("resolutionHeight", 1080), "multipartExr": data.get("multipartExr", False), - "jobBatchName": data.get("jobBatchName", "") + "jobBatchName": data.get("jobBatchName", ""), + "useSequenceForReview": data.get("useSequenceForReview", True) } if "prerender" in instance.data["families"]: @@ -916,12 +932,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # User is deadline user render_job["Props"]["User"] = context.data.get( "deadlineUser", getpass.getuser()) - # Priority is now not handled at all - - if self.deadline_priority: - render_job["Props"]["Pri"] = self.deadline_priority - else: - render_job["Props"]["Pri"] = instance.data.get("priority") render_job["Props"]["Env"] = { "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"), diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py new file mode 100644 index 0000000000..78eed17c98 --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py @@ -0,0 +1,48 @@ +import pyblish.api + +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) +from openpype.modules.deadline.deadline_module import DeadlineModule + + +class ValidateDeadlinePools(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validate primaryPool and secondaryPool on instance. + + Values are on instance based on value insertion when Creating instance or + by Settings in CollectDeadlinePools. 
+ """ + + label = "Validate Deadline Pools" + order = pyblish.api.ValidatorOrder + families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + optional = True + + def process(self, instance): + # get default deadline webservice url from deadline module + deadline_url = instance.context.data["defaultDeadline"] + self.log.info("deadline_url::{}".format(deadline_url)) + pools = DeadlineModule.get_deadline_pools(deadline_url, log=self.log) + self.log.info("pools::{}".format(pools)) + + formatting_data = { + "pools_str": ",".join(pools) + } + + primary_pool = instance.data.get("primaryPool") + if primary_pool and primary_pool not in pools: + msg = "Configured primary '{}' not present on Deadline".format( + instance.data["primaryPool"]) + formatting_data["invalid_value_str"] = msg + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + secondary_pool = instance.data.get("secondaryPool") + if secondary_pool and secondary_pool not in pools: + msg = "Configured secondary '{}' not present on Deadline".format( + instance.data["secondaryPool"]) + formatting_data["invalid_value_str"] = msg + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py index d15a865124..81f38e0c39 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py @@ -1,11 +1,8 @@ import os +import collections +import copy +from openpype.api import Anatomy from openpype_modules.ftrack.lib import BaseAction, statics_icon -from avalon import lib as avalonlib -from openpype.api import ( - Anatomy, - get_project_settings -) -from openpype.lib import ApplicationManager class CreateFolders(BaseAction): @@ -14,55 +11,59 @@ class CreateFolders(BaseAction): icon = statics_icon("ftrack", "action_icons", "CreateFolders.svg") def discover(self, session, entities, event): - if len(entities) != 1: - return False - - not_allowed = ["assetversion", "project"] - if entities[0].entity_type.lower() in not_allowed: - return False - - return True + for entity_item in event["data"]["selection"]: + if entity_item.get("entityType").lower() in ("task", "show"): + return True + return False def interface(self, session, entities, event): if event["data"].get("values", {}): return - entity = entities[0] - without_interface = True - for child in entity["children"]: - if child["object_type"]["name"].lower() != "task": - without_interface = False + + with_interface = False + for entity in entities: + if entity.entity_type.lower() != "task": + with_interface = True break - self.without_interface = without_interface - if without_interface: + + if "values" not in event["data"]: + event["data"]["values"] = {} + + event["data"]["values"]["with_interface"] = with_interface + if not with_interface: return + title = "Create folders" entity_name = entity["name"] msg = ( "
\n\nDo you want create folders also" - " for all children of \"{}\"?\n\n" + " for all children of your selection?" ) if entity.entity_type.lower() == "project": entity_name = entity["full_name"] msg = msg.replace(" also", "") msg += "\n\n(Project root won't be created if not checked)\n\n" - items = [] - item_msg = { - "type": "label", - "value": msg.format(entity_name) - } - item_label = { - "type": "label", - "value": "With all chilren entities" - } - item = { - "name": "children_included", - "type": "boolean", - "value": False - } - items.append(item_msg) - items.append(item_label) - items.append(item) + items = [ + { + "type": "label", + "value": msg.format(entity_name) + }, + { + "type": "label", + "value": "With all children entities" + }, + { + "name": "children_included", + "type": "boolean", + "value": False + }, + { + "type": "hidden", + "name": "with_interface", + "value": with_interface + } + ] return { "items": items, @@ -71,30 +72,47 @@ def launch(self, session, entities, event): '''Callback method for custom action.''' + + if "values" not in event["data"]: + return + + with_interface = event["data"]["values"]["with_interface"] with_childrens = True - if self.without_interface is False: - if "values" not in event["data"]: - return + if with_interface: with_childrens = event["data"]["values"]["children_included"] - entity = entities[0] - if entity.entity_type.lower() == "project": - proj = entity - else: - proj = entity["project"] - project_name = proj["full_name"] - project_code = proj["name"] + filtered_entities = [] + for entity in entities: + low_context_type = entity["context_type"].lower() + if low_context_type in ("task", "show"): + if not with_childrens and low_context_type == "show": + continue + filtered_entities.append(entity) - if entity.entity_type.lower() == 'project' and with_childrens is False: + if not filtered_entities: return { - 'success': True, - 'message': 'Nothing was created' + "success": True, + "message": 'Nothing was created' } - all_entities = [] - all_entities.append(entity) - if with_childrens: - all_entities = self.get_notask_children(entity) + project_entity = self.get_project_from_entity(filtered_entities[0]) + + project_name = project_entity["full_name"] + project_code = project_entity["name"] + + task_entities = [] + other_entities = [] + self.get_all_entities( + session, entities, task_entities, other_entities + ) + hierarchy = self.get_entities_hierarchy( + session, task_entities, other_entities + ) + task_types = session.query("select id, name from Type").all() + task_type_names_by_id = { + task_type["id"]: task_type["name"] + for task_type in task_types + } anatomy = Anatomy(project_name) @@ -102,77 +120,67 @@ work_template = anatomy.templates for key in work_keys: work_template = work_template[key] - work_has_apps = "{app" in work_template publish_keys = ["publish", "folder"] publish_template = anatomy.templates for key in publish_keys: publish_template = publish_template[key] - publish_has_apps = "{app" in publish_template + + project_data = { + "project": { + "name": project_name, + "code": project_code + } + } collected_paths = [] - for entity in all_entities: - if entity.entity_type.lower() == "project": - continue - ent_data = { - "project": { - "name": project_name, - "code": project_code - } - } + for item in hierarchy: + parent_entity, task_entities = item - ent_data["asset"] = entity["name"] + parent_data = copy.deepcopy(project_data) - parents = entity["link"][1:-1] + parents = parent_entity["link"][1:-1] hierarchy_names = [p["name"] for p in parents] - hierarchy = "" + hierarchy = "/".join(hierarchy_names) + if hierarchy_names: - hierarchy = os.path.sep.join(hierarchy_names) - ent_data["hierarchy"] = hierarchy + parent_name = hierarchy_names[-1]
+            else:
+                parent_name = project_name

-        tasks_created = False
-        for child in entity["children"]:
-            if child["object_type"]["name"].lower() != "task":
-                continue
-            tasks_created = True
-            task_data = ent_data.copy()
-            task_data["task"] = child["name"]
+            parent_data.update({
+                "asset": parent_entity["name"],
+                "hierarchy": hierarchy,
+                "parent": parent_name
+            })

-            apps = []
-
-            # Template wok
-            if work_has_apps:
-                app_data = task_data.copy()
-                for app in apps:
-                    app_data["app"] = app
-                    collected_paths.append(self.compute_template(
-                        anatomy, app_data, work_keys
-                    ))
-            else:
-                collected_paths.append(self.compute_template(
-                    anatomy, task_data, work_keys
-                ))
-
-            # Template publish
-            if publish_has_apps:
-                app_data = task_data.copy()
-                for app in apps:
-                    app_data["app"] = app
-                    collected_paths.append(self.compute_template(
-                        anatomy, app_data, publish_keys
-                    ))
-            else:
-                collected_paths.append(self.compute_template(
-                    anatomy, task_data, publish_keys
-                ))
-
-        if not tasks_created:
+            if not task_entities:
                 # create path for entity
                 collected_paths.append(self.compute_template(
-                    anatomy, ent_data, work_keys
+                    anatomy, parent_data, work_keys
                 ))
                 collected_paths.append(self.compute_template(
-                    anatomy, ent_data, publish_keys
+                    anatomy, parent_data, publish_keys
+                ))
+                continue
+
+            for task_entity in task_entities:
+                task_type_id = task_entity["type_id"]
+                task_type_name = task_type_names_by_id[task_type_id]
+                task_data = copy.deepcopy(parent_data)
+                task_data["task"] = {
+                    "name": task_entity["name"],
+                    "type": task_type_name
+                }
+
+                # Template work
+                collected_paths.append(self.compute_template(
+                    anatomy, task_data, work_keys
+                ))
+
+                # Template publish
+                collected_paths.append(self.compute_template(
+                    anatomy, task_data, publish_keys
                 ))

         if len(collected_paths) == 0:
@@ -193,14 +201,65 @@ class CreateFolders(BaseAction):
             "message": "Successfully created project folders."
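# A minimal sketch (editorial example, not part of the patch) of the payload
# each compute_template() call above receives and of a filled work-folder
# template. The template string below is hypothetical; real templates come
# from anatomy.templates.
task_data_example = {
    "project": {"name": "demo_project", "code": "demo"},
    "asset": "sh010",
    "hierarchy": "shots/sq01",
    "parent": "sq01",
    "task": {"name": "compositing", "type": "Compositing"}
}
work_template_example = "{project[code]}/{hierarchy}/{asset}/work/{task[name]}"
print(work_template_example.format(**task_data_example))
# -> demo/shots/sq01/sh010/work/compositing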
} - def get_notask_children(self, entity): + def get_all_entities( + self, session, entities, task_entities, other_entities + ): + if not entities: + return + + no_task_entities = [] + for entity in entities: + if entity.entity_type.lower() == "task": + task_entities.append(entity) + else: + no_task_entities.append(entity) + + if not no_task_entities: + return task_entities + + other_entities.extend(no_task_entities) + + no_task_entity_ids = [entity["id"] for entity in no_task_entities] + next_entities = session.query(( + "select id, parent_id" + " from TypedContext where parent_id in ({})" + ).format(self.join_query_keys(no_task_entity_ids))).all() + + self.get_all_entities( + session, next_entities, task_entities, other_entities + ) + + def get_entities_hierarchy(self, session, task_entities, other_entities): + task_entity_ids = [entity["id"] for entity in task_entities] + full_task_entities = session.query(( + "select id, name, type_id, parent_id" + " from TypedContext where id in ({})" + ).format(self.join_query_keys(task_entity_ids))) + task_entities_by_parent_id = collections.defaultdict(list) + for entity in full_task_entities: + parent_id = entity["parent_id"] + task_entities_by_parent_id[parent_id].append(entity) + output = [] - if entity.entity_type.lower() == "task": + if not task_entities_by_parent_id: return output - output.append(entity) - for child in entity["children"]: - output.extend(self.get_notask_children(child)) + other_ids = set() + for entity in other_entities: + other_ids.add(entity["id"]) + other_ids |= set(task_entities_by_parent_id.keys()) + + parent_entities = session.query(( + "select id, name from TypedContext where id in ({})" + ).format(self.join_query_keys(other_ids))).all() + + for parent_entity in parent_entities: + parent_id = parent_entity["id"] + output.append(( + parent_entity, + task_entities_by_parent_id[parent_id] + )) + return output def compute_template(self, anatomy, data, anatomy_keys): diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py index 94f359c317..ebea8872f9 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py @@ -1,6 +1,4 @@ -import os import re -import json from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype.api import get_project_basic_paths, create_project_folders diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py index 1b694e25f1..5871646b20 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py @@ -492,7 +492,8 @@ class DeleteOldVersions(BaseAction): os.remove(file_path) self.log.debug("Removed file: {}".format(file_path)) - remainders.remove(file_path_base) + if file_path_base in remainders: + remainders.remove(file_path_base) continue seq_path_base = os.path.split(seq_path)[1] diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py index 5301ec568e..c5b58ca94d 100644 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/ftrack/lib/avalon_sync.py @@ -286,21 +286,6 @@ def from_dict_to_set(data, is_project): return result -def get_avalon_project_template(project_name): - """Get 
avalon template - Args: - project_name: (string) - Returns: - dictionary with templates - """ - templates = Anatomy(project_name).templates - return { - "workfile": templates["avalon"]["workfile"], - "work": templates["avalon"]["work"], - "publish": templates["avalon"]["publish"] - } - - def get_project_apps(in_app_list): """ Application definitions for app name. diff --git a/openpype/modules/ftrack/lib/custom_attributes.py b/openpype/modules/ftrack/lib/custom_attributes.py index 29c6b5e7f8..2f53815368 100644 --- a/openpype/modules/ftrack/lib/custom_attributes.py +++ b/openpype/modules/ftrack/lib/custom_attributes.py @@ -135,7 +135,7 @@ def query_custom_attributes( output.extend( session.query( ( - "select value, entity_id from {}" + "select value, entity_id, configuration_id from {}" " where entity_id in ({}) and configuration_id in ({})" ).format( table_name, diff --git a/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py b/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py new file mode 100644 index 0000000000..43fa3bc3f8 --- /dev/null +++ b/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py @@ -0,0 +1,148 @@ +""" +Requires: + context > ftrackSession + context > ftrackEntity + instance > ftrackEntity + +Provides: + instance > customData > ftrack +""" +import copy + +import pyblish.api + + +class CollectFtrackCustomAttributeData(pyblish.api.ContextPlugin): + """Collect custom attribute values and store them to customData. + + Data are stored into each instance in context under + instance.data["customData"]["ftrack"]. + + Hierarchical attributes are not looked up properly for that functionality + custom attribute values lookup must be extended. + """ + + order = pyblish.api.CollectorOrder + 0.4992 + label = "Collect Ftrack Custom Attribute Data" + + # Name of custom attributes for which will be look for + custom_attribute_keys = [] + + def process(self, context): + if not self.custom_attribute_keys: + self.log.info("Custom attribute keys are not set. Skipping") + return + + ftrack_entities_by_id = {} + default_entity_id = None + + context_entity = context.data.get("ftrackEntity") + if context_entity: + entity_id = context_entity["id"] + default_entity_id = entity_id + ftrack_entities_by_id[entity_id] = context_entity + + instances_by_entity_id = { + default_entity_id: [] + } + for instance in context: + entity = instance.data.get("ftrackEntity") + if not entity: + instances_by_entity_id[default_entity_id].append(instance) + continue + + entity_id = entity["id"] + ftrack_entities_by_id[entity_id] = entity + if entity_id not in instances_by_entity_id: + instances_by_entity_id[entity_id] = [] + instances_by_entity_id[entity_id].append(instance) + + if not ftrack_entities_by_id: + self.log.info("Ftrack entities are not set. 
Skipping") + return + + session = context.data["ftrackSession"] + custom_attr_key_by_id = self.query_attr_confs(session) + if not custom_attr_key_by_id: + self.log.info(( + "Didn't find any of defined custom attributes {}" + ).format(", ".join(self.custom_attribute_keys))) + return + + entity_ids = list(instances_by_entity_id.keys()) + values_by_entity_id = self.query_attr_values( + session, entity_ids, custom_attr_key_by_id + ) + + for entity_id, instances in instances_by_entity_id.items(): + if entity_id not in values_by_entity_id: + # Use defaut empty values + entity_id = None + + for instance in instances: + value = copy.deepcopy(values_by_entity_id[entity_id]) + if "customData" not in instance.data: + instance.data["customData"] = {} + instance.data["customData"]["ftrack"] = value + instance_label = ( + instance.data.get("label") or instance.data["name"] + ) + self.log.debug(( + "Added ftrack custom data to instance \"{}\": {}" + ).format(instance_label, value)) + + def query_attr_values(self, session, entity_ids, custom_attr_key_by_id): + # Prepare values for query + entity_ids_joined = ",".join([ + '"{}"'.format(entity_id) + for entity_id in entity_ids + ]) + conf_ids_joined = ",".join([ + '"{}"'.format(conf_id) + for conf_id in custom_attr_key_by_id.keys() + ]) + # Query custom attribute values + value_items = session.query( + ( + "select value, entity_id, configuration_id" + " from CustomAttributeValue" + " where entity_id in ({}) and configuration_id in ({})" + ).format( + entity_ids_joined, + conf_ids_joined + ) + ).all() + + # Prepare default value output per entity id + values_by_key = { + key: None for key in self.custom_attribute_keys + } + # Prepare all entity ids that were queried + values_by_entity_id = { + entity_id: copy.deepcopy(values_by_key) + for entity_id in entity_ids + } + # Add none entity id which is used as default value + values_by_entity_id[None] = copy.deepcopy(values_by_key) + # Go through queried data and store them + for item in value_items: + conf_id = item["configuration_id"] + conf_key = custom_attr_key_by_id[conf_id] + entity_id = item["entity_id"] + values_by_entity_id[entity_id][conf_key] = item["value"] + return values_by_entity_id + + def query_attr_confs(self, session): + custom_attributes = set(self.custom_attribute_keys) + cust_attrs_query = ( + "select id, key from CustomAttributeConfiguration" + " where key in ({})" + ).format(", ".join( + ["\"{}\"".format(attr_name) for attr_name in custom_attributes] + )) + + custom_attr_confs = session.query(cust_attrs_query).all() + return { + conf["id"]: conf["key"] + for conf in custom_attr_confs + } diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py index 07af217fb6..436a61cc18 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py @@ -6,7 +6,7 @@ import avalon.api class CollectFtrackApi(pyblish.api.ContextPlugin): """ Collects an ftrack session and the current task id. 
""" - order = pyblish.api.CollectorOrder + 0.4999 + order = pyblish.api.CollectorOrder + 0.4991 label = "Collect Ftrack Api" def process(self, context): diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py index 70030acad9..158135c952 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py @@ -25,7 +25,7 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): based on 'families' (editorial drives it by presence of 'review') """ label = "Collect Ftrack Family" - order = pyblish.api.CollectorOrder + 0.4998 + order = pyblish.api.CollectorOrder + 0.4990 profiles = None @@ -34,6 +34,7 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): self.log.warning("No profiles present for adding Ftrack family") return + add_ftrack_family = False task_name = instance.data.get("task", avalon.api.Session["AVALON_TASK"]) host_name = avalon.api.Session["AVALON_APP"] @@ -53,6 +54,8 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): additional_filters = profile.get("advanced_filtering") if additional_filters: + self.log.info("'{}' families used for additional filtering". + format(families)) add_ftrack_family = self._get_add_ftrack_f_from_addit_filters( additional_filters, families, @@ -69,6 +72,13 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): else: instance.data["families"] = ["ftrack"] + result_str = "Adding" + if not add_ftrack_family: + result_str = "Not adding" + self.log.info("{} 'ftrack' family for instance with '{}'".format( + result_str, family + )) + def _get_add_ftrack_f_from_addit_filters(self, additional_filters, families, diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py index 6c25b9191e..650c59fae8 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py @@ -1,3 +1,15 @@ +"""Integrate components into ftrack + +Requires: + context -> ftrackSession - connected ftrack.Session + instance -> ftrackComponentsList - list of components to integrate + +Provides: + instance -> ftrackIntegratedAssetVersionsData + # legacy + instance -> ftrackIntegratedAssetVersions +""" + import os import sys import six @@ -54,6 +66,114 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): self.log.debug(query) return query + def process(self, instance): + session = instance.context.data["ftrackSession"] + context = instance.context + component_list = instance.data.get("ftrackComponentsList") + if not component_list: + self.log.info( + "Instance don't have components to integrate to Ftrack." + " Skipping." + ) + return + + session = instance.context.data["ftrackSession"] + context = instance.context + + parent_entity = None + default_asset_name = None + # If instance has set "ftrackEntity" or "ftrackTask" then use them from + # instance. Even if they are set to None. If they are set to None it + # has a reason. 
(like has different context) + if "ftrackEntity" in instance.data or "ftrackTask" in instance.data: + task_entity = instance.data.get("ftrackTask") + parent_entity = instance.data.get("ftrackEntity") + + elif "ftrackEntity" in context.data or "ftrackTask" in context.data: + task_entity = context.data.get("ftrackTask") + parent_entity = context.data.get("ftrackEntity") + + if task_entity: + default_asset_name = task_entity["name"] + parent_entity = task_entity["parent"] + + if parent_entity is None: + self.log.info(( + "Skipping ftrack integration. Instance \"{}\" does not" + " have specified ftrack entities." + ).format(str(instance))) + return + + if not default_asset_name: + default_asset_name = parent_entity["name"] + + # Change status on task + self._set_task_status(instance, task_entity, session) + + # Prepare AssetTypes + asset_types_by_short = self._ensure_asset_types_exists( + session, component_list + ) + + asset_versions_data_by_id = {} + used_asset_versions = [] + # Iterate over components and publish + for data in component_list: + self.log.debug("data: {}".format(data)) + + # AssetType + asset_type_short = data["assettype_data"]["short"] + asset_type_entity = asset_types_by_short[asset_type_short] + + # Asset + asset_data = data.get("asset_data") or {} + if "name" not in asset_data: + asset_data["name"] = default_asset_name + asset_entity = self._ensure_asset_exists( + session, + asset_data, + asset_type_entity["id"], + parent_entity["id"] + ) + + # Asset Version + asset_version_data = data.get("assetversion_data") or {} + asset_version_entity = self._ensure_asset_version_exists( + session, asset_version_data, asset_entity["id"], task_entity + ) + + # Component + self.create_component(session, asset_version_entity, data) + + # Store asset version and components items that were + version_id = asset_version_entity["id"] + if version_id not in asset_versions_data_by_id: + asset_versions_data_by_id[version_id] = { + "asset_version": asset_version_entity, + "component_items": [] + } + + asset_versions_data_by_id[version_id]["component_items"].append( + data + ) + + # Backwards compatibility + if asset_version_entity not in used_asset_versions: + used_asset_versions.append(asset_version_entity) + + instance.data["ftrackIntegratedAssetVersionsData"] = ( + asset_versions_data_by_id + ) + + # Backwards compatibility + asset_versions_key = "ftrackIntegratedAssetVersions" + if asset_versions_key not in instance.data: + instance.data[asset_versions_key] = [] + + for asset_version in used_asset_versions: + if asset_version not in instance.data[asset_versions_key]: + instance.data[asset_versions_key].append(asset_version) + def _set_task_status(self, instance, task_entity, session): project_entity = instance.context.data.get("ftrackProject") if not project_entity: @@ -100,190 +220,224 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): session._configure_locations() six.reraise(tp, value, tb) - def process(self, instance): - session = instance.context.data["ftrackSession"] - context = instance.context + def _ensure_asset_types_exists(self, session, component_list): + """Make sure that all AssetType entities exists for integration. - name = None - # If instance has set "ftrackEntity" or "ftrackTask" then use them from - # instance. Even if they are set to None. If they are set to None it - # has a reason. 
(like has different context)
-        if "ftrackEntity" in instance.data or "ftrackTask" in instance.data:
-            task = instance.data.get("ftrackTask")
-            parent = instance.data.get("ftrackEntity")
+        Returns:
+            dict: All asset types by short name.
+        """
+        # Query existing asset types
+        asset_types = session.query("select id, short from AssetType").all()
+        # Store all existing short names
+        asset_type_shorts = {asset_type["short"] for asset_type in asset_types}
+        # Check which asset types are missing and store them
+        asset_type_names_by_missing_shorts = {}
+        default_short_name = "upload"
+        for data in component_list:
+            asset_type_data = data.get("assettype_data") or {}
+            asset_type_short = asset_type_data.get("short")
+            if not asset_type_short:
+                # Use default asset type name if not set and change the
+                #   input data
+                asset_type_short = default_short_name
+                asset_type_data["short"] = asset_type_short
+                data["assettype_data"] = asset_type_data

-        elif "ftrackEntity" in context.data or "ftrackTask" in context.data:
-            task = context.data.get("ftrackTask")
-            parent = context.data.get("ftrackEntity")
+            if (
+                # Skip if short name exists
+                asset_type_short in asset_type_shorts
+                # Skip if short name was already added to missing types
+                #   and asset type name is filled
+                # - if asset type name is missing then try to use the name
+                #   from other data
+                or asset_type_names_by_missing_shorts.get(asset_type_short)
+            ):
+                continue

-        if task:
-            parent = task["parent"]
-            name = task
-        elif parent:
-            name = parent["name"]
+            asset_type_names_by_missing_shorts[asset_type_short] = (
+                asset_type_data.get("name")
+            )

-        if not name:
-            self.log.info((
-                "Skipping ftrack integration. Instance \"{}\" does not"
-                " have specified ftrack entities."
-            ).format(str(instance)))
-            return
+        # Create missing asset types if there are any
+        if asset_type_names_by_missing_shorts:
+            self.log.info("Creating asset types with short names: {}".format(
+                ", ".join(asset_type_names_by_missing_shorts.keys())
+            ))
+            for missing_short, type_name in (
+                asset_type_names_by_missing_shorts.items()
+            ):
+                # Use short for name if name is not defined
+                if not type_name:
+                    type_name = missing_short
+                # Use short name also for name
+                # - there is no other source for 'name'
+                session.create(
+                    "AssetType",
+                    {
+                        "short": missing_short,
+                        "name": type_name
+                    }
+                )

-        info_msg = (
-            "Created new {entity_type} with data: {data}"
-            ", metadata: {metadata}."
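# A small runnable sketch of the dedupe pass above: component items without a
# short name in "assettype_data" fall back to the default "upload" short.
component_list_example = [
    {"assettype_data": {}},                   # gets the default short
    {"assettype_data": {"short": "render"}}   # kept as-is
]
for data in component_list_example:
    asset_type_data = data.get("assettype_data") or {}
    asset_type_data.setdefault("short", "upload")
    data["assettype_data"] = asset_type_data
print([d["assettype_data"]["short"] for d in component_list_example])
# -> ['upload', 'render']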
+ # Commit creation + session.commit() + # Requery asset types + asset_types = session.query( + "select id, short from AssetType" + ).all() + + return {asset_type["short"]: asset_type for asset_type in asset_types} + + def _ensure_asset_exists( + self, session, asset_data, asset_type_id, parent_id + ): + asset_name = asset_data["name"] + asset_entity = self._query_asset( + session, asset_name, asset_type_id, parent_id + ) + if asset_entity is not None: + return asset_entity + + asset_data = { + "name": asset_name, + "type_id": asset_type_id, + "context_id": parent_id + } + self.log.info("Created new Asset with data: {}.".format(asset_data)) + session.create("Asset", asset_data) + session.commit() + return self._query_asset(session, asset_name, asset_type_id, parent_id) + + def _query_asset(self, session, asset_name, asset_type_id, parent_id): + return session.query( + ( + "select id from Asset" + " where name is \"{}\"" + " and type_id is \"{}\"" + " and context_id is \"{}\"" + ).format(asset_name, asset_type_id, parent_id) + ).first() + + def _ensure_asset_version_exists( + self, session, asset_version_data, asset_id, task_entity + ): + task_id = None + if task_entity: + task_id = task_entity["id"] + + # Try query asset version by criteria (asset id and version) + version = asset_version_data.get("version") or 0 + asset_version_entity = self._query_asset_version( + session, version, asset_id ) - used_asset_versions = [] + # Prepare comment value + comment = asset_version_data.get("comment") or "" + if asset_version_entity is not None: + changed = False + if comment != asset_version_entity["comment"]: + asset_version_entity["comment"] = comment + changed = True - self._set_task_status(instance, task, session) + if task_id != asset_version_entity["task_id"]: + asset_version_entity["task_id"] = task_id + changed = True - # Iterate over components and publish - for data in instance.data.get("ftrackComponentsList", []): - # AssetType - # Get existing entity. - assettype_data = {"short": "upload"} - assettype_data.update(data.get("assettype_data", {})) - self.log.debug("data: {}".format(data)) + if changed: + session.commit() - assettype_entity = session.query( - self.query("AssetType", assettype_data) - ).first() - - # Create a new entity if none exits. - if not assettype_entity: - assettype_entity = session.create("AssetType", assettype_data) - self.log.debug("Created new AssetType with data: {}".format( - assettype_data - )) - - # Asset - # Get existing entity. - asset_data = { - "name": name, - "type": assettype_entity, - "parent": parent, + else: + new_asset_version_data = { + "version": version, + "asset_id": asset_id } - asset_data.update(data.get("asset_data", {})) + if task_id: + new_asset_version_data["task_id"] = task_id - asset_entity = session.query( - self.query("Asset", asset_data) - ).first() + if comment: + new_asset_version_data["comment"] = comment - self.log.info("asset entity: {}".format(asset_entity)) - - # Extracting metadata, and adding after entity creation. This is - # due to a ftrack_api bug where you can't add metadata on creation. - asset_metadata = asset_data.pop("metadata", {}) - - # Create a new entity if none exits. 
- if not asset_entity: - asset_entity = session.create("Asset", asset_data) - self.log.debug( - info_msg.format( - entity_type="Asset", - data=asset_data, - metadata=asset_metadata - ) - ) - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) - - # Adding metadata - existing_asset_metadata = asset_entity["metadata"] - existing_asset_metadata.update(asset_metadata) - asset_entity["metadata"] = existing_asset_metadata - - # AssetVersion - # Get existing entity. - assetversion_data = { - "version": 0, - "asset": asset_entity, - } - _assetversion_data = data.get("assetversion_data", {}) - assetversion_cust_attrs = _assetversion_data.pop( - "custom_attributes", {} + self.log.info("Created new AssetVersion with data {}".format( + new_asset_version_data + )) + session.create("AssetVersion", new_asset_version_data) + session.commit() + asset_version_entity = self._query_asset_version( + session, version, asset_id ) - asset_version_comment = _assetversion_data.pop( - "comment", None - ) - assetversion_data.update(_assetversion_data) - assetversion_entity = session.query( - self.query("AssetVersion", assetversion_data) - ).first() - - # Extracting metadata, and adding after entity creation. This is - # due to a ftrack_api bug where you can't add metadata on creation. - assetversion_metadata = assetversion_data.pop("metadata", {}) - - if task: - assetversion_data['task'] = task - - # Create a new entity if none exits. - if not assetversion_entity: - assetversion_entity = session.create( - "AssetVersion", assetversion_data - ) - self.log.debug( - info_msg.format( - entity_type="AssetVersion", - data=assetversion_data, - metadata=assetversion_metadata + # Set custom attributes if there were any set + custom_attrs = asset_version_data.get("custom_attributes") or {} + for attr_key, attr_value in custom_attrs.items(): + if attr_key in asset_version_entity["custom_attributes"]: + try: + asset_version_entity["custom_attributes"][attr_key] = ( + attr_value ) + session.commit() + continue + except Exception: + session.rollback() + session._configure_locations() + + self.log.warning( + ( + "Custom Attrubute \"{0}\" is not available for" + " AssetVersion <{1}>. Can't set it's value to: \"{2}\"" + ).format( + attr_key, asset_version_entity["id"], str(attr_value) ) - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) + ) - # Adding metadata - existing_assetversion_metadata = assetversion_entity["metadata"] - existing_assetversion_metadata.update(assetversion_metadata) - assetversion_entity["metadata"] = existing_assetversion_metadata + return asset_version_entity - # Add comment - if asset_version_comment: - assetversion_entity["comment"] = asset_version_comment - try: - session.commit() - except Exception: - session.rollback() - session._configure_locations() - self.log.warning(( - "Comment was not possible to set for AssetVersion" - "\"{0}\". 
Can't set it's value to: \"{1}\"" - ).format( - assetversion_entity["id"], str(asset_version_comment) - )) + def _query_asset_version(self, session, version, asset_id): + return session.query( + ( + "select id, task_id, comment from AssetVersion" + " where version is \"{}\" and asset_id is \"{}\"" + ).format(version, asset_id) + ).first() - # Adding Custom Attributes - for attr, val in assetversion_cust_attrs.items(): - if attr in assetversion_entity["custom_attributes"]: - try: - assetversion_entity["custom_attributes"][attr] = val - session.commit() - continue - except Exception: - session.rollback() - session._configure_locations() + def create_component(self, session, asset_version_entity, data): + component_data = data.get("component_data") or {} - self.log.warning(( - "Custom Attrubute \"{0}\"" - " is not available for AssetVersion <{1}>." - " Can't set it's value to: \"{2}\"" - ).format(attr, assetversion_entity["id"], str(val))) + if not component_data.get("name"): + component_data["name"] = "main" + + version_id = asset_version_entity["id"] + component_data["version_id"] = version_id + component_entity = session.query( + ( + "select id, name from Component where name is \"{}\"" + " and version_id is \"{}\"" + ).format(component_data["name"], version_id) + ).first() + + component_overwrite = data.get("component_overwrite", False) + location = data.get("component_location", session.pick_location()) + + # Overwrite existing component data if requested. + if component_entity and component_overwrite: + origin_location = session.query( + "Location where name is \"ftrack.origin\"" + ).one() + + # Removing existing members from location + components = list(component_entity.get("members", [])) + components += [component_entity] + for component in components: + for loc in component["component_locations"]: + if location["id"] == loc["location_id"]: + location.remove_component( + component, recursive=False + ) + + # Deleting existing members on component entity + for member in component_entity.get("members", []): + session.delete(member) + del(member) - # Have to commit the version and asset, because location can't - # determine the final location without. try: session.commit() except Exception: @@ -292,175 +446,124 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): session._configure_locations() six.reraise(tp, value, tb) - # Component - # Get existing entity. - component_data = { - "name": "main", - "version": assetversion_entity - } - component_data.update(data.get("component_data", {})) + # Reset members in memory + if "members" in component_entity.keys(): + component_entity["members"] = [] - component_entity = session.query( - self.query("Component", component_data) - ).first() + # Add components to origin location + try: + collection = clique.parse(data["component_path"]) + except ValueError: + # Assume its a single file + # Changing file type + name, ext = os.path.splitext(data["component_path"]) + component_entity["file_type"] = ext - component_overwrite = data.get("component_overwrite", False) - location = data.get("component_location", session.pick_location()) - - # Overwrite existing component data if requested. 
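# A hedged example of a single item in instance.data["ftrackComponentsList"],
# the shape that process() and create_component() consume. All values below
# are illustrative only.
component_item_example = {
    "assettype_data": {"short": "upload"},
    "asset_data": {"name": "renderMain"},
    "assetversion_data": {"version": 3, "comment": "Fixed lighting"},
    "component_data": {"name": "ftrackreview-mp4"},
    "component_path": "/projects/demo/publish/review.mp4",
    "component_overwrite": False,
    "thumbnail": False
}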
- if component_entity and component_overwrite: - - origin_location = session.query( - "Location where name is \"ftrack.origin\"" - ).one() - - # Removing existing members from location - components = list(component_entity.get("members", [])) - components += [component_entity] - for component in components: - for loc in component["component_locations"]: - if location["id"] == loc["location_id"]: - location.remove_component( - component, recursive=False - ) - - # Deleting existing members on component entity - for member in component_entity.get("members", []): - session.delete(member) - del(member) - - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) - - # Reset members in memory - if "members" in component_entity.keys(): - component_entity["members"] = [] - - # Add components to origin location - try: - collection = clique.parse(data["component_path"]) - except ValueError: - # Assume its a single file - # Changing file type - name, ext = os.path.splitext(data["component_path"]) - component_entity["file_type"] = ext - - origin_location.add_component( - component_entity, data["component_path"] - ) - else: - # Changing file type - component_entity["file_type"] = collection.format("{tail}") - - # Create member components for sequence. - for member_path in collection: - - size = 0 - try: - size = os.path.getsize(member_path) - except OSError: - pass - - name = collection.match(member_path).group("index") - - member_data = { - "name": name, - "container": component_entity, - "size": size, - "file_type": os.path.splitext(member_path)[-1] - } - - component = session.create( - "FileComponent", member_data - ) - origin_location.add_component( - component, member_path, recursive=False - ) - component_entity["members"].append(component) - - # Add components to location. - location.add_component( - component_entity, origin_location, recursive=True - ) - - data["component"] = component_entity - msg = "Overwriting Component with path: {0}, data: {1}, " - msg += "location: {2}" - self.log.info( - msg.format( - data["component_path"], - component_data, - location - ) - ) - - # Extracting metadata, and adding after entity creation. This is - # due to a ftrack_api bug where you can't add metadata on creation. - component_metadata = component_data.pop("metadata", {}) - - # Create new component if none exists. - new_component = False - if not component_entity: - component_entity = assetversion_entity.create_component( - data["component_path"], - data=component_data, - location=location - ) - data["component"] = component_entity - msg = "Created new Component with path: {0}, data: {1}" - msg += ", metadata: {2}, location: {3}" - self.log.info( - msg.format( - data["component_path"], - component_data, - component_metadata, - location - ) - ) - new_component = True - - # Adding metadata - existing_component_metadata = component_entity["metadata"] - existing_component_metadata.update(component_metadata) - component_entity["metadata"] = existing_component_metadata - - # if component_data['name'] = 'ftrackreview-mp4-mp4': - # assetversion_entity["thumbnail_id"] - - # Setting assetversion thumbnail - if data.get("thumbnail", False): - assetversion_entity["thumbnail_id"] = component_entity["id"] - - # Inform user about no changes to the database. 
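# The sequence branch below leans on the clique library; a short sketch of
# the calls it uses (the path and frame range are illustrative):
import clique

collection = clique.parse("/renders/shot.%04d.exr [1001-1003]")
print(collection.format("{tail}"))  # -> .exr
for member_path in collection:
    print(member_path)  # /renders/shot.1001.exr ... /renders/shot.1003.exr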
- if (component_entity and not component_overwrite and - not new_component): - data["component"] = component_entity - self.log.info( - "Found existing component, and no request to overwrite. " - "Nothing has been changed." + origin_location.add_component( + component_entity, data["component_path"] ) else: - # Commit changes. - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) + # Changing file type + component_entity["file_type"] = collection.format("{tail}") - if assetversion_entity not in used_asset_versions: - used_asset_versions.append(assetversion_entity) + # Create member components for sequence. + for member_path in collection: - asset_versions_key = "ftrackIntegratedAssetVersions" - if asset_versions_key not in instance.data: - instance.data[asset_versions_key] = [] + size = 0 + try: + size = os.path.getsize(member_path) + except OSError: + pass - for asset_version in used_asset_versions: - if asset_version not in instance.data[asset_versions_key]: - instance.data[asset_versions_key].append(asset_version) + name = collection.match(member_path).group("index") + + member_data = { + "name": name, + "container": component_entity, + "size": size, + "file_type": os.path.splitext(member_path)[-1] + } + + component = session.create( + "FileComponent", member_data + ) + origin_location.add_component( + component, member_path, recursive=False + ) + component_entity["members"].append(component) + + # Add components to location. + location.add_component( + component_entity, origin_location, recursive=True + ) + + data["component"] = component_entity + self.log.info( + ( + "Overwriting Component with path: {0}, data: {1}," + " location: {2}" + ).format( + data["component_path"], + component_data, + location + ) + ) + + # Extracting metadata, and adding after entity creation. This is + # due to a ftrack_api bug where you can't add metadata on creation. + component_metadata = component_data.pop("metadata", {}) + + # Create new component if none exists. + new_component = False + if not component_entity: + component_entity = asset_version_entity.create_component( + data["component_path"], + data=component_data, + location=location + ) + data["component"] = component_entity + self.log.info( + ( + "Created new Component with path: {0}, data: {1}," + " metadata: {2}, location: {3}" + ).format( + data["component_path"], + component_data, + component_metadata, + location + ) + ) + new_component = True + + # Adding metadata + existing_component_metadata = component_entity["metadata"] + existing_component_metadata.update(component_metadata) + component_entity["metadata"] = existing_component_metadata + + # if component_data['name'] = 'ftrackreview-mp4-mp4': + # assetversion_entity["thumbnail_id"] + + # Setting assetversion thumbnail + if data.get("thumbnail"): + asset_version_entity["thumbnail_id"] = component_entity["id"] + + # Inform user about no changes to the database. + if ( + component_entity + and not component_overwrite + and not new_component + ): + data["component"] = component_entity + self.log.info( + "Found existing component, and no request to overwrite. " + "Nothing has been changed." + ) + else: + # Commit changes. 
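# Downstream plugins (the description and note integrators later in this
# patch) consume what this integrator stores; a hedged example of the shape
# of instance.data["ftrackIntegratedAssetVersionsData"]:
integrated_versions_example = {
    "<asset-version-id>": {
        "asset_version": "<AssetVersion entity>",
        "component_items": ["<component item dicts integrated into it>"]
    }
}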
+            try:
+                session.commit()
+            except Exception:
+                tp, value, tb = sys.exc_info()
+                session.rollback()
+                session._configure_locations()
+                six.reraise(tp, value, tb)
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py
new file mode 100644
index 0000000000..c6a3d47f66
--- /dev/null
+++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py
@@ -0,0 +1,84 @@
+"""
+Requires:
+    context > comment
+    context > ftrackSession
+    instance > ftrackIntegratedAssetVersionsData
+"""
+
+import sys
+
+import six
+import pyblish.api
+
+
+class IntegrateFtrackDescription(pyblish.api.InstancePlugin):
+    """Add description to AssetVersions in Ftrack."""
+
+    # Must be after integrate asset new
+    order = pyblish.api.IntegratorOrder + 0.4999
+    label = "Integrate Ftrack description"
+    families = ["ftrack"]
+    optional = True
+
+    # Can be set in settings:
+    # - Allows `intent` and `comment` keys
+    description_template = "{comment}"
+
+    def process(self, instance):
+        # Check if there are any integrated AssetVersion entities
+        asset_versions_key = "ftrackIntegratedAssetVersionsData"
+        asset_versions_data_by_id = instance.data.get(asset_versions_key)
+        if not asset_versions_data_by_id:
+            self.log.info("There are no integrated AssetVersions")
+            return
+
+        comment = (instance.context.data.get("comment") or "").strip()
+        if not comment:
+            self.log.info("Comment is not set.")
+        else:
+            self.log.debug("Comment is set to `{}`".format(comment))

+        session = instance.context.data["ftrackSession"]
+
+        intent = instance.context.data.get("intent")
+        intent_label = None
+        if intent and isinstance(intent, dict):
+            intent_val = intent.get("value")
+            intent_label = intent.get("label")
+        else:
+            intent_val = intent
+
+        if not intent_label:
+            intent_label = intent_val or ""
+
+        # if intent label is set then format comment
+        # - it is possible that intent_label is equal to "" (empty string)
+        if intent_label:
+            self.log.debug(
+                "Intent label is set to `{}`.".format(intent_label)
+            )
+
+        else:
+            self.log.debug("Intent is not set.")
+
+        for asset_version_data in asset_versions_data_by_id.values():
+            asset_version = asset_version_data["asset_version"]
+
+            # Format the description template; use a separate variable so
+            #   the original comment is not re-formatted between iterations
+            description = self.description_template.format(**{
+                "intent": intent_label,
+                "comment": comment
+            })
+            asset_version["comment"] = description
+
+            try:
+                session.commit()
+                self.log.debug("Comment added to AssetVersion \"{}\"".format(
+                    str(asset_version)
+                ))
+            except Exception:
+                tp, value, tb = sys.exc_info()
+                session.rollback()
+                session._configure_locations()
+                six.reraise(tp, value, tb)
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
index cff7cd32cb..5ea0469bce 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
+++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py
@@ -35,10 +35,18 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
         "image": "img",
         "reference": "reference"
     }
+    keep_first_subset_name_for_review = True

     def process(self, instance):
         self.log.debug("instance {}".format(instance))

+        instance_repres = instance.data.get("representations")
+        if not instance_repres:
+            self.log.info((
+                "Skipping instance. 
Does not have any representations {}" + ).format(str(instance))) + return + instance_version = instance.data.get("version") if instance_version is None: raise ValueError("Instance version not set") @@ -52,8 +60,12 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): if not asset_type and family_low in self.family_mapping: asset_type = self.family_mapping[family_low] - self.log.debug(self.family_mapping) - self.log.debug(family_low) + if not asset_type: + asset_type = "upload" + + self.log.debug( + "Family: {}\nMapping: {}".format(family_low, self.family_mapping) + ) # Ignore this instance if neither "ftrackFamily" or a family mapping is # found. @@ -63,13 +75,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ).format(family)) return - instance_repres = instance.data.get("representations") - if not instance_repres: - self.log.info(( - "Skipping instance. Does not have any representations {}" - ).format(str(instance))) - return - # Prepare FPS instance_fps = instance.data.get("fps") if instance_fps is None: @@ -168,7 +173,47 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Change asset name of each new component for review is_first_review_repre = True not_first_components = [] + extended_asset_name = "" + multiple_reviewable = len(review_representations) > 1 for repre in review_representations: + # Create copy of base comp item and append it + review_item = copy.deepcopy(base_component_item) + + # get asset name and define extended name variant + asset_name = review_item["asset_data"]["name"] + extended_asset_name = "_".join( + (asset_name, repre["name"]) + ) + + # reset extended if no need for extended asset name + if ( + self.keep_first_subset_name_for_review + and is_first_review_repre + ): + extended_asset_name = "" + else: + # only rename if multiple reviewable + if multiple_reviewable: + review_item["asset_data"]["name"] = extended_asset_name + else: + extended_asset_name = "" + + # rename all already created components + # only if first repre and extended name available + if is_first_review_repre and extended_asset_name: + # and rename all already created components + for _ci in component_list: + _ci["asset_data"]["name"] = extended_asset_name + + # and rename all already created src components + for _sci in src_components_to_add: + _sci["asset_data"]["name"] = extended_asset_name + + # rename also first thumbnail component if any + if first_thumbnail_component is not None: + first_thumbnail_component[ + "asset_data"]["name"] = extended_asset_name + frame_start = repre.get("frameStartFtrack") frame_end = repre.get("frameEndFtrack") if frame_start is None or frame_end is None: @@ -184,8 +229,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): if fps is None: fps = instance_fps - # Create copy of base comp item and append it - review_item = copy.deepcopy(base_component_item) # Change location review_item["component_path"] = repre["published_path"] # Change component data @@ -200,18 +243,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): }) } } - # Create copy of item before setting location or changing asset - src_components_to_add.append(copy.deepcopy(review_item)) + if is_first_review_repre: is_first_review_repre = False else: - # Add representation name to asset name of "not first" review - asset_name = review_item["asset_data"]["name"] - review_item["asset_data"]["name"] = "_".join( - (asset_name, repre["name"]) - ) + # later detection for thumbnail duplication not_first_components.append(review_item) + # Create copy of 
item before setting location + src_components_to_add.append(copy.deepcopy(review_item)) + # Set location review_item["component_location"] = ftrack_server_location # Add item to component list @@ -249,6 +290,14 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): continue # Create copy of base comp item and append it other_item = copy.deepcopy(base_component_item) + + # add extended name if any + if ( + not self.keep_first_subset_name_for_review + and extended_asset_name + ): + other_item["asset_data"]["name"] = extended_asset_name + other_item["component_data"] = { "name": repre["name"] } diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py index acd295854d..952b21546d 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py @@ -1,7 +1,17 @@ +""" +Requires: + context > hostName + context > appName + context > appLabel + context > comment + context > ftrackSession + instance > ftrackIntegratedAssetVersionsData +""" + import sys -import json -import pyblish.api + import six +import pyblish.api class IntegrateFtrackNote(pyblish.api.InstancePlugin): @@ -15,100 +25,52 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): # Can be set in presets: # - Allows only `intent` and `comment` keys + note_template = None + # Backwards compatibility note_with_intent_template = "{intent}: {comment}" # - note label must exist in Ftrack note_labels = [] - def get_intent_label(self, session, intent_value): - if not intent_value: - return - - intent_configurations = session.query( - "CustomAttributeConfiguration where key is intent" - ).all() - if not intent_configurations: - return - - intent_configuration = intent_configurations[0] - if len(intent_configuration) > 1: - self.log.warning(( - "Found more than one `intent` custom attribute." - " Using first found." 
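# A runnable paraphrase (editorial sketch only) of the review-component
# naming rules in integrate_ftrack_instances above: the first reviewable
# keeps the subset name when keep_first_subset_name_for_review is set, and
# names are only extended when there are multiple reviewables.
def review_asset_names(asset_name, repre_names, keep_first=True):
    names = []
    for idx, repre_name in enumerate(repre_names):
        if len(repre_names) == 1 or (idx == 0 and keep_first):
            names.append(asset_name)
        else:
            names.append("_".join((asset_name, repre_name)))
    return names

print(review_asset_names("renderMain", ["h264", "prores"]))
# -> ['renderMain', 'renderMain_prores']
print(review_asset_names("renderMain", ["h264", "prores"], keep_first=False))
# -> ['renderMain_h264', 'renderMain_prores']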
-        ))
-
-        config = intent_configuration.get("config")
-        if not config:
-            return
-
-        configuration = json.loads(config)
-        items = configuration.get("data")
-        if not items:
-            return
-
-        if sys.version_info[0] < 3:
-            string_type = basestring
-        else:
-            string_type = str
-
-        if isinstance(items, string_type):
-            items = json.loads(items)
-
-        intent_label = None
-        for item in items:
-            if item["value"] == intent_value:
-                intent_label = item["menu"]
-                break
-
-        return intent_label
-
     def process(self, instance):
-        comment = (instance.context.data.get("comment") or "").strip()
+        # Check if there are any integrated AssetVersion entities
+        asset_versions_key = "ftrackIntegratedAssetVersionsData"
+        asset_versions_data_by_id = instance.data.get(asset_versions_key)
+        if not asset_versions_data_by_id:
+            self.log.info("There are no integrated AssetVersions")
+            return
+
+        context = instance.context
+        host_name = context.data["hostName"]
+        app_name = context.data["appName"]
+        app_label = context.data["appLabel"]
+        comment = (context.data.get("comment") or "").strip()
         if not comment:
             self.log.info("Comment is not set.")
-            return
+        else:
+            self.log.debug("Comment is set to `{}`".format(comment))

-        self.log.debug("Comment is set to `{}`".format(comment))
-
-        session = instance.context.data["ftrackSession"]
+        session = context.data["ftrackSession"]

         intent = instance.context.data.get("intent")
+        intent_label = None
         if intent and isinstance(intent, dict):
             intent_val = intent.get("value")
             intent_label = intent.get("label")
         else:
-            intent_val = intent_label = intent
+            intent_val = intent

-        final_label = None
-        if intent_val:
-            final_label = self.get_intent_label(session, intent_val)
-            if final_label is None:
-                final_label = intent_label
+        if not intent_label:
+            intent_label = intent_val or ""

         # if intent label is set then format comment
         # - it is possible that intent_label is equal to "" (empty string)
-        if final_label:
-            msg = "Intent label is set to `{}`.".format(final_label)
-            comment = self.note_with_intent_template.format(**{
-                "intent": final_label,
-                "comment": comment
-            })
-
-        elif intent_val:
-            msg = (
-                "Intent is set to `{}` and was not added"
-                " to comment because label is set to `{}`."
-            ).format(intent_val, final_label)
+        if intent_label:
+            self.log.debug(
+                "Intent label is set to `{}`.".format(intent_label)
+            )

         else:
-            msg = "Intent is not set."
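# The intent normalization above accepts either a plain string or a
# {"value": ..., "label": ...} dict; a compact sketch of the same rule:
def intent_label_of(intent):
    if intent and isinstance(intent, dict):
        label = intent.get("label") or intent.get("value")
    else:
        label = intent
    return label or ""

print(intent_label_of({"value": "wip", "label": "Work in Progress"}))
# -> Work in Progress
print(intent_label_of("wip"))  # -> wip
print(intent_label_of(None))   # -> empty string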
- - self.log.debug(msg) - - asset_versions_key = "ftrackIntegratedAssetVersions" - asset_versions = instance.data.get(asset_versions_key) - if not asset_versions: - self.log.info("There are any integrated AssetVersions") - return + self.log.debug("Intent is not set.") user = session.query( "User where username is \"{}\"".format(session.api_user) @@ -122,7 +84,7 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): labels = [] if self.note_labels: - all_labels = session.query("NoteLabel").all() + all_labels = session.query("select id, name from NoteLabel").all() labels_by_low_name = {lab["name"].lower(): lab for lab in all_labels} for _label in self.note_labels: label = labels_by_low_name.get(_label.lower()) @@ -134,7 +96,34 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): labels.append(label) - for asset_version in asset_versions: + for asset_version_data in asset_versions_data_by_id.values(): + asset_version = asset_version_data["asset_version"] + component_items = asset_version_data["component_items"] + + published_paths = set() + for component_item in component_items: + published_paths.add(component_item["component_path"]) + + # Backwards compatibility for older settings using + # attribute 'note_with_intent_template' + template = self.note_template + if template is None: + template = self.note_with_intent_template + format_data = { + "intent": intent_label, + "comment": comment, + "host_name": host_name, + "app_name": app_name, + "app_label": app_label, + "published_paths": "
".join(sorted(published_paths)), + } + comment = template.format(**format_data) + if not comment: + self.log.info(( + "Note for AssetVersion {} would be empty. Skipping." + "\nTemplate: {}\nData: {}" + ).format(asset_version["id"], template, format_data)) + continue asset_version.create_note(comment, author=user, labels=labels) try: diff --git a/openpype/modules/log_viewer/tray/app.py b/openpype/modules/log_viewer/tray/app.py index 71827fcac9..def319e0e3 100644 --- a/openpype/modules/log_viewer/tray/app.py +++ b/openpype/modules/log_viewer/tray/app.py @@ -27,11 +27,11 @@ class LogsWindow(QtWidgets.QWidget): self.setStyleSheet(style.load_stylesheet()) - self._frist_show = True + self._first_show = True def showEvent(self, event): super(LogsWindow, self).showEvent(event) - if self._frist_show: - self._frist_show = False + if self._first_show: + self._first_show = False self.logs_widget.refresh() diff --git a/openpype/modules/python_console_interpreter/window/widgets.py b/openpype/modules/python_console_interpreter/window/widgets.py index ecf41eaf3e..36ce1b61a2 100644 --- a/openpype/modules/python_console_interpreter/window/widgets.py +++ b/openpype/modules/python_console_interpreter/window/widgets.py @@ -389,7 +389,8 @@ class PythonInterpreterWidget(QtWidgets.QWidget): self._append_lines([openpype_art]) - self.setStyleSheet(load_stylesheet()) + self._first_show = True + self._splitter_size_ratio = None self._init_from_registry() @@ -416,9 +417,9 @@ class PythonInterpreterWidget(QtWidgets.QWidget): self.resize(width, height) try: - sizes = setting_registry.get_item("splitter_sizes") - if len(sizes) == len(self._widgets_splitter.sizes()): - self._widgets_splitter.setSizes(sizes) + self._splitter_size_ratio = ( + setting_registry.get_item("splitter_sizes") + ) except ValueError: pass @@ -627,8 +628,29 @@ class PythonInterpreterWidget(QtWidgets.QWidget): def showEvent(self, event): self._line_check_timer.start() super(PythonInterpreterWidget, self).showEvent(event) + # First show setup + if self._first_show: + self._first_show = False + self._on_first_show() + self._output_widget.scroll_to_bottom() + def _on_first_show(self): + # Change stylesheet + self.setStyleSheet(load_stylesheet()) + # Check if splitter size ratio is set + # - first store value to local variable and then unset it + splitter_size_ratio = self._splitter_size_ratio + self._splitter_size_ratio = None + # Skip if is not set + if not splitter_size_ratio: + return + + # Skip if number of size items does not match to splitter + splitters_count = len(self._widgets_splitter.sizes()) + if len(splitter_size_ratio) == splitters_count: + self._widgets_splitter.setSizes(splitter_size_ratio) + def closeEvent(self, event): self.save_registry() super(PythonInterpreterWidget, self).closeEvent(event) diff --git a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py b/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py index 82a79daf3b..cdc37588cd 100644 --- a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py +++ b/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py @@ -119,7 +119,7 @@ class OpenPypeContextSelector: # app names and versions, but since app_name is not used # currently down the line (but it is required by OP publish command # right now). 
- self.context["app_name"] = "maya/2020" + # self.context["app_name"] = "maya/2022" return True @staticmethod @@ -139,7 +139,8 @@ class OpenPypeContextSelector: env = {"AVALON_PROJECT": str(self.context.get("project")), "AVALON_ASSET": str(self.context.get("asset")), "AVALON_TASK": str(self.context.get("task")), - "AVALON_APP_NAME": str(self.context.get("app_name"))} + # "AVALON_APP_NAME": str(self.context.get("app_name")) + } print(">>> setting environment:") for k, v in env.items(): @@ -184,7 +185,7 @@ selector = OpenPypeContextSelector() selector.context["project"] = os.getenv("AVALON_PROJECT") selector.context["asset"] = os.getenv("AVALON_ASSET") selector.context["task"] = os.getenv("AVALON_TASK") -selector.context["app_name"] = os.getenv("AVALON_APP_NAME") +# selector.context["app_name"] = os.getenv("AVALON_APP_NAME") # if anything inside is None, scratch the whole thing and # ask user for context. diff --git a/openpype/modules/sync_server/providers/dropbox.py b/openpype/modules/sync_server/providers/dropbox.py index f5910299e5..dfc42fed75 100644 --- a/openpype/modules/sync_server/providers/dropbox.py +++ b/openpype/modules/sync_server/providers/dropbox.py @@ -17,6 +17,7 @@ class DropboxHandler(AbstractProvider): self.active = False self.site_name = site_name self.presets = presets + self.dbx = None if not self.presets: log.info( @@ -24,6 +25,11 @@ class DropboxHandler(AbstractProvider): ) return + if not self.presets["enabled"]: + log.debug("Sync Server: Site {} not enabled for {}.". + format(site_name, project_name)) + return + token = self.presets.get("token", "") if not token: msg = "Sync Server: No access token for dropbox provider" @@ -44,16 +50,13 @@ class DropboxHandler(AbstractProvider): log.info(msg) return - self.dbx = None - - if self.presets["enabled"]: - try: - self.dbx = self._get_service( - token, acting_as_member, team_folder_name - ) - except Exception as e: - log.info("Could not establish dropbox object: {}".format(e)) - return + try: + self.dbx = self._get_service( + token, acting_as_member, team_folder_name + ) + except Exception as e: + log.info("Could not establish dropbox object: {}".format(e)) + return super(AbstractProvider, self).__init__() diff --git a/openpype/modules/sync_server/providers/gdrive.py b/openpype/modules/sync_server/providers/gdrive.py index 0b586613b5..aa7329b104 100644 --- a/openpype/modules/sync_server/providers/gdrive.py +++ b/openpype/modules/sync_server/providers/gdrive.py @@ -73,8 +73,28 @@ class GDriveHandler(AbstractProvider): format(site_name)) return - cred_path = self.presets.get("credentials_url", {}).\ - get(platform.system().lower()) or '' + if not self.presets["enabled"]: + log.debug("Sync Server: Site {} not enabled for {}.". + format(site_name, project_name)) + return + + current_platform = platform.system().lower() + cred_path = self.presets.get("credentials_url", {}). 
\ + get(current_platform) or '' + + if not cred_path: + msg = "Sync Server: Please, fill the credentials for gdrive "\ + "provider for platform '{}' !".format(current_platform) + log.info(msg) + return + + try: + cred_path = cred_path.format(**os.environ) + except KeyError as e: + log.info("Sync Server: The key(s) {} does not exist in the " + "environment variables".format(" ".join(e.args))) + return + if not os.path.exists(cred_path): msg = "Sync Server: No credentials for gdrive provider " + \ "for '{}' on path '{}'!".format(site_name, cred_path) @@ -82,11 +102,10 @@ class GDriveHandler(AbstractProvider): return self.service = None - if self.presets["enabled"]: - self.service = self._get_gd_service(cred_path) + self.service = self._get_gd_service(cred_path) - self._tree = tree - self.active = True + self._tree = tree + self.active = True def is_active(self): """ diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py index caf58503f1..3744a21b43 100644 --- a/openpype/modules/sync_server/sync_server_module.py +++ b/openpype/modules/sync_server/sync_server_module.py @@ -4,7 +4,7 @@ from datetime import datetime import threading import platform import copy -from collections import deque +from collections import deque, defaultdict from avalon.api import AvalonMongoDB @@ -157,7 +157,6 @@ class SyncServerModule(OpenPypeModule, ITrayModule): representation_id, site_name=site_name, force=force) - # public facing API def remove_site(self, collection, representation_id, site_name, remove_local_files=False): """ @@ -184,6 +183,151 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if remove_local_files: self._remove_local_file(collection, representation_id, site_name) + def compute_resource_sync_sites(self, project_name): + """Get available resource sync sites state for publish process. + + Returns dict with prepared state of sync sites for 'project_name'. + It checks if Site Sync is enabled, handles alternative sites. + Publish process stores this dictionary as a part of representation + document in DB. + + Example: + [ + { + 'name': '42abbc09-d62a-44a4-815c-a12cd679d2d7', + 'created_dt': datetime.datetime(2022, 3, 30, 12, 16, 9, 778637) + }, + {'name': 'studio'}, + {'name': 'SFTP'} + ] -- representation is published locally, artist or Settings have set + remote site as 'studio'. 'SFTP' is alternate site to 'studio'. Eg. + whenever file is on 'studio', it is also on 'SFTP'. 
+ """ + + def create_metadata(name, created=True): + """Create sync site metadata for site with `name`""" + metadata = {"name": name} + if created: + metadata["created_dt"] = datetime.now() + return metadata + + if ( + not self.sync_system_settings["enabled"] or + not self.sync_project_settings[project_name]["enabled"]): + return [create_metadata(self.DEFAULT_SITE)] + + local_site = self.get_active_site(project_name) + remote_site = self.get_remote_site(project_name) + + # Attached sites metadata by site name + # That is the local site, remote site, the always accesible sites + # and their alternate sites (alias of sites with different protocol) + attached_sites = dict() + attached_sites[local_site] = create_metadata(local_site) + + if remote_site and remote_site not in attached_sites: + attached_sites[remote_site] = create_metadata(remote_site, + created=False) + + attached_sites = self._add_alternative_sites(attached_sites) + # add skeleton for sites where it should be always synced to + # usually it would be a backup site which is handled by separate + # background process + for site in self._get_always_accessible_sites(project_name): + if site not in attached_sites: + attached_sites[site] = create_metadata(site, created=False) + + return list(attached_sites.values()) + + def _get_always_accessible_sites(self, project_name): + """Sites that synced to as a part of background process. + + Artist machine doesn't handle those, explicit Tray with that site name + as a local id must be running. + Example is dropbox site serving as a backup solution + """ + always_accessible_sites = ( + self.get_sync_project_setting(project_name)["config"]. + get("always_accessible_on", []) + ) + return [site.strip() for site in always_accessible_sites] + + def _add_alternative_sites(self, attached_sites): + """Add skeleton document for alternative sites + + Each new configured site in System Setting could serve as a alternative + site, it's a kind of alias. It means that files on 'a site' are + physically accessible also on 'a alternative' site. + Example is sftp site serving studio files via sftp protocol, physically + file is only in studio, sftp server has this location mounted. + """ + additional_sites = self.sync_system_settings.get("sites", {}) + + alt_site_pairs = self._get_alt_site_pairs(additional_sites) + + for site_name in additional_sites.keys(): + # Get alternate sites (stripped names) for this site name + alt_sites = alt_site_pairs.get(site_name) + alt_sites = [site.strip() for site in alt_sites] + alt_sites = set(alt_sites) + + # If no alternative sites we don't need to add + if not alt_sites: + continue + + # Take a copy of data of the first alternate site that is already + # defined as an attached site to match the same state. + match_meta = next((attached_sites[site] for site in alt_sites + if site in attached_sites), None) + if not match_meta: + continue + + alt_site_meta = copy.deepcopy(match_meta) + alt_site_meta["name"] = site_name + + # Note: We change mutable `attached_site` dict in-place + attached_sites[site_name] = alt_site_meta + + return attached_sites + + def _get_alt_site_pairs(self, conf_sites): + """Returns dict of site and its alternative sites. 
+ + If `site` has alternative site, it means that alt_site has 'site' as + alternative site + Args: + conf_sites (dict) + Returns: + (dict): {'site': [alternative sites]...} + """ + alt_site_pairs = defaultdict(set) + for site_name, site_info in conf_sites.items(): + alt_sites = set(site_info.get("alternative_sites", [])) + alt_site_pairs[site_name].update(alt_sites) + + for alt_site in alt_sites: + alt_site_pairs[alt_site].add(site_name) + + for site_name, alt_sites in alt_site_pairs.items(): + sites_queue = deque(alt_sites) + while sites_queue: + alt_site = sites_queue.popleft() + + # safety against wrong config + # {"SFTP": {"alternative_site": "SFTP"} + if alt_site == site_name or alt_site not in alt_site_pairs: + continue + + for alt_alt_site in alt_site_pairs[alt_site]: + if ( + alt_alt_site != site_name + and alt_alt_site not in alt_sites + ): + alt_sites.add(alt_alt_site) + sites_queue.append(alt_alt_site) + + return alt_site_pairs + def clear_project(self, collection, site_name): """ Clear 'collection' of 'site_name' and its local files @@ -848,6 +992,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if self.enabled and sync_settings.get('enabled'): sites.append(self.LOCAL_SITE) + active_site = sync_settings["config"]["active_site"] + # for Tray running background process + if active_site not in sites and active_site == get_local_site_id(): + sites.append(active_site) + return sites def tray_init(self): diff --git a/openpype/pipeline/__init__.py b/openpype/pipeline/__init__.py index 5c077b0914..b7aaa33d49 100644 --- a/openpype/pipeline/__init__.py +++ b/openpype/pipeline/__init__.py @@ -12,6 +12,13 @@ from .create import ( LegacyCreator, legacy_create, + + discover_creator_plugins, + discover_legacy_creator_plugins, + register_creator_plugin, + deregister_creator_plugin, + register_creator_plugin_path, + deregister_creator_plugin_path, ) from .load import ( @@ -33,6 +40,7 @@ from .load import ( loaders_from_representation, get_representation_path, + get_representation_context, get_repres_contexts, ) @@ -60,6 +68,22 @@ from .actions import ( deregister_inventory_action_path, ) +from .context_tools import ( + install_openpype_plugins, + install_host, + uninstall_host, + is_installed, + + register_root, + registered_root, + + register_host, + registered_host, + deregister_host, +) +install = install_host +uninstall = uninstall_host + __all__ = ( "AVALON_CONTAINER_ID", @@ -80,6 +104,13 @@ __all__ = ( "LegacyCreator", "legacy_create", + "discover_creator_plugins", + "discover_legacy_creator_plugins", + "register_creator_plugin", + "deregister_creator_plugin", + "register_creator_plugin_path", + "deregister_creator_plugin_path", + # --- Load --- "HeroVersionType", "IncompatibleLoaderError", @@ -99,6 +130,7 @@ __all__ = ( "loaders_from_representation", "get_representation_path", + "get_representation_context", "get_repres_contexts", # --- Publish --- @@ -121,4 +153,21 @@ __all__ = ( "register_inventory_action_path", "deregister_inventory_action", "deregister_inventory_action_path", + + # --- Process context --- + "install_openpype_plugins", + "install_host", + "uninstall_host", + "is_installed", + + "register_root", + "registered_root", + + "register_host", + "registered_host", + "deregister_host", + + # Backwards compatible function names + "install", + "uninstall", ) diff --git a/openpype/pipeline/actions.py b/openpype/pipeline/actions.py index 141e277db3..b488fe3e1f 100644 --- a/openpype/pipeline/actions.py +++ b/openpype/pipeline/actions.py @@ -1,4 +1,11 @@ import logging 
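+# Usage sketch of the discovery helpers below (the path is illustrative):
+#
+#     from openpype.pipeline import actions
+#     actions.register_launcher_action_path("/studio/plugins/launcher")
+#     launcher_actions = actions.discover_launcher_actions()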
+from openpype.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) class LauncherAction(object): @@ -90,28 +97,20 @@ class InventoryAction(object): # Launcher action def discover_launcher_actions(): - import avalon.api - - return avalon.api.discover(LauncherAction) + return discover(LauncherAction) def register_launcher_action(plugin): - import avalon.api - - return avalon.api.register_plugin(LauncherAction, plugin) + return register_plugin(LauncherAction, plugin) def register_launcher_action_path(path): - import avalon.api - - return avalon.api.register_plugin_path(LauncherAction, path) + return register_plugin_path(LauncherAction, path) # Inventory action def discover_inventory_actions(): - import avalon.api - - actions = avalon.api.discover(InventoryAction) + actions = discover(InventoryAction) filtered_actions = [] for action in actions: if action is not InventoryAction: @@ -121,24 +120,16 @@ def discover_inventory_actions(): def register_inventory_action(plugin): - import avalon.api - - return avalon.api.register_plugin(InventoryAction, plugin) + return register_plugin(InventoryAction, plugin) def deregister_inventory_action(plugin): - import avalon.api - - avalon.api.deregister_plugin(InventoryAction, plugin) + deregister_plugin(InventoryAction, plugin) def register_inventory_action_path(path): - import avalon.api - - return avalon.api.register_plugin_path(InventoryAction, path) + return register_plugin_path(InventoryAction, path) def deregister_inventory_action_path(path): - import avalon.api - - return avalon.api.deregister_plugin_path(InventoryAction, path) + return deregister_plugin_path(InventoryAction, path) diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py new file mode 100644 index 0000000000..1bef260ec9 --- /dev/null +++ b/openpype/pipeline/context_tools.py @@ -0,0 +1,335 @@ +"""Core pipeline functionality""" + +import os +import sys +import json +import types +import logging +import inspect +import platform + +import pyblish.api +from pyblish.lib import MessageHandler + +from avalon import io, Session + +import openpype +from openpype.modules import load_modules +from openpype.settings import get_project_settings +from openpype.lib import ( + Anatomy, + register_event_callback, + filter_pyblish_plugins, + change_timer_to_current_context, +) + +from . import ( + register_loader_plugin_path, + register_inventory_action, + register_creator_plugin_path, + deregister_loader_plugin_path, +) + + +_is_installed = False +_registered_root = {"_": ""} +_registered_host = {"_": None} + +log = logging.getLogger(__name__) + +PACKAGE_DIR = os.path.dirname(os.path.abspath(openpype.__file__)) +PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") + +# Global plugin paths +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") + + +def register_root(path): + """Register currently active root""" + log.info("Registering root: %s" % path) + _registered_root["_"] = path + + +def registered_root(): + """Return currently registered root""" + root = _registered_root["_"] + if root: + return root + + root = Session.get("AVALON_PROJECTS") + if root: + return os.path.normpath(root) + return "" + + +def install_host(host): + """Install `host` into the running Python session. + + Args: + host (module): A Python module containing the Avalon + avalon host-interface. 
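+
+        A minimal sketch, assuming a host module such as Maya's is
+        importable:
+
+            from openpype.hosts.maya import api
+            install_host(api)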
+ """ + global _is_installed + + _is_installed = True + + io.install() + + missing = list() + for key in ("AVALON_PROJECT", "AVALON_ASSET"): + if key not in Session: + missing.append(key) + + assert not missing, ( + "%s missing from environment, %s" % ( + ", ".join(missing), + json.dumps(Session, indent=4, sort_keys=True) + )) + + project_name = Session["AVALON_PROJECT"] + log.info("Activating %s.." % project_name) + + # Optional host install function + if hasattr(host, "install"): + host.install() + + register_host(host) + + register_event_callback("taskChanged", _on_task_change) + + def modified_emit(obj, record): + """Method replacing `emit` in Pyblish's MessageHandler.""" + record.msg = record.getMessage() + obj.records.append(record) + + MessageHandler.emit = modified_emit + + install_openpype_plugins() + + +def install_openpype_plugins(project_name=None): + # Make sure modules are loaded + load_modules() + + log.info("Registering global plug-ins..") + pyblish.api.register_plugin_path(PUBLISH_PATH) + pyblish.api.register_discovery_filter(filter_pyblish_plugins) + register_loader_plugin_path(LOAD_PATH) + + if project_name is None: + project_name = os.environ.get("AVALON_PROJECT") + + # Register studio specific plugins + if project_name: + anatomy = Anatomy(project_name) + anatomy.set_root_environments() + register_root(anatomy.roots) + + project_settings = get_project_settings(project_name) + platform_name = platform.system().lower() + project_plugins = ( + project_settings + .get("global", {}) + .get("project_plugins", {}) + .get(platform_name) + ) or [] + for path in project_plugins: + try: + path = str(path.format(**os.environ)) + except KeyError: + pass + + if not path or not os.path.exists(path): + continue + + pyblish.api.register_plugin_path(path) + register_loader_plugin_path(path) + register_creator_plugin_path(path) + register_inventory_action(path) + + +def _on_task_change(): + change_timer_to_current_context() + + +def uninstall_host(): + """Undo all of what `install()` did""" + host = registered_host() + + try: + host.uninstall() + except AttributeError: + pass + + log.info("Deregistering global plug-ins..") + pyblish.api.deregister_plugin_path(PUBLISH_PATH) + pyblish.api.deregister_discovery_filter(filter_pyblish_plugins) + deregister_loader_plugin_path(LOAD_PATH) + log.info("Global plug-ins unregistred") + + deregister_host() + + io.uninstall() + + log.info("Successfully uninstalled Avalon!") + + +def is_installed(): + """Return state of installation + + Returns: + True if installed, False otherwise + + """ + + return _is_installed + + +def register_host(host): + """Register a new host for the current process + + Arguments: + host (ModuleType): A module implementing the + Host API interface. See the Host API + documentation for details on what is + required, or browse the source code. 
+ + """ + signatures = { + "ls": [] + } + + _validate_signature(host, signatures) + _registered_host["_"] = host + + +def _validate_signature(module, signatures): + # Required signatures for each member + + missing = list() + invalid = list() + success = True + + for member in signatures: + if not hasattr(module, member): + missing.append(member) + success = False + + else: + attr = getattr(module, member) + if sys.version_info.major >= 3: + signature = inspect.getfullargspec(attr)[0] + else: + signature = inspect.getargspec(attr)[0] + required_signature = signatures[member] + + assert isinstance(signature, list) + assert isinstance(required_signature, list) + + if not all(member in signature + for member in required_signature): + invalid.append({ + "member": member, + "signature": ", ".join(signature), + "required": ", ".join(required_signature) + }) + success = False + + if not success: + report = list() + + if missing: + report.append( + "Incomplete interface for module: '%s'\n" + "Missing: %s" % (module, ", ".join( + "'%s'" % member for member in missing)) + ) + + if invalid: + report.append( + "'%s': One or more members were found, but didn't " + "have the right argument signature." % module.__name__ + ) + + for member in invalid: + report.append( + " Found: {member}({signature})".format(**member) + ) + report.append( + " Expected: {member}({required})".format(**member) + ) + + raise ValueError("\n".join(report)) + + +def registered_host(): + """Return currently registered host""" + return _registered_host["_"] + + +def deregister_host(): + _registered_host["_"] = default_host() + + +def default_host(): + """A default host, in place of anything better + + This may be considered as reference for the + interface a host must implement. It also ensures + that the system runs, even when nothing is there + to support it. 
+ + """ + + host = types.ModuleType("defaultHost") + + def ls(): + return list() + + host.__dict__.update({ + "ls": ls + }) + + return host + + +def debug_host(): + """A debug host, useful to debugging features that depend on a host""" + + host = types.ModuleType("debugHost") + + def ls(): + containers = [ + { + "representation": "ee-ft-a-uuid1", + "schema": "openpype:container-1.0", + "name": "Bruce01", + "objectName": "Bruce01_node", + "namespace": "_bruce01_", + "version": 3, + }, + { + "representation": "aa-bc-s-uuid2", + "schema": "openpype:container-1.0", + "name": "Bruce02", + "objectName": "Bruce01_node", + "namespace": "_bruce02_", + "version": 2, + } + ] + + for container in containers: + yield container + + host.__dict__.update({ + "ls": ls, + "open_file": lambda fname: None, + "save_file": lambda fname: None, + "current_file": lambda: os.path.expanduser("~/temp.txt"), + "has_unsaved_changes": lambda: False, + "work_root": lambda: os.path.expanduser("~/temp"), + "file_extensions": lambda: ["txt"], + }) + + return host diff --git a/openpype/pipeline/create/__init__.py b/openpype/pipeline/create/__init__.py index 9571f56b8f..1beeb4267b 100644 --- a/openpype/pipeline/create/__init__.py +++ b/openpype/pipeline/create/__init__.py @@ -6,7 +6,14 @@ from .creator_plugins import ( BaseCreator, Creator, - AutoCreator + AutoCreator, + + discover_creator_plugins, + discover_legacy_creator_plugins, + register_creator_plugin, + deregister_creator_plugin, + register_creator_plugin_path, + deregister_creator_plugin_path, ) from .context import ( @@ -29,6 +36,13 @@ __all__ = ( "Creator", "AutoCreator", + "discover_creator_plugins", + "discover_legacy_creator_plugins", + "register_creator_plugin", + "deregister_creator_plugin", + "register_creator_plugin_path", + "deregister_creator_plugin_path", + "CreatedInstance", "CreateContext", diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index eeb08a6294..0cc2819172 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -9,7 +9,8 @@ from contextlib import contextmanager from .creator_plugins import ( BaseCreator, Creator, - AutoCreator + AutoCreator, + discover_creator_plugins, ) from openpype.api import ( @@ -17,6 +18,8 @@ from openpype.api import ( get_project_settings ) +UpdateData = collections.namedtuple("UpdateData", ["instance", "changes"]) + class ImmutableKeyError(TypeError): """Accessed key is immutable so does not allow changes or removements.""" @@ -353,7 +356,7 @@ class CreatedInstance: already existing instance. creator(BaseCreator): Creator responsible for instance. host(ModuleType): Host implementation loaded with - `avalon.api.registered_host`. + `openpype.pipeline.registered_host`. new(bool): Is instance new. 
""" # Keys that can't be changed or removed from data after loading using @@ -843,7 +846,7 @@ class CreateContext: creators = {} autocreators = {} manual_creators = {} - for creator_class in avalon.api.discover(BaseCreator): + for creator_class in discover_creator_plugins(): if inspect.isabstract(creator_class): self.log.info( "Skipping abstract Creator {}".format(str(creator_class)) @@ -1081,7 +1084,7 @@ class CreateContext: for instance in cretor_instances: instance_changes = instance.changes() if instance_changes: - update_list.append((instance, instance_changes)) + update_list.append(UpdateData(instance, instance_changes)) creator = self.creators[identifier] if update_list: diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index 1ac2c420a2..36bccd427e 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -8,7 +8,19 @@ from abc import ( ) import six -from openpype.lib import get_subset_name_with_asset_doc +from openpype.lib import ( + get_subset_name_with_asset_doc, + set_plugin_attributes_from_settings, +) +from openpype.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) + +from .legacy_create import LegacyCreator class CreatorError(Exception): @@ -46,6 +58,11 @@ class BaseCreator: # - may not be used if `get_icon` is reimplemented icon = None + # Instance attribute definitions that can be changed per instance + # - returns list of attribute definitions from + # `openpype.pipeline.attribute_definitions` + instance_attr_defs = [] + def __init__( self, create_context, system_settings, project_settings, headless=False ): @@ -56,10 +73,13 @@ class BaseCreator: # - we may use UI inside processing this attribute should be checked self.headless = headless - @abstractproperty + @property def identifier(self): - """Identifier of creator (must be unique).""" - pass + """Identifier of creator (must be unique). + + Default implementation returns plugin's family. + """ + return self.family @abstractproperty def family(self): @@ -90,11 +110,39 @@ class BaseCreator: pass @abstractmethod - def collect_instances(self, attr_plugins=None): + def collect_instances(self): + """Collect existing instances related to this creator plugin. + + The implementation differs on host abilities. The creator has to + collect metadata about instance and create 'CreatedInstance' object + which should be added to 'CreateContext'. + + Example: + ```python + def collect_instances(self): + # Getting existing instances is different per host implementation + for instance_data in pipeline.list_instances(): + # Process only instances that were created by this creator + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + # Create instance object from existing data + instance = CreatedInstance.from_existing( + instance_data, self + ) + # Add instance to create context + self._add_instance_to_context(instance) + ``` + """ pass @abstractmethod def update_instances(self, update_list): + """Store changes of existing instances so they can be recollected. + + Args: + update_list(list): Gets list of tuples. Each item + contain changed instance and it's changes. + """ pass @abstractmethod @@ -178,7 +226,7 @@ class BaseCreator: list: Attribute definitions that can be tweaked for created instance. 
""" - return [] + return self.instance_attr_defs class Creator(BaseCreator): @@ -191,6 +239,9 @@ class Creator(BaseCreator): # - default_variants may not be used if `get_default_variants` is overriden default_variants = [] + # Default variant used in 'get_default_variant' + default_variant = None + # Short description of family # - may not be used if `get_description` is overriden description = None @@ -204,6 +255,10 @@ class Creator(BaseCreator): # e.g. for buld creators create_allow_context_change = True + # Precreate attribute definitions showed before creation + # - similar to instance attribute definitions + pre_create_attr_defs = [] + @abstractmethod def create(self, subset_name, instance_data, pre_create_data): """Create new instance and store it. @@ -263,7 +318,7 @@ class Creator(BaseCreator): `get_default_variants` should be used. """ - return None + return self.default_variant def get_pre_create_attr_defs(self): """Plugin attribute definitions needed for creation. @@ -276,7 +331,7 @@ class Creator(BaseCreator): list: Attribute definitions that can be tweaked for created instance. """ - return [] + return self.pre_create_attr_defs class AutoCreator(BaseCreator): @@ -284,6 +339,43 @@ class AutoCreator(BaseCreator): Can be used e.g. for `workfile`. """ + def remove_instances(self, instances): """Skip removement.""" pass + + +def discover_creator_plugins(): + return discover(BaseCreator) + + +def discover_legacy_creator_plugins(): + plugins = discover(LegacyCreator) + set_plugin_attributes_from_settings(plugins, LegacyCreator) + return plugins + + +def register_creator_plugin(plugin): + if issubclass(plugin, BaseCreator): + register_plugin(BaseCreator, plugin) + + elif issubclass(plugin, LegacyCreator): + register_plugin(LegacyCreator, plugin) + + +def deregister_creator_plugin(plugin): + if issubclass(plugin, BaseCreator): + deregister_plugin(BaseCreator, plugin) + + elif issubclass(plugin, LegacyCreator): + deregister_plugin(LegacyCreator, plugin) + + +def register_creator_plugin_path(path): + register_plugin_path(BaseCreator, path) + register_plugin_path(LegacyCreator, path) + + +def deregister_creator_plugin_path(path): + deregister_plugin_path(BaseCreator, path) + deregister_plugin_path(LegacyCreator, path) diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py index cf6629047e..46e0e3d663 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/openpype/pipeline/create/legacy_create.py @@ -142,7 +142,8 @@ def legacy_create(Creator, name, asset, options=None, data=None): Name of instance """ - from avalon.api import registered_host + from openpype.pipeline import registered_host + host = registered_host() plugin = Creator(name, asset, options, data) diff --git a/openpype/pipeline/farm/__init__.py b/openpype/pipeline/farm/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/pipeline/farm/patterning.py b/openpype/pipeline/farm/patterning.py new file mode 100644 index 0000000000..1e4b5bf37d --- /dev/null +++ b/openpype/pipeline/farm/patterning.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +import re + + +def match_aov_pattern(host_name, aov_patterns, render_file_name): + """Matching against a `AOV` pattern in the render files. + + In order to match the AOV name we must compare + against the render filename string that we are + grabbing the render filename string from the collection + that we have grabbed from `exp_files`. + + Args: + app (str): Host name. 
+ aov_patterns (dict): AOV patterns from AOV filters. + render_file_name (str): Incoming file name to match against. + + Returns: + bool: Review state for rendered file (render_file_name). + """ + aov_pattern = aov_patterns.get(host_name, []) + if not aov_pattern: + return False + return any(re.match(p, render_file_name) for p in aov_pattern) diff --git a/openpype/pipeline/load/plugins.py b/openpype/pipeline/load/plugins.py index 9b2b6bb084..a30a2188a4 100644 --- a/openpype/pipeline/load/plugins.py +++ b/openpype/pipeline/load/plugins.py @@ -1,5 +1,13 @@ import logging +from openpype.lib import set_plugin_attributes_from_settings +from openpype.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) from .utils import get_representation_path_from_context @@ -33,7 +41,8 @@ class LoaderPlugin(list): def get_representations(cls): return cls.representations - def filepath_from_context(self, context): + @classmethod + def filepath_from_context(cls, context): return get_representation_path_from_context(context) def load(self, context, name=None, namespace=None, options=None): @@ -102,30 +111,22 @@ class SubsetLoaderPlugin(LoaderPlugin): def discover_loader_plugins(): - import avalon.api - - return avalon.api.discover(LoaderPlugin) + plugins = discover(LoaderPlugin) + set_plugin_attributes_from_settings(plugins, LoaderPlugin) + return plugins def register_loader_plugin(plugin): - import avalon.api - - return avalon.api.register_plugin(LoaderPlugin, plugin) - - -def deregister_loader_plugin_path(path): - import avalon.api - - avalon.api.deregister_plugin_path(LoaderPlugin, path) - - -def register_loader_plugin_path(path): - import avalon.api - - return avalon.api.register_plugin_path(LoaderPlugin, path) + return register_plugin(LoaderPlugin, plugin) def deregister_loader_plugin(plugin): - import avalon.api + deregister_plugin(LoaderPlugin, plugin) - avalon.api.deregister_plugin(LoaderPlugin, plugin) + +def deregister_loader_plugin_path(path): + deregister_plugin_path(LoaderPlugin, path) + + +def register_loader_plugin_path(path): + return register_plugin_path(LoaderPlugin, path) diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py index 53ac6b626d..cb7c76f133 100644 --- a/openpype/pipeline/load/utils.py +++ b/openpype/pipeline/load/utils.py @@ -10,7 +10,7 @@ import six from bson.objectid import ObjectId from avalon import io, schema -from avalon.api import Session, registered_root +from avalon.api import Session from openpype.lib import Anatomy @@ -532,6 +532,8 @@ def get_representation_path(representation, root=None, dbcon=None): dbcon = io if root is None: + from openpype.pipeline import registered_root + root = registered_root() def path_from_represenation(): diff --git a/openpype/pipeline/plugin_discover.py b/openpype/pipeline/plugin_discover.py new file mode 100644 index 0000000000..fb860fe5f2 --- /dev/null +++ b/openpype/pipeline/plugin_discover.py @@ -0,0 +1,298 @@ +import os +import inspect +import traceback + +from openpype.api import Logger +from openpype.lib.python_module_tools import ( + modules_from_path, + classes_from_module, +) + +log = Logger.get_logger(__name__) + + +class DiscoverResult: + """Result of Plug-ins discovery of a single superclass type. + + Stores discovered, duplicated, ignored and abstract plugins and file paths + which crashed on execution of file. 
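+
+    A usage sketch (the plugin base class is a placeholder):
+
+        result = PluginDiscoverContext().discover(
+            MyPluginBase, return_report=True
+        )
+        result.log_report(only_errors=False)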
+ """ + + def __init__(self, superclass): + self.superclass = superclass + self.plugins = [] + self.crashed_file_paths = {} + self.duplicated_plugins = [] + self.abstract_plugins = [] + self.ignored_plugins = set() + # Store loaded modules to keep them in memory + self._modules = set() + + def __iter__(self): + for plugin in self.plugins: + yield plugin + + def __getitem__(self, item): + return self.plugins[item] + + def __setitem__(self, item, value): + self.plugins[item] = value + + def add_module(self, module): + """Add dynamically loaded python module to keep it in memory.""" + self._modules.add(module) + + def get_report(self, only_errors=True, exc_info=True, full_report=False): + lines = [] + if not only_errors: + # Successfully discovered plugins + if self.plugins or full_report: + lines.append( + "*** Discovered {} plugins".format(len(self.plugins)) + ) + for cls in self.plugins: + lines.append("- {}".format(cls.__class__.__name__)) + + # Plugin that were defined to be ignored + if self.ignored_plugins or full_report: + lines.append("*** Ignored plugins {}".format(len( + self.ignored_plugins + ))) + for cls in self.ignored_plugins: + lines.append("- {}".format(cls.__class__.__name__)) + + # Abstract classes + if self.abstract_plugins or full_report: + lines.append("*** Discovered {} abstract plugins".format(len( + self.abstract_plugins + ))) + for cls in self.abstract_plugins: + lines.append("- {}".format(cls.__class__.__name__)) + + # Abstract classes + if self.duplicated_plugins or full_report: + lines.append("*** There were {} duplicated plugins".format(len( + self.duplicated_plugins + ))) + for cls in self.duplicated_plugins: + lines.append("- {}".format(cls.__class__.__name__)) + + if self.crashed_file_paths or full_report: + lines.append("*** Failed to load {} files".format(len( + self.crashed_file_paths + ))) + for path, exc_info_args in self.crashed_file_paths.items(): + lines.append("- {}".format(path)) + if exc_info: + lines.append(10 * "*") + lines.extend(traceback.format_exception(*exc_info_args)) + lines.append(10 * "*") + + return "\n".join(lines) + + def log_report(self, only_errors=True, exc_info=True): + report = self.get_report(only_errors, exc_info) + if report: + log.info(report) + + +class PluginDiscoverContext(object): + """Store and discover registered types nad registered paths to types. + + Keeps in memory all registered types and their paths. Paths are dynamically + loaded on discover so different discover calls won't return the same + class objects even if were loaded from same file. + """ + + def __init__(self): + self._registered_plugins = {} + self._registered_plugin_paths = {} + self._last_discovered_plugins = {} + # Store the last result to memory + self._last_discovered_results = {} + + def get_last_discovered_plugins(self, superclass): + """Access last discovered plugin by a subperclass. + + Returns: + None: When superclass was not discovered yet. + list: Lastly discovered plugins of the superclass. + """ + + return self._last_discovered_plugins.get(superclass) + + def discover( + self, + superclass, + allow_duplicates=True, + ignore_classes=None, + return_report=False + ): + """Find and return subclasses of `superclass` + + Args: + superclass (type): Class which determines discovered subclasses. + allow_duplicates (bool): Validate class name duplications. + ignore_classes (list): List of classes that will be ignored + and not added to result. 
+
+        Returns:
+            DiscoverResult: Object holding successfully discovered plugins,
+                ignored plugins, plugins with missing abstract implementation
+                and duplicated plugins.
+        """
+
+        if not ignore_classes:
+            ignore_classes = []
+
+        result = DiscoverResult(superclass)
+        plugin_names = set()
+        registered_classes = self._registered_plugins.get(superclass) or []
+        registered_paths = self._registered_plugin_paths.get(superclass) or []
+        for cls in registered_classes:
+            if cls is superclass or cls in ignore_classes:
+                result.ignored_plugins.add(cls)
+                continue
+
+            if inspect.isabstract(cls):
+                result.abstract_plugins.append(cls)
+                continue
+
+            class_name = cls.__name__
+            if class_name in plugin_names:
+                result.duplicated_plugins.append(cls)
+                continue
+            plugin_names.add(class_name)
+            result.plugins.append(cls)
+
+        # Include plug-ins from registered paths
+        for path in registered_paths:
+            modules, crashed = modules_from_path(path)
+            for item in crashed:
+                filepath, exc_info = item
+                result.crashed_file_paths[filepath] = exc_info
+
+            for item in modules:
+                filepath, module = item
+                result.add_module(module)
+                for cls in classes_from_module(superclass, module):
+                    if cls is superclass or cls in ignore_classes:
+                        result.ignored_plugins.add(cls)
+                        continue
+
+                    if inspect.isabstract(cls):
+                        result.abstract_plugins.append(cls)
+                        continue
+
+                    if not allow_duplicates:
+                        class_name = cls.__name__
+                        if class_name in plugin_names:
+                            result.duplicated_plugins.append(cls)
+                            continue
+                        plugin_names.add(class_name)
+
+                    result.plugins.append(cls)
+
+        # Store the last result in memory to keep the loaded modules alive
+        self._last_discovered_results[superclass] = result
+        self._last_discovered_plugins[superclass] = list(
+            result.plugins
+        )
+        result.log_report()
+        if return_report:
+            return result
+        return result.plugins
+
+    def register_plugin(self, superclass, cls):
+        """Register a plug-in class of type `superclass`.
+
+        Arguments:
+            superclass (type): Superclass of plug-in
+            cls (object): Subclass of `superclass`
+        """
+
+        if superclass not in self._registered_plugins:
+            self._registered_plugins[superclass] = list()
+
+        if cls not in self._registered_plugins[superclass]:
+            self._registered_plugins[superclass].append(cls)
+
+    def register_plugin_path(self, superclass, path):
+        """Register a directory of one or more plug-ins.
+
+        Arguments:
+            superclass (type): Superclass of plug-ins to look for during
+                discovery
+            path (str): Absolute path to directory in which to discover
+                plug-ins
+        """
+
+        if superclass not in self._registered_plugin_paths:
+            self._registered_plugin_paths[superclass] = list()
+
+        path = os.path.normpath(path)
+        if path not in self._registered_plugin_paths[superclass]:
+            self._registered_plugin_paths[superclass].append(path)
+
+    def registered_plugin_paths(self):
+        """Return all currently registered plug-in paths"""
+        # Return a shallow copy so the original data can't be changed
+        return {
+            superclass: paths[:]
+            for superclass, paths in self._registered_plugin_paths.items()
+        }
+
+    def deregister_plugin(self, superclass, plugin):
+        """Opposite of `register_plugin()`"""
+        if superclass in self._registered_plugins:
+            self._registered_plugins[superclass].remove(plugin)
+
+    def deregister_plugin_path(self, superclass, path):
+        """Opposite of `register_plugin_path()`"""
+        self._registered_plugin_paths[superclass].remove(path)
+
+
+class _GlobalDiscover:
+    """Access to a global object of PluginDiscoverContext.
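+
+    The module-level helpers below delegate to it, e.g. (the plugin base
+    class and path are placeholders):
+
+        register_plugin_path(MyPluginBase, "/studio/plugins")
+        plugins = discover(MyPluginBase)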
+ + Using singleton object to register/deregister plugins and plugin paths + and then discover them by superclass. + """ + + _context = None + + @classmethod + def get_context(cls): + if cls._context is None: + cls._context = PluginDiscoverContext() + return cls._context + + +def discover(superclass, allow_duplicates=True): + context = _GlobalDiscover.get_context() + return context.discover(superclass, allow_duplicates) + + +def get_last_discovered_plugins(superclass): + context = _GlobalDiscover.get_context() + return context.get_last_discovered_plugins(superclass) + + +def register_plugin(superclass, cls): + context = _GlobalDiscover.get_context() + context.register_plugin(superclass, cls) + + +def register_plugin_path(superclass, path): + context = _GlobalDiscover.get_context() + context.register_plugin_path(superclass, path) + + +def deregister_plugin(superclass, cls): + context = _GlobalDiscover.get_context() + context.deregister_plugin(superclass, cls) + + +def deregister_plugin_path(superclass, path): + context = _GlobalDiscover.get_context() + context.deregister_plugin_path(superclass, path) diff --git a/openpype/pipeline/thumbnail.py b/openpype/pipeline/thumbnail.py index 12bab83be6..c09dab70eb 100644 --- a/openpype/pipeline/thumbnail.py +++ b/openpype/pipeline/thumbnail.py @@ -2,6 +2,11 @@ import os import copy import logging +from .plugin_discover import ( + discover, + register_plugin, + register_plugin_path, +) log = logging.getLogger(__name__) @@ -126,21 +131,15 @@ class BinaryThumbnail(ThumbnailResolver): # Thumbnail resolvers def discover_thumbnail_resolvers(): - import avalon.api - - return avalon.api.discover(ThumbnailResolver) + return discover(ThumbnailResolver) def register_thumbnail_resolver(plugin): - import avalon.api - - return avalon.api.register_plugin(ThumbnailResolver, plugin) + register_plugin(ThumbnailResolver, plugin) def register_thumbnail_resolver_path(path): - import avalon.api - - return avalon.api.register_plugin_path(ThumbnailResolver, path) + register_plugin_path(ThumbnailResolver, path) register_thumbnail_resolver(TemplateResolver) diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py index 692acdec02..2789f4ea23 100644 --- a/openpype/plugins/load/delete_old_versions.py +++ b/openpype/plugins/load/delete_old_versions.py @@ -126,7 +126,8 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): os.remove(file_path) self.log.debug("Removed file: {}".format(file_path)) - remainders.remove(file_path_base) + if file_path_base in remainders: + remainders.remove(file_path_base) continue seq_path_base = os.path.split(seq_path)[1] @@ -333,6 +334,8 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): def main(self, data, remove_publish_folder): # Size of files. 
size = 0 + if not data: + return size if remove_publish_folder: size = self.delete_whole_dir_paths(data["dir_paths"].values()) @@ -418,6 +421,8 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): ) data = self.get_data(context, versions_to_keep) + if not data: + continue size += self.main(data, remove_publish_folder) print("Progressing {}/{}".format(count + 1, len(contexts))) diff --git a/openpype/plugins/publish/collect_cleanup_keys.py b/openpype/plugins/publish/collect_cleanup_keys.py new file mode 100644 index 0000000000..635b038387 --- /dev/null +++ b/openpype/plugins/publish/collect_cleanup_keys.py @@ -0,0 +1,21 @@ +""" +Requires: + None +Provides: + context + - cleanupFullPaths (list) + - cleanupEmptyDirs (list) +""" + +import pyblish.api + + +class CollectCleanupKeys(pyblish.api.ContextPlugin): + """Prepare keys for 'ExplicitCleanUp' plugin.""" + + label = "Collect Cleanup Keys" + order = pyblish.api.CollectorOrder + + def process(self, context): + context.data["cleanupFullPaths"] = [] + context.data["cleanupEmptyDirs"] = [] diff --git a/openpype/plugins/publish/collect_host_name.py b/openpype/plugins/publish/collect_host_name.py index b731e3ed26..d64af4d049 100644 --- a/openpype/plugins/publish/collect_host_name.py +++ b/openpype/plugins/publish/collect_host_name.py @@ -18,20 +18,30 @@ class CollectHostName(pyblish.api.ContextPlugin): def process(self, context): host_name = context.data.get("hostName") + app_name = context.data.get("appName") + app_label = context.data.get("appLabel") # Don't override value if is already set - if host_name: + if host_name and app_name and app_label: return - # Use AVALON_APP as first if available it is the same as host name - # - only if is not defined use AVALON_APP_NAME (e.g. on Farm) and - # set it back to AVALON_APP env variable - host_name = os.environ.get("AVALON_APP") + # Use AVALON_APP to get host name if available if not host_name: + host_name = os.environ.get("AVALON_APP") + + # Use AVALON_APP_NAME to get full app name + if not app_name: app_name = os.environ.get("AVALON_APP_NAME") - if app_name: - app_manager = ApplicationManager() - app = app_manager.applications.get(app_name) - if app: + + # Fill missing values based on app full name + if (not host_name or not app_label) and app_name: + app_manager = ApplicationManager() + app = app_manager.applications.get(app_name) + if app: + if not host_name: host_name = app.host_name + if not app_label: + app_label = app.full_label context.data["hostName"] = host_name + context.data["appName"] = app_name + context.data["appLabel"] = app_label diff --git a/openpype/plugins/publish/collect_resources_path.py b/openpype/plugins/publish/collect_resources_path.py index fa181301ee..1f509365c7 100644 --- a/openpype/plugins/publish/collect_resources_path.py +++ b/openpype/plugins/publish/collect_resources_path.py @@ -53,7 +53,10 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): "textures", "action", "background", - "effect" + "effect", + "staticMesh", + "skeletalMesh" + ] def process(self, instance): diff --git a/openpype/plugins/publish/collect_scene_loaded_versions.py b/openpype/plugins/publish/collect_scene_loaded_versions.py index 6746757e5f..e54592abb8 100644 --- a/openpype/plugins/publish/collect_scene_loaded_versions.py +++ b/openpype/plugins/publish/collect_scene_loaded_versions.py @@ -1,7 +1,8 @@ from bson.objectid import ObjectId import pyblish.api -from avalon import api, io +from avalon import io +from openpype.pipeline import registered_host class 
CollectSceneLoadedVersions(pyblish.api.ContextPlugin): @@ -24,7 +25,7 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): ] def process(self, context): - host = api.registered_host() + host = registered_host() if host is None: self.log.warn("No registered host.") return diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py index b2ca8850b6..544c763b52 100644 --- a/openpype/plugins/publish/extract_burnin.py +++ b/openpype/plugins/publish/extract_burnin.py @@ -16,7 +16,7 @@ from openpype.lib import ( run_openpype_process, get_transcode_temp_directory, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, should_convert_for_ffmpeg, CREATE_NO_WINDOW @@ -187,8 +187,13 @@ class ExtractBurnin(openpype.api.Extractor): repre_files = repre["files"] if isinstance(repre_files, (tuple, list)): filename = repre_files[0] + src_filepaths = [ + os.path.join(src_repre_staging_dir, filename) + for filename in repre_files + ] else: filename = repre_files + src_filepaths = [os.path.join(src_repre_staging_dir, filename)] first_input_path = os.path.join(src_repre_staging_dir, filename) # Determine if representation requires pre conversion for ffmpeg @@ -209,11 +214,9 @@ class ExtractBurnin(openpype.api.Extractor): new_staging_dir = get_transcode_temp_directory() repre["stagingDir"] = new_staging_dir - convert_for_ffmpeg( - first_input_path, + convert_input_paths_for_ffmpeg( + src_filepaths, new_staging_dir, - _temp_data["frameStart"], - _temp_data["frameEnd"], self.log ) @@ -221,11 +224,17 @@ class ExtractBurnin(openpype.api.Extractor): filled_anatomy = anatomy.format_all(burnin_data) burnin_data["anatomy"] = filled_anatomy.get_solved() - # Add context data burnin_data. - burnin_data["custom"] = ( + custom_data = copy.deepcopy( + instance.data.get("customData") or {} + ) + # Backwards compatibility (since 2022/04/07) + custom_data.update( instance.data.get("custom_burnin_data") or {} ) + # Add context data burnin_data. 
+ burnin_data["custom"] = custom_data + # Add source camera name to burnin data camera_name = repre.get("camera_name") if camera_name: diff --git a/openpype/plugins/publish/extract_jpeg_exr.py b/openpype/plugins/publish/extract_jpeg_exr.py index 468ed96199..d6d6854092 100644 --- a/openpype/plugins/publish/extract_jpeg_exr.py +++ b/openpype/plugins/publish/extract_jpeg_exr.py @@ -8,7 +8,7 @@ from openpype.lib import ( path_to_subprocess_arg, get_transcode_temp_directory, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, should_convert_for_ffmpeg ) @@ -79,11 +79,9 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): if do_convert: convert_dir = get_transcode_temp_directory() filename = os.path.basename(full_input_path) - convert_for_ffmpeg( - full_input_path, + convert_input_paths_for_ffmpeg( + [full_input_path], convert_dir, - None, - None, self.log ) full_input_path = os.path.join(convert_dir, filename) diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index 3ecea1f8bd..f2473839d9 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -18,7 +18,7 @@ from openpype.lib import ( path_to_subprocess_arg, should_convert_for_ffmpeg, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, get_transcode_temp_directory ) import speedcopy @@ -188,23 +188,26 @@ class ExtractReview(pyblish.api.InstancePlugin): outputs_per_repres = self._get_outputs_per_representations( instance, profile_outputs ) - fill_data = copy.deepcopy(instance.data["anatomyData"]) - for repre, outputs in outputs_per_repres: + for repre, outpu_defs in outputs_per_repres: # Check if input should be preconverted before processing # Store original staging dir (it's value may change) src_repre_staging_dir = repre["stagingDir"] # Receive filepath to first file in representation first_input_path = None + input_filepaths = [] if not self.input_is_sequence(repre): first_input_path = os.path.join( src_repre_staging_dir, repre["files"] ) + input_filepaths.append(first_input_path) else: for filename in repre["files"]: - first_input_path = os.path.join( + filepath = os.path.join( src_repre_staging_dir, filename ) - break + input_filepaths.append(filepath) + if first_input_path is None: + first_input_path = filepath # Skip if file is not set if first_input_path is None: @@ -231,136 +234,149 @@ class ExtractReview(pyblish.api.InstancePlugin): new_staging_dir = get_transcode_temp_directory() repre["stagingDir"] = new_staging_dir - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - convert_for_ffmpeg( - first_input_path, + convert_input_paths_for_ffmpeg( + input_filepaths, new_staging_dir, - frame_start, - frame_end, self.log ) - for _output_def in outputs: - output_def = copy.deepcopy(_output_def) - # Make sure output definition has "tags" key - if "tags" not in output_def: - output_def["tags"] = [] - - if "burnins" not in output_def: - output_def["burnins"] = [] - - # Create copy of representation - new_repre = copy.deepcopy(repre) - # Make sure new representation has origin staging dir - # - this is because source representation may change - # it's staging dir because of ffmpeg conversion - new_repre["stagingDir"] = src_repre_staging_dir - - # Remove "delete" tag from new repre if there is - if "delete" in new_repre["tags"]: - new_repre["tags"].remove("delete") - - # Add additional tags from output definition to representation - for tag in output_def["tags"]: - if tag not in new_repre["tags"]: - 
new_repre["tags"].append(tag) - - # Add burnin link from output definition to representation - for burnin in output_def["burnins"]: - if burnin not in new_repre.get("burnins", []): - if not new_repre.get("burnins"): - new_repre["burnins"] = [] - new_repre["burnins"].append(str(burnin)) - - self.log.debug( - "Linked burnins: `{}`".format(new_repre.get("burnins")) + try: + self._render_output_definitions( + instance, repre, src_repre_staging_dir, outpu_defs ) - self.log.debug( - "New representation tags: `{}`".format( - new_repre.get("tags")) + finally: + # Make sure temporary staging is cleaned up and representation + # has set origin stagingDir + if do_convert: + # Set staging dir of source representation back to previous + # value + repre["stagingDir"] = src_repre_staging_dir + if os.path.exists(new_staging_dir): + shutil.rmtree(new_staging_dir) + + def _render_output_definitions( + self, instance, repre, src_repre_staging_dir, outpu_defs + ): + fill_data = copy.deepcopy(instance.data["anatomyData"]) + for _output_def in outpu_defs: + output_def = copy.deepcopy(_output_def) + # Make sure output definition has "tags" key + if "tags" not in output_def: + output_def["tags"] = [] + + if "burnins" not in output_def: + output_def["burnins"] = [] + + # Create copy of representation + new_repre = copy.deepcopy(repre) + # Make sure new representation has origin staging dir + # - this is because source representation may change + # it's staging dir because of ffmpeg conversion + new_repre["stagingDir"] = src_repre_staging_dir + + # Remove "delete" tag from new repre if there is + if "delete" in new_repre["tags"]: + new_repre["tags"].remove("delete") + + # Add additional tags from output definition to representation + for tag in output_def["tags"]: + if tag not in new_repre["tags"]: + new_repre["tags"].append(tag) + + # Add burnin link from output definition to representation + for burnin in output_def["burnins"]: + if burnin not in new_repre.get("burnins", []): + if not new_repre.get("burnins"): + new_repre["burnins"] = [] + new_repre["burnins"].append(str(burnin)) + + self.log.debug( + "Linked burnins: `{}`".format(new_repre.get("burnins")) + ) + + self.log.debug( + "New representation tags: `{}`".format( + new_repre.get("tags")) + ) + + temp_data = self.prepare_temp_data(instance, repre, output_def) + files_to_clean = [] + if temp_data["input_is_sequence"]: + self.log.info("Filling gaps in sequence.") + files_to_clean = self.fill_sequence_gaps( + temp_data["origin_repre"]["files"], + new_repre["stagingDir"], + temp_data["frame_start"], + temp_data["frame_end"]) + + # create or update outputName + output_name = new_repre.get("outputName", "") + output_ext = new_repre["ext"] + if output_name: + output_name += "_" + output_name += output_def["filename_suffix"] + if temp_data["without_handles"]: + output_name += "_noHandles" + + # add outputName to anatomy format fill_data + fill_data.update({ + "output": output_name, + "ext": output_ext + }) + + try: # temporary until oiiotool is supported cross platform + ffmpeg_args = self._ffmpeg_arguments( + output_def, instance, new_repre, temp_data, fill_data ) - - temp_data = self.prepare_temp_data( - instance, repre, output_def) - files_to_clean = [] - if temp_data["input_is_sequence"]: - self.log.info("Filling gaps in sequence.") - files_to_clean = self.fill_sequence_gaps( - temp_data["origin_repre"]["files"], - new_repre["stagingDir"], - temp_data["frame_start"], - temp_data["frame_end"]) - - # create or update outputName - output_name = 
new_repre.get("outputName", "") - output_ext = new_repre["ext"] - if output_name: - output_name += "_" - output_name += output_def["filename_suffix"] - if temp_data["without_handles"]: - output_name += "_noHandles" - - # add outputName to anatomy format fill_data - fill_data.update({ - "output": output_name, - "ext": output_ext - }) - - try: # temporary until oiiotool is supported cross platform - ffmpeg_args = self._ffmpeg_arguments( - output_def, instance, new_repre, temp_data, fill_data + except ZeroDivisionError: + # TODO recalculate width and height using OIIO before + # conversion + if 'exr' in temp_data["origin_repre"]["ext"]: + self.log.warning( + ( + "Unsupported compression on input files." + " Skipping!!!" + ), + exc_info=True ) - except ZeroDivisionError: - if 'exr' in temp_data["origin_repre"]["ext"]: - self.log.debug("Unsupported compression on input " + - "files. Skipping!!!") - return - raise NotImplementedError + return + raise NotImplementedError - subprcs_cmd = " ".join(ffmpeg_args) + subprcs_cmd = " ".join(ffmpeg_args) - # run subprocess - self.log.debug("Executing: {}".format(subprcs_cmd)) + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) - openpype.api.run_subprocess( - subprcs_cmd, shell=True, logger=self.log - ) + openpype.api.run_subprocess( + subprcs_cmd, shell=True, logger=self.log + ) - # delete files added to fill gaps - if files_to_clean: - for f in files_to_clean: - os.unlink(f) + # delete files added to fill gaps + if files_to_clean: + for f in files_to_clean: + os.unlink(f) - new_repre.update({ - "name": "{}_{}".format(output_name, output_ext), - "outputName": output_name, - "outputDef": output_def, - "frameStartFtrack": temp_data["output_frame_start"], - "frameEndFtrack": temp_data["output_frame_end"], - "ffmpeg_cmd": subprcs_cmd - }) + new_repre.update({ + "name": "{}_{}".format(output_name, output_ext), + "outputName": output_name, + "outputDef": output_def, + "frameStartFtrack": temp_data["output_frame_start"], + "frameEndFtrack": temp_data["output_frame_end"], + "ffmpeg_cmd": subprcs_cmd + }) - # Force to pop these key if are in new repre - new_repre.pop("preview", None) - new_repre.pop("thumbnail", None) - if "clean_name" in new_repre.get("tags", []): - new_repre.pop("outputName") + # Force to pop these key if are in new repre + new_repre.pop("preview", None) + new_repre.pop("thumbnail", None) + if "clean_name" in new_repre.get("tags", []): + new_repre.pop("outputName") - # adding representation - self.log.debug( - "Adding new representation: {}".format(new_repre) - ) - instance.data["representations"].append(new_repre) - - # Cleanup temp staging dir after procesisng of output definitions - if do_convert: - temp_dir = repre["stagingDir"] - shutil.rmtree(temp_dir) - # Set staging dir of source representation back to previous - # value - repre["stagingDir"] = src_repre_staging_dir + # adding representation + self.log.debug( + "Adding new representation: {}".format(new_repre) + ) + instance.data["representations"].append(new_repre) def input_is_sequence(self, repre): """Deduce from representation data if input is sequence.""" diff --git a/openpype/plugins/publish/extract_review_slate.py b/openpype/plugins/publish/extract_review_slate.py index 505ae75169..49f0eac41d 100644 --- a/openpype/plugins/publish/extract_review_slate.py +++ b/openpype/plugins/publish/extract_review_slate.py @@ -158,13 +158,15 @@ class ExtractReviewSlate(openpype.api.Extractor): ]) if use_legacy_code: + format_args = [] codec_args = 
repre["_profile"].get('codec', []) output_args.extend(codec_args) # preset's output data output_args.extend(repre["_profile"].get('output', [])) else: # Codecs are copied from source for whole input - codec_args = self._get_codec_args(repre) + format_args, codec_args = self._get_format_codec_args(repre) + output_args.extend(format_args) output_args.extend(codec_args) # make sure colors are correct @@ -266,8 +268,14 @@ class ExtractReviewSlate(openpype.api.Extractor): "-safe", "0", "-i", conc_text_path, "-c", "copy", - output_path ] + # NOTE: Added because of OP Atom demuxers + # Add format arguments if there are any + # - keep format of output + if format_args: + concat_args.extend(format_args) + # Add final output path + concat_args.append(output_path) # ffmpeg concat subprocess self.log.debug( @@ -338,7 +346,7 @@ class ExtractReviewSlate(openpype.api.Extractor): return vf_back - def _get_codec_args(self, repre): + def _get_format_codec_args(self, repre): """Detect possible codec arguments from representation.""" codec_args = [] @@ -361,13 +369,9 @@ class ExtractReviewSlate(openpype.api.Extractor): return codec_args source_ffmpeg_cmd = repre.get("ffmpeg_cmd") - codec_args.extend( - get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd) - ) - codec_args.extend( - get_ffmpeg_codec_args( - ffprobe_data, source_ffmpeg_cmd, logger=self.log - ) + format_args = get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd) + codec_args = get_ffmpeg_codec_args( + ffprobe_data, source_ffmpeg_cmd, logger=self.log ) - return codec_args + return format_args, codec_args diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index 466606d08b..ded149bdd0 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -7,8 +7,12 @@ import shutil from bson.objectid import ObjectId from pymongo import InsertOne, ReplaceOne import pyblish.api + from avalon import api, io, schema -from openpype.lib import create_hard_link +from openpype.lib import ( + create_hard_link, + filter_profiles +) class IntegrateHeroVersion(pyblish.api.InstancePlugin): @@ -17,7 +21,9 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): order = pyblish.api.IntegratorOrder + 0.1 optional = True + active = True + # Families are modified using settings families = [ "model", "rig", @@ -33,11 +39,13 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): "project", "asset", "task", "subset", "representation", "family", "hierarchy", "task", "username" ] - # TODO add family filtering # QUESTION/TODO this process should happen on server if crashed due to # permissions error on files (files were used or user didn't have perms) # *but all other plugins must be sucessfully completed + template_name_profiles = [] + _default_template_name = "hero" + def process(self, instance): self.log.debug( "--- Integration of Hero version for subset `{}` begins.".format( @@ -51,27 +59,35 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): ) return - project_name = api.Session["AVALON_PROJECT"] + template_key = self._get_template_key(instance) - # TODO raise error if Hero not set? anatomy = instance.context.data["anatomy"] - if "hero" not in anatomy.templates: - self.log.warning("!!! Anatomy does not have set `hero` key!") - return - - if "path" not in anatomy.templates["hero"]: + project_name = api.Session["AVALON_PROJECT"] + if template_key not in anatomy.templates: self.log.warning(( - "!!! 
There is not set `path` template in `hero` anatomy" - " for project \"{}\"." - ).format(project_name)) + "!!! Anatomy of project \"{}\" does not have" + " \"{}\" template key set!" + ).format(project_name, template_key)) return - hero_template = anatomy.templates["hero"]["path"] + if "path" not in anatomy.templates[template_key]: + self.log.warning(( + "!!! The \"path\" template is not set in \"{}\" anatomy" + " for project \"{}\"." + ).format(template_key, project_name)) + return + + hero_template = anatomy.templates[template_key]["path"] self.log.debug("`hero` template check was successful. `{}`".format( hero_template )) - hero_publish_dir = self.get_publish_dir(instance) + self.integrate_instance(instance, template_key, hero_template) + + def integrate_instance(self, instance, template_key, hero_template): + anatomy = instance.context.data["anatomy"] + published_repres = instance.data["published_representations"] + hero_publish_dir = self.get_publish_dir(instance, template_key) src_version_entity = instance.data.get("versionEntity") filtered_repre_ids = [] @@ -271,12 +287,12 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): continue # Prepare anatomy data - anatomy_data = repre_info["anatomy_data"] + anatomy_data = copy.deepcopy(repre_info["anatomy_data"]) anatomy_data.pop("version", None) # Get filled path to repre context anatomy_filled = anatomy.format(anatomy_data) - template_filled = anatomy_filled["hero"]["path"] + template_filled = anatomy_filled[template_key]["path"] repre_data = { "path": str(template_filled), @@ -308,11 +324,11 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): collections, remainders = clique.assemble(published_files) if remainders or not collections or len(collections) > 1: raise Exception(( - "Integrity error. Files of published representation " - "is combination of frame collections and single files." - "Collections: `{}` Single files: `{}`" - ).format(str(collections), - str(remainders))) + "Integrity error. Files of published" + " representation are a combination of frame" + " collections and single files. Collections:" + " `{}` Single files: `{}`" + ).format(str(collections), str(remainders))) src_col = collections[0] @@ -320,13 +336,10 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): frame_splitter = "_-_FRAME_SPLIT_-_" anatomy_data["frame"] = frame_splitter _anatomy_filled = anatomy.format(anatomy_data) - _template_filled = _anatomy_filled["hero"]["path"] + _template_filled = _anatomy_filled[template_key]["path"] head, tail = _template_filled.split(frame_splitter) padding = int( - anatomy.templates["render"].get( - "frame_padding", - anatomy.templates["render"].get("padding") - ) + anatomy.templates[template_key]["frame_padding"] ) dst_col = clique.Collection( @@ -444,6 +457,8 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): backup_hero_publish_dir is not None and os.path.exists(backup_hero_publish_dir) ): + if os.path.exists(hero_publish_dir): + shutil.rmtree(hero_publish_dir) os.rename(backup_hero_publish_dir, hero_publish_dir) self.log.error(( "!!! Creating of hero version failed." 
@@ -466,13 +481,18 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): files.append(_path) return files - def get_publish_dir(self, instance): + def get_publish_dir(self, instance, template_key): anatomy = instance.context.data["anatomy"] template_data = copy.deepcopy(instance.data["anatomyData"]) - if "folder" in anatomy.templates["hero"]: + if "originalBasename" in instance.data: + template_data.update({ + "originalBasename": instance.data.get("originalBasename") + }) + + if "folder" in anatomy.templates[template_key]: anatomy_filled = anatomy.format(template_data) - publish_folder = anatomy_filled["hero"]["folder"] + publish_folder = anatomy_filled[template_key]["folder"] else: # This is for cases of Deprecated anatomy without `folder` # TODO remove when all clients have solved this issue @@ -489,7 +509,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): " key underneath `publish` (in global of for project `{}`)." ).format(project_name)) - file_path = anatomy_filled["hero"]["path"] + file_path = anatomy_filled[template_key]["path"] # Directory publish_folder = os.path.dirname(file_path) @@ -499,6 +519,38 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): return publish_folder + def _get_template_key(self, instance): + anatomy_data = instance.data["anatomyData"] + task_data = anatomy_data.get("task") or {} + task_name = task_data.get("name") + task_type = task_data.get("type") + host_name = instance.context.data["hostName"] + # TODO raise error if Hero not set? + family = self.main_family_from_instance(instance) + key_values = { + "families": family, + "task_names": task_name, + "task_types": task_type, + "hosts": host_name + } + profile = filter_profiles( + self.template_name_profiles, + key_values, + logger=self.log + ) + if profile: + template_name = profile["template_name"] + else: + template_name = self._default_template_name + return template_name + + def main_family_from_instance(self, instance): + """Returns main family of entered instance.""" + family = instance.data.get("family") + if not family: + family = instance.data["families"][0] + return family + def copy_file(self, src_path, dst_path): # TODO check drives if are the same to check if cas hardlink dirname = os.path.dirname(dst_path) @@ -564,22 +616,16 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): src_file (string) - original file path dst_file (string) - hero file path """ - _, rootless = anatomy.find_root_template_from_path( - dst_file - ) - _, rtls_src = anatomy.find_root_template_from_path( - src_file - ) + _, rootless = anatomy.find_root_template_from_path(dst_file) + _, rtls_src = anatomy.find_root_template_from_path(src_file) return path.replace(rtls_src, rootless) def _update_hash(self, hash, src_file_name, dst_file): """ Updates hash value with proper hero name """ - src_file_name = self._get_name_without_ext( - src_file_name) - hero_file_name = self._get_name_without_ext( - dst_file) + src_file_name = self._get_name_without_ext(src_file_name) + hero_file_name = self._get_name_without_ext(dst_file) return hash.replace(src_file_name, hero_file_name) def _get_name_without_ext(self, value): diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py index 51b7c26235..1e53f2bcfa 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_new.py @@ -8,6 +8,7 @@ import errno import six import re import shutil +from collections import deque, defaultdict from bson.objectid import ObjectId from pymongo import 
DeleteOne, InsertOne @@ -105,7 +106,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "effect", "xgen", "hda", - "usd" + "usd", + "staticMesh", + "skeletalMesh", + "usdComposition", + "usdOverride", + "simpleUnrealTexture" ] exclude_families = ["clip", "render.farm"] db_representation_context_keys = [ @@ -360,6 +366,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if profile: template_name = profile["template_name"] + + published_representations = {} for idx, repre in enumerate(instance.data["representations"]): # reset transfers for next representation @@ -388,6 +396,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if resolution_width: template_data["fps"] = fps + if "originalBasename" in instance.data: + template_data.update({ + "originalBasename": instance.data.get("originalBasename") + }) + files = repre['files'] if repre.get('stagingDir'): stagingdir = repre['stagingDir'] @@ -559,6 +572,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): repre['published_path'] = dst self.log.debug("__ dst: {}".format(dst)) + if not instance.data.get("publishDir"): + instance.data["publishDir"] = ( + anatomy_filled + [template_name] + ["folder"] + ) if repre.get("udim"): repre_context["udim"] = repre.get("udim") # store list @@ -1105,18 +1124,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): rec["sites"].append(meta) already_attached_sites[meta["name"]] = None + # add alternative sites + rec, already_attached_sites = self._add_alternative_sites( + system_sync_server_presets, already_attached_sites, rec) + # add skeleton for site where it should be always synced to - for always_on_site in always_accesible: + for always_on_site in set(always_accesible): if always_on_site not in already_attached_sites.keys(): meta = {"name": always_on_site.strip()} rec["sites"].append(meta) already_attached_sites[meta["name"]] = None - # add alternative sites - rec = self._add_alternative_sites(system_sync_server_presets, - already_attached_sites, - rec) - log.debug("final sites:: {}".format(rec["sites"])) return rec @@ -1147,22 +1165,60 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): """ conf_sites = system_sync_server_presets.get("sites", {}) + alt_site_pairs = self._get_alt_site_pairs(conf_sites) + + already_attached_keys = list(already_attached_sites.keys()) + for added_site in already_attached_keys: + real_created = already_attached_sites[added_site] + for alt_site in alt_site_pairs.get(added_site, []): + if alt_site in already_attached_sites.keys(): + continue + meta = {"name": alt_site} + # alt site inherits state of 'created_dt' + if real_created: + meta["created_dt"] = real_created + rec["sites"].append(meta) + already_attached_sites[meta["name"]] = real_created + + return rec, already_attached_sites + + def _get_alt_site_pairs(self, conf_sites): + """Returns dict of site and its alternative sites. 
+ + If `site` has an alternative site, the alternative site has 'site' + as its alternative as well; chained alternatives are resolved + transitively. + Args: + conf_sites (dict) + Returns: + (dict): {'site': [alternative sites]...} + """ + alt_site_pairs = defaultdict(list) for site_name, site_info in conf_sites.items(): alt_sites = set(site_info.get("alternative_sites", [])) - already_attached_keys = list(already_attached_sites.keys()) - for added_site in already_attached_keys: - if added_site in alt_sites: - if site_name in already_attached_keys: - continue - meta = {"name": site_name} - real_created = already_attached_sites[added_site] - # alt site inherits state of 'created_dt' - if real_created: - meta["created_dt"] = real_created - rec["sites"].append(meta) - already_attached_sites[meta["name"]] = real_created + alt_site_pairs[site_name].extend(alt_sites) - return rec + + for alt_site in alt_sites: + alt_site_pairs[alt_site].append(site_name) + + for site_name, alt_sites in alt_site_pairs.items(): + sites_queue = deque(alt_sites) + while sites_queue: + alt_site = sites_queue.popleft() + + # safety against wrong config, e.g. + # {"SFTP": {"alternative_sites": ["SFTP"]}} + if alt_site == site_name or alt_site not in alt_site_pairs: + continue + + for alt_alt_site in alt_site_pairs[alt_site]: + if ( + alt_alt_site != site_name + and alt_alt_site not in alt_sites + ): + alt_sites.append(alt_alt_site) + sites_queue.append(alt_alt_site) + + return alt_site_pairs def handle_destination_files(self, integrated_file_sizes, mode): """ Clean destination files diff --git a/openpype/plugins/publish/validate_aseset_docs.py b/openpype/plugins/publish/validate_asset_docs.py similarity index 69% rename from openpype/plugins/publish/validate_aseset_docs.py rename to openpype/plugins/publish/validate_asset_docs.py index eed75cdf8a..bc1f9b9e6c 100644 --- a/openpype/plugins/publish/validate_aseset_docs.py +++ b/openpype/plugins/publish/validate_asset_docs.py @@ -2,8 +2,8 @@ import pyblish.api from openpype.pipeline import PublishValidationError -class ValidateContainers(pyblish.api.InstancePlugin): - """Validate existence of asset asset documents on instances. +class ValidateAssetDocs(pyblish.api.InstancePlugin): + """Validate existence of asset documents on instances. Without asset document it is not possible to publish the instance. @@ -22,10 +22,10 @@ return if instance.data.get("assetEntity"): - self.log.info("Instance have set asset document in it's data.") + self.log.info("Instance has an asset document set in its data.") else: raise PublishValidationError(( - "Instance \"{}\" don't have set asset" - " document which is needed for publishing." + "Instance \"{}\" doesn't have an asset document " + "set, which is needed for publishing." ).format(instance.data["name"])) diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index c05eece2be..bd2008e144 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -101,7 +101,8 @@ class PypeCommands: RuntimeError: When there is no path to process. 
""" from openpype.modules import ModulesManager - from openpype import install, uninstall + from openpype.pipeline import install_openpype_plugins + from openpype.api import Logger from openpype.tools.utils.host_tools import show_publish from openpype.tools.utils.lib import qt_app_context @@ -112,7 +113,7 @@ class PypeCommands: log = Logger.get_logger() - install() + install_openpype_plugins() manager = ModulesManager() @@ -124,13 +125,14 @@ class PypeCommands: if not any(paths): raise RuntimeError("No publish paths specified") - env = get_app_environments_for_context( - os.environ["AVALON_PROJECT"], - os.environ["AVALON_ASSET"], - os.environ["AVALON_TASK"], - os.environ["AVALON_APP_NAME"] - ) - os.environ.update(env) + if os.getenv("AVALON_APP_NAME"): + env = get_app_environments_for_context( + os.environ["AVALON_PROJECT"], + os.environ["AVALON_ASSET"], + os.environ["AVALON_TASK"], + os.environ["AVALON_APP_NAME"] + ) + os.environ.update(env) pyblish.api.register_host("shell") @@ -294,7 +296,8 @@ class PypeCommands: # Register target and host import pyblish.api import pyblish.util - import avalon.api + + from openpype.pipeline import install_host from openpype.hosts.webpublisher import api as webpublisher log = PypeLogger.get_logger() @@ -315,7 +318,7 @@ class PypeCommands: for target in targets: pyblish.api.register_target(target) - avalon.api.install(webpublisher) + install_host(webpublisher) log.info("Running publish ...") diff --git a/openpype/scripts/fusion_switch_shot.py b/openpype/scripts/fusion_switch_shot.py index 6db8ff36a8..3ba150902e 100644 --- a/openpype/scripts/fusion_switch_shot.py +++ b/openpype/scripts/fusion_switch_shot.py @@ -4,12 +4,16 @@ import sys import logging # Pipeline imports -from avalon import api, io -import avalon.fusion +from avalon import io +from openpype.hosts.fusion import api +import openpype.hosts.fusion.api.lib as fusion_lib # Config imports -import openpype.lib as pype -import openpype.hosts.fusion.lib as fusion_lib +from openpype.lib import version_up +from openpype.pipeline import ( + install_host, + registered_host, +) from openpype.lib.avalon_context import get_workdir_from_session @@ -79,7 +83,7 @@ def _format_filepath(session): # Create new unqiue filepath if os.path.exists(new_filepath): - new_filepath = pype.version_up(new_filepath) + new_filepath = version_up(new_filepath) return new_filepath @@ -102,7 +106,7 @@ def _update_savers(comp, session): comp.Print("New renders to: %s\n" % renders) - with avalon.fusion.comp_lock_and_undo_chunk(comp): + with api.comp_lock_and_undo_chunk(comp): savers = comp.GetToolList(False, "Saver").values() for saver in savers: filepath = saver.GetAttrs("TOOLST_Clip_Name")[1.0] @@ -164,19 +168,19 @@ def switch(asset_name, filepath=None, new=True): # Get current project self._project = io.find_one({ "type": "project", - "name": api.Session["AVALON_PROJECT"] + "name": io.Session["AVALON_PROJECT"] }) # Go to comp if not filepath: - current_comp = avalon.fusion.get_current_comp() + current_comp = api.get_current_comp() assert current_comp is not None, "Could not find current comp" else: fusion = _get_fusion_instance() current_comp = fusion.LoadComp(filepath, quiet=True) assert current_comp is not None, "Fusion could not load '%s'" % filepath - host = api.registered_host() + host = registered_host() containers = list(host.ls()) assert containers, "Nothing to update" @@ -194,7 +198,7 @@ def switch(asset_name, filepath=None, new=True): current_comp.Print(message) # Build the session to switch to - switch_to_session = 
api.Session.copy() + switch_to_session = io.Session.copy() switch_to_session["AVALON_ASSET"] = asset['name'] if new: @@ -203,7 +207,7 @@ def switch(asset_name, filepath=None, new=True): # Update savers output based on new session _update_savers(current_comp, switch_to_session) else: - comp_path = pype.version_up(filepath) + comp_path = version_up(filepath) current_comp.Print(comp_path) @@ -234,7 +238,7 @@ if __name__ == '__main__': args, unknown = parser.parse_args() - api.install(avalon.fusion) + install_host(api) switch(args.asset_name, args.file_path) sys.exit(0) diff --git a/openpype/scripts/non_python_host_launch.py b/openpype/scripts/non_python_host_launch.py index 43921f0483..f795af7bb3 100644 --- a/openpype/scripts/non_python_host_launch.py +++ b/openpype/scripts/non_python_host_launch.py @@ -15,7 +15,7 @@ CURRENT_FILE = os.path.abspath(__file__) def show_error_messagebox(title, message, detail_message=None): """Function will show message and process ends after closing it.""" from Qt import QtWidgets, QtCore - from avalon import style + from openpype import style app = QtWidgets.QApplication([]) app.setStyleSheet(style.load_stylesheet()) diff --git a/openpype/settings/constants.py b/openpype/settings/constants.py index 8b8acf5714..19ff953eb4 100644 --- a/openpype/settings/constants.py +++ b/openpype/settings/constants.py @@ -8,11 +8,11 @@ M_ENVIRONMENT_KEY = "__environment_keys__" # Metadata key for storing dynamic created labels M_DYNAMIC_KEY_LABEL = "__dynamic_keys_labels__" -METADATA_KEYS = ( +METADATA_KEYS = frozenset([ M_OVERRIDDEN_KEY, M_ENVIRONMENT_KEY, M_DYNAMIC_KEY_LABEL -) +]) # Keys where studio's system overrides are stored GLOBAL_SETTINGS_KEY = "global_settings" diff --git a/openpype/settings/defaults/project_anatomy/templates.json b/openpype/settings/defaults/project_anatomy/templates.json index d46d449c77..caf399a903 100644 --- a/openpype/settings/defaults/project_anatomy/templates.json +++ b/openpype/settings/defaults/project_anatomy/templates.json @@ -28,9 +28,30 @@ }, "delivery": {}, "unreal": { - "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}", - "file": "{subset}_{@version}<_{output}><.{@frame}>.{ext}", + "folder": "{root[work]}/{project[name]}/unreal/{task[name]}", + "file": "{project[code]}_{asset}", "path": "{@folder}/{@file}" }, - "others": {} + "others": { + "maya2unreal": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}", + "file": "{subset}_{@version}<_{output}><.{@frame}>.{ext}", + "path": "{@folder}/{@file}" + }, + "simpleUnrealTextureHero": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/hero", + "file": "{originalBasename}.{ext}", + "path": "{@folder}/{@file}" + }, + "simpleUnrealTexture": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{@version}", + "file": "{originalBasename}_{@version}.{ext}", + "path": "{@folder}/{@file}" + }, + "__dynamic_keys_labels__": { + "maya2unreal": "Maya to Unreal", + "simpleUnrealTextureHero": "Simple Unreal Texture - Hero", + "simpleUnrealTexture": "Simple Unreal Texture" + } + } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index 4ff1d3b54d..f0b2a7e555 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -4,6 +4,10 @@ "CollectDefaultDeadlineServer": { "pass_mongo_url": false 
}, + "CollectDeadlinePools": { + "primary_pool": "", + "secondary_pool": "" + }, "ValidateExpectedFiles": { "enabled": true, "active": true, @@ -22,6 +26,8 @@ "tile_assembler_plugin": "OpenPypeTileAssembler", "use_published": true, "asset_dependencies": true, + "priority": 50, + "tile_priority": 50, "group": "none", "limit": [], "jobInfo": {}, @@ -35,8 +41,7 @@ "use_published": true, "priority": 50, "chunk_size": 10, - "primary_pool": "", - "secondary_pool": "", + "concurrent_tasks": 1, "group": "", "department": "", "use_gpu": true, @@ -51,8 +56,6 @@ "use_published": true, "priority": 50, "chunk_size": 10000, - "primary_pool": "", - "secondary_pool": "", "group": "", "department": "" }, @@ -63,8 +66,6 @@ "use_published": true, "priority": 50, "chunk_size": 10000, - "primary_pool": "", - "secondary_pool": "", "group": "", "department": "", "multiprocess": true @@ -80,7 +81,7 @@ "skip_integration_repre_list": [], "aov_filter": { "maya": [ - ".+(?:\\.|_)([Bb]eauty)(?:\\.|_).*" + ".*([Bb]eauty).*" ], "nuke": [ ".*" @@ -97,4 +98,4 @@ } } } -} \ No newline at end of file +} diff --git a/openpype/settings/defaults/project_settings/flame.json b/openpype/settings/defaults/project_settings/flame.json index c7188b10b5..ef7a2a4467 100644 --- a/openpype/settings/defaults/project_settings/flame.json +++ b/openpype/settings/defaults/project_settings/flame.json @@ -20,6 +20,37 @@ } }, "publish": { + "CollectTimelineInstances": { + "xml_preset_attrs_from_comments": [ + { + "name": "width", + "type": "number" + }, + { + "name": "height", + "type": "number" + }, + { + "name": "pixelRatio", + "type": "float" + }, + { + "name": "resizeType", + "type": "string" + }, + { + "name": "resizeFilter", + "type": "string" + } + ], + "add_tasks": [ + { + "name": "compositing", + "type": "Compositing", + "create_batch_group": true + } + ] + }, "ExtractSubsetResources": { "keep_original_representation": false, "export_presets_mapping": { @@ -31,7 +62,9 @@ "ignore_comment_attrs": false, "colorspace_out": "ACES - ACEScg", "representation_add_range": true, - "representation_tags": [] + "representation_tags": [], + "load_to_batch_group": true, + "batch_group_loader_name": "LoadClip" } } } @@ -58,7 +91,29 @@ ], "reel_group_name": "OpenPype_Reels", "reel_name": "Loaded", - "clip_name_template": "{asset}_{subset}_{representation}" + "clip_name_template": "{asset}_{subset}_{output}" + }, + "LoadClipBatch": { + "enabled": true, + "families": [ + "render2d", + "source", + "plate", + "render", + "review" + ], + "representations": [ + "exr", + "dpx", + "jpg", + "jpeg", + "png", + "h264", + "mov", + "mp4" + ], + "reel_name": "OP_LoadedReel", + "clip_name_template": "{asset}_{subset}_{output}" } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index 89bb41a164..deade08c0b 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -352,11 +352,21 @@ } ] }, + "CollectFtrackCustomAttributeData": { + "enabled": false, + "custom_attribute_keys": [] + }, "IntegrateFtrackNote": { "enabled": true, - "note_with_intent_template": "{intent}: {comment}", + "note_template": "{intent}: {comment}", "note_labels": [] }, + "IntegrateFtrackDescription": { + "enabled": false, + "optional": true, + "active": true, + "description_template": "{comment}" + }, "ValidateFtrackAttributes": { "enabled": false, "ftrack_custom_attributes": {} @@ -395,7 +405,8 @@ "vrayproxy": 
"cache", "redshiftproxy": "cache", "usd": "usd" - } + }, + "keep_first_subset_name_for_review": true } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index 132d35990d..7317a3da1c 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -33,20 +33,6 @@ "enabled": false, "profiles": [] }, - "IntegrateHeroVersion": { - "enabled": true, - "optional": true, - "families": [ - "model", - "rig", - "look", - "pointcache", - "animation", - "setdress", - "layout", - "mayaScene" - ] - }, "ExtractJpegEXR": { "enabled": true, "ffmpeg_args": { @@ -192,6 +178,29 @@ "task_types": [], "tasks": [], "template_name": "render" + }, + { + "families": [ + "simpleUnrealTexture" + ], + "hosts": [ + "standalonepublisher" + ], + "task_types": [], + "tasks": [], + "template_name": "simpleUnrealTexture" + }, + { + "families": [ + "staticMesh", + "skeletalMesh" + ], + "hosts": [ + "maya" + ], + "task_types": [], + "tasks": [], + "template_name": "maya2unreal" } ], "subset_grouping_profiles": [ @@ -204,6 +213,35 @@ } ] }, + "IntegrateHeroVersion": { + "enabled": true, + "optional": true, + "active": true, + "families": [ + "model", + "rig", + "look", + "pointcache", + "animation", + "setdress", + "layout", + "mayaScene", + "simpleUnrealTexture" + ], + "template_name_profiles": [ + { + "families": [ + "simpleUnrealTexture" + ], + "hosts": [ + "standalonepublisher" + ], + "task_types": [], + "task_names": [], + "template_name": "simpleUnrealTextureHero" + } + ] + }, "CleanUp": { "paterns": [], "remove_temp_renders": false @@ -241,6 +279,15 @@ "tasks": [], "template": "{family}{variant}" }, + { + "families": [ + "workfile" + ], + "hosts": [], + "task_types": [], + "tasks": [], + "template": "{family}{Task}" + }, { "families": [ "render" @@ -288,7 +335,7 @@ }, { "families": [ - "unrealStaticMesh" + "staticMesh" ], "hosts": [ "maya" @@ -296,6 +343,17 @@ "task_types": [], "tasks": [], "template": "S_{asset}{variant}" + }, + { + "families": [ + "skeletalMesh" + ], + "hosts": [ + "maya" + ], + "task_types": [], + "tasks": [], + "template": "SK_{asset}{variant}" } ] }, @@ -305,6 +363,13 @@ "task_types": [], "hosts": [], "workfile_template": "work" + }, + { + "task_types": [], + "hosts": [ + "unreal" + ], + "workfile_template": "unreal" } ], "last_workfile_on_startup": [ diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 19d9a95595..4cdfe1ca5d 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -52,7 +52,7 @@ "", "_Main" ], - "static_mesh_prefix": "S_", + "static_mesh_prefix": "S", "collision_prefixes": [ "UBX", "UCP", @@ -60,6 +60,11 @@ "UCX" ] }, + "CreateUnrealSkeletalMesh": { + "enabled": true, + "defaults": [], + "joint_hints": "jnt_org" + }, "CreateAnimation": { "enabled": true, "defaults": [ diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 6992fb6e3e..ab015271ff 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -106,6 +106,9 @@ ] } }, + "ExtractReviewData": { + "enabled": false + }, "ExtractReviewDataLut": { "enabled": false }, @@ -119,11 +122,10 @@ "families": [], "sebsets": [] }, - "extension": "mov", + "read_raw": false, 
"viewer_process_override": "", "bake_viewer_process": true, "bake_viewer_input_process": true, - "add_tags": [], "reformat_node_add": false, "reformat_node_config": [ { @@ -151,12 +153,28 @@ "name": "pbb", "value": false } - ] + ], + "extension": "mov", + "add_tags": [] } } }, "ExtractSlateFrame": { - "viewer_lut_raw": false + "viewer_lut_raw": false, + "key_value_mapping": { + "f_submission_note": [ + true, + "{comment}" + ], + "f_submitting_for": [ + true, + "{intent[value]}" + ], + "f_vfx_scope_of_work": [ + false, + "" + ] + } }, "IncrementScriptVersion": { "enabled": true, diff --git a/openpype/settings/defaults/project_settings/photoshop.json b/openpype/settings/defaults/project_settings/photoshop.json index 4bfc0e04e7..d9b7a8083f 100644 --- a/openpype/settings/defaults/project_settings/photoshop.json +++ b/openpype/settings/defaults/project_settings/photoshop.json @@ -12,13 +12,16 @@ "flatten_subset_template": "", "color_code_mapping": [] }, + "CollectInstances": { + "flatten_subset_template": "" + }, "ValidateContainers": { "enabled": true, "optional": true, "active": true }, "ValidateNaming": { - "invalid_chars": "[ \\\\/+\\*\\?\\(\\)\\[\\]\\{\\}:,]", + "invalid_chars": "[ \\\\/+\\*\\?\\(\\)\\[\\]\\{\\}:,;]", "replace_char": "_" }, "ExtractImage": { diff --git a/openpype/settings/defaults/project_settings/slack.json b/openpype/settings/defaults/project_settings/slack.json index d77b8c2208..c156fed08e 100644 --- a/openpype/settings/defaults/project_settings/slack.json +++ b/openpype/settings/defaults/project_settings/slack.json @@ -11,6 +11,7 @@ "task_types": [], "tasks": [], "subsets": [], + "review_upload_limit": 50.0, "channel_messages": [] } ] diff --git a/openpype/settings/defaults/project_settings/standalonepublisher.json b/openpype/settings/defaults/project_settings/standalonepublisher.json index 6858c4f34d..e36232d3f7 100644 --- a/openpype/settings/defaults/project_settings/standalonepublisher.json +++ b/openpype/settings/defaults/project_settings/standalonepublisher.json @@ -133,6 +133,22 @@ ], "help": "Texture files with UDIM together with worfile" }, + "create_simple_unreal_texture": { + "name": "simple_unreal_texture", + "label": "Simple Unreal Texture", + "family": "simpleUnrealTexture", + "icon": "Image", + "defaults": [], + "help": "Texture files with Unreal naming convention" + }, + "create_vdb": { + "name": "vdb", + "label": "VDB Volumetric Data", + "family": "vdbcache", + "icon": "cloud", + "defaults": [], + "help": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids" + }, "__dynamic_keys_labels__": { "create_workfile": "Workfile", "create_model": "Model", @@ -145,7 +161,9 @@ "create_matchmove": "Matchmove", "create_render": "Render", "create_mov_batch": "Batch Mov", - "create_texture_batch": "Batch Texture" + "create_texture_batch": "Batch Texture", + "create_simple_unreal_texture": "Simple Unreal Texture", + "create_vdb": "VDB Cache" } }, "publish": { diff --git a/openpype/settings/defaults/project_settings/tvpaint.json b/openpype/settings/defaults/project_settings/tvpaint.json index 528bf6de8e..88b5a598cd 100644 --- a/openpype/settings/defaults/project_settings/tvpaint.json +++ b/openpype/settings/defaults/project_settings/tvpaint.json @@ -1,6 +1,10 @@ { "stop_timer_on_application_exit": false, "publish": { + "CollectRenderScene": { + "enabled": false, + "render_layer": "Main" + }, "ExtractSequence": { "review_bg": [ 255, @@ -28,6 +32,11 @@ "enabled": true, "optional": true, 
"active": true + }, + "ExtractConvertToEXR": { + "enabled": false, + "replace_pngs": true, + "exr_compression": "ZIP" } }, "load": { diff --git a/openpype/settings/defaults/system_settings/general.json b/openpype/settings/defaults/system_settings/general.json index 5a3e39e5b6..e1785f8709 100644 --- a/openpype/settings/defaults/system_settings/general.json +++ b/openpype/settings/defaults/system_settings/general.json @@ -12,6 +12,7 @@ "linux": [], "darwin": [] }, + "local_env_white_list": [], "openpype_path": { "windows": [], "darwin": [], diff --git a/openpype/settings/defaults/system_settings/tools.json b/openpype/settings/defaults/system_settings/tools.json index 181236abe8..9e08465195 100644 --- a/openpype/settings/defaults/system_settings/tools.json +++ b/openpype/settings/defaults/system_settings/tools.json @@ -25,10 +25,18 @@ }, "variants": { "3-2": { - "MTOA_VERSION": "3.2" + "host_names": [], + "app_variants": [], + "environment": { + "MTOA_VERSION": "3.2" + } }, "3-1": { - "MTOA_VERSION": "3.1" + "host_names": [], + "app_variants": [], + "environment": { + "MTOA_VERSION": "3.1" + } }, "__dynamic_keys_labels__": { "3-2": "3.2", diff --git a/openpype/settings/entities/base_entity.py b/openpype/settings/entities/base_entity.py index 76700d605d..21ee44ae77 100644 --- a/openpype/settings/entities/base_entity.py +++ b/openpype/settings/entities/base_entity.py @@ -173,6 +173,10 @@ class BaseItemEntity(BaseEntity): # Entity has set `_project_override_value` (is not NOT_SET) self.had_project_override = False + self._default_log_invalid_types = True + self._studio_log_invalid_types = True + self._project_log_invalid_types = True + # Callbacks that are called on change. # - main current purspose is to register GUI callbacks self.on_change_callbacks = [] @@ -419,7 +423,7 @@ class BaseItemEntity(BaseEntity): raise InvalidValueType(self.valid_value_types, type(value), self.path) # TODO convert to private method - def _check_update_value(self, value, value_source): + def _check_update_value(self, value, value_source, log_invalid_types=True): """Validation of value on update methods. Update methods update data from currently saved settings so it is @@ -447,16 +451,17 @@ class BaseItemEntity(BaseEntity): if new_value is not NOT_SET: return new_value - # Warning log about invalid value type. - self.log.warning( - ( - "{} Got invalid value type for {} values." - " Expected types: {} | Got Type: {} | Value: \"{}\"" - ).format( - self.path, value_source, - self.valid_value_types, type(value), str(value) + if log_invalid_types: + # Warning log about invalid value type. + self.log.warning( + ( + "{} Got invalid value type for {} values." + " Expected types: {} | Got Type: {} | Value: \"{}\"" + ).format( + self.path, value_source, + self.valid_value_types, type(value), str(value) + ) ) - ) return NOT_SET def available_for_role(self, role_name=None): @@ -985,7 +990,7 @@ class ItemEntity(BaseItemEntity): return self.root_item.get_entity_from_path(path) @abstractmethod - def update_default_value(self, parent_values): + def update_default_value(self, parent_values, log_invalid_types=True): """Fill default values on startup or on refresh. Default values stored in `openpype` repository should update all items @@ -995,11 +1000,13 @@ class ItemEntity(BaseItemEntity): Args: parent_values (dict): Values of parent's item. But in case item is used as widget, `parent_values` contain value for item. + log_invalid_types (bool): Log invalid type of value. 
Used when + entity can have children with same keys and different types. """ pass @abstractmethod - def update_studio_value(self, parent_values): + def update_studio_value(self, parent_values, log_invalid_types=True): """Fill studio override values on startup or on refresh. Set studio value if is not set to NOT_SET, in that case studio @@ -1008,11 +1015,13 @@ class ItemEntity(BaseItemEntity): Args: parent_values (dict): Values of parent's item. But in case item is used as widget, `parent_values` contain value for item. + log_invalid_types (bool): Log invalid type of value. Used when + entity can have children with same keys and different types. """ pass @abstractmethod - def update_project_value(self, parent_values): + def update_project_value(self, parent_values, log_invalid_types=True): """Fill project override values on startup, refresh or project change. Set project value if is not set to NOT_SET, in that case project @@ -1021,5 +1030,7 @@ class ItemEntity(BaseItemEntity): Args: parent_values (dict): Values of parent's item. But in case item is used as widget, `parent_values` contain value for item. + log_invalid_types (bool): Log invalid type of value. Used when + entity can have children with same keys and different types. """ pass diff --git a/openpype/settings/entities/dict_conditional.py b/openpype/settings/entities/dict_conditional.py index 19f326aea7..88d2dc8296 100644 --- a/openpype/settings/entities/dict_conditional.py +++ b/openpype/settings/entities/dict_conditional.py @@ -518,12 +518,18 @@ class DictConditionalEntity(ItemEntity): output.update(self._current_metadata) return output - def _prepare_value(self, value): + def _prepare_value(self, value, log_invalid_types): if value is NOT_SET or self.enum_key not in value: return NOT_SET, NOT_SET enum_value = value.get(self.enum_key) if enum_value not in self.non_gui_children: + if log_invalid_types: + self.log.warning( + "{} Unknown enum key in default values: {}".format( + self.path, enum_value + ) + ) return NOT_SET, NOT_SET # Create copy of value before poping values @@ -551,22 +557,25 @@ class DictConditionalEntity(ItemEntity): return value, metadata - def update_default_value(self, value): + def update_default_value(self, value, log_invalid_types=True): """Update default values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "default") + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) self.has_default_value = value is not NOT_SET # TODO add value validation - value, metadata = self._prepare_value(value) + value, metadata = self._prepare_value(value, log_invalid_types) self._default_metadata = metadata if value is NOT_SET: - self.enum_entity.update_default_value(value) + self.enum_entity.update_default_value(value, log_invalid_types) for children_by_key in self.non_gui_children.values(): for child_obj in children_by_key.values(): - child_obj.update_default_value(value) + child_obj.update_default_value(value, log_invalid_types) return value_keys = set(value.keys()) @@ -574,7 +583,7 @@ class DictConditionalEntity(ItemEntity): expected_keys = set(self.non_gui_children[enum_value].keys()) expected_keys.add(self.enum_key) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in default values: {}".format( self.path, @@ -582,28 +591,37 @@ class DictConditionalEntity(ItemEntity): ) ) - self.enum_entity.update_default_value(enum_value) - for children_by_key in self.non_gui_children.values(): + self.enum_entity.update_default_value(enum_value, log_invalid_types) + + for enum_key, children_by_key in self.non_gui_children.items(): + _log_invalid_types = log_invalid_types + if _log_invalid_types: + _log_invalid_types = enum_key == enum_value + value_copy = copy.deepcopy(value) for key, child_obj in children_by_key.items(): child_value = value_copy.get(key, NOT_SET) - child_obj.update_default_value(child_value) + child_obj.update_default_value(child_value, _log_invalid_types) - def update_studio_value(self, value): + def update_studio_value(self, value, log_invalid_types=True): """Update studio override values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "studio override") - value, metadata = self._prepare_value(value) + + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) + value, metadata = self._prepare_value(value, log_invalid_types) self._studio_override_metadata = metadata self.had_studio_override = metadata is not NOT_SET if value is NOT_SET: - self.enum_entity.update_studio_value(value) + self.enum_entity.update_studio_value(value, log_invalid_types) for children_by_key in self.non_gui_children.values(): for child_obj in children_by_key.values(): - child_obj.update_studio_value(value) + child_obj.update_studio_value(value, log_invalid_types) return value_keys = set(value.keys()) @@ -611,7 +629,7 @@ class DictConditionalEntity(ItemEntity): expected_keys = set(self.non_gui_children[enum_value]) expected_keys.add(self.enum_key) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in studio overrides: {}".format( self.path, @@ -619,28 +637,36 @@ class DictConditionalEntity(ItemEntity): ) ) - self.enum_entity.update_studio_value(enum_value) - for children_by_key in self.non_gui_children.values(): + self.enum_entity.update_studio_value(enum_value, log_invalid_types) + for enum_key, children_by_key in self.non_gui_children.items(): + _log_invalid_types = log_invalid_types + if _log_invalid_types: + _log_invalid_types = enum_key == enum_value + value_copy = copy.deepcopy(value) for key, child_obj in children_by_key.items(): child_value = value_copy.get(key, NOT_SET) - child_obj.update_studio_value(child_value) + child_obj.update_studio_value(child_value, _log_invalid_types) - def update_project_value(self, value): + def update_project_value(self, value, log_invalid_types=True): """Update project override values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "project override") - value, metadata = self._prepare_value(value) + + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) + value, metadata = self._prepare_value(value, log_invalid_types) self._project_override_metadata = metadata self.had_project_override = metadata is not NOT_SET if value is NOT_SET: - self.enum_entity.update_project_value(value) + self.enum_entity.update_project_value(value, log_invalid_types) for children_by_key in self.non_gui_children.values(): for child_obj in children_by_key.values(): - child_obj.update_project_value(value) + child_obj.update_project_value(value, log_invalid_types) return value_keys = set(value.keys()) @@ -648,7 +674,7 @@ class DictConditionalEntity(ItemEntity): expected_keys = set(self.non_gui_children[enum_value]) expected_keys.add(self.enum_key) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in project overrides: {}".format( self.path, @@ -656,12 +682,16 @@ class DictConditionalEntity(ItemEntity): ) ) - self.enum_entity.update_project_value(enum_value) - for children_by_key in self.non_gui_children.values(): + self.enum_entity.update_project_value(enum_value, log_invalid_types) + for enum_key, children_by_key in self.non_gui_children.items(): + _log_invalid_types = log_invalid_types + if _log_invalid_types: + _log_invalid_types = enum_key == enum_value + value_copy = copy.deepcopy(value) for key, child_obj in children_by_key.items(): child_value = value_copy.get(key, NOT_SET) - child_obj.update_project_value(child_value) + child_obj.update_project_value(child_value, _log_invalid_types) def _discard_changes(self, on_change_trigger): self._ignore_child_changes = True diff --git a/openpype/settings/entities/dict_immutable_keys_entity.py b/openpype/settings/entities/dict_immutable_keys_entity.py index 060f8d522e..0209681e95 100644 --- a/openpype/settings/entities/dict_immutable_keys_entity.py +++ b/openpype/settings/entities/dict_immutable_keys_entity.py @@ -414,12 +414,16 @@ class DictImmutableKeysEntity(ItemEntity): return value, metadata - def update_default_value(self, value): + def update_default_value(self, value, log_invalid_types=True): """Update default values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "default") + + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) self.has_default_value = value is not NOT_SET # TODO add value validation value, metadata = self._prepare_value(value) @@ -427,13 +431,13 @@ class DictImmutableKeysEntity(ItemEntity): if value is NOT_SET: for child_obj in self.non_gui_children.values(): - child_obj.update_default_value(value) + child_obj.update_default_value(value, log_invalid_types) return value_keys = set(value.keys()) expected_keys = set(self.non_gui_children) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in default values: {}".format( self.path, @@ -443,27 +447,31 @@ class DictImmutableKeysEntity(ItemEntity): for key, child_obj in self.non_gui_children.items(): child_value = value.get(key, NOT_SET) - child_obj.update_default_value(child_value) + child_obj.update_default_value(child_value, log_invalid_types) - def update_studio_value(self, value): + def update_studio_value(self, value, log_invalid_types=True): """Update studio override values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "studio override") + + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) value, metadata = self._prepare_value(value) self._studio_override_metadata = metadata self.had_studio_override = metadata is not NOT_SET if value is NOT_SET: for child_obj in self.non_gui_children.values(): - child_obj.update_studio_value(value) + child_obj.update_studio_value(value, log_invalid_types) return value_keys = set(value.keys()) expected_keys = set(self.non_gui_children) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in studio overrides: {}".format( self.path, @@ -472,27 +480,31 @@ class DictImmutableKeysEntity(ItemEntity): ) for key, child_obj in self.non_gui_children.items(): child_value = value.get(key, NOT_SET) - child_obj.update_studio_value(child_value) + child_obj.update_studio_value(child_value, log_invalid_types) - def update_project_value(self, value): + def update_project_value(self, value, log_invalid_types=True): """Update project override values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "project override") + + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) value, metadata = self._prepare_value(value) self._project_override_metadata = metadata self.had_project_override = metadata is not NOT_SET if value is NOT_SET: for child_obj in self.non_gui_children.values(): - child_obj.update_project_value(value) + child_obj.update_project_value(value, log_invalid_types) return value_keys = set(value.keys()) expected_keys = set(self.non_gui_children) unknown_keys = value_keys - expected_keys - if unknown_keys: + if unknown_keys and log_invalid_types: self.log.warning( "{} Unknown keys in project overrides: {}".format( self.path, @@ -502,7 +514,7 @@ class DictImmutableKeysEntity(ItemEntity): for key, child_obj in self.non_gui_children.items(): child_value = value.get(key, NOT_SET) - child_obj.update_project_value(child_value) + child_obj.update_project_value(child_value, log_invalid_types) def _discard_changes(self, on_change_trigger): self._ignore_child_changes = True @@ -694,37 +706,48 @@ class RootsDictEntity(DictImmutableKeysEntity): self._metadata_are_modified = False self._current_metadata = {} - def update_default_value(self, value): + def update_default_value(self, value, log_invalid_types=True): """Update default values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "default") + + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) value, _ = self._prepare_value(value) self._default_value = value self._default_metadata = {} self.has_default_value = value is not NOT_SET - def update_studio_value(self, value): + def update_studio_value(self, value, log_invalid_types=True): """Update studio override values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "studio override") + + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) value, _ = self._prepare_value(value) self._studio_value = value self._studio_override_metadata = {} self.had_studio_override = value is not NOT_SET - def update_project_value(self, value): + def update_project_value(self, value, log_invalid_types=True): """Update project override values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "project override") + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) value, _metadata = self._prepare_value(value) self._project_value = value @@ -886,37 +909,48 @@ class SyncServerSites(DictImmutableKeysEntity): self._metadata_are_modified = False self._current_metadata = {} - def update_default_value(self, value): + def update_default_value(self, value, log_invalid_types=True): """Update default values. Not an api method, should be called by parent. 
""" - value = self._check_update_value(value, "default") + + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) value, _ = self._prepare_value(value) self._default_value = value self._default_metadata = {} self.has_default_value = value is not NOT_SET - def update_studio_value(self, value): + def update_studio_value(self, value, log_invalid_types=True): """Update studio override values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "studio override") + + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) value, _ = self._prepare_value(value) self._studio_value = value self._studio_override_metadata = {} self.had_studio_override = value is not NOT_SET - def update_project_value(self, value): + def update_project_value(self, value, log_invalid_types=True): """Update project override values. Not an api method, should be called by parent. """ - value = self._check_update_value(value, "project override") + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) value, _metadata = self._prepare_value(value) self._project_value = value diff --git a/openpype/settings/entities/dict_mutable_keys_entity.py b/openpype/settings/entities/dict_mutable_keys_entity.py index 6b9c0bc7ed..a0c93b97a7 100644 --- a/openpype/settings/entities/dict_mutable_keys_entity.py +++ b/openpype/settings/entities/dict_mutable_keys_entity.py @@ -393,11 +393,15 @@ class DictMutableKeysEntity(EndpointEntity): value = self.value_on_not_set using_values_from_state = False + log_invalid_types = True if state is OverrideState.PROJECT: + log_invalid_types = self._project_log_invalid_types using_values_from_state = using_project_overrides elif state is OverrideState.STUDIO: + log_invalid_types = self._studio_log_invalid_types using_values_from_state = using_studio_overrides elif state is OverrideState.DEFAULTS: + log_invalid_types = self._default_log_invalid_types using_values_from_state = using_default_values new_value = copy.deepcopy(value) @@ -437,11 +441,11 @@ class DictMutableKeysEntity(EndpointEntity): if not label: label = metadata_labels.get(new_key) - child_entity.update_default_value(_value) + child_entity.update_default_value(_value, log_invalid_types) if using_project_overrides: - child_entity.update_project_value(_value) + child_entity.update_project_value(_value, log_invalid_types) elif using_studio_overrides: - child_entity.update_studio_value(_value) + child_entity.update_studio_value(_value, log_invalid_types) if label: children_label_by_id[child_entity.id] = label @@ -598,8 +602,11 @@ class DictMutableKeysEntity(EndpointEntity): metadata[key] = value.pop(key) return value, metadata - def update_default_value(self, value): - value = self._check_update_value(value, "default") + def update_default_value(self, value, log_invalid_types=True): + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) has_default_value = value is not NOT_SET if has_default_value: for required_key in self.required_keys: @@ -611,15 +618,21 @@ class DictMutableKeysEntity(EndpointEntity): self._default_value = value self._default_metadata = metadata - def update_studio_value(self, value): - value = self._check_update_value(value, "studio override") + def update_studio_value(self, 
value, log_invalid_types=True): + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) value, metadata = self._prepare_value(value) self._studio_override_value = value self._studio_override_metadata = metadata self.had_studio_override = value is not NOT_SET - def update_project_value(self, value): - value = self._check_update_value(value, "project override") + def update_project_value(self, value, log_invalid_types=True): + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) value, metadata = self._prepare_value(value) self._project_override_value = value self._project_override_metadata = metadata @@ -686,9 +699,12 @@ class DictMutableKeysEntity(EndpointEntity): if not self._can_remove_from_project_override: return + log_invalid_types = True if self._has_studio_override: + log_invalid_types = self._studio_log_invalid_types value = self._studio_override_value elif self.has_default_value: + log_invalid_types = self._default_log_invalid_types value = self._default_value else: value = self.value_on_not_set @@ -709,9 +725,9 @@ class DictMutableKeysEntity(EndpointEntity): for _key, _value in new_value.items(): new_key = self._convert_to_regex_valid_key(_key) child_entity = self._add_key(new_key) - child_entity.update_default_value(_value) + child_entity.update_default_value(_value, log_invalid_types) if self._has_studio_override: - child_entity.update_studio_value(_value) + child_entity.update_studio_value(_value, log_invalid_types) label = metadata_labels.get(_key) if label: diff --git a/openpype/settings/entities/input_entities.py b/openpype/settings/entities/input_entities.py index 7512d7bfcc..3dcd238672 100644 --- a/openpype/settings/entities/input_entities.py +++ b/openpype/settings/entities/input_entities.py @@ -90,18 +90,27 @@ class EndpointEntity(ItemEntity): def require_restart(self): return self.has_unsaved_changes - def update_default_value(self, value): - value = self._check_update_value(value, "default") + def update_default_value(self, value, log_invalid_types=True): + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) self._default_value = value self.has_default_value = value is not NOT_SET - def update_studio_value(self, value): - value = self._check_update_value(value, "studio override") + def update_studio_value(self, value, log_invalid_types=True): + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) self._studio_override_value = value self.had_studio_override = bool(value is not NOT_SET) - def update_project_value(self, value): - value = self._check_update_value(value, "project override") + def update_project_value(self, value, log_invalid_types=True): + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) self._project_override_value = value self.had_project_override = bool(value is not NOT_SET) @@ -590,22 +599,26 @@ class RawJsonEntity(InputEntity): metadata[key] = value.pop(key) return value, metadata - def update_default_value(self, value): - value = self._check_update_value(value, "default") + def update_default_value(self, value, log_invalid_types=True): + value = self._check_update_value(value, "default", log_invalid_types) self.has_default_value = value 
is not NOT_SET value, metadata = self._prepare_value(value) self._default_value = value self.default_metadata = metadata - def update_studio_value(self, value): - value = self._check_update_value(value, "studio override") + def update_studio_value(self, value, log_invalid_types=True): + value = self._check_update_value( + value, "studio override", log_invalid_types + ) self.had_studio_override = value is not NOT_SET value, metadata = self._prepare_value(value) self._studio_override_value = value self.studio_override_metadata = metadata - def update_project_value(self, value): - value = self._check_update_value(value, "project override") + def update_project_value(self, value, log_invalid_types=True): + value = self._check_update_value( + value, "project override", log_invalid_types + ) self.had_project_override = value is not NOT_SET value, metadata = self._prepare_value(value) self._project_override_value = value diff --git a/openpype/settings/entities/item_entities.py b/openpype/settings/entities/item_entities.py index 9c6f428b97..3b756e4ede 100644 --- a/openpype/settings/entities/item_entities.py +++ b/openpype/settings/entities/item_entities.py @@ -173,14 +173,17 @@ class PathEntity(ItemEntity): self._ignore_missing_defaults = ignore_missing_defaults self.child_obj.set_override_state(state, ignore_missing_defaults) - def update_default_value(self, value): - self.child_obj.update_default_value(value) + def update_default_value(self, value, log_invalid_types=True): + self._default_log_invalid_types = log_invalid_types + self.child_obj.update_default_value(value, log_invalid_types) - def update_project_value(self, value): - self.child_obj.update_project_value(value) + def update_project_value(self, value, log_invalid_types=True): + self._project_log_invalid_types = log_invalid_types + self.child_obj.update_project_value(value, log_invalid_types) - def update_studio_value(self, value): - self.child_obj.update_studio_value(value) + def update_studio_value(self, value, log_invalid_types=True): + self._studio_log_invalid_types = log_invalid_types + self.child_obj.update_studio_value(value, log_invalid_types) def _discard_changes(self, *args, **kwargs): self.child_obj.discard_changes(*args, **kwargs) @@ -472,9 +475,9 @@ class ListStrictEntity(ItemEntity): self._has_project_override = False - def _check_update_value(self, value, value_type): + def _check_update_value(self, value, value_type, log_invalid_types=True): value = super(ListStrictEntity, self)._check_update_value( - value, value_type + value, value_type, log_invalid_types ) if value is NOT_SET: return value @@ -484,15 +487,16 @@ class ListStrictEntity(ItemEntity): if value_len == child_len: return value - self.log.warning( - ( - "{} Amount of strict list items in {} values is" - " not same as expected. Expected {} items. Got {} items. {}" - ).format( - self.path, value_type, - child_len, value_len, str(value) + if log_invalid_types: + self.log.warning( + ( + "{} Amount of strict list items in {} values is not same" + " as expected. Expected {} items. Got {} items. 
{}" + ).format( + self.path, value_type, + child_len, value_len, str(value) + ) ) - ) if value_len < child_len: # Fill missing values with NOT_SET @@ -504,36 +508,51 @@ class ListStrictEntity(ItemEntity): value.pop(child_len) return value - def update_default_value(self, value): - value = self._check_update_value(value, "default") + def update_default_value(self, value, log_invalid_types=True): + self._default_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "default", log_invalid_types + ) self.has_default_value = value is not NOT_SET if value is NOT_SET: for child_obj in self.children: - child_obj.update_default_value(value) + child_obj.update_default_value(value, log_invalid_types) else: for idx, item_value in enumerate(value): - self.children[idx].update_default_value(item_value) + self.children[idx].update_default_value( + item_value, log_invalid_types + ) - def update_studio_value(self, value): - value = self._check_update_value(value, "studio override") + def update_studio_value(self, value, log_invalid_types=True): + self._studio_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "studio override", log_invalid_types + ) if value is NOT_SET: for child_obj in self.children: - child_obj.update_studio_value(value) + child_obj.update_studio_value(value, log_invalid_types) else: for idx, item_value in enumerate(value): - self.children[idx].update_studio_value(item_value) + self.children[idx].update_studio_value( + item_value, log_invalid_types + ) - def update_project_value(self, value): - value = self._check_update_value(value, "project override") + def update_project_value(self, value, log_invalid_types=True): + self._project_log_invalid_types = log_invalid_types + value = self._check_update_value( + value, "project override", log_invalid_types + ) if value is NOT_SET: for child_obj in self.children: - child_obj.update_project_value(value) + child_obj.update_project_value(value, log_invalid_types) else: for idx, item_value in enumerate(value): - self.children[idx].update_project_value(item_value) + self.children[idx].update_project_value( + item_value, log_invalid_types + ) def reset_callbacks(self): super(ListStrictEntity, self).reset_callbacks() diff --git a/openpype/settings/entities/list_entity.py b/openpype/settings/entities/list_entity.py index 0268c208bb..5d6a64b3ea 100644 --- a/openpype/settings/entities/list_entity.py +++ b/openpype/settings/entities/list_entity.py @@ -325,16 +325,24 @@ class ListEntity(EndpointEntity): for item in value: child_obj = self._add_new_item() - child_obj.update_default_value(item) + child_obj.update_default_value( + item, self._default_log_invalid_types + ) if self._override_state is OverrideState.PROJECT: if self.had_project_override: - child_obj.update_project_value(item) + child_obj.update_project_value( + item, self._project_log_invalid_types + ) elif self.had_studio_override: - child_obj.update_studio_value(item) + child_obj.update_studio_value( + item, self._studio_log_invalid_types + ) elif self._override_state is OverrideState.STUDIO: if self.had_studio_override: - child_obj.update_studio_value(item) + child_obj.update_studio_value( + item, self._studio_log_invalid_types + ) for child_obj in self.children: child_obj.set_override_state( @@ -466,16 +474,24 @@ class ListEntity(EndpointEntity): for item in value: child_obj = self._add_new_item() - child_obj.update_default_value(item) + child_obj.update_default_value( + item, self._default_log_invalid_types + ) if 
self._override_state is OverrideState.PROJECT: if self.had_project_override: - child_obj.update_project_value(item) + child_obj.update_project_value( + item, self._project_log_invalid_types + ) elif self.had_studio_override: - child_obj.update_studio_value(item) + child_obj.update_studio_value( + item, self._studio_log_invalid_types + ) elif self._override_state is OverrideState.STUDIO: if self.had_studio_override: - child_obj.update_studio_value(item) + child_obj.update_studio_value( + item, self._studio_log_invalid_types + ) child_obj.set_override_state( self._override_state, self._ignore_missing_defaults diff --git a/openpype/settings/entities/schemas/README.md b/openpype/settings/entities/schemas/README.md index fbfd699937..b4bfef2972 100644 --- a/openpype/settings/entities/schemas/README.md +++ b/openpype/settings/entities/schemas/README.md @@ -745,6 +745,7 @@ How output of the schema could look like on save: ### label - add label with note or explanations - it is possible to use html tags inside the label +- set `word_wrap` to `true`/`false` to enable word wrapping in the UI (default: `false`) ``` { diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index e6097a2b14..cd1741ba8b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -30,6 +30,24 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CollectDeadlinePools", + "label": "Default Deadline Pools", + "children": [ + { + "type": "text", + "key": "primary_pool", + "label": "Primary Pool" + }, + { + "type": "text", + "key": "secondary_pool", + "label": "Secondary Pool" + } + ] + }, { "type": "dict", "collapsible": true, @@ -117,6 +135,16 @@ "key": "asset_dependencies", "label": "Use Asset dependencies" }, + { + "type": "number", + "key": "priority", + "label": "Priority" + }, + { + "type": "number", + "key": "tile_priority", + "label": "Tile Assembler Priority" + }, { "type": "text", "key": "group", @@ -192,6 +220,9 @@ "key": "use_published", "label": "Use Published scene" }, + { + "type": "splitter" + }, { "type": "number", "key": "priority", @@ -203,20 +234,21 @@ "label": "Chunk Size" }, { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" + "type": "number", + "key": "concurrent_tasks", + "label": "Number of concurrent tasks" }, { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" + "type": "splitter" }, { "type": "text", "key": "group", "label": "Group" }, + { + "type": "splitter" + }, { "type": "text", "key": "department", @@ -289,16 +321,6 @@ "key": "chunk_size", "label": "Chunk Size" }, - { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" - }, - { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" - }, { "type": "text", "key": "group", @@ -348,16 +370,6 @@ "key": "chunk_size", "label": "Chunk Size" }, - { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" - }, - { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" - }, { "type": "text", "key": "group", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json index e352f8b132..fe11d63ac2 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json @@ -136,6 +136,87 @@ "key": "publish", "label": "Publish plugins", "children": [ + { + "type": "dict", + "collapsible": true, + "key": "CollectTimelineInstances", + "label": "Collect Timeline Instances", + "is_group": true, + "children": [ + { + "type": "collapsible-wrap", + "label": "XML presets attributes parsable from segment comments", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "xml_preset_attrs_from_comments", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "name", + "label": "Attribute name" + }, + { + "key": "type", + "label": "Attribute type", + "type": "enum", + "default": "number", + "enum_items": [ + { + "number": "number" + }, + { + "float": "float" + }, + { + "string": "string" + } + ] + } + ] + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Add tasks", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "add_tasks", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "name", + "label": "Task name" + }, + { + "key": "type", + "label": "Task type", + "multiselection": false, + "type": "task-types-enum" + }, + { + "type": "boolean", + "key": "create_batch_group", + "label": "Create batch group" + } + ] + } + } + ] + } + ] + }, { "type": "dict", "collapsible": true, @@ -221,6 +302,20 @@ "type": "text", "multiline": false } + }, + { + "type": "separator" + }, + { + "type": "boolean", + "key": "load_to_batch_group", + "label": "Load to batch group reel", + "default": false + }, + { + "type": "text", + "key": "batch_group_loader_name", + "label": "Use loader name" } ] } @@ -281,6 +376,48 @@ "label": "Clip name template" } ] + }, + { + "type": "dict", + "collapsible": true, + "key": "LoadClipBatch", + "label": "Load as clip to current batch", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "families", + "label": "Families", + "object_type": "text" + }, + { + "type": "list", + "key": "representations", + "label": "Representations", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "reel_name", + "label": "Reel name" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "clip_name_template", + "label": "Clip name template" + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index cb59e9d67e..47effb3dbd 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -725,6 +725,31 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "CollectFtrackCustomAttributeData", + "label": "Collect Custom Attribute Data", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Collect custom attributes from ftrack for ftrack entities that can be used in some templates during publishing." 
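The `custom_attribute_keys` list that closes this settings block feeds a Pyblish collector. A minimal sketch of how such a collector could consume it; the `ftrackCustomAttributes` context key and the `_query_attr_values` helper are assumptions for illustration (the real plugin lives in OpenPype's ftrack module and is not shown in this diff):

```python
import pyblish.api


class CollectCustomAttributeDataSketch(pyblish.api.ContextPlugin):
    """Sketch of a collector driven by "custom_attribute_keys".

    In OpenPype the attribute would be filled from project settings;
    here it is a plain class attribute and the ftrack query is a
    placeholder.
    """

    order = pyblish.api.CollectorOrder
    label = "Collect Custom Attribute Data"
    custom_attribute_keys = []

    def process(self, context):
        # Hypothetical helper standing in for a real ftrack session query.
        attr_values = self._query_attr_values(context)
        # Exposed so publish templates can format with these values.
        context.data["ftrackCustomAttributes"] = {
            key: attr_values.get(key)
            for key in self.custom_attribute_keys
        }

    def _query_attr_values(self, context):
        # Placeholder: a real implementation asks the ftrack API.
        return {}
```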
+ }, + { + "type": "list", + "key": "custom_attribute_keys", + "label": "Custom attribute keys", + "object_type": "text" + } + ] + }, { "type": "dict", "collapsible": true, @@ -738,10 +763,15 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "label", + "label": "Template may contain formatting keys intent, comment, host_name, app_name, app_label and published_paths." + }, { "type": "text", - "key": "note_with_intent_template", - "label": "Note with intent template" + "key": "note_template", + "label": "Note template", + "multiline": true }, { "type": "list", @@ -751,6 +781,44 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateFtrackDescription", + "label": "Integrate Ftrack Description", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Add description to integrated AssetVersion." + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "type": "label", + "label": "Template may contain formatting keys intent and comment." + }, + { + "type": "text", + "key": "description_template", + "label": "Description template" + } + ] + }, { "type": "dict", "collapsible": true, @@ -784,6 +852,12 @@ "object_type": { "type": "text" } + }, + { + "type": "boolean", + "key": "keep_first_subset_name_for_review", + "label": "Make subset name as first asset name", + "default": true } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json b/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json index b499ccc4be..badf94229b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json @@ -42,7 +42,7 @@ "children": [ { "type": "label", - "label": "Set color for publishable layers, set its resulting family and template for subset name. Can create flatten image from published instances" + "label": "Set color for publishable layers, set its resulting family and template for subset name. 
\nCan create flattened image from published instances. (Applicable only for remote publishing!)" }, { "type": "boolean", @@ -108,6 +108,23 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CollectInstances", + "label": "Collect Instances", + "children": [ + { + "type": "label", + "label": "Name for the flattened image created if no image instance is present" + }, + { + "type": "text", + "key": "flatten_subset_template", + "label": "Subset template for flattened image" + } + ] + }, { "type": "schema_template", "name": "template_publish_plugin", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json index 8286ed1193..20fe5b0855 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json @@ -16,6 +16,30 @@ "key": "publish", "label": "Publish plugins", "children": [ + { + "type": "dict", + "collapsible": true, + "key": "CollectRenderScene", + "label": "Collect Render Scene", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "It is possible to fill 'render_layer' or 'variant' in subset name template with custom value.
- value of 'render_pass' is always \"beauty\"." + }, + { + "type": "text", + "key": "render_layer", + "label": "Render Layer" + } + ] + }, { "type": "dict", "collapsible": true, @@ -78,6 +102,47 @@ "docstring": "Validate if shot on instances metadata is same as workfiles shot" } ] + }, + { + "type": "dict", + "key": "ExtractConvertToEXR", + "label": "Extract Convert To EXR", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "WARNING: This plugin does not work on MacOS (using OIIO tool)." + }, + { + "type": "boolean", + "key": "replace_pngs", + "label": "Replace source PNG" + }, + { + "type": "enum", + "key": "exr_compression", + "label": "EXR Compression", + "multiselection": false, + "enum_items": [ + {"ZIP": "ZIP"}, + {"ZIPS": "ZIPS"}, + {"DWAA": "DWAA"}, + {"DWAB": "DWAB"}, + {"PIZ": "PIZ"}, + {"RLE": "RLE"}, + {"PXR24": "PXR24"}, + {"B44": "B44"}, + {"B44A": "B44A"}, + {"none": "None"} + ] + } + ] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index 12043d4205..061874e31c 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -122,32 +122,6 @@ } ] }, - { - "type": "dict", - "collapsible": true, - "checkbox_key": "enabled", - "key": "IntegrateHeroVersion", - "label": "IntegrateHeroVersion", - "is_group": true, - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "boolean", - "key": "optional", - "label": "Optional" - }, - { - "key": "families", - "label": "Families", - "type": "list", - "object_type": "text" - } - ] - }, { "type": "dict", "collapsible": true, @@ -652,6 +626,80 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateHeroVersion", + "label": "IntegrateHeroVersion", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "list", + "key": "template_name_profiles", + "label": "Template name profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template_name", + "label": "Template name", + "tooltip": "Name of template from Anatomy templates" + } + ] + } + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json index 0544b4bab7..6dc10ed2a5 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json +++ 
b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json @@ -97,6 +97,32 @@ } ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateUnrealSkeletalMesh", + "label": "Create Unreal - Skeletal Mesh", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + }, + { + "type": "text", + "key": "joint_hints", + "label": "Joint root hint" + } + ] + }, { "type": "schema_template", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json index 1636a8d700..4a796f1933 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json @@ -138,6 +138,21 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "ExtractReviewData", + "label": "ExtractReviewData", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "dict", "collapsible": true, @@ -208,9 +223,10 @@ "type": "separator" }, { - "type": "text", - "key": "extension", - "label": "File extension" + "type": "boolean", + "key": "read_raw", + "label": "Read colorspace RAW", + "default": false }, { "type": "text", @@ -227,12 +243,6 @@ "key": "bake_viewer_input_process", "label": "Bake Viewer Input Process (LUTs)" }, - { - "key": "add_tags", - "label": "Add additional tags to representations", - "type": "list", - "object_type": "text" - }, { "type": "separator" }, @@ -246,7 +256,7 @@ "type": "collapsible-wrap", "label": "Reformat Node Knobs", "collapsible": true, - "collapsed": false, + "collapsed": true, "children": [ { "type": "list", @@ -347,6 +357,20 @@ } } ] + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "extension", + "label": "Write node file type" + }, + { + "key": "add_tags", + "label": "Add additional tags to representations", + "type": "list", + "object_type": "text" } ] } @@ -365,6 +389,59 @@ "type": "boolean", "key": "viewer_lut_raw", "label": "Viewer LUT raw" + }, + { + "type": "separator" + }, + { + "type": "label", + "label": "Fill specific slate node values with templates. 
Uncheck the checkbox to not change the value.", + "word_wrap": true + }, + { + "type": "dict", + "key": "key_value_mapping", + "children": [ + { + "type": "list-strict", + "key": "f_submission_note", + "label": "Submission Note:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + }, + { + "type": "list-strict", + "key": "f_submitting_for", + "label": "Submission For:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + }, + { + "type": "list-strict", + "key": "f_vfx_scope_of_work", + "label": "VFX Scope Of Work:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + } + ] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_color.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_color.json deleted file mode 100644 index af8fd9dae4..0000000000 --- a/openpype/settings/entities/schemas/projects_schema/schemas/template_color.json +++ /dev/null @@ -1,30 +0,0 @@ -[ - { - "type": "list-strict", - "key": "{name}", - "label": "{label}", - "object_types": [ - { - "label": "Red", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - }, - { - "label": "Green", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - }, - { - "label": "Blue", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - } - ] - } -] diff --git a/openpype/settings/entities/schemas/system_schema/schema_general.json b/openpype/settings/entities/schemas/system_schema/schema_general.json index 6306317df8..fcab4cd5d8 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_general.json +++ b/openpype/settings/entities/schemas/system_schema/schema_general.json @@ -110,6 +110,17 @@ { "type": "splitter" }, + { + "type": "list", + "key": "local_env_white_list", + "label": "Local overrides of environment variable keys", + "tooltip": "Environment variable keys that can be changed per machine using Local settings UI.\nKey changes are applied only on applications and tools environments.", + "use_label_wrap": true, + "object_type": "text" + }, + { + "type": "splitter" + }, { "type": "collapsible-wrap", "label": "OpenPype deployment control", diff --git a/openpype/settings/entities/schemas/system_schema/schema_tools.json b/openpype/settings/entities/schemas/system_schema/schema_tools.json index 2346bef36d..7962fdd465 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_tools.json +++ b/openpype/settings/entities/schemas/system_schema/schema_tools.json @@ -25,7 +25,30 @@ "key": "variants", "collapsible_key": true, "object_type": { - "type": "raw-json" + "type": "dict", + "children": [ + { + "key": "host_names", + "label": "Hosts", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "app_variants", + "label": "Applications", + "type": "apps-enum", + "multiselection": true, + "tooltip": "Applications are not \"live\" and may require to Save and refresh settings UI to update values." 
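The tool-variant schema being introduced here replaces a single raw-json value with a structured dict (`host_names`, `app_variants` and, on the next line, a nested `environment` raw-json). Overrides saved before this change are wrapped on load by `_system_settings_backwards_compatible_conversion` in `openpype/settings/lib.py` further down in this diff. A standalone sketch of that transformation, with assumed sample data and the `METADATA_KEYS` handling left out for brevity:

```python
# Old overrides stored bare environments; new ones wrap them under
# "environment". The version keys below are assumed sample data.
variants = {
    "13-5": {"MTOA": "3.2.0"},                   # old shape: bare envs
    "14-0": {"environment": {"MTOA": "4.0.0"}},  # new shape: wrapped
}

for key in set(variants.keys()):
    variant_value = variants[key]
    if "environment" not in variant_value:
        variants[key] = {"environment": variant_value}

# Every entry now matches the new schema; "host_names" and
# "app_variants" stay unset until edited in the settings UI.
print(variants)
```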
+ }, + { + "type": "separator" + }, + { + "key": "environment", + "label": "Environments", + "type": "raw-json" + } + ] } } ] diff --git a/openpype/settings/lib.py b/openpype/settings/lib.py index 1d303564d5..937329b417 100644 --- a/openpype/settings/lib.py +++ b/openpype/settings/lib.py @@ -265,11 +265,43 @@ def save_project_anatomy(project_name, anatomy_data): raise SaveWarningExc(warnings) +def _system_settings_backwards_compatible_conversion(studio_overrides): + # Backwards compatibility of tools 3.9.1 - 3.9.2 to keep + # "tools" environments + if ( + "tools" in studio_overrides + and "tool_groups" in studio_overrides["tools"] + ): + tool_groups = studio_overrides["tools"]["tool_groups"] + for tool_group, group_value in tool_groups.items(): + if tool_group in METADATA_KEYS: + continue + + variants = group_value.get("variants") + if not variants: + continue + + for key in set(variants.keys()): + if key in METADATA_KEYS: + continue + + variant_value = variants[key] + if "environment" not in variant_value: + variants[key] = { + "environment": variant_value + } + + @require_handler def get_studio_system_settings_overrides(return_version=False): - return _SETTINGS_HANDLER.get_studio_system_settings_overrides( + output = _SETTINGS_HANDLER.get_studio_system_settings_overrides( return_version ) + value = output + if return_version: + value, version = output + _system_settings_backwards_compatible_conversion(value) + return output @require_handler @@ -1081,6 +1113,14 @@ def get_general_environments(): clear_metadata_from_settings(environments) + whitelist_envs = result["general"].get("local_env_white_list") + if whitelist_envs: + local_settings = get_local_settings() + local_envs = local_settings.get("environments") or {} + for key, value in local_envs.items(): + if key in whitelist_envs and key in environments: + environments[key] = value + return environments diff --git a/openpype/style/style.css b/openpype/style/style.css index df83600973..b5f6962eee 100644 --- a/openpype/style/style.css +++ b/openpype/style/style.css @@ -1269,6 +1269,14 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { background: #21252B; } +/* Workfiles */ +#WorkfilesPublishedContextSelect { + background: rgba(0, 0, 0, 127); +} +#WorkfilesPublishedContextSelect QLabel { + font-size: 17pt; +} + /* Tray */ #TrayRestartButton { background: {color:restart-btn-bg}; diff --git a/openpype/tests/test_avalon_plugin_presets.py b/openpype/tests/test_avalon_plugin_presets.py index f1b1a94713..464c216d6f 100644 --- a/openpype/tests/test_avalon_plugin_presets.py +++ b/openpype/tests/test_avalon_plugin_presets.py @@ -1,6 +1,9 @@ -import avalon.api as api -import openpype -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + install_host, + LegacyCreator, + register_creator_plugin, + discover_creator_plugins, +) class MyTestCreator(LegacyCreator): @@ -19,16 +22,15 @@ class Test: __name__ = "test" ls = len - def __call__(self): - pass + @staticmethod + def install(): + register_creator_plugin(MyTestCreator) def test_avalon_plugin_presets(monkeypatch, printer): + install_host(Test) - openpype.install() - api.register_host(Test()) - api.register_plugin(LegacyCreator, MyTestCreator) - plugins = api.discover(LegacyCreator) + plugins = discover_creator_plugins() printer("Test if we got our test plugin") assert MyTestCreator in plugins for p in plugins: diff --git a/openpype/tools/creator/model.py b/openpype/tools/creator/model.py index ef61c6e0f0..d3d60b96f2 100644 --- a/openpype/tools/creator/model.py 
+++ b/openpype/tools/creator/model.py @@ -1,8 +1,7 @@ import uuid from Qt import QtGui, QtCore -from avalon import api -from openpype.pipeline import LegacyCreator +from openpype.pipeline import discover_legacy_creator_plugins from . constants import ( FAMILY_ROLE, @@ -22,7 +21,7 @@ class CreatorsModel(QtGui.QStandardItemModel): self._creators_by_id = {} items = [] - creators = api.discover(LegacyCreator) + creators = discover_legacy_creator_plugins() for creator in creators: item_id = str(uuid.uuid4()) self._creators_by_id[item_id] = creator diff --git a/openpype/tools/libraryloader/app.py b/openpype/tools/libraryloader/app.py index b73b415128..328e16205c 100644 --- a/openpype/tools/libraryloader/app.py +++ b/openpype/tools/libraryloader/app.py @@ -16,8 +16,6 @@ from openpype.tools.utils.assets_widget import MultiSelectAssetsWidget from openpype.modules import ModulesManager -from . import lib - module = sys.modules[__name__] module.window = None @@ -260,14 +258,6 @@ class LibraryLoaderWindow(QtWidgets.QDialog): self.dbcon.Session["AVALON_PROJECT"] = project_name - _config = lib.find_config() - if hasattr(_config, "install"): - _config.install() - else: - print( - "Config `%s` has no function `install`" % _config.__name__ - ) - self._subsets_widget.on_project_change(project_name) if self._repres_widget: self._repres_widget.on_project_change(project_name) diff --git a/openpype/tools/libraryloader/lib.py b/openpype/tools/libraryloader/lib.py deleted file mode 100644 index 182b48893a..0000000000 --- a/openpype/tools/libraryloader/lib.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -import importlib -import logging - -log = logging.getLogger(__name__) - - -# `find_config` from `pipeline` -def find_config(): - log.info("Finding configuration for project..") - - config = os.environ["AVALON_CONFIG"] - - if not config: - raise EnvironmentError( - "No configuration found in " - "the project nor environment" - ) - - log.info("Found %s, loading.." 
% config) - return importlib.import_module(config) diff --git a/openpype/tools/loader/app.py b/openpype/tools/loader/app.py index 923a1fabdb..fad284d82b 100644 --- a/openpype/tools/loader/app.py +++ b/openpype/tools/loader/app.py @@ -5,6 +5,7 @@ from avalon import api, io from openpype import style from openpype.lib import register_event_callback +from openpype.pipeline import install_openpype_plugins from openpype.tools.utils import ( lib, PlaceholderLineEdit @@ -608,14 +609,6 @@ def cli(args): # Store settings api.Session["AVALON_PROJECT"] = project - from avalon import pipeline - - # Find the set config - _config = pipeline.find_config() - if hasattr(_config, "install"): - _config.install() - else: - print("Config `%s` has no function `install`" % - _config.__name__) + install_openpype_plugins(project) show() diff --git a/openpype/tools/mayalookassigner/commands.py b/openpype/tools/mayalookassigner/commands.py index 78fd51c7a3..8fd592d347 100644 --- a/openpype/tools/mayalookassigner/commands.py +++ b/openpype/tools/mayalookassigner/commands.py @@ -5,9 +5,12 @@ import os from bson.objectid import ObjectId import maya.cmds as cmds -from avalon import io, api +from avalon import io -from openpype.pipeline import remove_container +from openpype.pipeline import ( + remove_container, + registered_host, +) from openpype.hosts.maya.api import lib from .vray_proxies import get_alembic_ids_cache @@ -79,7 +82,7 @@ def get_all_asset_nodes(): list: list of dictionaries """ - host = api.registered_host() + host = registered_host() nodes = [] for container in host.ls(): @@ -192,7 +195,7 @@ def remove_unused_looks(): """ - host = api.registered_host() + host = registered_host() unused = [] for container in host.ls(): diff --git a/openpype/tools/mayalookassigner/vray_proxies.py b/openpype/tools/mayalookassigner/vray_proxies.py index 25621fc652..c97664f3cb 100644 --- a/openpype/tools/mayalookassigner/vray_proxies.py +++ b/openpype/tools/mayalookassigner/vray_proxies.py @@ -11,13 +11,14 @@ from bson.objectid import ObjectId import alembic.Abc from maya import cmds -from avalon import io, api +from avalon import io from openpype.pipeline import ( load_container, loaders_from_representation, discover_loader_plugins, get_representation_path, + registered_host, ) from openpype.hosts.maya.api import lib @@ -188,7 +189,7 @@ def load_look(version_id): "name": "ma"}) # See if representation is already loaded, if so reuse it. - host = api.registered_host() + host = registered_host() representation_id = str(look_representation['_id']) for container in host.ls(): if (container['loader'] == "LookLoader" and diff --git a/openpype/tools/project_manager/project_manager/model.py b/openpype/tools/project_manager/project_manager/model.py index 1c3ec089f6..871704e13c 100644 --- a/openpype/tools/project_manager/project_manager/model.py +++ b/openpype/tools/project_manager/project_manager/model.py @@ -7,6 +7,11 @@ from pymongo import UpdateOne, DeleteOne from Qt import QtCore, QtGui +from openpype.lib import ( + CURRENT_DOC_SCHEMAS, + PypeLogger, +) + from .constants import ( IDENTIFIER_ROLE, ITEM_TYPE_ROLE, @@ -18,8 +23,6 @@ from .constants import ( ) from .style import ResourceCache -from openpype.lib import CURRENT_DOC_SCHEMAS - class ProjectModel(QtGui.QStandardItemModel): """Load possible projects to modify from MongoDB. 
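The `HierarchyModel` changes in the next hunks wrap a pymongo `bulk_write` with created/updated/removed counters so the save can be logged, or skipped entirely when nothing changed. A condensed sketch of that pattern, with a hypothetical connection and sample operations:

```python
from pymongo import MongoClient, UpdateOne, DeleteOne

# Hypothetical connection and documents, for illustration only.
project_col = MongoClient("mongodb://localhost:27017")["avalon"]["demo"]

new_docs = [{"type": "asset", "name": "sh010"}]
bulk_writes = [
    UpdateOne({"_id": "asset-a"}, {"$set": {"data.fps": 25}}),
    DeleteOne({"_id": "asset-b"}),
]

created_count = 0
if new_docs:
    result = project_col.insert_many(new_docs)
    created_count = len(result.inserted_ids)

if created_count == 0 and not bulk_writes:
    print("Nothing has changed")
else:
    if bulk_writes:
        project_col.bulk_write(bulk_writes)
    print("Save finished. Created {} | Queued ops {}".format(
        created_count, len(bulk_writes)
    ))
```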
@@ -185,6 +188,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): for key in self.multiselection_columns } + self._log = None # TODO Reset them on project change self._current_project = None self._root_item = None @@ -194,6 +198,12 @@ class HierarchyModel(QtCore.QAbstractItemModel): self._reset_root_item() + @property + def log(self): + if self._log is None: + self._log = PypeLogger.get_logger("ProjectManagerModel") + return self._log + @property def items_by_id(self): return self._items_by_id @@ -1367,6 +1377,9 @@ class HierarchyModel(QtCore.QAbstractItemModel): to_process = collections.deque() to_process.append(project_item) + created_count = 0 + updated_count = 0 + removed_count = 0 bulk_writes = [] while to_process: parent = to_process.popleft() @@ -1381,6 +1394,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): insert_list.append(item) elif item.data(REMOVED_ROLE): + removed_count += 1 if item.data(HIERARCHY_CHANGE_ABLE_ROLE): bulk_writes.append(DeleteOne( {"_id": item.asset_id} @@ -1394,6 +1408,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): else: update_data = item.update_data() if update_data: + updated_count += 1 bulk_writes.append(UpdateOne( {"_id": item.asset_id}, update_data @@ -1406,11 +1421,21 @@ class HierarchyModel(QtCore.QAbstractItemModel): result = project_col.insert_many(new_docs) for idx, mongo_id in enumerate(result.inserted_ids): + created_count += 1 insert_list[idx].mongo_id = mongo_id + if sum([created_count, updated_count, removed_count]) == 0: + self.log.info("Nothing has changed") + return + if bulk_writes: project_col.bulk_write(bulk_writes) + self.log.info(( + "Save finished." + " Created {} | Updated {} | Removed {} asset documents" + ).format(created_count, updated_count, removed_count)) + self.refresh_project() def copy_mime_data(self, indexes): @@ -1819,12 +1844,16 @@ class AssetItem(BaseItem): } query_projection = { "_id": 1, - "data.tasks": 1, - "data.visualParent": 1, - "schema": 1, - "name": 1, + "schema": 1, "type": 1, + "parent": 1, + + "data.visualParent": 1, + "data.parents": 1, + + "data.tasks": 1, + "data.frameStart": 1, "data.frameEnd": 1, "data.fps": 1, @@ -1835,7 +1864,7 @@ class AssetItem(BaseItem): "data.clipIn": 1, "data.clipOut": 1, "data.pixelAspect": 1, - "data.tools_env": 1 + "data.tools_env": 1, } def __init__(self, asset_doc): diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 6707feac9c..2973d6a5bb 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -11,10 +11,12 @@ try: except Exception: from openpype.lib.python_2_comp import WeakMethod -import avalon.api import pyblish.api -from openpype.pipeline import PublishValidationError +from openpype.pipeline import ( + PublishValidationError, + registered_host, +) from openpype.pipeline.create import CreateContext from Qt import QtCore @@ -353,7 +355,7 @@ class PublisherController: """ def __init__(self, dbcon=None, headless=False): self.log = logging.getLogger("PublisherController") - self.host = avalon.api.registered_host() + self.host = registered_host() self.headless = headless self.create_context = CreateContext( diff --git a/openpype/tools/publisher/widgets/create_dialog.py b/openpype/tools/publisher/widgets/create_dialog.py index 27ce97955a..7d98609c2c 100644 --- a/openpype/tools/publisher/widgets/create_dialog.py +++ b/openpype/tools/publisher/widgets/create_dialog.py @@ -271,7 +271,7 @@ class CreateDialog(QtWidgets.QDialog): create_btn.setEnabled(False) form_layout = 
QtWidgets.QFormLayout() - form_layout.addRow("Name:", variant_layout) + form_layout.addRow("Variant:", variant_layout) form_layout.addRow("Subset:", subset_name_input) mid_widget = QtWidgets.QWidget(self) diff --git a/openpype/tools/sceneinventory/model.py b/openpype/tools/sceneinventory/model.py index 091d6ca925..f8fd8a911a 100644 --- a/openpype/tools/sceneinventory/model.py +++ b/openpype/tools/sceneinventory/model.py @@ -7,8 +7,11 @@ from Qt import QtCore, QtGui import qtawesome from bson.objectid import ObjectId -from avalon import api, io, schema -from openpype.pipeline import HeroVersionType +from avalon import io, schema +from openpype.pipeline import ( + HeroVersionType, + registered_host, +) from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, Item from openpype.modules import ModulesManager @@ -181,7 +184,7 @@ class InventoryModel(TreeModel): def refresh(self, selected=None, items=None): """Refresh the model""" - host = api.registered_host() + host = registered_host() if not items: # for debugging or testing, injecting items from outside items = host.ls() diff --git a/openpype/tools/settings/local_settings/constants.py b/openpype/tools/settings/local_settings/constants.py index 1836c579af..16f87b6f05 100644 --- a/openpype/tools/settings/local_settings/constants.py +++ b/openpype/tools/settings/local_settings/constants.py @@ -9,6 +9,7 @@ LABEL_DISCARD_CHANGES = "Discard changes" # TODO move to settings constants LOCAL_GENERAL_KEY = "general" LOCAL_PROJECTS_KEY = "projects" +LOCAL_ENV_KEY = "environments" LOCAL_APPS_KEY = "applications" # Roots key constant diff --git a/openpype/tools/settings/local_settings/environments_widget.py b/openpype/tools/settings/local_settings/environments_widget.py new file mode 100644 index 0000000000..14ca517851 --- /dev/null +++ b/openpype/tools/settings/local_settings/environments_widget.py @@ -0,0 +1,93 @@ +from Qt import QtWidgets + +from openpype.tools.utils import PlaceholderLineEdit + + +class LocalEnvironmentsWidgets(QtWidgets.QWidget): + def __init__(self, system_settings_entity, parent): + super(LocalEnvironmentsWidgets, self).__init__(parent) + + self._widgets_by_env_key = {} + self.system_settings_entity = system_settings_entity + + content_widget = QtWidgets.QWidget(self) + content_layout = QtWidgets.QGridLayout(content_widget) + content_layout.setContentsMargins(0, 0, 0, 0) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + + self._layout = layout + self._content_layout = content_layout + self._content_widget = content_widget + + def _clear_layout(self, layout): + while layout.count() > 0: + item = layout.itemAt(0) + widget = item.widget() + layout.removeItem(item) + if widget is not None: + widget.setVisible(False) + widget.deleteLater() + + def _reset_env_widgets(self): + self._clear_layout(self._content_layout) + self._clear_layout(self._layout) + + content_widget = QtWidgets.QWidget(self) + content_layout = QtWidgets.QGridLayout(content_widget) + content_layout.setContentsMargins(0, 0, 0, 0) + white_list_entity = ( + self.system_settings_entity["general"]["local_env_white_list"] + ) + row = -1 + for row, item in enumerate(white_list_entity): + key = item.value + label_widget = QtWidgets.QLabel(key, self) + input_widget = PlaceholderLineEdit(self) + input_widget.setPlaceholderText("< Keep studio value >") + + content_layout.addWidget(label_widget, row, 0) + content_layout.addWidget(input_widget, row, 1) + + self._widgets_by_env_key[key] = 
input_widget + + if row < 0: + label_widget = QtWidgets.QLabel( + ( + "Your studio does not allow to change" + " Environment variables locally." + ), + self + ) + content_layout.addWidget(label_widget, 0, 0) + content_layout.setColumnStretch(0, 1) + + else: + content_layout.setColumnStretch(0, 0) + content_layout.setColumnStretch(1, 1) + + self._layout.addWidget(content_widget, 1) + + self._content_layout = content_layout + self._content_widget = content_widget + + def update_local_settings(self, value): + if not value: + value = {} + + self._reset_env_widgets() + + for env_key, widget in self._widgets_by_env_key.items(): + env_value = value.get(env_key) or "" + widget.setText(env_value) + + def settings_value(self): + output = {} + for env_key, widget in self._widgets_by_env_key.items(): + value = widget.text() + if value: + output[env_key] = value + if not output: + return None + return output diff --git a/openpype/tools/settings/local_settings/window.py b/openpype/tools/settings/local_settings/window.py index fb47e69a17..4db0e01476 100644 --- a/openpype/tools/settings/local_settings/window.py +++ b/openpype/tools/settings/local_settings/window.py @@ -25,11 +25,13 @@ from .experimental_widget import ( LOCAL_EXPERIMENTAL_KEY ) from .apps_widget import LocalApplicationsWidgets +from .environments_widget import LocalEnvironmentsWidgets from .projects_widget import ProjectSettingsWidget from .constants import ( LOCAL_GENERAL_KEY, LOCAL_PROJECTS_KEY, + LOCAL_ENV_KEY, LOCAL_APPS_KEY ) @@ -49,18 +51,20 @@ class LocalSettingsWidget(QtWidgets.QWidget): self.pype_mongo_widget = None self.general_widget = None self.experimental_widget = None + self.envs_widget = None self.apps_widget = None self.projects_widget = None - self._create_pype_mongo_ui() + self._create_mongo_url_ui() self._create_general_ui() self._create_experimental_ui() + self._create_environments_ui() self._create_app_ui() self._create_project_ui() self.main_layout.addStretch(1) - def _create_pype_mongo_ui(self): + def _create_mongo_url_ui(self): pype_mongo_expand_widget = ExpandingWidget("OpenPype Mongo URL", self) pype_mongo_content = QtWidgets.QWidget(self) pype_mongo_layout = QtWidgets.QVBoxLayout(pype_mongo_content) @@ -110,6 +114,22 @@ class LocalSettingsWidget(QtWidgets.QWidget): self.experimental_widget = experimental_widget + def _create_environments_ui(self): + envs_expand_widget = ExpandingWidget("Environments", self) + envs_content = QtWidgets.QWidget(self) + envs_layout = QtWidgets.QVBoxLayout(envs_content) + envs_layout.setContentsMargins(CHILD_OFFSET, 5, 0, 0) + envs_expand_widget.set_content_widget(envs_content) + + envs_widget = LocalEnvironmentsWidgets( + self.system_settings, envs_content + ) + envs_layout.addWidget(envs_widget) + + self.main_layout.addWidget(envs_expand_widget) + + self.envs_widget = envs_widget + def _create_app_ui(self): # Applications app_expand_widget = ExpandingWidget("Applications", self) @@ -154,6 +174,9 @@ class LocalSettingsWidget(QtWidgets.QWidget): self.general_widget.update_local_settings( value.get(LOCAL_GENERAL_KEY) ) + self.envs_widget.update_local_settings( + value.get(LOCAL_ENV_KEY) + ) self.app_widget.update_local_settings( value.get(LOCAL_APPS_KEY) ) @@ -170,6 +193,10 @@ class LocalSettingsWidget(QtWidgets.QWidget): if general_value: output[LOCAL_GENERAL_KEY] = general_value + envs_value = self.envs_widget.settings_value() + if envs_value: + output[LOCAL_ENV_KEY] = envs_value + app_value = self.app_widget.settings_value() if app_value: output[LOCAL_APPS_KEY] = app_value diff 
--git a/openpype/tools/settings/settings/base.py b/openpype/tools/settings/settings/base.py index bd48b3a966..44ec09b2ca 100644 --- a/openpype/tools/settings/settings/base.py +++ b/openpype/tools/settings/settings/base.py @@ -567,7 +567,9 @@ class GUIWidget(BaseWidget): def _create_label_ui(self): label = self.entity["label"] + word_wrap = self.entity.schema_data.get("word_wrap", False) label_widget = QtWidgets.QLabel(label, self) + label_widget.setWordWrap(word_wrap) label_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) label_widget.setObjectName("SettingsLabel") label_widget.linkActivated.connect(self._on_link_activate) diff --git a/openpype/tools/settings/settings/categories.py b/openpype/tools/settings/settings/categories.py index a5b5cd40f0..c8ade5fcdb 100644 --- a/openpype/tools/settings/settings/categories.py +++ b/openpype/tools/settings/settings/categories.py @@ -216,7 +216,7 @@ class SettingsCategoryWidget(QtWidgets.QWidget): def create_ui(self): self.modify_defaults_checkbox = None - conf_wrapper_widget = QtWidgets.QWidget(self) + conf_wrapper_widget = QtWidgets.QSplitter(self) configurations_widget = QtWidgets.QWidget(conf_wrapper_widget) # Breadcrumbs/Path widget @@ -294,10 +294,7 @@ class SettingsCategoryWidget(QtWidgets.QWidget): configurations_layout.addWidget(scroll_widget, 1) - conf_wrapper_layout = QtWidgets.QHBoxLayout(conf_wrapper_widget) - conf_wrapper_layout.setContentsMargins(0, 0, 0, 0) - conf_wrapper_layout.setSpacing(0) - conf_wrapper_layout.addWidget(configurations_widget, 1) + conf_wrapper_widget.addWidget(configurations_widget) main_layout = QtWidgets.QVBoxLayout(self) main_layout.setContentsMargins(0, 0, 0, 0) @@ -327,7 +324,7 @@ class SettingsCategoryWidget(QtWidgets.QWidget): self.breadcrumbs_model = None self.refresh_btn = refresh_btn - self.conf_wrapper_layout = conf_wrapper_layout + self.conf_wrapper_widget = conf_wrapper_widget self.main_layout = main_layout self.ui_tweaks() @@ -818,7 +815,9 @@ class ProjectWidget(SettingsCategoryWidget): project_list_widget = ProjectListWidget(self) - self.conf_wrapper_layout.insertWidget(0, project_list_widget, 0) + self.conf_wrapper_widget.insertWidget(0, project_list_widget) + self.conf_wrapper_widget.setStretchFactor(0, 0) + self.conf_wrapper_widget.setStretchFactor(1, 1) project_list_widget.project_changed.connect(self._on_project_change) project_list_widget.version_change_requested.connect( diff --git a/openpype/tools/settings/settings/widgets.py b/openpype/tools/settings/settings/widgets.py index 577c2630ab..6db001f2f6 100644 --- a/openpype/tools/settings/settings/widgets.py +++ b/openpype/tools/settings/settings/widgets.py @@ -97,6 +97,9 @@ class CompleterView(QtWidgets.QListView): QtCore.Qt.FramelessWindowHint | QtCore.Qt.Tool ) + + # Open the widget unactivated + self.setAttribute(QtCore.Qt.WA_ShowWithoutActivating) delegate = QtWidgets.QStyledItemDelegate() self.setItemDelegate(delegate) @@ -225,10 +228,18 @@ class SettingsLineEdit(PlaceholderLineEdit): def __init__(self, *args, **kwargs): super(SettingsLineEdit, self).__init__(*args, **kwargs) - self._completer = None + # Timer which will get started on focus in and stopped on focus out + # - callback checks if line edit or completer have focus + # and hide completer if not + focus_timer = QtCore.QTimer() + focus_timer.setInterval(50) + focus_timer.timeout.connect(self._on_focus_timer) self.textChanged.connect(self._on_text_change) + self._completer = None + self._focus_timer = focus_timer + def _on_text_change(self, text): if 
self._completer is not None: self._completer.set_text_filter(text) @@ -240,19 +251,19 @@ class SettingsLineEdit(PlaceholderLineEdit): new_point = self.mapToGlobal(point) self._completer.move(new_point) + def _on_focus_timer(self): + if not self.hasFocus() and not self._completer.hasFocus(): + self._completer.hide() + self._focus_timer.stop() + def focusInEvent(self, event): super(SettingsLineEdit, self).focusInEvent(event) self.focused_in.emit() - if self._completer is None: - return - self._completer.show() - self._update_completer() - - def focusOutEvent(self, event): - super(SettingsLineEdit, self).focusOutEvent(event) if self._completer is not None: - self._completer.hide() + self._focus_timer.start() + self._completer.show() + self._update_completer() def paintEvent(self, event): super(SettingsLineEdit, self).paintEvent(event) diff --git a/openpype/tools/standalonepublish/publish.py b/openpype/tools/standalonepublish/publish.py index 582e7eccf8..e1e9edebb9 100644 --- a/openpype/tools/standalonepublish/publish.py +++ b/openpype/tools/standalonepublish/publish.py @@ -1,14 +1,14 @@ import os import sys -import openpype import pyblish.api +from openpype.pipeline import install_openpype_plugins from openpype.tools.utils.host_tools import show_publish def main(env): # Registers pype's Global pyblish plugins - openpype.install() + install_openpype_plugins() # Register additional paths addition_paths_str = env.get("PUBLISH_PATHS") or "" diff --git a/openpype/tools/standalonepublish/widgets/widget_drop_frame.py b/openpype/tools/standalonepublish/widgets/widget_drop_frame.py index c1c59d65b6..e6c7328e88 100644 --- a/openpype/tools/standalonepublish/widgets/widget_drop_frame.py +++ b/openpype/tools/standalonepublish/widgets/widget_drop_frame.py @@ -37,6 +37,10 @@ class DropDataFrame(QtWidgets.QFrame): "video_file": video_extensions } + sequence_types = [ + ".bgeo", ".vdb" + ] + def __init__(self, parent): super().__init__() self.parent_widget = parent @@ -176,7 +180,7 @@ class DropDataFrame(QtWidgets.QFrame): non_collectionable_paths = [] for path in in_paths: ext = os.path.splitext(path)[1] - if ext in self.image_extensions: + if ext in self.image_extensions or ext in self.sequence_types: collectionable_paths.append(path) else: non_collectionable_paths.append(path) @@ -289,7 +293,7 @@ class DropDataFrame(QtWidgets.QFrame): def get_file_data(self, data): filepath = data['files'][0] ext = data['ext'].lower() - output = {} + output = {"fps": None} file_info = None if 'file_info' in data: diff --git a/openpype/tools/standalonepublish/widgets/widget_family_desc.py b/openpype/tools/standalonepublish/widgets/widget_family_desc.py index 79681615b9..2095b332bd 100644 --- a/openpype/tools/standalonepublish/widgets/widget_family_desc.py +++ b/openpype/tools/standalonepublish/widgets/widget_family_desc.py @@ -52,6 +52,7 @@ class FamilyDescriptionWidget(QtWidgets.QWidget): family.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft) help = QtWidgets.QLabel("help") + help.setWordWrap(True) help.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft) label_layout.addWidget(family) diff --git a/openpype/tools/stdout_broker/window.py b/openpype/tools/stdout_broker/window.py index a2190e0491..f5720ca05b 100644 --- a/openpype/tools/stdout_broker/window.py +++ b/openpype/tools/stdout_broker/window.py @@ -1,7 +1,9 @@ -from avalon import style -from Qt import QtWidgets, QtCore -import collections import re +import collections + +from Qt import QtWidgets + +from openpype import style class 
ConsoleDialog(QtWidgets.QDialog): diff --git a/openpype/tools/subsetmanager/model.py b/openpype/tools/subsetmanager/model.py index b76c3c2343..760a167b42 100644 --- a/openpype/tools/subsetmanager/model.py +++ b/openpype/tools/subsetmanager/model.py @@ -2,7 +2,7 @@ import uuid from Qt import QtCore, QtGui -from avalon import api +from openpype.pipeline import registered_host ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 @@ -21,7 +21,7 @@ class InstanceModel(QtGui.QStandardItemModel): self._instances_by_item_id = {} instances = None - host = api.registered_host() + host = registered_host() list_instances = getattr(host, "list_instances", None) if list_instances: instances = list_instances() diff --git a/openpype/tools/subsetmanager/window.py b/openpype/tools/subsetmanager/window.py index a53af52174..6314e67015 100644 --- a/openpype/tools/subsetmanager/window.py +++ b/openpype/tools/subsetmanager/window.py @@ -4,9 +4,8 @@ import sys from Qt import QtWidgets, QtCore import qtawesome -from avalon import api - from openpype import style +from openpype.pipeline import registered_host from openpype.tools.utils import PlaceholderLineEdit from openpype.tools.utils.lib import ( iter_model_rows, @@ -106,7 +105,7 @@ class SubsetManagerWindow(QtWidgets.QDialog): self._details_widget.set_details(container, item_id) def _on_save(self): - host = api.registered_host() + host = registered_host() if not hasattr(host, "save_instances"): print("BUG: Host does not have \"save_instances\" method") return @@ -141,7 +140,7 @@ class SubsetManagerWindow(QtWidgets.QDialog): # Prepare menu menu = QtWidgets.QMenu(self) actions = [] - host = api.registered_host() + host = registered_host() if hasattr(host, "remove_instance"): action = QtWidgets.QAction("Remove instance", menu) action.setData(host.remove_instance) @@ -176,7 +175,7 @@ class SubsetManagerWindow(QtWidgets.QDialog): self._details_widget.set_details(None, None) self._model.refresh() - host = api.registered_host() + host = registered_host() dev_mode = os.environ.get("AVALON_DEVELOP_MODE") or "" editable = False if dev_mode.lower() in ("1", "yes", "true", "on"): diff --git a/openpype/tools/traypublisher/window.py b/openpype/tools/traypublisher/window.py index d0453c4f23..a550c88ead 100644 --- a/openpype/tools/traypublisher/window.py +++ b/openpype/tools/traypublisher/window.py @@ -8,8 +8,8 @@ publishing plugins. from Qt import QtWidgets, QtCore -import avalon.api from avalon.api import AvalonMongoDB +from openpype.pipeline import install_host from openpype.hosts.traypublisher import ( api as traypublisher ) @@ -163,7 +163,7 @@ class TrayPublishWindow(PublisherWindow): def main(): - avalon.api.install(traypublisher) + install_host(traypublisher) app = QtWidgets.QApplication([]) window = TrayPublishWindow() window.show() diff --git a/openpype/tools/utils/host_tools.py b/openpype/tools/utils/host_tools.py index 2d9733ec94..b0c30f6dfb 100644 --- a/openpype/tools/utils/host_tools.py +++ b/openpype/tools/utils/host_tools.py @@ -6,6 +6,7 @@ use singleton approach with global functions (using helper anyway). 
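Several tools in this diff (subset manager above, host tools below) replace `avalon.api.registered_host()` with `openpype.pipeline.registered_host()`, mirroring the `install_host` call in the updated test earlier in the diff. A minimal round-trip sketch, assuming `install_host` registers the passed object as-is:

```python
from openpype.pipeline import install_host, registered_host


class MinimalHost:
    """Bare host interface; real hosts add workfile and context APIs."""

    @staticmethod
    def install():
        # Hosts register their plugins here, as in the updated test.
        pass

    @staticmethod
    def ls():
        # Containers present in the open scene; none in this sketch.
        return []


install_host(MinimalHost)
host = registered_host()
print(host is MinimalHost, list(host.ls()))
```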
import os import avalon.api import pyblish.api +from openpype.pipeline import registered_host from .lib import qt_app_context @@ -47,7 +48,7 @@ class HostToolsHelper: Window, validate_host_requirements ) # Host validation - host = avalon.api.registered_host() + host = registered_host() validate_host_requirements(host) workfiles_window = Window(parent=parent) diff --git a/openpype/tools/utils/lib.py b/openpype/tools/utils/lib.py index 93b156bef8..8e2044482a 100644 --- a/openpype/tools/utils/lib.py +++ b/openpype/tools/utils/lib.py @@ -6,16 +6,19 @@ import collections from Qt import QtWidgets, QtCore, QtGui import qtawesome -import avalon.api - -from openpype.style import get_default_entity_icon_color +from openpype.style import ( + get_default_entity_icon_color, + get_objected_colors, +) +from openpype.resources import get_image_path +from openpype.lib import filter_profiles from openpype.api import ( get_project_settings, Logger ) -from openpype.lib import filter_profiles -from openpype.style import get_objected_colors -from openpype.resources import get_image_path +from openpype.pipeline import registered_host + +log = Logger.get_logger(__name__) def center_window(window): @@ -111,13 +114,23 @@ def get_qta_icon_by_name_and_color(icon_name, icon_color): variants.append("{0}.{1}".format(key, icon_name)) icon = None + used_variant = None for variant in variants: try: icon = qtawesome.icon(variant, color=icon_color) + used_variant = variant break except Exception: pass + if used_variant is None: + log.info("Didn't find icon \"{}\"".format(icon_name)) + + elif used_variant != icon_name: + log.debug("Icon \"{}\" was not found \"{}\" is used instead".format( + icon_name, used_variant + )) + SharedObjects.icons[full_icon_name] = icon return icon @@ -140,8 +153,8 @@ def get_asset_icon_name(asset_doc, has_children=True): return icon_name if has_children: - return "folder" - return "folder-o" + return "fa.folder" + return "fa.folder-o" def get_asset_icon_color(asset_doc): @@ -390,13 +403,14 @@ class FamilyConfigCache: self.family_configs.clear() # Skip if we're not in host context - if not avalon.api.registered_host(): + if not registered_host(): return # Update the icons from the project configuration project_name = os.environ.get("AVALON_PROJECT") asset_name = os.environ.get("AVALON_ASSET") task_name = os.environ.get("AVALON_TASK") + host_name = os.environ.get("AVALON_APP") if not all((project_name, asset_name, task_name)): return @@ -410,15 +424,21 @@ class FamilyConfigCache: ["family_filter_profiles"] ) if profiles: - asset_doc = self.dbcon.find_one( + # Make sure connection is installed + # - accessing attribute which does not have auto-install + self.dbcon.install() + database = getattr(self.dbcon, "database", None) + if database is None: + database = self.dbcon._database + asset_doc = database[project_name].find_one( {"type": "asset", "name": asset_name}, {"data.tasks": True} - ) + ) or {} tasks_info = asset_doc.get("data", {}).get("tasks") or {} task_type = tasks_info.get(task_name, {}).get("type") profiles_filter = { "task_types": task_type, - "hosts": os.environ["AVALON_APP"] + "hosts": host_name } matching_item = filter_profiles(profiles, profiles_filter) diff --git a/openpype/tools/workfiles/app.py b/openpype/tools/workfiles/app.py index f0e7900cf5..38e1911060 100644 --- a/openpype/tools/workfiles/app.py +++ b/openpype/tools/workfiles/app.py @@ -3,6 +3,7 @@ import logging from avalon import api +from openpype.pipeline import registered_host from openpype.tools.utils import qt_app_context 
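The icon cache change above now logs which qtawesome variant actually resolved when the stored name carries no library prefix. The fallback itself can be sketched in isolation; the `("fa", "mdi")` prefix list is an assumption, and a `QApplication` must exist before icons are created:

```python
import qtawesome
from Qt import QtWidgets

app = QtWidgets.QApplication([])

icon_name = "folder"  # un-prefixed name as stored on asset documents
variants = [icon_name]
for prefix in ("fa", "mdi"):  # assumed prefix list
    variants.append("{}.{}".format(prefix, icon_name))

icon = None
for variant in variants:
    try:
        # Un-prefixed names raise, which pushes us to the next variant.
        icon = qtawesome.icon(variant, color="#fff")
        print("Resolved icon variant: {}".format(variant))
        break
    except Exception:
        continue

if icon is None:
    print("Didn't find icon \"{}\"".format(icon_name))
```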
from .window import Window @@ -47,7 +48,7 @@ def show(root=None, debug=False, parent=None, use_context=True, save=True): except (AttributeError, RuntimeError): pass - host = api.registered_host() + host = registered_host() validate_host_requirements(host) if debug: diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py index d2b8a76952..bb2ded3b94 100644 --- a/openpype/tools/workfiles/files_widget.py +++ b/openpype/tools/workfiles/files_widget.py @@ -18,6 +18,7 @@ from openpype.lib.avalon_context import ( update_current_task, compute_session_changes ) +from openpype.pipeline import registered_host from .model import ( WorkAreaFilesModel, PublishFilesModel, @@ -26,7 +27,6 @@ from .model import ( DATE_MODIFIED_ROLE, ) from .save_as_dialog import SaveAsDialog -from .lib import TempPublishFiles log = logging.getLogger(__name__) @@ -45,11 +45,35 @@ class FilesView(QtWidgets.QTreeView): return super(FilesView, self).mouseDoubleClickEvent(event) +class SelectContextOverlay(QtWidgets.QFrame): + def __init__(self, parent): + super(SelectContextOverlay, self).__init__(parent) + + self.setObjectName("WorkfilesPublishedContextSelect") + label_widget = QtWidgets.QLabel( + "Please choose context on the left
<", + self + ) + label_widget.setAlignment(QtCore.Qt.AlignCenter) + + layout = QtWidgets.QHBoxLayout(self) + layout.addWidget(label_widget, 1, QtCore.Qt.AlignCenter) + + label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + parent.installEventFilter(self) + + def eventFilter(self, obj, event): + if event.type() == QtCore.QEvent.Resize: + self.resize(obj.size()) + + return super(SelectContextOverlay, self).eventFilter(obj, event) + + class FilesWidget(QtWidgets.QWidget): """A widget displaying files that allows to save and open files.""" file_selected = QtCore.Signal(str) file_opened = QtCore.Signal() - publish_file_viewed = QtCore.Signal() workfile_created = QtCore.Signal(str) published_visible_changed = QtCore.Signal(bool) @@ -70,10 +94,7 @@ class FilesWidget(QtWidgets.QWidget): # This is not root but workfile directory self._workfiles_root = None self._workdir_path = None - self.host = api.registered_host() - temp_publish_files = TempPublishFiles() - temp_publish_files.cleanup() - self._temp_publish_files = temp_publish_files + self.host = registered_host() # Whether to automatically select the latest modified # file on a refresh of the files model. @@ -93,14 +114,14 @@ class FilesWidget(QtWidgets.QWidget): filter_layout = QtWidgets.QHBoxLayout(filter_widget) filter_layout.setContentsMargins(0, 0, 0, 0) - filter_layout.addWidget(published_checkbox, 0) filter_layout.addWidget(filter_input, 1) + filter_layout.addWidget(published_checkbox, 0) # Create the Files models extensions = set(self.host.file_extensions()) views_widget = QtWidgets.QWidget(self) - # Workarea view + # --- Workarea view --- workarea_files_model = WorkAreaFilesModel(extensions) # Create proxy model for files to be able sort and filter @@ -118,13 +139,14 @@ class FilesWidget(QtWidgets.QWidget): # Date modified delegate workarea_time_delegate = PrettyTimeDelegate() workarea_files_view.setItemDelegateForColumn(1, workarea_time_delegate) - workarea_files_view.setIndentation(3) # smaller indentation + # smaller indentation + workarea_files_view.setIndentation(3) # Default to a wider first filename column it is what we mostly care # about and the date modified is relatively small anyway. workarea_files_view.setColumnWidth(0, 330) - # Publish files view + # --- Publish files view --- publish_files_model = PublishFilesModel(extensions, io, self.anatomy) publish_proxy_model = QtCore.QSortFilterProxyModel() @@ -141,12 +163,16 @@ class FilesWidget(QtWidgets.QWidget): # Date modified delegate publish_time_delegate = PrettyTimeDelegate() publish_files_view.setItemDelegateForColumn(1, publish_time_delegate) - publish_files_view.setIndentation(3) # smaller indentation + # smaller indentation + publish_files_view.setIndentation(3) # Default to a wider first filename column it is what we mostly care # about and the date modified is relatively small anyway. 
publish_files_view.setColumnWidth(0, 330) + publish_context_overlay = SelectContextOverlay(views_widget) + publish_context_overlay.setVisible(False) + views_layout = QtWidgets.QHBoxLayout(views_widget) views_layout.setContentsMargins(0, 0, 0, 0) views_layout.addWidget(workarea_files_view, 1) @@ -155,18 +181,43 @@ class FilesWidget(QtWidgets.QWidget): # Home Page # Build buttons widget for files widget btns_widget = QtWidgets.QWidget(self) - btn_save = QtWidgets.QPushButton("Save As", btns_widget) - btn_browse = QtWidgets.QPushButton("Browse", btns_widget) - btn_open = QtWidgets.QPushButton("Open", btns_widget) - btn_view_published = QtWidgets.QPushButton("View", btns_widget) + workarea_btns_widget = QtWidgets.QWidget(btns_widget) + btn_save = QtWidgets.QPushButton("Save As", workarea_btns_widget) + btn_browse = QtWidgets.QPushButton("Browse", workarea_btns_widget) + btn_open = QtWidgets.QPushButton("Open", workarea_btns_widget) + + workarea_btns_layout = QtWidgets.QHBoxLayout(workarea_btns_widget) + workarea_btns_layout.setContentsMargins(0, 0, 0, 0) + workarea_btns_layout.addWidget(btn_open, 1) + workarea_btns_layout.addWidget(btn_browse, 1) + workarea_btns_layout.addWidget(btn_save, 1) + + publish_btns_widget = QtWidgets.QWidget(btns_widget) + btn_save_as_published = QtWidgets.QPushButton( + "Copy && Open", publish_btns_widget + ) + btn_change_context = QtWidgets.QPushButton( + "Choose different context", publish_btns_widget + ) + btn_select_context_published = QtWidgets.QPushButton( + "Copy && Open", publish_btns_widget + ) + btn_cancel_published = QtWidgets.QPushButton( + "Cancel", publish_btns_widget + ) + + publish_btns_layout = QtWidgets.QHBoxLayout(publish_btns_widget) + publish_btns_layout.setContentsMargins(0, 0, 0, 0) + publish_btns_layout.addWidget(btn_save_as_published, 1) + publish_btns_layout.addWidget(btn_change_context, 1) + publish_btns_layout.addWidget(btn_select_context_published, 1) + publish_btns_layout.addWidget(btn_cancel_published, 1) btns_layout = QtWidgets.QHBoxLayout(btns_widget) btns_layout.setContentsMargins(0, 0, 0, 0) - btns_layout.addWidget(btn_open, 1) - btns_layout.addWidget(btn_browse, 1) - btns_layout.addWidget(btn_save, 1) - btns_layout.addWidget(btn_view_published, 1) + btns_layout.addWidget(workarea_btns_widget, 1) + btns_layout.addWidget(publish_btns_widget, 1) # Build files widgets for home page main_layout = QtWidgets.QVBoxLayout(self) @@ -188,14 +239,22 @@ class FilesWidget(QtWidgets.QWidget): workarea_files_view.selectionModel().selectionChanged.connect( self.on_file_select ) - publish_files_view.doubleClickedLeft.connect( - self._on_view_published_pressed - ) btn_open.pressed.connect(self._on_workarea_open_pressed) btn_browse.pressed.connect(self.on_browse_pressed) - btn_save.pressed.connect(self.on_save_as_pressed) - btn_view_published.pressed.connect(self._on_view_published_pressed) + btn_save.pressed.connect(self._on_save_as_pressed) + btn_save_as_published.pressed.connect( + self._on_published_save_as_pressed + ) + btn_change_context.pressed.connect( + self._on_publish_change_context_pressed + ) + btn_select_context_published.pressed.connect( + self._on_publish_select_context_pressed + ) + btn_cancel_published.pressed.connect( + self._on_publish_cancel_pressed + ) # Store attributes self._published_checkbox = published_checkbox @@ -211,18 +270,29 @@ class FilesWidget(QtWidgets.QWidget): self._publish_files_model = publish_files_model self._publish_proxy_model = publish_proxy_model - self._btns_widget = btns_widget + 
self._publish_context_overlay = publish_context_overlay + + self._workarea_btns_widget = workarea_btns_widget + self._publish_btns_widget = publish_btns_widget self._btn_open = btn_open self._btn_browse = btn_browse self._btn_save = btn_save - self._btn_view_published = btn_view_published + + self._btn_save_as_published = btn_save_as_published + self._btn_change_context = btn_change_context + self._btn_select_context_published = btn_select_context_published + self._btn_cancel_published = btn_cancel_published # Create a proxy widget for files widget self.setFocusProxy(btn_open) # Hide publish files widgets publish_files_view.setVisible(False) - btn_view_published.setVisible(False) + publish_btns_widget.setVisible(False) + btn_select_context_published.setVisible(False) + btn_cancel_published.setVisible(False) + + self._publish_context_select_mode = False @property def published_enabled(self): @@ -232,12 +302,10 @@ class FilesWidget(QtWidgets.QWidget): published_enabled = self.published_enabled self._workarea_files_view.setVisible(not published_enabled) - self._btn_open.setVisible(not published_enabled) - self._btn_browse.setVisible(not published_enabled) - self._btn_save.setVisible(not published_enabled) + self._workarea_btns_widget.setVisible(not published_enabled) self._publish_files_view.setVisible(published_enabled) - self._btn_view_published.setVisible(published_enabled) + self._publish_btns_widget.setVisible(published_enabled) self._update_filtering() self._update_asset_task() @@ -258,6 +326,9 @@ class FilesWidget(QtWidgets.QWidget): def set_save_enabled(self, enabled): self._btn_save.setEnabled(enabled) + if not enabled and self._published_checkbox.isChecked(): + self._published_checkbox.setChecked(False) + self._published_checkbox.setVisible(enabled) def set_asset_task(self, asset_id, task_name, task_type): if asset_id != self._asset_id: @@ -268,12 +339,14 @@ class FilesWidget(QtWidgets.QWidget): self._update_asset_task() def _update_asset_task(self): - if self.published_enabled: + if self.published_enabled and not self._publish_context_select_mode: self._publish_files_model.set_context( self._asset_id, self._task_name ) has_valid_items = self._publish_files_model.has_valid_items() - self._btn_view_published.setEnabled(has_valid_items) + self._btn_save_as_published.setEnabled(has_valid_items) + self._btn_change_context.setEnabled(has_valid_items) + else: # Define a custom session so we can query the work root # for a "Work area" that is not our current Session. 
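The `FilesWidget` hunks around this point replace the old "View" action (which copied a published workfile to a temp folder via the now-deleted `TempPublishFiles`) with a "Copy && Open" flow: the published file is copied straight into the work area and opened there. A minimal sketch of that core flow, assuming only that `host` exposes `open_file()` as in the surrounding code; the helper name itself is hypothetical:

```python
import os
import shutil


def copy_published_to_workarea(host, src_path, workfiles_root, work_filename):
    """Sketch of the "Copy && Open" flow for a published workfile.

    Instead of saving the current scene with host.save_file(), the
    selected published file is copied to the resolved workfile path and
    then opened through the host integration.
    """
    # Initialize the work directory if it does not exist yet
    if not os.path.exists(workfiles_root):
        os.makedirs(workfiles_root)

    filepath = os.path.join(os.path.normpath(workfiles_root), work_filename)
    # Copy the published source file to the new workfile path...
    shutil.copy(src_path, filepath)
    # ...and open the copy in the host rather than saving over it
    host.open_file(filepath)
    return filepath
```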
@@ -291,6 +364,13 @@ class FilesWidget(QtWidgets.QWidget): has_valid_items = self._workarea_files_model.has_valid_items() self._btn_browse.setEnabled(has_valid_items) self._btn_open.setEnabled(has_valid_items) + + if self._publish_context_select_mode: + self._btn_select_context_published.setEnabled( + bool(self._asset_id) and bool(self._task_name) + ) + return + # Manually trigger file selection if not has_valid_items: self.on_file_select() @@ -400,11 +480,18 @@ class FilesWidget(QtWidgets.QWidget): """ session = self._get_session() + if self.published_enabled: + filepath = self._get_selected_filepath() + extensions = [os.path.splitext(filepath)[1]] + else: + extensions = self.host.file_extensions() + window = SaveAsDialog( parent=self, root=self._workfiles_root, anatomy=self.anatomy, template_key=self.template_key, + extensions=extensions, session=session ) window.exec_() @@ -462,10 +549,15 @@ class FilesWidget(QtWidgets.QWidget): if work_file: self.open_file(work_file) - def on_save_as_pressed(self): + def _on_save_as_pressed(self): + self._save_as_with_dialog() + + def _save_as_with_dialog(self): work_filename = self.get_filename() if not work_filename: - return + return None + + src_path = self._get_selected_filepath() # Trigger before save event emit_event( @@ -486,13 +578,20 @@ class FilesWidget(QtWidgets.QWidget): log.debug("Initializing Work Directory: %s", self._workfiles_root) os.makedirs(self._workfiles_root) - # Update session if context has changed - self._enter_session() # Prepare full path to workfile and save it filepath = os.path.join( os.path.normpath(self._workfiles_root), work_filename ) - self.host.save_file(filepath) + + # Update session if context has changed + self._enter_session() + + if not self.published_enabled: + self.host.save_file(filepath) + else: + shutil.copy(src_path, filepath) + self.host.open_file(filepath) + # Create extra folders create_workdir_extra_folders( self._workdir_path, @@ -510,17 +609,55 @@ class FilesWidget(QtWidgets.QWidget): self.workfile_created.emit(filepath) # Refresh files model - self.refresh() + if self.published_enabled: + self._published_checkbox.setChecked(False) + else: + self.refresh() + return filepath - def _on_view_published_pressed(self): - filepath = self._get_selected_filepath() - if not filepath or not os.path.exists(filepath): - return - item = self._temp_publish_files.add_file(filepath) - self.host.open_file(item.filepath) - self.publish_file_viewed.emit() - # Change state back to workarea - self._published_checkbox.setChecked(False) + def _on_published_save_as_pressed(self): + self._save_as_with_dialog() + + def _set_publish_context_select_mode(self, enabled): + self._publish_context_select_mode = enabled + + # Show buttons related to context selection + self._publish_context_overlay.setVisible(enabled) + self._btn_cancel_published.setVisible(enabled) + self._btn_select_context_published.setVisible(enabled) + # Change enabled state based on select context + self._btn_select_context_published.setEnabled( + bool(self._asset_id) and bool(self._task_name) + ) + + self._btn_save_as_published.setVisible(not enabled) + self._btn_change_context.setVisible(not enabled) + + # Change views and disable workarea view if enabled + self._workarea_files_view.setEnabled(not enabled) + if self.published_enabled: + self._workarea_files_view.setVisible(enabled) + self._publish_files_view.setVisible(not enabled) + else: + self._workarea_files_view.setVisible(True) + self._publish_files_view.setVisible(False) + + # Disable filter widgets + 
self._published_checkbox.setEnabled(not enabled) + self._filter_input.setEnabled(not enabled) + + def _on_publish_change_context_pressed(self): + self._set_publish_context_select_mode(True) + + def _on_publish_select_context_pressed(self): + result = self._save_as_with_dialog() + if result is not None: + self._set_publish_context_select_mode(False) + self._update_asset_task() + + def _on_publish_cancel_pressed(self): + self._set_publish_context_select_mode(False) + self._update_asset_task() def on_file_select(self): self.file_selected.emit(self._get_selected_filepath()) diff --git a/openpype/tools/workfiles/lib.py b/openpype/tools/workfiles/lib.py deleted file mode 100644 index 21a7485b7b..0000000000 --- a/openpype/tools/workfiles/lib.py +++ /dev/null @@ -1,272 +0,0 @@ -import os -import shutil -import uuid -import time -import json -import logging -import contextlib - -import appdirs - - -class TempPublishFilesItem(object): - """Object representing copied workfile in app temp folder. - - Args: - item_id (str): Id of item used as subfolder. - data (dict): Metadata about temp files. - directory (str): Path to directory where files are copied to. - """ - - def __init__(self, item_id, data, directory): - self._id = item_id - self._directory = directory - self._filepath = os.path.join(directory, data["filename"]) - - @property - def directory(self): - return self._directory - - @property - def filepath(self): - return self._filepath - - @property - def id(self): - return self._id - - @property - def size(self): - if os.path.exists(self.filepath): - s = os.stat(self.filepath) - return s.st_size - return 0 - - -class TempPublishFiles(object): - """Directory where published workfiles are copied when opened. - - Directory is located in appdirs on the machine. Folder contains file - with metadata about stored files. Each item in metadata has id, filename - and expiration time. When expiration time is higher then current time the - item is removed from metadata and it's files are deleted. Files of items - are stored in subfolder named by item's id. - - Metadata file can be in theory opened and modified by multiple processes, - threads at one time. For those cases is created simple lock file which - is created before modification begins and is removed when modification - ends. Existence of the file means that it should not be modified by - any other process at the same time. - - Metadata example: - ``` - { - "96050b4a-8974-4fca-8179-7c446c478d54": { - "created": 1647880725.555, - "expiration": 1647884325.555, - "filename": "cg_pigeon_workfileModeling_v025.ma" - }, - ... - } - ``` - - ## Why is this needed - Combination of more issues. Temp files are not automatically removed by - OS on windows so using tempfiles in TEMP would lead to kill disk space of - machine. There are also cases when someone wants to open multiple files - in short period of time and want to manually remove those files so keeping - track of temporary copied files in pre-defined structure is needed. 
- """ - minute_in_seconds = 60 - hour_in_seconds = 60 * minute_in_seconds - day_in_seconds = 24 * hour_in_seconds - - def __init__(self): - root_dir = appdirs.user_data_dir( - "published_workfiles_temp", "openpype" - ) - if not os.path.exists(root_dir): - os.makedirs(root_dir) - - metadata_path = os.path.join(root_dir, "metadata.json") - lock_path = os.path.join(root_dir, "lock.json") - - self._root_dir = root_dir - self._metadata_path = metadata_path - self._lock_path = lock_path - self._log = None - - @property - def log(self): - if self._log is None: - self._log = logging.getLogger(self.__class__.__name__) - return self._log - - @property - def life_time(self): - """How long will be new item kept in temp in seconds. - - Returns: - int: Lifetime of temp item. - """ - return int(self.hour_in_seconds) - - @property - def size(self): - """File size of existing items.""" - size = 0 - for item in self.get_items(): - size += item.size - return size - - def add_file(self, src_path): - """Add workfile to temp directory. - - This will create new item and source path is copied to it's directory. - """ - filename = os.path.basename(src_path) - - item_id = str(uuid.uuid4()) - dst_dirpath = os.path.join(self._root_dir, item_id) - if not os.path.exists(dst_dirpath): - os.makedirs(dst_dirpath) - - dst_path = os.path.join(dst_dirpath, filename) - shutil.copy(src_path, dst_path) - - now = time.time() - item_data = { - "filename": filename, - "expiration": now + self.life_time, - "created": now - } - with self._modify_data() as data: - data[item_id] = item_data - - return TempPublishFilesItem(item_id, item_data, dst_dirpath) - - @contextlib.contextmanager - def _modify_data(self): - """Create lock file when data in metadata file are modified.""" - start_time = time.time() - timeout = 3 - while os.path.exists(self._lock_path): - time.sleep(0.01) - if start_time > timeout: - self.log.warning(( - "Waited for {} seconds to free lock file. Overriding lock." - ).format(timeout)) - - with open(self._lock_path, "w") as stream: - json.dump({"pid": os.getpid()}, stream) - - try: - data = self._get_data() - yield data - with open(self._metadata_path, "w") as stream: - json.dump(data, stream) - - finally: - os.remove(self._lock_path) - - def _get_data(self): - output = {} - if not os.path.exists(self._metadata_path): - return output - - try: - with open(self._metadata_path, "r") as stream: - output = json.load(stream) - except Exception: - self.log.warning("Failed to read metadata file.", exc_info=True) - return output - - def cleanup(self, check_expiration=True): - """Cleanup files based on metadata. - - Items that passed expiration are removed when this is called. Or all - files are removed when `check_expiration` is set to False. - - Args: - check_expiration (bool): All items and files are removed when set - to True. 
- """ - data = self._get_data() - now = time.time() - remove_ids = set() - all_ids = set() - for item_id, item_data in data.items(): - all_ids.add(item_id) - if check_expiration and now < item_data["expiration"]: - continue - - remove_ids.add(item_id) - - for item_id in remove_ids: - try: - self.remove_id(item_id) - except Exception: - self.log.warning( - "Failed to remove temp publish item \"{}\"".format( - item_id - ), - exc_info=True - ) - - # Remove unknown folders/files - for filename in os.listdir(self._root_dir): - if filename in all_ids: - continue - - full_path = os.path.join(self._root_dir, filename) - if full_path in (self._metadata_path, self._lock_path): - continue - - try: - shutil.rmtree(full_path) - except Exception: - self.log.warning( - "Couldn't remove arbitrary path \"{}\"".format(full_path), - exc_info=True - ) - - def clear(self): - self.cleanup(False) - - def get_items(self): - """Receive all items from metadata file. - - Returns: - list: Info about each item in metadata. - """ - output = [] - data = self._get_data() - for item_id, item_data in data.items(): - item_path = os.path.join(self._root_dir, item_id) - output.append(TempPublishFilesItem(item_id, item_data, item_path)) - return output - - def remove_id(self, item_id): - """Remove files of item and then remove the item from metadata.""" - filepath = os.path.join(self._root_dir, item_id) - if os.path.exists(filepath): - shutil.rmtree(filepath) - - with self._modify_data() as data: - data.pop(item_id, None) - - -def file_size_to_string(file_size): - size = 0 - size_ending_mapping = { - "KB": 1024 ** 1, - "MB": 1024 ** 2, - "GB": 1024 ** 3 - } - ending = "B" - for _ending, _size in size_ending_mapping.items(): - if file_size < _size: - break - size = file_size / _size - ending = _ending - return "{:.2f} {}".format(size, ending) diff --git a/openpype/tools/workfiles/save_as_dialog.py b/openpype/tools/workfiles/save_as_dialog.py index e616a325cc..0a7c7821ba 100644 --- a/openpype/tools/workfiles/save_as_dialog.py +++ b/openpype/tools/workfiles/save_as_dialog.py @@ -11,6 +11,7 @@ from openpype.lib import ( get_last_workfile_with_version, get_workdir_data, ) +from openpype.pipeline import registered_host from openpype.tools.utils import PlaceholderLineEdit log = logging.getLogger(__name__) @@ -65,7 +66,7 @@ class CommentMatcher(object): return # Create a regex group for extensions - extensions = api.registered_host().file_extensions() + extensions = registered_host().file_extensions() any_extension = "(?:{})".format( "|".join(re.escape(ext[1:]) for ext in extensions) ) @@ -193,14 +194,17 @@ class SaveAsDialog(QtWidgets.QDialog): """ - def __init__(self, parent, root, anatomy, template_key, session=None): + def __init__( + self, parent, root, anatomy, template_key, extensions, session=None + ): super(SaveAsDialog, self).__init__(parent=parent) self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) self.result = None - self.host = api.registered_host() + self.host = registered_host() self.root = root self.work_file = None + self._extensions = extensions if not session: # Fallback to active session @@ -257,7 +261,7 @@ class SaveAsDialog(QtWidgets.QDialog): # Add styled delegate to use stylesheets ext_delegate = QtWidgets.QStyledItemDelegate() ext_combo.setItemDelegate(ext_delegate) - ext_combo.addItems(self.host.file_extensions()) + ext_combo.addItems(self._extensions) # Build inputs inputs_layout = QtWidgets.QFormLayout(inputs_widget) @@ -336,7 +340,7 @@ class SaveAsDialog(QtWidgets.QDialog): def 
get_existing_comments(self): matcher = CommentMatcher(self.anatomy, self.template_key, self.data) - host_extensions = set(self.host.file_extensions()) + host_extensions = set(self._extensions) comments = set() if os.path.isdir(self.root): for fname in os.listdir(self.root): @@ -392,7 +396,7 @@ class SaveAsDialog(QtWidgets.QDialog): return anatomy_filled[self.template_key]["file"] def refresh(self): - extensions = self.host.file_extensions() + extensions = list(self._extensions) extension = self.data["ext"] if extension is None: # Define saving file extension diff --git a/openpype/tools/workfiles/window.py b/openpype/tools/workfiles/window.py index 8654a18036..73e63d30b5 100644 --- a/openpype/tools/workfiles/window.py +++ b/openpype/tools/workfiles/window.py @@ -14,7 +14,22 @@ from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from openpype.tools.utils.tasks_widget import TasksWidget from .files_widget import FilesWidget -from .lib import TempPublishFiles, file_size_to_string + + +def file_size_to_string(file_size): + size = 0 + size_ending_mapping = { + "KB": 1024 ** 1, + "MB": 1024 ** 2, + "GB": 1024 ** 3 + } + ending = "B" + for _ending, _size in size_ending_mapping.items(): + if file_size < _size: + break + size = file_size / _size + ending = _ending + return "{:.2f} {}".format(size, ending) class SidePanelWidget(QtWidgets.QWidget): @@ -44,67 +59,25 @@ class SidePanelWidget(QtWidgets.QWidget): btn_note_save, 0, alignment=QtCore.Qt.AlignRight ) - publish_temp_widget = QtWidgets.QWidget(self) - publish_temp_info_label = QtWidgets.QLabel( - self.published_workfile_message.format( - file_size_to_string(0) - ), - publish_temp_widget - ) - publish_temp_info_label.setWordWrap(True) - - btn_clear_temp = QtWidgets.QPushButton( - "Clear temp", publish_temp_widget - ) - - publish_temp_layout = QtWidgets.QVBoxLayout(publish_temp_widget) - publish_temp_layout.setContentsMargins(0, 0, 0, 0) - publish_temp_layout.addWidget(publish_temp_info_label, 0) - publish_temp_layout.addWidget( - btn_clear_temp, 0, alignment=QtCore.Qt.AlignRight - ) - main_layout = QtWidgets.QVBoxLayout(self) main_layout.setContentsMargins(0, 0, 0, 0) main_layout.addWidget(details_label, 0) main_layout.addWidget(details_input, 1) main_layout.addWidget(artist_note_widget, 1) - main_layout.addWidget(publish_temp_widget, 0) note_input.textChanged.connect(self._on_note_change) btn_note_save.clicked.connect(self._on_save_click) - btn_clear_temp.clicked.connect(self._on_clear_temp_click) self._details_input = details_input self._artist_note_widget = artist_note_widget self._note_input = note_input self._btn_note_save = btn_note_save - self._publish_temp_info_label = publish_temp_info_label - self._publish_temp_widget = publish_temp_widget - self._orig_note = "" self._workfile_doc = None - publish_temp_widget.setVisible(False) - def set_published_visible(self, published_visible): self._artist_note_widget.setVisible(not published_visible) - self._publish_temp_widget.setVisible(published_visible) - if published_visible: - self.refresh_publish_temp_sizes() - - def refresh_publish_temp_sizes(self): - temp_publish_files = TempPublishFiles() - text = self.published_workfile_message.format( - file_size_to_string(temp_publish_files.size) - ) - self._publish_temp_info_label.setText(text) - - def _on_clear_temp_click(self): - temp_publish_files = TempPublishFiles() - temp_publish_files.clear() - self.refresh_publish_temp_sizes() def _on_note_change(self): text = self._note_input.toPlainText() @@ -225,9 +198,6 @@ class 
Window(QtWidgets.QMainWindow): files_widget.file_selected.connect(self.on_file_select) files_widget.workfile_created.connect(self.on_workfile_create) files_widget.file_opened.connect(self._on_file_opened) - files_widget.publish_file_viewed.connect( - self._on_publish_file_viewed - ) files_widget.published_visible_changed.connect( self._on_published_change ) @@ -292,9 +262,6 @@ class Window(QtWidgets.QMainWindow): def _on_file_opened(self): self.close() - def _on_publish_file_viewed(self): - self.side_panel.refresh_publish_temp_sizes() - def _on_published_change(self, visible): self.side_panel.set_published_visible(visible) diff --git a/openpype/version.py b/openpype/version.py index 2390309e76..9e2525e3b8 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.9.2-nightly.1" +__version__ = "3.10.0-nightly.1" diff --git a/openpype/widgets/attribute_defs/files_widget.py b/openpype/widgets/attribute_defs/files_widget.py index 87b98e2378..34f7d159ad 100644 --- a/openpype/widgets/attribute_defs/files_widget.py +++ b/openpype/widgets/attribute_defs/files_widget.py @@ -641,5 +641,6 @@ class SingleFileWidget(QtWidgets.QWidget): filepaths.append(filepath) # TODO filter check if len(filepaths) == 1: - self.set_value(filepaths[0], False) + self._filepath_input.setText(filepaths[0]) + event.accept() diff --git a/poetry.lock b/poetry.lock index ee7b839b8d..7998ede693 100644 --- a/poetry.lock +++ b/poetry.lock @@ -11,7 +11,7 @@ develop = false type = "git" url = "https://github.com/pypeclub/acre.git" reference = "master" -resolved_reference = "55a7c331e6dc5f81639af50ca4a8cc9d73e9273d" +resolved_reference = "126f7a188cfe36718f707f42ebbc597e86aa86c3" [[package]] name = "aiohttp" @@ -680,15 +680,8 @@ category = "main" optional = false python-versions = "*" -[package.dependencies] -attrs = ">=17.4.0" -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} -pyrsistent = ">=0.14.0" -six = ">=1.11.0" - [package.extras] -format = ["idna", "jsonpointer (>1.13)", "rfc3987", "strict-rfc3339", "webcolors"] -format_nongpl = ["idna", "jsonpointer (>1.13)", "webcolors", "rfc3986-validator (>0.1.0)", "rfc3339-validator"] +format = ["rfc3987", "strict-rfc3339", "webcolors"] [[package]] name = "keyring" @@ -784,7 +777,7 @@ pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" [[package]] name = "paramiko" -version = "2.9.2" +version = "2.10.1" description = "SSH2 protocol library" category = "main" optional = false @@ -794,6 +787,7 @@ python-versions = "*" bcrypt = ">=3.1.3" cryptography = ">=2.5" pynacl = ">=1.0.1" +six = "*" [package.extras] all = ["pyasn1 (>=0.1.7)", "pynacl (>=1.0.1)", "bcrypt (>=3.1.3)", "invoke (>=1.3)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] @@ -1087,14 +1081,6 @@ category = "main" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -[[package]] -name = "pyrsistent" -version = "0.18.1" -description = "Persistent/Functional/Immutable data structures" -category = "main" -optional = false -python-versions = ">=3.7" - [[package]] name = "pysftp" version = "0.2.9" @@ -1633,7 +1619,7 @@ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest- [metadata] lock-version = "1.1" python-versions = "3.7.*" -content-hash = "2f78d48a6aad2d8a88b7dd7f31a76d907bec9fb65f0086fba6b6d2e1605f0f88" +content-hash = "b02313c8255a1897b0f0617ad4884a5943696c363512921aab1cb2dd8f4fdbe0" [metadata.files] acre = [] @@ -2171,12 +2157,28 @@ log4mongo = [ {file = 
"log4mongo-1.7.0.tar.gz", hash = "sha256:dc374617206162a0b14167fbb5feac01dbef587539a235dadba6200362984a68"}, ] markupsafe = [ + {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-win32.whl", hash = "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f"}, + {file = 
"MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, @@ -2185,14 +2187,27 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9"}, {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b"}, + {file = 
"MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee"}, {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, @@ -2202,6 +2217,12 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"}, {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"}, {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1"}, {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, @@ -2277,8 +2298,8 @@ packaging = [ {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, ] paramiko = [ - {file = "paramiko-2.9.2-py2.py3-none-any.whl", hash = "sha256:04097dbd96871691cdb34c13db1883066b8a13a0df2afd4cb0a92221f51c2603"}, - {file = "paramiko-2.9.2.tar.gz", hash = 
"sha256:944a9e5dbdd413ab6c7951ea46b0ab40713235a9c4c5ca81cfe45c6f14fa677b"}, + {file = "paramiko-2.10.1-py2.py3-none-any.whl", hash = "sha256:f6cbd3e1204abfdbcd40b3ecbc9d32f04027cd3080fe666245e21e7540ccfc1b"}, + {file = "paramiko-2.10.1.tar.gz", hash = "sha256:443f4da23ec24e9a9c0ea54017829c282abdda1d57110bf229360775ccd27a31"}, ] parso = [ {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, @@ -2598,29 +2619,6 @@ pyparsing = [ {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, ] -pyrsistent = [ - {file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"}, - {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26"}, - {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e"}, - {file = "pyrsistent-0.18.1-cp310-cp310-win32.whl", hash = "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6"}, - {file = "pyrsistent-0.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-win32.whl", hash = "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286"}, - {file = "pyrsistent-0.18.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"}, - {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec"}, - {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c"}, - {file = "pyrsistent-0.18.1-cp38-cp38-win32.whl", hash = "sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca"}, - {file = "pyrsistent-0.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a"}, - {file = "pyrsistent-0.18.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5"}, - {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045"}, - {file = 
"pyrsistent-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c"}, - {file = "pyrsistent-0.18.1-cp39-cp39-win32.whl", hash = "sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc"}, - {file = "pyrsistent-0.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07"}, - {file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"}, -] pysftp = [ {file = "pysftp-0.2.9.tar.gz", hash = "sha256:fbf55a802e74d663673400acd92d5373c1c7ee94d765b428d9f977567ac4854a"}, ] diff --git a/pyproject.toml b/pyproject.toml index 90e264d456..4c65ac9bda 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.9.2-nightly.1" # OpenPype +version = "3.10.0-nightly.1" # OpenPype description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" @@ -136,3 +136,19 @@ hash = "de63a8bf7f6c45ff59ecafeba13123f710c2cbc1783ec9e0b938e980d4f5c37f" [openpype.thirdparty.oiio.darwin] url = "https://distribute.openpype.io/thirdparty/oiio-2.2.0-darwin.tgz" hash = "sha256:..." + +[tool.pyright] +include = [ + "igniter", + "openpype", + "repos", + "vendor" +] +exclude = [ + "**/node_modules", + "**/__pycache__" +] +ignore = ["website", "docs", ".git"] + +reportMissingImports = true +reportMissingTypeStubs = false \ No newline at end of file diff --git a/repos/avalon-core b/repos/avalon-core index ffe9e910f1..2fa14cea6f 160000 --- a/repos/avalon-core +++ b/repos/avalon-core @@ -1 +1 @@ -Subproject commit ffe9e910f1f382e222d457d8e4a8426c41ed43ae +Subproject commit 2fa14cea6f6a9d86eec70bbb96860cbe4c75c8eb diff --git a/setup.py b/setup.py index 3ee6ad43ea..bf42602b52 100644 --- a/setup.py +++ b/setup.py @@ -187,5 +187,6 @@ setup( "build_dir": (openpype_root / "docs" / "build").as_posix() } }, - executables=executables + executables=executables, + packages=[] ) diff --git a/tests/integration/conftest.py b/tests/conftest.py similarity index 100% rename from tests/integration/conftest.py rename to tests/conftest.py diff --git a/tests/lib/assert_classes.py b/tests/lib/assert_classes.py index 98f758767d..9a94f89fd0 100644 --- a/tests/lib/assert_classes.py +++ b/tests/lib/assert_classes.py @@ -24,16 +24,19 @@ class DBAssert: else: args[key] = val - msg = None no_of_docs = dbcon.count_documents(args) - if expected != no_of_docs: - msg = "Not expected no of versions. 
"\ - "Expected {}, found {}".format(expected, no_of_docs) + msg = None args.pop("type") detail_str = " " if args: - detail_str = " with {}".format(args) + detail_str = " with '{}'".format(args) + + if expected != no_of_docs: + msg = "Not expected no of '{}'{}."\ + "Expected {}, found {}".format(queried_type, + detail_str, + expected, no_of_docs) status = "successful" if msg: @@ -42,7 +45,5 @@ class DBAssert: print("Comparing count of {}{} {}".format(queried_type, detail_str, status)) - if msg: - print(msg) return msg diff --git a/tests/lib/testing_classes.py b/tests/lib/testing_classes.py index 0a9da1aca8..7dfbf6fd0d 100644 --- a/tests/lib/testing_classes.py +++ b/tests/lib/testing_classes.py @@ -273,8 +273,6 @@ class PublishTest(ModuleUnitTest): ) os.environ["AVALON_SCHEMA"] = schema_path - import openpype - openpype.install() os.environ["OPENPYPE_EXECUTABLE"] = sys.executable from openpype.lib import ApplicationManager diff --git a/tests/unit/openpype/modules/sync_server/test_module_api.py b/tests/unit/openpype/modules/sync_server/test_module_api.py new file mode 100644 index 0000000000..a484977758 --- /dev/null +++ b/tests/unit/openpype/modules/sync_server/test_module_api.py @@ -0,0 +1,64 @@ +"""Test file for Sync Server, tests API methods, currently for integrate_new + + File: + creates temporary directory and downloads .zip file from GDrive + unzips .zip file + uses content of .zip file (MongoDB's dumps) to import to new databases + with use of 'monkeypatch_session' modifies required env vars + temporarily + runs battery of tests checking that site operation for Sync Server + module are working + removes temporary folder + removes temporary databases (?) +""" +import pytest + +from tests.lib.testing_classes import ModuleUnitTest + + +class TestModuleApi(ModuleUnitTest): + + REPRESENTATION_ID = "60e578d0c987036c6a7b741d" + + TEST_FILES = [("1eCwPljuJeOI8A3aisfOIBKKjcmIycTEt", + "test_site_operations.zip", '')] + + @pytest.fixture(scope="module") + def setup_sync_server_module(self, dbcon): + """Get sync_server_module from ModulesManager""" + from openpype.modules import ModulesManager + + manager = ModulesManager() + sync_server = manager.modules_by_name["sync_server"] + yield sync_server + + def test_get_alt_site_pairs(self, setup_sync_server_module): + conf_sites = {"SFTP": {"alternative_sites": ["studio"]}, + "studio2": {"alternative_sites": ["studio"]}} + + ret = setup_sync_server_module._get_alt_site_pairs(conf_sites) + expected = {"SFTP": {"studio", "studio2"}, + "studio": {"SFTP", "studio2"}, + "studio2": {"studio", "SFTP"}} + assert ret == expected, "Not matching result" + + def test_get_alt_site_pairs_deep(self, setup_sync_server_module): + conf_sites = {"A": {"alternative_sites": ["C"]}, + "B": {"alternative_sites": ["C"]}, + "C": {"alternative_sites": ["D"]}, + "D": {"alternative_sites": ["A"]}, + "F": {"alternative_sites": ["G"]}, + "G": {"alternative_sites": ["F"]}, + } + + ret = setup_sync_server_module._get_alt_site_pairs(conf_sites) + expected = {"A": {"B", "C", "D"}, + "B": {"A", "C", "D"}, + "C": {"A", "B", "D"}, + "D": {"A", "B", "C"}, + "F": {"G"}, + "G": {"F"}} + assert ret == expected, "Not matching result" + + +test_case = TestModuleApi() diff --git a/tools/build.ps1 b/tools/build.ps1 index 10da3d0b83..ff28544954 100644 --- a/tools/build.ps1 +++ b/tools/build.ps1 @@ -180,6 +180,9 @@ $out = & "$($env:POETRY_HOME)\bin\poetry" run python setup.py build 2>&1 Set-Content -Path "$($openpype_root)\build\build.log" -Value $out if ($LASTEXITCODE -ne 0) { + Write-Host 
"------------------------------------------" -ForegroundColor Red + Get-Content "$($openpype_root)\build\build.log" + Write-Host "------------------------------------------" -ForegroundColor Red Write-Host "!!! " -NoNewLine -ForegroundColor Red Write-Host "Build failed. Check the log: " -NoNewline Write-Host ".\build\build.log" -ForegroundColor Yellow diff --git a/tools/build.sh b/tools/build.sh index 301f26023a..79fb748cd5 100755 --- a/tools/build.sh +++ b/tools/build.sh @@ -185,9 +185,9 @@ if [ "$disable_submodule_update" == 1 ]; then fi echo -e "${BIGreen}>>>${RST} Building ..." if [[ "$OSTYPE" == "linux-gnu"* ]]; then - "$POETRY_HOME/bin/poetry" run python "$openpype_root/setup.py" build &> "$openpype_root/build/build.log" || { echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return 1; } + "$POETRY_HOME/bin/poetry" run python "$openpype_root/setup.py" build &> "$openpype_root/build/build.log" || { echo -e "${BIRed}------------------------------------------${RST}"; cat "$openpype_root/build/build.log"; echo -e "${BIRed}------------------------------------------${RST}"; echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return 1; } elif [[ "$OSTYPE" == "darwin"* ]]; then - "$POETRY_HOME/bin/poetry" run python "$openpype_root/setup.py" bdist_mac &> "$openpype_root/build/build.log" || { echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return 1; } + "$POETRY_HOME/bin/poetry" run python "$openpype_root/setup.py" bdist_mac &> "$openpype_root/build/build.log" || { echo -e "${BIRed}------------------------------------------${RST}"; cat "$openpype_root/build/build.log"; echo -e "${BIRed}------------------------------------------${RST}"; echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return 1; } fi "$POETRY_HOME/bin/poetry" run python "$openpype_root/tools/build_dependencies.py" diff --git a/website/docs/admin_settings_project_anatomy.md b/website/docs/admin_settings_project_anatomy.md index 98003dc381..b98819cd8a 100644 --- a/website/docs/admin_settings_project_anatomy.md +++ b/website/docs/admin_settings_project_anatomy.md @@ -59,7 +59,7 @@ We have a few required anatomy templates for OpenPype to work properly, however | `asset` | Name of asset or shot | | `task[name]` | Name of task | | `task[type]` | Type of task | -| `task[short]` | Shortname of task | +| `task[short]` | Short name of task type (eg. 'Modeling' > 'mdl') | | `parent` | Name of hierarchical parent | | `version` | Version number | | `subset` | Subset name | @@ -105,5 +105,8 @@ We have a few required anatomy templates for OpenPype to work properly, however ## Task Types +Current state of default Task descriptors. 
+ +![tasks](assets/settings/anatomy_tasks.png) ## Colour Management and Formats \ No newline at end of file diff --git a/website/docs/api.md b/website/docs/api.md deleted file mode 100644 index 7cad92d603..0000000000 --- a/website/docs/api.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -id: api -title: Pype API -sidebar_label: API ---- - -Work in progress diff --git a/website/docs/artist_hosts.md b/website/docs/artist_hosts.md deleted file mode 100644 index 609f6d97c8..0000000000 --- a/website/docs/artist_hosts.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: artist_hosts -title: Hosts -sidebar_label: Hosts ---- - -## Maya - -## Houdini - -## Nuke - -## Fusion - -## Unreal - -## System diff --git a/website/docs/artist_hosts_hiero.md b/website/docs/artist_hosts_hiero.md index f516c3a6e0..dc6f1696e7 100644 --- a/website/docs/artist_hosts_hiero.md +++ b/website/docs/artist_hosts_hiero.md @@ -94,6 +94,8 @@ This tool will set any defined colorspace definition from OpenPype `Settings / P With OpenPype, you can use Hiero/NKS as a starting point for creating a project's **shots** as *assets* from timeline clips, together with their *hierarchical parents* like **episodes**, **sequences**, **folders**, and their child **tasks**. Most importantly it will create **versions** of plate *subsets*, with or without **reference video**. Publishing naturally creates the clip's **thumbnails** and assigns them to the shot *asset*. Hiero also publishes an **audio** *subset* and various **soft-effects**, either as a retiming component included in the published plates, or as **color-transformations** that will be available later on for compositor artists to use either as a *viewport input-process* or as *loaded nodes* in the graph editor.
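The paragraph above describes turning timeline clips into shot *assets* nested under hierarchical parents. Purely as an illustration of that idea (the token names below are hypothetical, not OpenPype's actual template keys), resolving a clip's parent hierarchy could look like this:

```python
# Illustrative only: resolve a shot's hierarchical parents from clip data.
clip_data = {
    "episode": "ep01",
    "sequence": "sq010",
    "shot": "sh0010",
}

# A parent-hierarchy template in the spirit of the clip creator settings
hierarchy_template = "{episode}/{sequence}"

hierarchy = hierarchy_template.format(**clip_data)  # "ep01/sq010"
asset_name = clip_data["shot"]                      # "sh0010"
print("{}/{}".format(hierarchy, asset_name))
```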



+ + ### Preparing timeline for conversion to instances Because we don't support on-the-fly data conversion, raw camera sources or other formats which need to be converted for 2D/3D work have to be prepared in advance. We suggest converting those beforehand and reconforming the timeline. Before any clips in the timeline can be converted to publishable instances, we recommend the following: 1. Merge all tracks which are supposed to be one and are multiplied only because of the editor's style @@ -191,3 +193,12 @@ If you wish to change any individual properties of the shot then you are able to + +### Publishing Effects from Hiero to Nuke +This video shows a way to publish a shot look as an effect from Hiero to Nuke. + + +### Assembling edit from published shot versions + + diff --git a/website/docs/artist_hosts_nuke.md b/website/docs/artist_hosts_nuke.md deleted file mode 100644 index 1e02599570..0000000000 --- a/website/docs/artist_hosts_nuke.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -id: artist_hosts_nuke -title: Nuke -sidebar_label: Nuke ---- - -:::important -After Nuke starts it will automatically **Apply All Settings** for you. If you are sure the settings are wrong just contact your supervisor and he will set them correctly for you in project database. -::: - -:::note -The workflows are identical for both. We are supporting versions **`11.0`** and above. -::: - -## OpenPype global tools - -- [Set Context](artist_tools.md#set-context) -- [Work Files](artist_tools.md#workfiles) -- [Create](artist_tools.md#creator) -- [Load](artist_tools.md#loader) -- [Manage (Inventory)](artist_tools.md#inventory) -- [Publish](artist_tools.md#publisher) -- [Library Loader](artist_tools.md#library-loader) - -## Nuke specific tools -
-
- -### Set Frame Ranges - -Use this feature in case you are not sure the frame range is correct. - -##### Result - -- setting Frame Range in script settings -- setting Frame Range in viewers (timeline) - -
-
- -![Set Frame Ranges](assets/nuke_setFrameRanges.png) - -
-
- - -
- -![Set Frame Ranges Timeline](assets/nuke_setFrameRanges_timeline.png) - -
- -1. limiting to Frame Range without handles -2. **Input** handle on start -3. **Output** handle on end - -
-
- -### Set Resolution - -
-
- - -This menu item will set correct resolution format for you defined by your production. - -##### Result - -- creates new item in formats with project name -- sets the new format as used - -
-
- -![Set Resolution](assets/nuke_setResolution.png) - -
-
- - -### Set Colorspace - -
-
- -This menu item will set correct Colorspace definitions for you. All has to be configured by your production (Project coordinator). - -##### Result - -- set Colorspace in your script settings -- set preview LUT to your viewers -- set correct colorspace to all discovered Read nodes (following expression set in settings) - -
-
- -![Set Colorspace](assets/nuke_setColorspace.png) - -
-
- - -### Apply All Settings - -
-
- -It is usually enough if you once per while use this option just to make yourself sure the workfile is having set correct properties. - -##### Result - -- set Frame Ranges -- set Colorspace -- set Resolution - -
-
- -![Apply All Settings](assets/nuke_applyAllSettings.png) - -
-
- -### Build Workfile - -
-
- -This tool will append all available subsets into an actual node graph. It will look into database and get all last [versions](artist_concepts.md#version) of available [subsets](artist_concepts.md#subset). - - -##### Result - -- adds all last versions of subsets (rendered image sequences) as read nodes -- adds publishable write node as `renderMain` subset - -
-
- -![Build First Work File](assets/nuke_buildFirstWorkfile.png) - -
-
\ No newline at end of file diff --git a/website/docs/artist_hosts_nuke_tut.md b/website/docs/artist_hosts_nuke_tut.md index 4b0ef7a78a..296fdf44d5 100644 --- a/website/docs/artist_hosts_nuke_tut.md +++ b/website/docs/artist_hosts_nuke_tut.md @@ -89,6 +89,8 @@ This menu item will set correct Colorspace definitions for you. All has to be co - set preview LUT to your viewers - set correct colorspace to all discovered Read nodes (following expression set in settings) +See [Nuke Color Management](artist_hosts_nuke_tut.md#nuke-color-management) +
@@ -144,6 +146,8 @@ This tool will append all available subsets into an actual node graph. It will l
This QuickStart is a short introduction to what OpenPype can do for you. It attempts to give an overview for compositing artists, and simplifies processes that are better described in specific parts of the documentation.
+
+
### Launch Nuke - Shot and Task Context
OpenPype has to know what shot and task you are working on. You need to run Nuke in the context of the task, using an Ftrack Action or the OpenPype Launcher to select the task and run Nuke.
@@ -161,7 +165,7 @@ Nuke OpenPype menu shows the current context
Launching Nuke with context stops your timer, and starts the clock on the shot and task you picked.
-Openpype makes initial setup for your Nuke script. It is the same as running [Apply All Settings](artist_hosts_nuke.md#apply-all-settings) from the OpenPype menu.
+OpenPype makes the initial setup for your Nuke script. It is the same as running [Apply All Settings](artist_hosts_nuke_tut.md#apply-all-settings) from the OpenPype menu.
- Reads frame range and resolution from the Avalon database, sets it in Nuke Project Settings, creates a Viewer node, sets its range and indicates handles by In and Out points.
@@ -226,6 +230,11 @@ This will create a Group with a Write node inside.
You can configure write node parameters in **Studio Settings → Project → Anatomy → Color Management and Output Formats → Nuke → Nodes**
:::
+### Create Prerender Node
+Creating a Prerender node is very similar to creating an OpenPype managed Write node.
+
+
+
#### What Nuke Publish Does
From the Artist perspective, Nuke publish gathers all the stuff found in the Nuke script with the Publish checkbox set to on, exports it and raises the Nuke script (workfile) version.
@@ -315,6 +324,8 @@ Main disadvantage of this approach is that you can render only one version of yo
When making quick farm publishes, like making two versions with different color correction, care must be taken to let the first job (first version) completely finish before the second version starts rendering.
+
+
### Managing Versions
![Versionless](assets/nuke_tut/nuke_ManageVersion.png)
@@ -323,15 +334,30 @@ OpenPype checks all the assets loaded to Nuke on script open. All out of date as
Use Manage to switch versions for loaded assets.
+### Loading Effects
+This video shows how to publish an effect from Hiero / Nuke Studio, and use the effect in Nuke.
+
+
+
+
+
+### Nuke Color Management
+
+
+
## Troubleshooting
### Fixing Validate Containers
+If your Pyblish dialog fails on Validate Containers, you might have an old asset loaded. Use OpenPype - Manage... to switch the asset(s) to the latest version.
+
![Versionless](assets/nuke_tut/nuke_ValidateContainers.png)
-If your Pyblish dialog fails on Validate Containers, you might have an old asset loaded. Use OpenPype - Manage... to switch the asset(s) to the latest version.
+
### Fixing Validate Version
If your Pyblish dialog fails on Validate Version, you might be trying to publish an already published version. Raise your version in the OpenPype WorkFiles SaveAs.
-Or maybe you accidentally copied write node from different shot to your current one. Check the write publishes on the left side of the Pyblish dialog. Typically you publish only one write.
Locate and delete the stray write from other shot.
+
+
diff --git a/website/docs/artist_hosts_photoshop.md b/website/docs/artist_hosts_photoshop.md
index b2b5fd58da..a140170c49 100644
--- a/website/docs/artist_hosts_photoshop.md
+++ b/website/docs/artist_hosts_photoshop.md
@@ -49,6 +49,12 @@ With the `Creator` you have a variety of options to create:
- Uncheck `Use selection`.
- This will create a single group named after the `Subset` in the `Creator`.
+#### Simplified publish
+
+There is a simplified workflow for the simple use case where only a single image, containing all visible layers, should be created.
+No image instances may be present in the workfile, and `project_settings/photoshop/publish/CollectInstances/flatten_subset_template` must be filled in the Settings.
+Artists then just need to hit the 'Publish' button in the menu.
+
### Publish
When you are ready to share some work, you will need to publish. This is done by opening the `Pyblish` through the extensions `Publish` button.
diff --git a/website/docs/assets/publisher_card_view.png b/website/docs/assets/publisher_card_view.png
new file mode 100644
index 0000000000..57b012cb6d
Binary files /dev/null and b/website/docs/assets/publisher_card_view.png differ
diff --git a/website/docs/assets/publisher_create_dialog.png b/website/docs/assets/publisher_create_dialog.png
new file mode 100644
index 0000000000..6e9275062d
Binary files /dev/null and b/website/docs/assets/publisher_create_dialog.png differ
diff --git a/website/docs/assets/publisher_list_view.png b/website/docs/assets/publisher_list_view.png
new file mode 100644
index 0000000000..e9dc8a607a
Binary files /dev/null and b/website/docs/assets/publisher_list_view.png differ
diff --git a/website/docs/assets/settings/anatomy_tasks.png b/website/docs/assets/settings/anatomy_tasks.png
new file mode 100644
index 0000000000..16265cf8eb
Binary files /dev/null and b/website/docs/assets/settings/anatomy_tasks.png differ
diff --git a/website/docs/dev_publishing.md b/website/docs/dev_publishing.md
new file mode 100644
index 0000000000..8ee3b7e85f
--- /dev/null
+++ b/website/docs/dev_publishing.md
@@ -0,0 +1,548 @@
+---
+id: dev_publishing
+title: Publishing
+sidebar_label: Publishing
+toc_max_heading_level: 4
+---
+
+The publishing workflow consists of two parts:
+- Creating - Mark what will be published and how.
+- Publishing - Use data from Creating to go through the pyblish process.
+
+OpenPype uses [pyblish](https://pyblish.com/) for the publishing process, extending and modifying a few of its functions, mainly for reporting and UI purposes. The main differences are that OpenPype's publish UI allows enabling/disabling instances or plugins during the Creating part instead of the publishing part, and that it limits plugin actions to failed validation plugins.
+
+## **Creating**
+
+Creating does not necessarily "create" anything yet; it prepares and stores metadata about an "instance" (which becomes a subset after the publish process). A created instance always has a `family` which defines what kind of data will be published; the best example is the `workfile` family. Storing of the metadata is host specific and may even be Creator plugin specific. Most hosts store the metadata in the workfile (Maya scene, Nuke script, etc.) on an item or a node, the same way as regular pyblish instances, so the host implementation stays consistent, but some features may require a different approach, which is why it is the creator plugin's responsibility.
Storing the metadata in the workfile persists the values, so the artist does not have to create and set what should be published, and how, over and over.
+
+### Created instance
+
+Object representation of created instance metadata, defined by the class **CreatedInstance**. It has access to the **CreateContext** and the **BaseCreator** that initialized the object. It is a dictionary-like object with a few immutable keys (marked with a star `*` in the table). The immutable keys are set by the creator plugin or the create context on initialization and their values can't change. An instance can carry more arbitrary data, for example ids of nodes in the scene, but keep in mind that some keys are reserved.
+
+| Key | Type | Description |
+|---|---|---|
+| *id | str | Identifier of metadata type. ATM constant **"pyblish.avalon.instance"** |
+| *instance_id | str | Unique ID of the instance. Set automatically on instance creation using `str(uuid.uuid4())` |
+| *family | str | Instance's family representing the type, defined by the creator plugin. |
+| *creator_identifier | str | Identifier of the creator that collected/created the instance. |
+| *creator_attributes | dict | Dictionary of attributes that are defined by the creator plugin (`get_instance_attr_defs`). |
+| *publish_attributes | dict | Dictionary of attributes that are defined by publish plugins. |
+| variant | str | Variant is entered by the artist on creation and may affect the **subset**. |
+| subset | str | Name of the instance. This name will be used as the subset name during publishing. Can change on context change or variant change. |
+| active | bool | Whether the instance is active and will be published or not. |
+| asset | str | Name of the asset in whose context the instance was created. |
+| task | str | Name of the task in whose context the instance was created. Can be set to `None`. |
+
+:::note
+A task should not be required unless the subset name template expects it.
+:::
+
+An object of **CreatedInstance** has a method **data_to_store** which returns a dictionary that can be parsed to a json string. This method returns all data related to the instance, so it can be re-created using `CreatedInstance.from_existing(data)`.
+
+#### *Create context* {#category-doc-link}
+
+`CreateContext` is the controller and wrapper around Creating; it takes care of loading the plugins needed for Creating and validates the required functions in the host implementation.
+
+The context discovers creator and publish plugins, triggers collection of existing instances on the creators and triggers Creating itself. It also keeps track of instance objects by their ids.
+
+Creator plugins can call **creator_adds_instance** or **creator_removed_instance** to add/remove instances, but these methods are not meant to be called directly from outside the creator. The reason is that it is the creator's responsibility to remove the metadata, or to decide whether the instance should be removed at all.
+
+#### Required functions in host implementation
+The host implementation **must** implement **get_context_data** and **update_context_data**. These two functions are needed to store metadata that is not related to any instance but is needed for the Creating and publishing process. Right now only data about enabled/disabled optional publish plugins is stored there. When the data is not stored and loaded properly, a reset of publishing will cause it to fall back to default values. Context data is also parsed to a json string, similarly to instance data.
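+
+As an illustration, here is a minimal sketch of these two functions for a made-up host — the `scene.read_root_metadata`/`scene.write_root_metadata` helpers are purely hypothetical, and each host is free to store the json string wherever it fits (extra attributes, a hidden node, a sidecar file, ...):
+
+```python
+import json
+
+# Hypothetical host API - not part of OpenPype
+from my_host_api import scene
+
+
+def get_context_data():
+    # Load context metadata (e.g. enabled/disabled optional publish
+    #   plugins) previously stored at the workfile root
+    raw = scene.read_root_metadata("openpype_context")
+    return json.loads(raw) if raw else {}
+
+
+def update_context_data(data, changes):
+    # Persist the whole context data back as a json string; 'changes'
+    #   can usually be ignored when the full data is stored at once
+    scene.write_root_metadata("openpype_context", json.dumps(data))
+```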
+
+There are also a few optional functions. For UI purposes it is possible to implement **get_context_title**, which can return a string shown in the UI as a title. The output string may contain html tags. It is recommended to return the context path (a helper function for this purpose will be created) in this order: `"{project name}/{asset hierarchy}/{asset name}/{task name}"`.
+
+Another optional function is **get_current_context**. This function is handy in hosts where it is possible to open multiple workfiles in one process, so using global context variables is not reliable, because artists can switch between opened workfiles without the pipeline being notified. When the function is not implemented or does not return the right keys, the global context is used.
+```json
+# Expected keys in output
+{
+    "project_name": "MyProject",
+    "asset_name": "sq01_sh0010",
+    "task_name": "Modeling"
+}
+```
+
+### Create plugin
+The main responsibility of a create plugin is to create, update, collect and remove instance metadata, and to propagate changes to the create context. It has access to the **CreateContext** (`self.create_context`) that discovered the plugin, so it also has access to the other creators and instances. Create plugins have a lot of responsibility, so it is recommended to implement the common code once per host.
+
+#### *BaseCreator*
+Base implementation of a creator plugin. It is not recommended to use this class as a base for production plugins; rather use one of the **AutoCreator** and **Creator** variants.
+
+**Abstractions**
+- **`family`** (class attr) - Tells what kind of instance will be created.
+```python
+class WorkfileCreator(Creator):
+    family = "workfile"
+```
+
+- **`collect_instances`** (method) - Collect already existing instances from the workfile and add them to the create context. This method is called on initialization or reset of **CreateContext**. Each creator is responsible for finding its instance metadata, converting it to **CreatedInstance** objects and adding them to the create context (`self._add_instance_to_context(instance_obj)`).
+```python
+def collect_instances(self):
+    # Using 'pipeline.list_instances' is just an example of how to get
+    #   existing instances from the scene
+    # - getting existing instances differs per host implementation
+    for instance_data in pipeline.list_instances():
+        # Process only instances that were created by this creator
+        creator_id = instance_data.get("creator_identifier")
+        if creator_id == self.identifier:
+            # Create instance object from existing data
+            instance = CreatedInstance.from_existing(
+                instance_data, self
+            )
+            # Add instance to create context
+            self._add_instance_to_context(instance)
+```
+
+- **`create`** (method) - Create a new object of **CreatedInstance**, store its metadata in the workfile and add the instance to the create context. Failed Creating should raise a **CreatorError** when an error happens that artists can fix, to give them some useful information. Triggers and implementation differ for **Creator** and **AutoCreator**.
+
+- **`update_instances`** (method) - Update data of instances. Receives a list of tuples with **instance** and **changes**.
+```python
+def update_instances(self, update_list):
+    # Loop over changed instances
+    for instance, changes in update_list:
+        # Example possible usage of 'changes' to use a different node on
+        #   change of node id in instance data (MADE UP)
+        node = None
+        if "node_id" in changes:
+            old_value, new_value = changes["node_id"]
+            if new_value is not None:
+                node = pipeline.get_node_by_id(new_value)
+
+        if node is None:
+            # Get node in scene that represents the instance
+            node = pipeline.get_node_by_instance_id(instance.id)
+        # Imprint data on the node
+        pipeline.imprint(node, instance.data_to_store())
+
+
+# Most implementations will probably ignore 'changes' completely
+def update_instances(self, update_list):
+    for instance, _ in update_list:
+        # Get node from scene
+        node = pipeline.get_node_by_instance_id(instance.id)
+        # Imprint data on the node
+        pipeline.imprint(node, instance.data_to_store())
+```
+
+- **`remove_instances`** (method) - Remove instance metadata from the workfile and from the create context.
+```python
+# Possible way to remove an instance
+def remove_instances(self, instances):
+    for instance in instances:
+        # Remove instance metadata from workfile
+        pipeline.remove_instance(instance.id)
+        # Remove instance from create context
+        self._remove_instance_from_context(instance)
+
+
+# Default implementation of `AutoCreator`
+def remove_instances(self, instances):
+    pass
+```
+
+:::note
+When a host implementation uses a universal way of storing and loading instances, you should implement a host-specific creator plugin base class with **collect_instances**, **update_instances** and **remove_instances** implemented.
+:::
+
+**Optional implementations**
+
+- **`enabled`** (attr) - Boolean, whether the creator plugin is enabled and used.
+- **`identifier`** (class attr) - Consistent unique string identifier of the creator plugin. It is used to identify the source plugin of existing instances. There can't be 2 creator plugins with the same identifier. The default implementation returns the `family` attribute.
+```python
+class RenderLayerCreator(Creator):
+    family = "render"
+    identifier = "render_layer"
+
+
+class RenderPassCreator(Creator):
+    family = "render"
+    identifier = "render_pass"
+```
+
+- **`label`** (attr) - String label of the creator plugin which will show up in the UI; `identifier` is used when not set. It should be possible to use html tags.
+```python
+class RenderLayerCreator(Creator):
+    label = "Render Layer"
+```
+
+- **`get_icon`** (method) - Icon of the creator and its instances. The value can be a path to an image file, the full name of a qtawesome icon, a `QPixmap` or a `QIcon`. For complex cases, or cases when `Qt` objects are returned, it is recommended to override the `get_icon` method and handle the logic there, or import `Qt` inside the method, to not break headless usage of the creator plugin. For a list of qtawesome icons check the qtawesome github repository (look for the used version in pyproject.toml). The default implementation returns the **icon** attribute.
+- **`icon`** (attr) - Attribute for the default implementation of **get_icon**.
+```python
+class RenderLayerCreator(Creator):
+    # Use font awesome 5 icon
+    icon = "fa5.building"
+```
+
+- **`get_instance_attr_defs`** (method) - Attribute definitions of the instance. A creator can define attributes with default values for each instance. These attributes may affect how instances will be processed during publishing. Attribute definitions can be used from `openpype.pipeline.lib.attribute_definitions` (NOTE: Will be moved to `openpype.lib.attribute_definitions` soon).
Attribute definitions define basic types of values for different cases, e.g. boolean, number, string, enumerator, etc. The default implementation returns **instance_attr_defs**.
+- **`instance_attr_defs`** (attr) - Attribute for the default implementation of **get_instance_attr_defs**.
+
+```python
+from openpype.pipeline import attribute_definitions
+
+
+class RenderLayerCreator(Creator):
+    def get_instance_attr_defs(self):
+        # Return empty list if '_allow_farm_render' is not enabled (can
+        #   be set during initialization)
+        if not self._allow_farm_render:
+            return []
+        # Give artist option to choose if render should happen on farm
+        #   or locally
+        return [
+            attribute_definitions.BoolDef(
+                "render_farm",
+                default=False,
+                label="Render on Farm"
+            )
+        ]
+```
+
+- **`get_subset_name`** (method) - Calculate the subset name based on the passed data. The data can be extended using the `get_dynamic_data` method. The default implementation uses `get_subset_name` from `openpype.lib`, which is recommended.
+
+- **`get_dynamic_data`** (method) - Can be used to extend the data for subset name templates, which may be required in some cases; see the sketch below.
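+
+As an illustration, a hedged sketch of `get_dynamic_data` — the `{renderlayer}` template key and the `pipeline.get_current_renderlayer` helper are made up for this example:
+
+```python
+class RenderLayerCreator(Creator):
+    family = "render"
+
+    def get_dynamic_data(
+        self, variant, task_name, asset_doc, project_name, host_name
+    ):
+        # Fill an additional '{renderlayer}' key that a studio's subset
+        #   name template may contain (made up for this example)
+        return {
+            "renderlayer": pipeline.get_current_renderlayer()
+        }
+```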
+
+#### *AutoCreator*
+A creator that is triggered on reset of the create context. Can be used for families that are expected to be created automatically, without artist interaction (e.g. **workfile**). The method `create` is triggered after all creators are collected.
+
+:::important
+**AutoCreator** implements **remove_instances** to do nothing, as removing auto created instances would only lead to a new instance being created immediately or on the next refresh.
+:::
+
+```python
+def __init__(
+    self, create_context, system_settings, project_settings, *args, **kwargs
+):
+    super(MyCreator, self).__init__(
+        create_context, system_settings, project_settings, *args, **kwargs
+    )
+    # Get variant value from settings
+    variant_name = (
+        project_settings["my_host"][self.identifier]["variant"]
+    ).strip()
+    if not variant_name:
+        variant_name = "Main"
+    self._variant_name = variant_name
+
+
+# Create does not expect any arguments
+def create(self):
+    # Look for existing instance in create context
+    existing_instance = None
+    for instance in self.create_context.instances:
+        if instance.creator_identifier == self.identifier:
+            existing_instance = instance
+            break
+
+    # Collect current context information
+    # - variant can be filled from settings
+    variant = self._variant_name
+    # Only place where we can look for current context
+    project_name = io.Session["AVALON_PROJECT"]
+    asset_name = io.Session["AVALON_ASSET"]
+    task_name = io.Session["AVALON_TASK"]
+    host_name = io.Session["AVALON_APP"]
+
+    # Create a new instance if it does not exist yet
+    if existing_instance is None:
+        asset_doc = io.find_one({"type": "asset", "name": asset_name})
+        subset_name = self.get_subset_name(
+            variant, task_name, asset_doc, project_name, host_name
+        )
+        data = {
+            "asset": asset_name,
+            "task": task_name,
+            "variant": variant
+        }
+        data.update(self.get_dynamic_data(
+            variant, task_name, asset_doc, project_name, host_name
+        ))
+
+        new_instance = CreatedInstance(
+            self.family, subset_name, data, self
+        )
+        self._add_instance_to_context(new_instance)
+
+    # Update instance context if it is not the same
+    elif (
+        existing_instance["asset"] != asset_name
+        or existing_instance["task"] != task_name
+    ):
+        asset_doc = io.find_one({"type": "asset", "name": asset_name})
+        subset_name = self.get_subset_name(
+            variant, task_name, asset_doc, project_name, host_name
+        )
+        existing_instance["asset"] = asset_name
+        existing_instance["task"] = task_name
+```
+
+#### *Creator*
+Implementation of a creator plugin that is triggered manually by the artist in the UI (or by code). It has more UI-related options than **AutoCreator**, and its **create** method expects more arguments.
+
+**Optional implementations**
+- **`create_allow_context_change`** (class attr) - Allow setting the context in the UI before Creating. Some creators may not allow it, or their logic would not use the context selection (e.g. bulk creators). Set to `True` by default.
+```python
+class BulkRenderCreator(Creator):
+    create_allow_context_change = False
+```
+- **`get_default_variants`** (method) - Returns a list of default variants that are listed in the create dialog for the user. Returns the **default_variants** attribute by default.
+- **`default_variants`** (attr) - Attribute for the default implementation of **get_default_variants**.
+
+- **`get_default_variant`** (method) - Returns the default variant that is prefilled in the UI (the value does not have to be in the default variants). By default returns the **default_variant** attribute. If it returns `None`, the UI logic takes the first item from **get_default_variants** if there is any; otherwise **"Main"** is used.
+- **`default_variant`** (attr) - Attribute for the default implementation of **get_default_variant**.
+
+- **`get_description`** (method) - Returns a short string description of the creator. Returns the **description** attribute by default.
+- **`description`** (attr) - Attribute for the default implementation of **get_description**.
+
+- **`get_detailed_description`** (method) - Returns a detailed string description of the creator. Can contain markdown. Returns the **detailed_description** attribute by default.
+- **`detailed_description`** (attr) - Attribute for the default implementation of **get_detailed_description**.
+
+- **`get_pre_create_attr_defs`** (method) - Similar to **get_instance_attr_defs**, returns attribute definitions, but these are filled before creation. When creation is called from the UI, the values are passed to the **create** method. Returns the **pre_create_attr_defs** attribute by default.
+- **`pre_create_attr_defs`** (attr) - Attribute for the default implementation of **get_pre_create_attr_defs**.
+
+```python
+from openpype.pipeline import Creator, attribute_definitions
+
+
+class CreateRender(Creator):
+    family = "render"
+    label = "Render"
+    icon = "fa.eye"
+    description = "Render scene viewport"
+
+    def __init__(
+        self, context, system_settings, project_settings, *args, **kwargs
+    ):
+        super(CreateRender, self).__init__(
+            context, system_settings, project_settings, *args, **kwargs
+        )
+        plugin_settings = (
+            project_settings["my_host"]["create"][self.__class__.__name__]
+        )
+        # Get information if studio has enabled farm publishing
+        self._allow_farm_render = plugin_settings["allow_farm_render"]
+        # Get default variants from settings
+        self.default_variants = plugin_settings["variants"]
+
+    def get_instance_attr_defs(self):
+        # Return empty list if '_allow_farm_render' is not enabled (can
+        #   be set during initialization)
+        if not self._allow_farm_render:
+            return []
+        # Give artist option to choose if render should happen on farm
+        #   or locally
+        return [
+            attribute_definitions.BoolDef(
+                "render_farm",
+                default=False,
+                label="Render on Farm"
+            )
+        ]
+
+    def get_pre_create_attr_defs(self):
+        # Give user option to use selection or not
+        attrs = [
+            attribute_definitions.BoolDef(
+                "use_selection",
+                default=False,
+                label="Use selection"
+            )
+        ]
+        if self._allow_farm_render:
+            # Set to render on farm in creator dialog
+            # - this value is not automatically passed to instance
+            #   attributes, the creator must do that during creation
+            attrs.append(
+                attribute_definitions.BoolDef(
+                    "render_farm",
+                    default=False,
+                    label="Render on Farm"
+                )
+            )
+        return attrs
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        # ARGS:
+        # - 'subset_name' - precalculated subset name
+        # - 'instance_data' - context data
+        #   - 'asset' - asset name
+        #   - 'task' - task name
+        #   - 'variant' - variant
+        #   - 'family' - instance family
+
+        # Check if should use selection or not
+        if pre_create_data.get("use_selection"):
+            items = pipeline.get_selection()
+        else:
+            items = [pipeline.create_write()]
+
+        # Validations related to selection
+        if len(items) > 1:
+            raise CreatorError("Please select only a single item at a time.")
+
+        elif not items:
+            raise CreatorError("Nothing to create. Select at least one item.")
+
+        # Create instance object
+        new_instance = CreatedInstance(
+            self.family, subset_name, instance_data, self
+        )
+        # Pass value from pre-create attributes to the instance
+        # - use it only when pre-create data contains the key
+        if "render_farm" in pre_create_data:
+            use_farm = pre_create_data["render_farm"]
+            new_instance.creator_attributes["render_farm"] = use_farm
+
+        # Store metadata to workfile
+        pipeline.imprint(new_instance.id, new_instance.data_to_store())
+
+        # Add instance to context
+        self._add_instance_to_context(new_instance)
+```
+
+## **Publish**
+### Exceptions
+OpenPype defines a few specific exceptions that should be used in publish plugins.
+
+#### *Validation exception*
+Validation plugins should raise `PublishValidationError` to show the artist what's wrong and offer actions to fix it. The exception indicates that the errors in the plugin can be fixed by the artist (with or without an action on the plugin). Any other error will stop publishing immediately. A `PublishValidationError` raised after the validation order has the same effect as any other exception.
+
+The exception `PublishValidationError` expects 4 arguments:
+- **message** Not used in the UI, but used for headless publishing.
+- **title** Short description of the error (2-5 words). The title is used for grouping of exceptions per plugin.
+- **description** Detailed description of the issue, where markdown and html can be used.
+- **detail** Optional, to give even more detailed information to advanced users. At this moment the detail is shown directly under the description, but the plan is to show it in a collapsible widget.
+
+The extended version is `PublishXmlValidationError`, which uses xml files with stored descriptions. This helps to avoid having huge markdown texts inside the code. The exception has 4 arguments:
+- **plugin** The plugin object which raises the exception, used to find its related xml file.
+- **message** Exception message for publishing without UI or with a different pyblish UI.
+- **key** Optional argument that says which error from the xml is used, as a validation plugin may raise errors with different messages based on the current state. Default is **"main"**.
+- **formatting_data** Optional dictionary used to format the texts of the error. This is used to fill the detailed description with data from the publishing, so the artist can get more precise information.
+
+**Where and how to create the xml file**
+
+Xml files for `PublishXmlValidationError` must be located in a **./help** subfolder next to the plugin, and the filename must match the filename of the plugin.
+```
+# File location related to plugin file
+└ publish
+  ├ help
+  │ ├ validate_scene.xml
+  │ └ ...
+  ├ validate_scene.py
+  └ ...
+```
+
+The xml file content has a **<root>** node which may contain any number of **<error>** nodes, but each of them must have an **id** attribute with a unique value. That is then used for the **key**. Each error must have a **<title>**, a **<description>** and a **<detail>**. Text content may contain python formatting keys that can be filled when the exception is raised.
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<root>
+<error id="main">
+<title>Subset context</title>
+<description>## Invalid subset context
+
+Context of the given subset doesn't match your current scene.
+
+### How to repair?
+
+You can fix this with the "Repair" button on the right. This will use '{expected_asset}' asset name and overwrite '{found_asset}' asset name in scene metadata.
+
+After that restart publishing with Reload button.
+</description>
+<detail>
+### How could this happen?
+
+The subset was created in different scene with different context
+or the scene file was copy pasted from different context.
+</detail>
+</error>
+</root>
+```
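+
+To tie it together, here is a hedged sketch of a validator consuming the xml above — the plugin would live in `validate_scene.py`, and the way the scene asset is collected (`context.data["sceneAsset"]`) is made up for this example:
+
+```python
+import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
+
+
+class ValidateSubsetContext(pyblish.api.InstancePlugin):
+    """Validate that instance context matches the current scene."""
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Subset Context"
+
+    def process(self, instance):
+        expected_asset = instance.data["asset"]
+        # How the scene asset is collected is host specific (made up here)
+        found_asset = instance.context.data.get("sceneAsset")
+        if found_asset == expected_asset:
+            return
+
+        raise PublishXmlValidationError(
+            self,
+            "Subset context does not match the scene",
+            # 'main' is the id of the <error> node in the xml file
+            key="main",
+            # Fills '{expected_asset}' and '{found_asset}' in the xml texts
+            formatting_data={
+                "expected_asset": expected_asset,
+                "found_asset": found_asset,
+            }
+        )
+```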
+::: + +Possible attribute definitions can be found in `openpype/pipeline/lib/attribute_definitions.py`. + +
+<details>
+<summary>Example plugin</summary>
+
+```python
+import pyblish.api
+from openpype.pipeline import (
+    OpenPypePyblishPluginMixin,
+    attribute_definitions,
+)
+
+
+# Example context plugin
+class MyExtendedPlugin(
+    pyblish.api.ContextPlugin, OpenPypePyblishPluginMixin
+):
+    optional = True
+    active = True
+
+    @classmethod
+    def get_attribute_defs(cls):
+        return [
+            attribute_definitions.BoolDef(
+                # Key under which it will be stored
+                "process",
+                # Use 'active' as default value
+                default=cls.active,
+                # Use plugin label as label for attribute
+                label=cls.label
+            )
+        ]
+
+    def process_plugin(self, context):
+        # First check if plugin is optional
+        if not self.optional:
+            return True
+
+        # Attribute values are stored by class names
+        # - the helper 'get_attr_values_from_data' was implemented
+        #   to help with accessing them
+        attribute_values = self.get_attr_values_from_data(context.data)
+        # Get 'process' key
+        process_value = attribute_values.get("process")
+        if process_value is None or process_value:
+            return True
+        return False
+
+    def process(self, context):
+        if not self.process_plugin(context):
+            return
+        # Do plugin logic
+        ...
+```
+
+</details>
+
+
+## **UI examples**
+### Main publish window
+The main window of the publisher shows instances and their values, collected by creators.
+
+**Card view**
+![Publisher UI - Card view](assets/publisher_card_view.png)
+**List view**
+![Publisher UI - List view](assets/publisher_list_view.png)
+
+#### *Instances views*
+The list of instances always contains an `Options` item, which is used to show the attributes of context plugins. Values from the item are saved and loaded using the [host implementation](#required-functions-in-host-implementation) functions **get_context_data** and **update_context_data**. Instances are grouped by family and can be shown in a card view (single selection) or a list view (multi selection).
+
+The instance view has 3 buttons at the bottom: the plus sign opens the [create dialog](#create-dialog), the bin removes selected instances and the stripes swap between card and list view.
+
+#### *Context options*
+It is possible to change the variant, or the asset and task context, of instances in the top part, but all changes there must be confirmed. Confirmation triggers recalculation of subset names, and all new data is stored on the instances.
+
+#### *Create attributes*
+Create attributes display all creator attributes of all selected instances. All attributes that have the same definition are grouped into one input, and it is visually indicated when the values are not the same for the selected instances; in most cases they show a **< Multiselection >** placeholder.
+
+#### *Publish attributes*
+Publish attributes work the same way as create attributes, but the source of the attribute definitions are pyblish plugins. Attributes are filtered based on the families of the selected instances and the families defined in the pyblish plugin.
+
+### Create dialog
+![Publisher UI - Create dialog](assets/publisher_create_dialog.png)
+The create dialog is used by the artist to create new instances in a context. The context selection can be enabled/disabled by changing `create_allow_context_change` on the [creator plugin](#creator). In the middle part the artist selects what will be created and which variant it is. On the right side is information about the selected creator and its pre-create attributes. There is also a question mark button which extends the window and displays more detailed information about the creator.
\ No newline at end of file
diff --git a/website/docs/dev_requirements.md b/website/docs/dev_requirements.md
index 6c87054ba0..a10aea7865 100644
--- a/website/docs/dev_requirements.md
+++ b/website/docs/dev_requirements.md
@@ -14,7 +14,7 @@ The main things you will need to run and build pype are:
- **Terminal** in your OS
- PowerShell 5.0+ (Windows)
- Bash (Linux)
-- [**Python 3.7.8**](#python) or higher
+- [**Python 3.7.9**](#python) or higher
- [**MongoDB**](#database)
diff --git a/website/docs/hosts-maya.md b/website/docs/hosts-maya.md
deleted file mode 100644
index 0ee0c2d86b..0000000000
--- a/website/docs/hosts-maya.md
+++ /dev/null
@@ -1,33 +0,0 @@
-### Tools
-Creator
-Publisher
-Loader
-Scene Inventory
-Look assigner
-Workfiles
-
-### Plugins
-Deadline
-Muster
-Yeti
-Arnold
-Vray
-Redshift
-
-### Families
-Model
-Look
-Rig
-Animation
-Cache
-Camera
-Assembly
-MayaAscii (generic scene)
-Setdress
-RenderSetup
-Review
-arnoldStandin
-vrayProxy
-vrayScene
-yetiCache
-yetiRig
diff --git a/website/docs/manager_ftrack.md b/website/docs/manager_ftrack.md
index defbb4b48f..b5ca167838 100644
--- a/website/docs/manager_ftrack.md
+++ b/website/docs/manager_ftrack.md
@@ -4,7 +4,7 @@ title: Ftrack
sidebar_label: Project Manager
---
-Ftrack is currently the main project management option for OpenPype. This documentation assumes that you are familiar with Ftrack and it's basic principles. If you're new to Ftrack, we recommend having a thorough look at [Ftrack Official Documentation](http://ftrack.rtd.ftrack.com/en/stable/).
+Ftrack is currently the main project management option for OpenPype. This documentation assumes that you are familiar with Ftrack and its basic principles. If you're new to Ftrack, we recommend having a thorough look at the [Ftrack Official Documentation](https://help.ftrack.com/en/).
## Project management
Setting project attributes is the key to properly working pipeline.
@@ -31,7 +31,7 @@ This process describes how data from Ftrack will get into Avalon database.
### How to synchronize
You can trigger synchronization manually using [Sync To Avalon](manager_ftrack_actions.md#sync-to-avalon) action.
-Synchronization can also be automated with OpenPype's [event server](#event-server) and synchronization events. If your Ftrack is [prepared for OpenPype](#prepare-ftrack-for-openpype), the project should have custom attribute `Avalon auto-sync`. Check the custom attribute to allow auto-updates with event server.
+Synchronization can also be automated with OpenPype's [event server](#event-server) and synchronization events. If your Ftrack is [prepared for OpenPype](module_ftrack.md#prepare-ftrack-for-openpype), the project should have the custom attribute `Avalon auto-sync`. Check the custom attribute to allow auto-updates with the event server.
:::tip
Always use `Sync To Avalon` action before you enable `Avalon auto-sync`!
diff --git a/website/docs/manager_naming.md b/website/docs/manager_naming.md
deleted file mode 100644
index bf822fbeb4..0000000000
--- a/website/docs/manager_naming.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-id: manager_naming
-title: Naming Conventions
-sidebar_label: Naming Conventions
----
-
-:::note
-This naming convention holds true for most of our pipeline. Please match it as close as possible even for projects and files that might be outside of pipeline scope at this point. Small errors count!
The reason for given formatting is to allow people to understand the file at glance and that a script or a program can easily get meaningful information from your files without errors.
-:::
-
-## General rules
-
-For more detailed rules and different file types, have a look at naming conventions for scenes and assets
-
-- Every file starts with file code based on a project it belongs to e.g. ‘tst_’, ‘drm_’
-- Optional subversion and comment always comes after the major version. v##.subversion_comment.
-- File names can only be composed of letters, numbers, underscores `_` and dots “.”
-- You can use snakeCase or CamelCase if you need more words in a section.  thisIsLongerSentenceInComment
-- No spaces in filenames. Ever!
-- Frame numbers are always separated by a period ”.”
-- If you're not sure use this template:
-
-## Work files
-
-**`{code}_{shot}_{task}_v001.ext`**
-
-**`{code}_{asset}_{task}_v001.ext`**
-
-**Examples:**
-
-    prj_sh010_enviro_v001.ma
-    prj_sh010_animation_v001.ma
-    prj_sh010_comp_v001.nk
-
-    prj_bob_modelling_v001.ma
-    prj_bob_rigging_v001.ma
-    prj_bob_lookdev_v001.ma
-
-:::info
-In all of the examples anything enclosed in curly brackets  { } is compulsory in the name.
-Anything in square brackets [ ] is optional.
-:::
-
-## Published Assets
-
-**`{code}_{asset}_{family}_{subset}_{version}_[comment].ext`**
-
-**Examples:**
-
-    prj_bob_model_main_v01.ma
-    prj_bob_model_hires_v01.ma
-    prj_bob_model_main_v01_clothes.ma
-    prj_bob_model_main_v01_body.ma
-    prj_bob_rig_main_v01.ma
-    Prj_bob_look_main_v01.ma
-    Prj_bob_look_wet_v01.ma
diff --git a/website/docs/module_site_sync.md b/website/docs/module_site_sync.md
index 78f482352e..2e9cf01102 100644
--- a/website/docs/module_site_sync.md
+++ b/website/docs/module_site_sync.md
@@ -123,6 +123,10 @@ To get working connection to Google Drive there are some necessary steps:
- add new site back in OpenPype Settings, name as you want, provider needs to be 'gdrive'
- distribute credentials file via shared mounted disk location
+:::note
+If you are using a regular personal GDrive for testing, don't forget to add `/My Drive` as the prefix in the root configuration (for example `/My Drive/projects` instead of `/projects`). Business accounts and shared drives don't need this.
+:::
+
### SFTP
SFTP provider is used to connect to SFTP server. Currently authentication with `user:password` or `user:ssh key` is implemented.
diff --git a/website/sidebars.js b/website/sidebars.js index 16af1e1151..105afc30eb 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -136,6 +136,13 @@ module.exports = { "dev_requirements", "dev_build", "dev_testing", - "dev_contribute" + "dev_contribute", + { + type: "category", + label: "Hosts integrations", + items: [ + "dev_publishing" + ] + } ] }; diff --git a/website/src/pages/index.js b/website/src/pages/index.js index 791b309bbc..d9bbc3eaa0 100644 --- a/website/src/pages/index.js +++ b/website/src/pages/index.js @@ -144,6 +144,11 @@ const studios = [ title: "Ember Light", image: "/img/EmberLight_black.png", infoLink: "https://emberlight.se/", + }, + { + title: "IGG Canada", + image: "/img/igg-logo.png", + infoLink: "https://www.igg.com/", } ]; diff --git a/website/static/img/igg-logo.png b/website/static/img/igg-logo.png new file mode 100644 index 0000000000..3c7f7718f7 Binary files /dev/null and b/website/static/img/igg-logo.png differ diff --git a/website/yarn.lock b/website/yarn.lock index 7f677aaed7..04b9dd658b 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -2311,9 +2311,9 @@ asap@~2.0.3: integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= async@^2.6.2: - version "2.6.3" - resolved "https://registry.yarnpkg.com/async/-/async-2.6.3.tgz#d72625e2344a3656e3a3ad4fa749fa83299d82ff" - integrity sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg== + version "2.6.4" + resolved "https://registry.yarnpkg.com/async/-/async-2.6.4.tgz#706b7ff6084664cd7eae713f6f965433b5504221" + integrity sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA== dependencies: lodash "^4.17.14" @@ -5125,9 +5125,9 @@ minimatch@^3.0.4: brace-expansion "^1.1.7" minimist@^1.2.0, minimist@^1.2.5: - version "1.2.5" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602" - integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw== + version "1.2.6" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" + integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== mkdirp@^0.5.5: version "0.5.5" @@ -5207,9 +5207,9 @@ node-fetch@2.6.7: whatwg-url "^5.0.0" node-forge@^1.2.0: - version "1.2.1" - resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-1.2.1.tgz#82794919071ef2eb5c509293325cec8afd0fd53c" - integrity sha512-Fcvtbb+zBcZXbTTVwqGA5W+MKBj56UjVRevvchv5XrcyXbmNdesfZL37nlcWOfpgHhgmxApw3tQbTr4CqNmX4w== + version "1.3.0" + resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-1.3.0.tgz#37a874ea723855f37db091e6c186e5b67a01d4b2" + integrity sha512-08ARB91bUi6zNKzVmaj3QO7cr397uiDT2nJ63cHjyNtCTWIgvS47j3eT0WfzUwS9+6Z5YshRaoasFkXCKrIYbA== node-releases@^2.0.1: version "2.0.2"