diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index 258458e2d4..d9b4d8089c 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -43,7 +43,7 @@ jobs: uses: heinrichreimer/github-changelog-generator-action@v2.2 with: token: ${{ secrets.ADMIN_TOKEN }} - addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}}' + addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' issues: false issuesWoLabels: false sinceTag: "3.0.0" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3f85525c26..917e6c884c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -39,7 +39,7 @@ jobs: uses: heinrichreimer/github-changelog-generator-action@v2.2 with: token: ${{ secrets.ADMIN_TOKEN }} - addSections: '{"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]},"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]}}' + addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' issues: false issuesWoLabels: false sinceTag: "3.0.0" @@ -81,7 +81,7 @@ jobs: uses: heinrichreimer/github-changelog-generator-action@v2.2 with: token: ${{ secrets.ADMIN_TOKEN }} - addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}}' + addSections: '{"documentation":{"prefix":"### 📖 
Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' issues: false issuesWoLabels: false sinceTag: ${{ steps.version.outputs.last_release }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 348f7dc1b8..f3c7820d8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,67 +1,98 @@ # Changelog -## [3.9.0-nightly.5](https://github.com/pypeclub/OpenPype/tree/HEAD) +## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-17) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.2...HEAD) - -**Deprecated:** - -- Houdini: Remove unused code [\#2779](https://github.com/pypeclub/OpenPype/pull/2779) - -### 📖 Documentation - -- Documentation: fixed broken links [\#2799](https://github.com/pypeclub/OpenPype/pull/2799) -- Documentation: broken link fix [\#2785](https://github.com/pypeclub/OpenPype/pull/2785) -- Documentation: link fixes [\#2772](https://github.com/pypeclub/OpenPype/pull/2772) -- Update docusaurus to latest version [\#2760](https://github.com/pypeclub/OpenPype/pull/2760) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.0...3.9.1) **🚀 Enhancements** -- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817) -- General: Set context environments for non host applications [\#2803](https://github.com/pypeclub/OpenPype/pull/2803) -- Tray publisher: New Tray Publisher host \(beta\) [\#2778](https://github.com/pypeclub/OpenPype/pull/2778) -- Houdini: Implement Reset Frame Range [\#2770](https://github.com/pypeclub/OpenPype/pull/2770) -- Pyblish Pype: Remove redundant new line in installed fonts printing [\#2758](https://github.com/pypeclub/OpenPype/pull/2758) -- Flame: use Shot Name on segment for asset name [\#2751](https://github.com/pypeclub/OpenPype/pull/2751) -- Flame: adding validator source clip [\#2746](https://github.com/pypeclub/OpenPype/pull/2746) -- Ftrack: Disable ftrack module by default [\#2732](https://github.com/pypeclub/OpenPype/pull/2732) -- RoyalRender: Minor enhancements [\#2700](https://github.com/pypeclub/OpenPype/pull/2700) +- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907) +- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897) +- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892) +- Nuke: ExtractReviewSlate can handle more codes and profiles [\#2879](https://github.com/pypeclub/OpenPype/pull/2879) +- Flame: sequence used for reference video [\#2869](https://github.com/pypeclub/OpenPype/pull/2869) **🐛 Bug fixes** +- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904) +- Fixing gap detection in extract review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902) +- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899) +- SceneInventory: Fix import of load function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894) +- Harmony - fixed creator issue 
[\#2891](https://github.com/pypeclub/OpenPype/pull/2891) +- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885) +- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884) +- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874) +- Maya: Deformer node ids validation plugin [\#2826](https://github.com/pypeclub/OpenPype/pull/2826) + +**🔀 Refactored code** + +- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889) +- General: Move loader logic from avalon to openpype [\#2886](https://github.com/pypeclub/OpenPype/pull/2886) + +## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.0-nightly.9...3.9.0) + +**Deprecated:** + +- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845) + +### 📖 Documentation + +- Documentation: Change Photoshop & AfterEffects plugin path [\#2878](https://github.com/pypeclub/OpenPype/pull/2878) + +**🚀 Enhancements** + +- General: Subset name filtering in ExtractReview outpus [\#2872](https://github.com/pypeclub/OpenPype/pull/2872) +- NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867) +- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863) +- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859) +- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841) +- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837) +- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836) +- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822) +- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817) +- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812) +- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811) +- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805) + +**🐛 Bug fixes** + +- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877) +- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868) +- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866) +- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864) +- General: ffmpeg was crashing on slate merge [\#2860](https://github.com/pypeclub/OpenPype/pull/2860) +- WebPublisher: Video file was published with one too many frame [\#2858](https://github.com/pypeclub/OpenPype/pull/2858) +- New Publisher: Error dialog got right styles [\#2857](https://github.com/pypeclub/OpenPype/pull/2857) +- General: Fix getattr clalback on dynamic modules [\#2855](https://github.com/pypeclub/OpenPype/pull/2855) +- Nuke: slate resolution to input video resolution [\#2853](https://github.com/pypeclub/OpenPype/pull/2853) +- WebPublisher: Fix username stored in DB [\#2852](https://github.com/pypeclub/OpenPype/pull/2852) +- WebPublisher: Fix wrong number of frames for video file 
[\#2851](https://github.com/pypeclub/OpenPype/pull/2851) +- Nuke: Fix family test in validate\_write\_legacy to work with stillImage [\#2847](https://github.com/pypeclub/OpenPype/pull/2847) +- Nuke: fix multiple baking profile farm publishing [\#2842](https://github.com/pypeclub/OpenPype/pull/2842) +- Blender: Fixed parameters for FBX export of the camera [\#2840](https://github.com/pypeclub/OpenPype/pull/2840) +- Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832) +- Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828) +- Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827) - Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825) +- Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824) +- General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821) - Settings UI: Fix "Apply from" action [\#2820](https://github.com/pypeclub/OpenPype/pull/2820) -- Settings UI: Search case sensitivity [\#2810](https://github.com/pypeclub/OpenPype/pull/2810) -- Flame Babypublisher optimalization [\#2806](https://github.com/pypeclub/OpenPype/pull/2806) -- resolve: fixing fusion module loading [\#2802](https://github.com/pypeclub/OpenPype/pull/2802) -- Flame: Fix version string in default settings [\#2783](https://github.com/pypeclub/OpenPype/pull/2783) -- After Effects: Fix typo in name `afftereffects` -\> `aftereffects` [\#2768](https://github.com/pypeclub/OpenPype/pull/2768) -- Avoid renaming udim indexes [\#2765](https://github.com/pypeclub/OpenPype/pull/2765) -- Maya: Fix `unique\_namespace` when in an namespace that is empty [\#2759](https://github.com/pypeclub/OpenPype/pull/2759) -- Loader UI: Fix right click in representation widget [\#2757](https://github.com/pypeclub/OpenPype/pull/2757) -- Aftereffects 2022 and Deadline [\#2748](https://github.com/pypeclub/OpenPype/pull/2748) -- Flame: bunch of bugs [\#2745](https://github.com/pypeclub/OpenPype/pull/2745) -- Maya: Save current scene on workfile publish [\#2744](https://github.com/pypeclub/OpenPype/pull/2744) -- Version Up: Preserve parts of filename after version number \(like subversion\) on version\_up [\#2741](https://github.com/pypeclub/OpenPype/pull/2741) -- Maya: Remove some unused code [\#2709](https://github.com/pypeclub/OpenPype/pull/2709) - -**Merged pull requests:** - -- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823) - Ftrack: Job killer with missing user [\#2819](https://github.com/pypeclub/OpenPype/pull/2819) -- Ftrack: Unset task ids from asset versions before tasks are removed [\#2800](https://github.com/pypeclub/OpenPype/pull/2800) -- Slack: fail gracefully if slack exception [\#2798](https://github.com/pypeclub/OpenPype/pull/2798) -- Ftrack: Moved module one hierarchy level higher [\#2792](https://github.com/pypeclub/OpenPype/pull/2792) -- SyncServer: Moved module one hierarchy level higher [\#2791](https://github.com/pypeclub/OpenPype/pull/2791) -- Royal render: Move module one hierarchy level higher [\#2790](https://github.com/pypeclub/OpenPype/pull/2790) -- Deadline: Move module one hierarchy level higher [\#2789](https://github.com/pypeclub/OpenPype/pull/2789) -- Houdini: Remove duplicate ValidateOutputNode plug-in [\#2780](https://github.com/pypeclub/OpenPype/pull/2780) -- 
Slack: Added regex for filtering on subset names [\#2775](https://github.com/pypeclub/OpenPype/pull/2775) -- Houdini: Fix open last workfile [\#2767](https://github.com/pypeclub/OpenPype/pull/2767) +- Nuke: Use AVALON\_APP to get value for "app" key [\#2818](https://github.com/pypeclub/OpenPype/pull/2818) +- StandalonePublisher: use dynamic groups in subset names [\#2816](https://github.com/pypeclub/OpenPype/pull/2816) + +**🔀 Refactored code** + +- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876) +- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854) +- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848) +- General: Basic event system [\#2846](https://github.com/pypeclub/OpenPype/pull/2846) +- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839) +- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829) +- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823) - General: Extract template formatting from anatomy [\#2766](https://github.com/pypeclub/OpenPype/pull/2766) -- Harmony: Rendering in Deadline didn't work in other machines than submitter [\#2754](https://github.com/pypeclub/OpenPype/pull/2754) -- Houdini: Move Houdini Save Current File to beginning of ExtractorOrder [\#2747](https://github.com/pypeclub/OpenPype/pull/2747) -- Maya: set Deadline job/batch name to original source workfile name instead of published workfile [\#2733](https://github.com/pypeclub/OpenPype/pull/2733) ## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07) diff --git a/openpype/__init__.py b/openpype/__init__.py index 11b563ebfe..99629a4257 100644 --- a/openpype/__init__.py +++ b/openpype/__init__.py @@ -10,7 +10,8 @@ from .lib import ( Anatomy, filter_pyblish_plugins, set_plugin_attributes_from_settings, - change_timer_to_current_context + change_timer_to_current_context, + register_event_callback, ) pyblish = avalon = _original_discover = None @@ -58,10 +59,15 @@ def patched_discover(superclass): """ # run original discover and get plugins plugins = _original_discover(superclass) + filtered_plugins = [ + plugin + for plugin in plugins + if issubclass(plugin, superclass) + ] - set_plugin_attributes_from_settings(plugins, superclass) + set_plugin_attributes_from_settings(filtered_plugins, superclass) - return plugins + return filtered_plugins @import_wrapper @@ -69,6 +75,10 @@ def install(): """Install Pype to Avalon.""" from pyblish.lib import MessageHandler from openpype.modules import load_modules + from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + ) from avalon import pipeline # Make sure modules are loaded @@ -84,7 +94,7 @@ def install(): log.info("Registering global plug-ins..") pyblish.register_plugin_path(PUBLISH_PATH) pyblish.register_discovery_filter(filter_pyblish_plugins) - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) project_name = os.environ.get("AVALON_PROJECT") @@ -112,8 +122,8 @@ def install(): continue pyblish.register_plugin_path(path) - avalon.register_plugin_path(avalon.Loader, path) - avalon.register_plugin_path(avalon.Creator, path) + register_loader_plugin_path(path) + avalon.register_plugin_path(LegacyCreator, path) avalon.register_plugin_path(avalon.InventoryAction, path) # apply monkey patched discover to original one @@ 
-122,20 +132,22 @@ def install(): avalon.discover = patched_discover pipeline.discover = patched_discover - avalon.on("taskChanged", _on_task_change) + register_event_callback("taskChanged", _on_task_change) -def _on_task_change(*args): +def _on_task_change(): change_timer_to_current_context() @import_wrapper def uninstall(): """Uninstall Pype from Avalon.""" + from openpype.pipeline import deregister_loader_plugin_path + log.info("Deregistering global plug-ins..") pyblish.deregister_plugin_path(PUBLISH_PATH) pyblish.deregister_discovery_filter(filter_pyblish_plugins) - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) log.info("Global plug-ins unregistred") # restore original discover diff --git a/openpype/api.py b/openpype/api.py index 51854492ab..b692b36065 100644 --- a/openpype/api.py +++ b/openpype/api.py @@ -45,9 +45,6 @@ from .lib.avalon_context import ( from . import resources from .plugin import ( - PypeCreatorMixin, - Creator, - Extractor, ValidatePipelineOrder, @@ -89,9 +86,6 @@ __all__ = [ # Resources "resources", - # Pype creator mixin - "PypeCreatorMixin", - "Creator", # plugin classes "Extractor", # ordering diff --git a/openpype/cli.py b/openpype/cli.py index 155e07dea3..cbeb7fef9b 100644 --- a/openpype/cli.py +++ b/openpype/cli.py @@ -101,7 +101,7 @@ def eventserver(debug, on linux and window service). """ if debug: - os.environ['OPENPYPE_DEBUG'] = "3" + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().launch_eventservercli( ftrack_url, @@ -128,7 +128,7 @@ def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None): Expect "pype.club" user created on Ftrack. """ if debug: - os.environ['OPENPYPE_DEBUG'] = "3" + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().launch_webpublisher_webservercli( upload_dir=upload_dir, @@ -176,7 +176,7 @@ def publish(debug, paths, targets, gui): More than one path is allowed. """ if debug: - os.environ['OPENPYPE_DEBUG'] = '3' + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands.publish(list(paths), targets, gui) @@ -195,7 +195,7 @@ def remotepublishfromapp(debug, project, path, host, user=None, targets=None): More than one path is allowed. """ if debug: - os.environ['OPENPYPE_DEBUG'] = '3' + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands.remotepublishfromapp( project, path, host, user, targets=targets ) @@ -215,7 +215,7 @@ def remotepublish(debug, project, path, user=None, targets=None): More than one path is allowed. """ if debug: - os.environ['OPENPYPE_DEBUG'] = '3' + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands.remotepublish(project, path, user, targets=targets) @@ -240,7 +240,7 @@ def texturecopy(debug, project, asset, path): Nothing is written to database. """ if debug: - os.environ['OPENPYPE_DEBUG'] = '3' + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().texture_copy(project, asset, path) @@ -409,7 +409,7 @@ def syncserver(debug, active_site): var OPENPYPE_LOCAL_ID set to 'active_site'. 
""" if debug: - os.environ['OPENPYPE_DEBUG'] = '3' + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().syncserver(active_site) diff --git a/openpype/hosts/aftereffects/api/launch_logic.py b/openpype/hosts/aftereffects/api/launch_logic.py index 97f14c9332..c549268978 100644 --- a/openpype/hosts/aftereffects/api/launch_logic.py +++ b/openpype/hosts/aftereffects/api/launch_logic.py @@ -15,7 +15,7 @@ from Qt import QtCore from openpype.tools.utils import host_tools from avalon import api -from avalon.tools.webserver.app import WebServerTool +from openpype.tools.adobe_webserver.app import WebServerTool from .ws_stub import AfterEffectsServerStub diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py index 94f1e3d105..681f1c51a7 100644 --- a/openpype/hosts/aftereffects/api/pipeline.py +++ b/openpype/hosts/aftereffects/api/pipeline.py @@ -9,7 +9,13 @@ from avalon import io, pipeline from openpype import lib from openpype.api import Logger +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) import openpype.hosts.aftereffects +from openpype.lib import register_event_callback from .launch_logic import get_stub @@ -65,21 +71,21 @@ def install(): pyblish.api.register_host("aftereffects") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) log.info(PUBLISH_PATH) pyblish.api.register_callback( "instanceToggled", on_pyblish_instance_toggled ) - avalon.api.on("application.launched", application_launch) + register_event_callback("application.launched", application_launch) def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH) + deregister_loader_plugin_path(LOAD_PATH) + avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) def on_pyblish_instance_toggled(instance, old_value, new_value): diff --git a/openpype/hosts/aftereffects/api/plugin.py b/openpype/hosts/aftereffects/api/plugin.py index fbe07663dd..29705cc5be 100644 --- a/openpype/hosts/aftereffects/api/plugin.py +++ b/openpype/hosts/aftereffects/api/plugin.py @@ -1,9 +1,8 @@ -import avalon.api +from openpype.pipeline import LoaderPlugin from .launch_logic import get_stub -class AfterEffectsLoader(avalon.api.Loader): +class AfterEffectsLoader(LoaderPlugin): @staticmethod def get_stub(): return get_stub() - diff --git a/openpype/hosts/aftereffects/api/ws_stub.py b/openpype/hosts/aftereffects/api/ws_stub.py index 5a0600e92e..b0893310c1 100644 --- a/openpype/hosts/aftereffects/api/ws_stub.py +++ b/openpype/hosts/aftereffects/api/ws_stub.py @@ -8,7 +8,7 @@ import logging import attr from wsrpc_aiohttp import WebSocketAsync -from avalon.tools.webserver.app import WebServerTool +from openpype.tools.adobe_webserver.app import WebServerTool @attr.s diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index 8dfc85cdc8..41efb4b0ba 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -1,13 +1,12 @@ -from avalon.api import CreatorError - -import openpype.api +from openpype.pipeline import create +from 
openpype.pipeline import CreatorError from openpype.hosts.aftereffects.api import ( get_stub, list_instances ) -class CreateRender(openpype.api.Creator): +class CreateRender(create.LegacyCreator): """Render folder for publish. Creates subsets in format 'familyTaskSubsetname', diff --git a/openpype/hosts/aftereffects/plugins/load/load_background.py b/openpype/hosts/aftereffects/plugins/load/load_background.py index 1a2d6fc432..be43cae44e 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_background.py +++ b/openpype/hosts/aftereffects/plugins/load/load_background.py @@ -1,11 +1,10 @@ import re -import avalon.api - from openpype.lib import ( get_background_layers, get_unique_layer_name ) +from openpype.pipeline import get_representation_path from openpype.hosts.aftereffects.api import ( AfterEffectsLoader, containerise @@ -78,7 +77,7 @@ class BackgroundLoader(AfterEffectsLoader): else: # switching version - keep same name comp_name = container["namespace"] - path = avalon.api.get_representation_path(representation) + path = get_representation_path(representation) layers = get_background_layers(path) comp = stub.reload_background(container["members"][1], diff --git a/openpype/hosts/aftereffects/plugins/load/load_file.py b/openpype/hosts/aftereffects/plugins/load/load_file.py index 9dbbf7aae1..9eb9e80a2c 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_file.py +++ b/openpype/hosts/aftereffects/plugins/load/load_file.py @@ -1,8 +1,8 @@ import re -import avalon.api from openpype import lib +from openpype.pipeline import get_representation_path from openpype.hosts.aftereffects.api import ( AfterEffectsLoader, containerise @@ -92,7 +92,7 @@ class FileLoader(AfterEffectsLoader): "{}_{}".format(context["asset"], context["subset"])) else: # switching version - keep same name layer_name = container["namespace"] - path = avalon.api.get_representation_path(representation) + path = get_representation_path(representation) # with aftereffects.maintained_selection(): # TODO stub.replace_item(layer.id, path, stub.LOADED_ICON + layer_name) stub.imprint( diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml b/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml new file mode 100644 index 0000000000..13f03a9b9a --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml @@ -0,0 +1,21 @@ + + + +Subset context + +## Invalid subset context + +Context of the given subset doesn't match your current scene. + +### How to repair? + +You can fix this with "repair" button on the right. + + +### __Detailed Info__ (optional) + +This might happen if you are reuse old workfile and open it in different context. +(Eg. you created subset "renderCompositingDefault" from asset "Robot' in "your_project_Robot_compositing.aep", now you opened this workfile in a context "Sloth" but existing subset for "Robot" asset stayed in the workfile.) + + + \ No newline at end of file diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml new file mode 100644 index 0000000000..36fa90456e --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml @@ -0,0 +1,35 @@ + + + +Scene setting + +## Invalid scene setting found + +One of the settings in a scene doesn't match to asset settings in database. + +{invalid_setting_str} + +### How to repair? 
+ +Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there. + + +### __Detailed Info__ (optional) + +This error is shown when for example resolution in the scene doesn't match to resolution set on the asset in the database. +Either value in the database or in the scene is wrong. + + + +Scene file doesn't exist + +## Scene file doesn't exist + +Collected scene {scene_url} doesn't exist. + +### How to repair? + +Re-save file, start publish from the beginning again. + + + \ No newline at end of file diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py index 71c1750457..37cecfbcc4 100644 --- a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py +++ b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py @@ -1,6 +1,7 @@ from avalon import api import pyblish.api import openpype.api +from openpype.pipeline import PublishXmlValidationError from openpype.hosts.aftereffects.api import get_stub @@ -53,9 +54,8 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin): current_asset = api.Session["AVALON_ASSET"] msg = ( f"Instance asset {instance_asset} is not the same " - f"as current context {current_asset}. PLEASE DO:\n" - f"Repair with 'A' action to use '{current_asset}'.\n" - f"If that's not correct value, close workfile and " - f"reopen via Workfiles!" + f"as current context {current_asset}." ) - assert instance_asset == current_asset, msg + + if instance_asset != current_asset: + raise PublishXmlValidationError(self, msg) diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py index 5ae391e230..273ccd295e 100644 --- a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py @@ -5,6 +5,7 @@ import re import pyblish.api +from openpype.pipeline import PublishXmlValidationError from openpype.hosts.aftereffects.api import get_asset_settings @@ -99,12 +100,14 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): self.log.info("current_settings:: {}".format(current_settings)) invalid_settings = [] + invalid_keys = set() for key, value in expected_settings.items(): if value != current_settings[key]: invalid_settings.append( "{} expected: {} found: {}".format(key, value, current_settings[key]) ) + invalid_keys.add(key) if ((expected_settings.get("handleStart") or expected_settings.get("handleEnd")) @@ -116,7 +119,27 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): msg = "Found invalid settings:\n{}".format( "\n".join(invalid_settings) ) - assert not invalid_settings, msg - assert os.path.exists(instance.data.get("source")), ( - "Scene file not found (saved under wrong name)" - ) + + if invalid_settings: + invalid_keys_str = ",".join(invalid_keys) + break_str = "
" + invalid_setting_str = "Found invalid settings:
{}".\ + format(break_str.join(invalid_settings)) + + formatting_data = { + "invalid_setting_str": invalid_setting_str, + "invalid_keys_str": invalid_keys_str + } + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + if not os.path.exists(instance.data.get("source")): + scene_url = instance.data.get("source") + msg = "Scene file {} not found (saved under wrong name)".format( + scene_url + ) + formatting_data = { + "scene_url": scene_url + } + raise PublishXmlValidationError(self, msg, key="file_not_found", + formatting_data=formatting_data) diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py index 6da0ba3dcb..07a7509dd7 100644 --- a/openpype/hosts/blender/api/pipeline.py +++ b/openpype/hosts/blender/api/pipeline.py @@ -14,7 +14,16 @@ import avalon.api from avalon import io, schema from avalon.pipeline import AVALON_CONTAINER_ID +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) from openpype.api import Logger +from openpype.lib import ( + register_event_callback, + emit_event +) import openpype.hosts.blender HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.blender.__file__)) @@ -45,13 +54,14 @@ def install(): pyblish.api.register_host("blender") pyblish.api.register_plugin_path(str(PUBLISH_PATH)) - avalon.api.register_plugin_path(avalon.api.Loader, str(LOAD_PATH)) - avalon.api.register_plugin_path(avalon.api.Creator, str(CREATE_PATH)) + register_loader_plugin_path(str(LOAD_PATH)) + avalon.api.register_plugin_path(LegacyCreator, str(CREATE_PATH)) lib.append_user_scripts() - avalon.api.on("new", on_new) - avalon.api.on("open", on_open) + register_event_callback("new", on_new) + register_event_callback("open", on_open) + _register_callbacks() _register_events() @@ -66,8 +76,8 @@ def uninstall(): pyblish.api.deregister_host("blender") pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) - avalon.api.deregister_plugin_path(avalon.api.Loader, str(LOAD_PATH)) - avalon.api.deregister_plugin_path(avalon.api.Creator, str(CREATE_PATH)) + deregister_loader_plugin_path(str(LOAD_PATH)) + avalon.api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH)) if not IS_HEADLESS: ops.unregister() @@ -113,22 +123,22 @@ def set_start_end_frames(): scene.render.resolution_y = resolution_y -def on_new(arg1, arg2): +def on_new(): set_start_end_frames() -def on_open(arg1, arg2): +def on_open(): set_start_end_frames() @bpy.app.handlers.persistent def _on_save_pre(*args): - avalon.api.emit("before_save", args) + emit_event("before.save") @bpy.app.handlers.persistent def _on_save_post(*args): - avalon.api.emit("save", args) + emit_event("save") @bpy.app.handlers.persistent @@ -136,9 +146,9 @@ def _on_load_post(*args): # Detect new file or opening an existing file if bpy.data.filepath: # Likely this was an open operation since it has a filepath - avalon.api.emit("open", args) + emit_event("open") else: - avalon.api.emit("new", args) + emit_event("new") ops.OpenFileCacher.post_load() @@ -169,7 +179,7 @@ def _register_callbacks(): log.info("Installed event handler _on_load_post...") -def _on_task_changed(*args): +def _on_task_changed(): """Callback for when the task in the context is changed.""" # TODO (jasper): Blender has no concept of projects or workspace. 
@@ -186,7 +196,7 @@ def _on_task_changed(*args): def _register_events(): """Install callbacks for specific events.""" - avalon.api.on("taskChanged", _on_task_changed) + register_event_callback("taskChanged", _on_task_changed) log.info("Installed event callback for 'taskChanged'...") diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py index 8c9ab9a27f..3207f543b7 100644 --- a/openpype/hosts/blender/api/plugin.py +++ b/openpype/hosts/blender/api/plugin.py @@ -5,8 +5,10 @@ from typing import Dict, List, Optional import bpy -import avalon.api -from openpype.api import PypeCreatorMixin +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) from .pipeline import AVALON_CONTAINERS from .ops import ( MainThreadItem, @@ -129,7 +131,7 @@ def deselect_all(): bpy.context.view_layer.objects.active = active -class Creator(PypeCreatorMixin, avalon.api.Creator): +class Creator(LegacyCreator): """Base class for Creator plug-ins.""" defaults = ['Main'] @@ -145,13 +147,13 @@ class Creator(PypeCreatorMixin, avalon.api.Creator): return collection -class Loader(avalon.api.Loader): +class Loader(LoaderPlugin): """Base class for Loader plug-ins.""" hosts = ["blender"] -class AssetLoader(avalon.api.Loader): +class AssetLoader(LoaderPlugin): """A basic AssetLoader for Blender This will implement the basic logic for linking/appending assets diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/openpype/hosts/blender/plugins/load/load_abc.py index 07800521c9..3daaeceffe 100644 --- a/openpype/hosts/blender/plugins/load/load_abc.py +++ b/openpype/hosts/blender/plugins/load/load_abc.py @@ -6,7 +6,7 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import get_representation_path from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, @@ -178,7 +178,7 @@ class CacheModelLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_action.py b/openpype/hosts/blender/plugins/load/load_action.py index a9d8522220..3c8fe988f0 100644 --- a/openpype/hosts/blender/plugins/load/load_action.py +++ b/openpype/hosts/blender/plugins/load/load_action.py @@ -5,9 +5,13 @@ from pathlib import Path from pprint import pformat from typing import Dict, List, Optional -from avalon import api, blender import bpy +from openpype.pipeline import get_representation_path import openpype.hosts.blender.api.plugin +from openpype.hosts.blender.api.pipeline import ( + containerise_existing, + AVALON_PROPERTY, +) logger = logging.getLogger("openpype").getChild("blender").getChild("load_action") @@ -49,7 +53,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): container = bpy.data.collections.new(lib_container) container.name = container_name - blender.pipeline.containerise_existing( + containerise_existing( container, name, namespace, @@ -57,8 +61,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): self.__class__.__name__, ) - container_metadata = container.get( - blender.pipeline.AVALON_PROPERTY) + container_metadata = container.get(AVALON_PROPERTY) container_metadata["libpath"] = libpath container_metadata["lib_container"] = lib_container @@ -90,16 +93,16 @@ class 
BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): anim_data.action.make_local() - if not obj.get(blender.pipeline.AVALON_PROPERTY): + if not obj.get(AVALON_PROPERTY): - obj[blender.pipeline.AVALON_PROPERTY] = dict() + obj[AVALON_PROPERTY] = dict() - avalon_info = obj[blender.pipeline.AVALON_PROPERTY] + avalon_info = obj[AVALON_PROPERTY] avalon_info.update({"container_name": container_name}) objects_list.append(obj) - animation_container.pop(blender.pipeline.AVALON_PROPERTY) + animation_container.pop(AVALON_PROPERTY) # Save the list of objects in the metadata container container_metadata["objects"] = objects_list @@ -128,7 +131,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): container["objectName"] ) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() logger.info( @@ -153,8 +156,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): f"Unsupported file: {libpath}" ) - collection_metadata = collection.get( - blender.pipeline.AVALON_PROPERTY) + collection_metadata = collection.get(AVALON_PROPERTY) collection_libpath = collection_metadata["libpath"] normalized_collection_libpath = ( @@ -225,16 +227,16 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): strip.action = anim_data.action strip.action_frame_end = anim_data.action.frame_range[1] - if not obj.get(blender.pipeline.AVALON_PROPERTY): + if not obj.get(AVALON_PROPERTY): - obj[blender.pipeline.AVALON_PROPERTY] = dict() + obj[AVALON_PROPERTY] = dict() - avalon_info = obj[blender.pipeline.AVALON_PROPERTY] + avalon_info = obj[AVALON_PROPERTY] avalon_info.update({"container_name": collection.name}) objects_list.append(obj) - anim_container.pop(blender.pipeline.AVALON_PROPERTY) + anim_container.pop(AVALON_PROPERTY) # Save the list of objects in the metadata container collection_metadata["objects"] = objects_list @@ -266,8 +268,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): "Nested collections are not supported." 
) - collection_metadata = collection.get( - blender.pipeline.AVALON_PROPERTY) + collection_metadata = collection.get(AVALON_PROPERTY) objects = collection_metadata["objects"] lib_container = collection_metadata["lib_container"] diff --git a/openpype/hosts/blender/plugins/load/load_audio.py b/openpype/hosts/blender/plugins/load/load_audio.py index e065150c15..b95c5db270 100644 --- a/openpype/hosts/blender/plugins/load/load_audio.py +++ b/openpype/hosts/blender/plugins/load/load_audio.py @@ -6,7 +6,7 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import get_representation_path from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, @@ -102,7 +102,7 @@ class AudioLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) self.log.info( "Container: %s\nRepresentation: %s", diff --git a/openpype/hosts/blender/plugins/load/load_camera_blend.py b/openpype/hosts/blender/plugins/load/load_camera_blend.py index 61955f124d..6ed2e8a575 100644 --- a/openpype/hosts/blender/plugins/load/load_camera_blend.py +++ b/openpype/hosts/blender/plugins/load/load_camera_blend.py @@ -7,7 +7,7 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import get_representation_path from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, @@ -155,7 +155,7 @@ class BlendCameraLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_camera_fbx.py b/openpype/hosts/blender/plugins/load/load_camera_fbx.py index 175ddacf9f..626ed44f08 100644 --- a/openpype/hosts/blender/plugins/load/load_camera_fbx.py +++ b/openpype/hosts/blender/plugins/load/load_camera_fbx.py @@ -6,7 +6,7 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import get_representation_path from openpype.hosts.blender.api import plugin, lib from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, @@ -143,7 +143,7 @@ class FbxCameraLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_fbx.py b/openpype/hosts/blender/plugins/load/load_fbx.py index c6e6af5592..2d249ef647 100644 --- a/openpype/hosts/blender/plugins/load/load_fbx.py +++ b/openpype/hosts/blender/plugins/load/load_fbx.py @@ -6,7 +6,7 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import get_representation_path from openpype.hosts.blender.api import plugin, lib from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, @@ -187,7 +187,7 @@ class FbxModelLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = 
Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_layout_blend.py b/openpype/hosts/blender/plugins/load/load_layout_blend.py index 8029c38b4a..d87df3c010 100644 --- a/openpype/hosts/blender/plugins/load/load_layout_blend.py +++ b/openpype/hosts/blender/plugins/load/load_layout_blend.py @@ -6,8 +6,11 @@ from typing import Dict, List, Optional import bpy -from avalon import api from openpype import lib +from openpype.pipeline import ( + legacy_create, + get_representation_path, +) from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, @@ -159,7 +162,7 @@ class BlendLayoutLoader(plugin.AssetLoader): raise ValueError("Creator plugin \"CreateAnimation\" was " "not found.") - api.create( + legacy_create( creator_plugin, name=local_obj.name.split(':')[-1] + "_animation", asset=asset, @@ -308,7 +311,7 @@ class BlendLayoutLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_layout_json.py b/openpype/hosts/blender/plugins/load/load_layout_json.py index 0a5bdeecaa..0693937fec 100644 --- a/openpype/hosts/blender/plugins/load/load_layout_json.py +++ b/openpype/hosts/blender/plugins/load/load_layout_json.py @@ -7,8 +7,13 @@ from typing import Dict, Optional import bpy -from avalon import api -from openpype import lib +from openpype.pipeline import ( + discover_loader_plugins, + remove_container, + load_container, + get_representation_path, + loaders_from_representation, +) from openpype.hosts.blender.api.pipeline import ( AVALON_INSTANCES, AVALON_CONTAINERS, @@ -34,7 +39,7 @@ class JsonLayoutLoader(plugin.AssetLoader): objects = list(asset_group.children) for obj in objects: - api.remove(obj.get(AVALON_PROPERTY)) + remove_container(obj.get(AVALON_PROPERTY)) def _remove_animation_instances(self, asset_group): instances = bpy.data.collections.get(AVALON_INSTANCES) @@ -67,13 +72,13 @@ class JsonLayoutLoader(plugin.AssetLoader): with open(libpath, "r") as fp: data = json.load(fp) - all_loaders = api.discover(api.Loader) + all_loaders = discover_loader_plugins() for element in data: reference = element.get('reference') family = element.get('family') - loaders = api.loaders_from_representation(all_loaders, reference) + loaders = loaders_from_representation(all_loaders, reference) loader = self._get_loader(loaders, family) if not loader: @@ -103,7 +108,7 @@ class JsonLayoutLoader(plugin.AssetLoader): # at this time it will not return anything. The assets will be # loaded in the next Blender cycle, so we use the options to # set the transform, parent and assign the action, if there is one. 
- api.load( + load_container( loader, reference, namespace=instance_name, @@ -118,7 +123,7 @@ class JsonLayoutLoader(plugin.AssetLoader): # raise ValueError("Creator plugin \"CreateCamera\" was " # "not found.") - # api.create( + # legacy_create( # creator_plugin, # name="camera", # # name=f"{unique_number}_{subset}_animation", @@ -189,7 +194,7 @@ class JsonLayoutLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_look.py b/openpype/hosts/blender/plugins/load/load_look.py index 066ec0101b..70d1b95f02 100644 --- a/openpype/hosts/blender/plugins/load/load_look.py +++ b/openpype/hosts/blender/plugins/load/load_look.py @@ -8,7 +8,7 @@ import os import json import bpy -from avalon import api +from openpype.pipeline import get_representation_path from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( containerise_existing, @@ -140,7 +140,7 @@ class BlendLookLoader(plugin.AssetLoader): def update(self, container: Dict, representation: Dict): collection = bpy.data.collections.get(container["objectName"]) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_model.py b/openpype/hosts/blender/plugins/load/load_model.py index 04ece0b338..18d01dcb29 100644 --- a/openpype/hosts/blender/plugins/load/load_model.py +++ b/openpype/hosts/blender/plugins/load/load_model.py @@ -6,7 +6,7 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import get_representation_path from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, @@ -195,7 +195,7 @@ class BlendModelLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_rig.py b/openpype/hosts/blender/plugins/load/load_rig.py index eb6d273a51..cec088076c 100644 --- a/openpype/hosts/blender/plugins/load/load_rig.py +++ b/openpype/hosts/blender/plugins/load/load_rig.py @@ -6,10 +6,15 @@ from typing import Dict, List, Optional import bpy -from avalon import api -from avalon.blender import lib as avalon_lib from openpype import lib -from openpype.hosts.blender.api import plugin +from openpype.pipeline import ( + legacy_create, + get_representation_path, +) +from openpype.hosts.blender.api import ( + plugin, + get_selection, +) from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, @@ -248,7 +253,7 @@ class BlendRigLoader(plugin.AssetLoader): animation_asset = options.get('animation_asset') - api.create( + legacy_create( creator_plugin, name=namespace + "_animation", # name=f"{unique_number}_{subset}_animation", @@ -262,7 +267,7 @@ class BlendRigLoader(plugin.AssetLoader): if anim_file: bpy.ops.import_scene.fbx(filepath=anim_file, anim_offset=0.0) - imported = avalon_lib.get_selection() + imported = get_selection() armature = [ o for o in asset_group.children if 
o.type == 'ARMATURE'][0] @@ -306,7 +311,7 @@ class BlendRigLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/publish/extract_camera.py b/openpype/hosts/blender/plugins/publish/extract_camera.py index 597dcecd21..b2c7611b58 100644 --- a/openpype/hosts/blender/plugins/publish/extract_camera.py +++ b/openpype/hosts/blender/plugins/publish/extract_camera.py @@ -50,6 +50,10 @@ class ExtractCamera(api.Extractor): filepath=filepath, use_active_collection=False, use_selection=True, + bake_anim_use_nla_strips=False, + bake_anim_use_all_actions=False, + add_leaf_bones=False, + armature_nodetype='ROOT', object_types={'CAMERA'}, bake_anim_simplify_factor=0.0 ) diff --git a/openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py b/openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py index fd958d11a3..ea109e9445 100644 --- a/openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py +++ b/openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py @@ -1,9 +1,9 @@ import os +import re import json import getpass -from avalon.vendor import requests -import re +import requests import pyblish.api diff --git a/openpype/hosts/flame/api/__init__.py b/openpype/hosts/flame/api/__init__.py index 56bbadd2fc..f210c27f87 100644 --- a/openpype/hosts/flame/api/__init__.py +++ b/openpype/hosts/flame/api/__init__.py @@ -68,7 +68,8 @@ from .workio import ( ) from .render_utils import ( export_clip, - get_preset_path_by_xml_name + get_preset_path_by_xml_name, + modify_preset_file ) __all__ = [ @@ -140,5 +141,6 @@ __all__ = [ # render utils "export_clip", - "get_preset_path_by_xml_name" + "get_preset_path_by_xml_name", + "modify_preset_file" ] diff --git a/openpype/hosts/flame/api/pipeline.py b/openpype/hosts/flame/api/pipeline.py index af071439ef..930c6abe29 100644 --- a/openpype/hosts/flame/api/pipeline.py +++ b/openpype/hosts/flame/api/pipeline.py @@ -7,6 +7,11 @@ from avalon import api as avalon from avalon.pipeline import AVALON_CONTAINER_ID from pyblish import api as pyblish from openpype.api import Logger +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) from .lib import ( set_segment_data_marker, set_publish_attribute, @@ -32,8 +37,8 @@ def install(): pyblish.register_host("flame") pyblish.register_plugin_path(PUBLISH_PATH) - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) - avalon.register_plugin_path(avalon.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + avalon.register_plugin_path(LegacyCreator, CREATE_PATH) avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) log.info("OpenPype Flame plug-ins registred ...") @@ -47,8 +52,8 @@ def uninstall(): log.info("Deregistering Flame plug-ins..") pyblish.deregister_plugin_path(PUBLISH_PATH) - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) - avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH) + deregister_loader_plugin_path(LOAD_PATH) + avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH) # register callback for switching publishable diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py index ec49db1601..4c9d3c5383 100644 
--- a/openpype/hosts/flame/api/plugin.py +++ b/openpype/hosts/flame/api/plugin.py @@ -2,13 +2,16 @@ import os import re import shutil import sys -from avalon.vendor import qargparse from xml.etree import ElementTree as ET import six +import qargparse from Qt import QtWidgets, QtCore import openpype.api as openpype +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) from openpype import style -import avalon.api as avalon from . import ( lib as flib, pipeline as fpipeline, @@ -299,7 +302,7 @@ class Spacer(QtWidgets.QWidget): self.setLayout(layout) -class Creator(openpype.Creator): +class Creator(LegacyCreator): """Creator class wrapper """ clip_color = constants.COLOR_MAP["purple"] @@ -659,7 +662,7 @@ class PublishableClip: # Publishing plugin functions # Loader plugin functions -class ClipLoader(avalon.Loader): +class ClipLoader(LoaderPlugin): """A basic clip loader for Flame This will implement the basic behavior for a loader to inherit from that diff --git a/openpype/hosts/flame/api/render_utils.py b/openpype/hosts/flame/api/render_utils.py index 1b086646cc..473fb2f985 100644 --- a/openpype/hosts/flame/api/render_utils.py +++ b/openpype/hosts/flame/api/render_utils.py @@ -1,4 +1,5 @@ import os +from xml.etree import ElementTree as ET def export_clip(export_path, clip, preset_path, **kwargs): @@ -123,3 +124,29 @@ def get_preset_path_by_xml_name(xml_preset_name): # if nothing found then return False return False + + +def modify_preset_file(xml_path, staging_dir, data): + """Modify xml preset with input data + + Args: + xml_path (str ): path for input xml preset + staging_dir (str): staging dir path + data (dict): data where key is xmlTag and value as string + + Returns: + str: _description_ + """ + # create temp path + dirname, basename = os.path.split(xml_path) + temp_path = os.path.join(staging_dir, basename) + + # change xml following data keys + with open(xml_path, "r") as datafile: + tree = ET.parse(datafile) + for key, value in data.items(): + for element in tree.findall(".//{}".format(key)): + element.text = str(value) + tree.write(temp_path) + + return temp_path diff --git a/openpype/hosts/flame/api/scripts/wiretap_com.py b/openpype/hosts/flame/api/scripts/wiretap_com.py index c864399608..ee906c2608 100644 --- a/openpype/hosts/flame/api/scripts/wiretap_com.py +++ b/openpype/hosts/flame/api/scripts/wiretap_com.py @@ -420,13 +420,20 @@ class WireTapCom(object): RuntimeError: Not able to set colorspace policy """ color_policy = color_policy or "Legacy" + + # check if the colour policy in custom dir + if not os.path.exists(color_policy): + color_policy = "/syncolor/policies/Autodesk/{}".format( + color_policy) + + # create arguments project_colorspace_cmd = [ os.path.join( self.wiretap_tools_dir, "wiretap_duplicate_node" ), "-s", - "/syncolor/policies/Autodesk/{}".format(color_policy), + color_policy, "-n", "/projects/{}/syncolor".format(project_name) ] diff --git a/openpype/hosts/flame/hooks/pre_flame_setup.py b/openpype/hosts/flame/hooks/pre_flame_setup.py index 0d63b0d926..ad2b0dc897 100644 --- a/openpype/hosts/flame/hooks/pre_flame_setup.py +++ b/openpype/hosts/flame/hooks/pre_flame_setup.py @@ -73,7 +73,7 @@ class FlamePrelaunch(PreLaunchHook): "FrameWidth": int(width), "FrameHeight": int(height), "AspectRatio": float((width / height) * _db_p_data["pixelAspect"]), - "FrameRate": "{} fps".format(fps), + "FrameRate": self._get_flame_fps(fps), "FrameDepth": str(imageio_flame["project"]["frameDepth"]), "FieldDominance": str(imageio_flame["project"]["fieldDominance"]) } 
@@ -101,6 +101,28 @@ class FlamePrelaunch(PreLaunchHook): self.launch_context.launch_args.extend(app_arguments) + def _get_flame_fps(self, fps_num): + fps_table = { + float(23.976): "23.976 fps", + int(25): "25 fps", + int(24): "24 fps", + float(29.97): "29.97 fps DF", + int(30): "30 fps", + int(50): "50 fps", + float(59.94): "59.94 fps DF", + int(60): "60 fps" + } + + match_key = min(fps_table.keys(), key=lambda x: abs(x - fps_num)) + + try: + return fps_table[match_key] + except KeyError as msg: + raise KeyError(( + "Missing FPS key in conversion table. " + "Following keys are available: {}".format(fps_table.keys()) + )) from msg + def _add_pythonpath(self): pythonpath = self.launch_context.env.get("PYTHONPATH") diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py index 8ba01d6937..8980f72cb8 100644 --- a/openpype/hosts/flame/plugins/load/load_clip.py +++ b/openpype/hosts/flame/plugins/load/load_clip.py @@ -172,7 +172,7 @@ class LoadClip(opfapi.ClipLoader): # version_name = version.get("name", None) # colorspace = version_data.get("colorspace", None) # object_name = "{}_{}".format(name, namespace) - # file = api.get_representation_path(representation).replace("\\", "/") + # file = get_representation_path(representation).replace("\\", "/") # clip = track_item.source() # # reconnect media to new path diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py index 6424bce3bc..70340ad7a2 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -1,3 +1,4 @@ +import re import pyblish import openpype import openpype.hosts.flame.api as opfapi @@ -6,6 +7,10 @@ from openpype.hosts.flame.otio import flame_export # # developer reload modules from pprint import pformat +# constatns +NUM_PATERN = re.compile(r"([0-9\.]+)") +TXT_PATERN = re.compile(r"([a-zA-Z]+)") + class CollectTimelineInstances(pyblish.api.ContextPlugin): """Collect all Timeline segment selection.""" @@ -16,6 +21,16 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): audio_track_items = [] + # TODO: add to settings + # settings + xml_preset_attrs_from_comments = { + "width": "number", + "height": "number", + "pixelRatio": "float", + "resizeType": "string", + "resizeFilter": "string" + } + def process(self, context): project = context.data["flameProject"] sequence = context.data["flameSequence"] @@ -26,6 +41,10 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): # process all sellected with opfapi.maintained_segment_selection(sequence) as segments: for segment in segments: + comment_attributes = self._get_comment_attributes(segment) + self.log.debug("_ comment_attributes: {}".format( + pformat(comment_attributes))) + clip_data = opfapi.get_segment_attributes(segment) clip_name = clip_data["segment_name"] self.log.debug("clip_name: {}".format(clip_name)) @@ -101,6 +120,9 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): # add resolution self._get_resolution_to_data(inst_data, context) + # add comment attributes if any + inst_data.update(comment_attributes) + # create instance instance = context.create_instance(**inst_data) @@ -126,6 +148,94 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): if marker_data.get("reviewTrack") is not None: instance.data["reviewAudio"] = True + def _get_comment_attributes(self, segment): + comment = 
segment.comment.get_value() + + # try to find attributes + attributes = { + "xml_overrides": { + "pixelRatio": 1.00} + } + # search for `:` + for split in self._split_comments(comment): + # make sure we ignore if not `:` in key + if ":" not in split: + continue + + self._get_xml_preset_attrs( + attributes, split) + + # add xml overides resolution to instance data + xml_overrides = attributes["xml_overrides"] + if xml_overrides.get("width"): + attributes.update({ + "resolutionWidth": xml_overrides["width"], + "resolutionHeight": xml_overrides["height"], + "pixelAspect": xml_overrides["pixelRatio"] + }) + + return attributes + + def _get_xml_preset_attrs(self, attributes, split): + + # split to key and value + key, value = split.split(":") + + for a_name, a_type in self.xml_preset_attrs_from_comments.items(): + # exclude all not related attributes + if a_name.lower() not in key.lower(): + continue + + # get pattern defined by type + pattern = TXT_PATERN + if a_type in ("number" , "float"): + pattern = NUM_PATERN + + res_goup = pattern.findall(value) + + # raise if nothing is found as it is not correctly defined + if not res_goup: + raise ValueError(( + "Value for `{}` attribute is not " + "set correctly: `{}`").format(a_name, split)) + + if "string" in a_type: + _value = res_goup[0] + if "float" in a_type: + _value = float(res_goup[0]) + if "number" in a_type: + _value = int(res_goup[0]) + + attributes["xml_overrides"][a_name] = _value + + # condition for resolution in key + if "resolution" in key.lower(): + res_goup = NUM_PATERN.findall(value) + # check if axpect was also defined + # 1920x1080x1.5 + aspect = res_goup[2] if len(res_goup) > 2 else 1 + + width = int(res_goup[0]) + height = int(res_goup[1]) + pixel_ratio = float(aspect) + attributes["xml_overrides"].update({ + "width": width, + "height": height, + "pixelRatio": pixel_ratio + }) + + def _split_comments(self, comment_string): + # first split comment by comma + split_comments = [] + if "," in comment_string: + split_comments.extend(comment_string.split(",")) + elif ";" in comment_string: + split_comments.extend(comment_string.split(";")) + else: + split_comments.append(comment_string) + + return split_comments + def _get_head_tail(self, clip_data, first_frame): # calculate head and tail with forward compatibility head = clip_data.get("segment_head") diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py index db85bede85..32f6b9508f 100644 --- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py +++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py @@ -1,6 +1,7 @@ import os from pprint import pformat from copy import deepcopy + import pyblish.api import openpype.api from openpype.hosts.flame import api as opfapi @@ -22,6 +23,8 @@ class ExtractSubsetResources(openpype.api.Extractor): "ext": "jpg", "xml_preset_file": "Jpeg (8-bit).xml", "xml_preset_dir": "", + "export_type": "File Sequence", + "ignore_comment_attrs": True, "colorspace_out": "Output - sRGB", "representation_add_range": False, "representation_tags": ["thumbnail"] @@ -30,6 +33,8 @@ class ExtractSubsetResources(openpype.api.Extractor): "ext": "mov", "xml_preset_file": "Apple iPad (1920x1080).xml", "xml_preset_dir": "", + "export_type": "Movie", + "ignore_comment_attrs": True, "colorspace_out": "Output - Rec.709", "representation_add_range": True, "representation_tags": [ @@ -54,21 +59,35 @@ class ExtractSubsetResources(openpype.api.Extractor): ): 
instance.data["representations"] = [] - frame_start = instance.data["frameStart"] - handle_start = instance.data["handleStart"] - frame_start_handle = frame_start - handle_start - source_first_frame = instance.data["sourceFirstFrame"] - source_start_handles = instance.data["sourceStartH"] - source_end_handles = instance.data["sourceEndH"] - source_duration_handles = ( - source_end_handles - source_start_handles) + 1 - + # flame objects + segment = instance.data["item"] + sequence_clip = instance.context.data["flameSequence"] clip_data = instance.data["flameSourceClip"] clip = clip_data["PyClip"] - in_mark = (source_start_handles - source_first_frame) + 1 - out_mark = in_mark + source_duration_handles + # segment's parent track name + s_track_name = segment.parent.name.get_value() + # get configured workfile frame start/end (handles excluded) + frame_start = instance.data["frameStart"] + # get media source first frame + source_first_frame = instance.data["sourceFirstFrame"] + + # get timeline in/out of segment + clip_in = instance.data["clipIn"] + clip_out = instance.data["clipOut"] + + # get handles value - take only the max from both + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleStart"] + handles = max(handle_start, handle_end) + + # get media source range with handles + source_end_handles = instance.data["sourceEndH"] + source_start_handles = instance.data["sourceStartH"] + source_end_handles = instance.data["sourceEndH"] + + # create staging dir path staging_dir = self.staging_dir(instance) # add default preset type for thumbnail and reviewable video @@ -77,15 +96,61 @@ class ExtractSubsetResources(openpype.api.Extractor): export_presets = deepcopy(self.default_presets) export_presets.update(self.export_presets_mapping) - # with maintained duplication loop all presets - with opfapi.maintained_object_duplication(clip) as duplclip: - # loop all preset names and - for unique_name, preset_config in export_presets.items(): + # loop all preset names and + for unique_name, preset_config in export_presets.items(): + modify_xml_data = {} + + # get all presets attributes + preset_file = preset_config["xml_preset_file"] + preset_dir = preset_config["xml_preset_dir"] + export_type = preset_config["export_type"] + repre_tags = preset_config["representation_tags"] + ignore_comment_attrs = preset_config["ignore_comment_attrs"] + color_out = preset_config["colorspace_out"] + + # get frame range with handles for representation range + frame_start_handle = frame_start - handle_start + source_duration_handles = ( + source_end_handles - source_start_handles) + 1 + + # define in/out marks + in_mark = (source_start_handles - source_first_frame) + 1 + out_mark = in_mark + source_duration_handles + + # by default export source clips + exporting_clip = clip + + if export_type == "Sequence Publish": + # change export clip to sequence + exporting_clip = sequence_clip + + # change in/out marks to timeline in/out + in_mark = clip_in + out_mark = clip_out + + # add xml tags modifications + modify_xml_data.update({ + "exportHandles": True, + "nbHandles": handles, + "startFrame": frame_start + }) + + if not ignore_comment_attrs: + # add any xml overrides collected form segment.comment + modify_xml_data.update(instance.data["xml_overrides"]) + + self.log.debug("__ modify_xml_data: {}".format(pformat( + modify_xml_data + ))) + + # with maintained duplication loop all presets + with opfapi.maintained_object_duplication( + exporting_clip) as duplclip: kwargs = {} - preset_file = 
preset_config["xml_preset_file"] - preset_dir = preset_config["xml_preset_dir"] - repre_tags = preset_config["representation_tags"] - color_out = preset_config["colorspace_out"] + + if export_type == "Sequence Publish": + # only keep visible layer where instance segment is child + self.hide_other_tracks(duplclip, s_track_name) # validate xml preset file is filled if preset_file == "": @@ -108,10 +173,13 @@ class ExtractSubsetResources(openpype.api.Extractor): ) # create preset path - preset_path = str(os.path.join( + preset_orig_xml_path = str(os.path.join( preset_dir, preset_file )) + preset_path = opfapi.modify_preset_file( + preset_orig_xml_path, staging_dir, modify_xml_data) + # define kwargs based on preset type if "thumbnail" in unique_name: kwargs["thumb_frame_number"] = in_mark + ( @@ -122,6 +190,7 @@ class ExtractSubsetResources(openpype.api.Extractor): "out_mark": out_mark }) + # get and make export dir paths export_dir_path = str(os.path.join( staging_dir, unique_name )) @@ -132,6 +201,7 @@ class ExtractSubsetResources(openpype.api.Extractor): export_dir_path, duplclip, preset_path, **kwargs) extension = preset_config["ext"] + # create representation data representation_data = { "name": unique_name, @@ -159,7 +229,12 @@ class ExtractSubsetResources(openpype.api.Extractor): # add files to represetation but add # imagesequence as list if ( - "movie_file" in preset_path + # first check if path in files is not mov extension + [ + f for f in files + if os.path.splitext(f)[-1] == ".mov" + ] + # then try if thumbnail is not in unique name or unique_name == "thumbnail" ): representation_data["files"] = files.pop() @@ -246,3 +321,19 @@ class ExtractSubsetResources(openpype.api.Extractor): ) return new_stage_dir, new_files_list + + def hide_other_tracks(self, sequence_clip, track_name): + """Helper method used only if sequence clip is used + + Args: + sequence_clip (flame.Clip): sequence clip + track_name (str): track name + """ + # create otio tracks and clips + for ver in sequence_clip.versions: + for track in ver.tracks: + if len(track.segments) == 0 and track.hidden: + continue + + if track.name.get_value() != track_name: + track.hidden = True diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py index 5d97f83032..2bb5ea8aae 100644 --- a/openpype/hosts/fusion/api/lib.py +++ b/openpype/hosts/fusion/api/lib.py @@ -5,8 +5,8 @@ import contextlib from Qt import QtGui -import avalon.api from avalon import io +from openpype.pipeline import switch_container from .pipeline import get_current_comp, comp_lock_and_undo_chunk self = sys.modules[__name__] @@ -142,7 +142,7 @@ def switch_item(container, assert representation, ("Could not find representation in the database " "with the name '%s'" % representation_name) - avalon.api.switch(container, representation) + switch_container(container, representation) return representation diff --git a/openpype/hosts/fusion/api/pipeline.py b/openpype/hosts/fusion/api/pipeline.py index 64dda0bc8a..92e54ad6f5 100644 --- a/openpype/hosts/fusion/api/pipeline.py +++ b/openpype/hosts/fusion/api/pipeline.py @@ -11,6 +11,11 @@ import avalon.api from avalon.pipeline import AVALON_CONTAINER_ID from openpype.api import Logger +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) import openpype.hosts.fusion log = Logger().get_logger(__name__) @@ -62,8 +67,8 @@ def install(): pyblish.api.register_plugin_path(PUBLISH_PATH) log.info("Registering Fusion plug-ins..") - 
avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) pyblish.api.register_callback( @@ -86,8 +91,8 @@ def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) log.info("Deregistering Fusion plug-ins..") - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH) + deregister_loader_plugin_path(LOAD_PATH) + avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) avalon.api.deregister_plugin_path( avalon.api.InventoryAction, INVENTORY_PATH ) diff --git a/openpype/hosts/fusion/plugins/create/create_exr_saver.py b/openpype/hosts/fusion/plugins/create/create_exr_saver.py index 04717f4746..ff8bdb21ef 100644 --- a/openpype/hosts/fusion/plugins/create/create_exr_saver.py +++ b/openpype/hosts/fusion/plugins/create/create_exr_saver.py @@ -1,13 +1,13 @@ import os -import openpype.api +from openpype.pipeline import create from openpype.hosts.fusion.api import ( get_current_comp, comp_lock_and_undo_chunk ) -class CreateOpenEXRSaver(openpype.api.Creator): +class CreateOpenEXRSaver(create.LegacyCreator): name = "openexrDefault" label = "Create OpenEXR Saver" diff --git a/openpype/hosts/fusion/plugins/load/actions.py b/openpype/hosts/fusion/plugins/load/actions.py index 6af99e4c56..bc59cec77f 100644 --- a/openpype/hosts/fusion/plugins/load/actions.py +++ b/openpype/hosts/fusion/plugins/load/actions.py @@ -2,10 +2,10 @@ """ -from avalon import api +from openpype.pipeline import load -class FusionSetFrameRangeLoader(api.Loader): +class FusionSetFrameRangeLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", @@ -39,7 +39,7 @@ class FusionSetFrameRangeLoader(api.Loader): lib.update_frame_range(start, end) -class FusionSetFrameRangeWithHandlesLoader(api.Loader): +class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py index ea118585bf..075820de35 100644 --- a/openpype/hosts/fusion/plugins/load/load_sequence.py +++ b/openpype/hosts/fusion/plugins/load/load_sequence.py @@ -1,8 +1,12 @@ import os import contextlib -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.fusion.api import ( imprint_container, get_current_comp, @@ -117,7 +121,7 @@ def loader_shift(loader, frame, relative=True): return int(shift) -class FusionLoadSequence(api.Loader): +class FusionLoadSequence(load.LoaderPlugin): """Load image sequence into Fusion""" families = ["imagesequence", "review", "render"] @@ -204,7 +208,7 @@ class FusionLoadSequence(api.Loader): assert tool.ID == "Loader", "Must be Loader" comp = tool.Comp() - root = os.path.dirname(api.get_representation_path(representation)) + root = os.path.dirname(get_representation_path(representation)) path = self._get_first_image(root) # Get start frame from version data diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py index 9dd8a351e4..ca7efb9136 100644 --- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py +++ 
b/openpype/hosts/fusion/scripts/fusion_switch_shot.py @@ -5,11 +5,12 @@ import logging # Pipeline imports import avalon.api -from avalon import io, pipeline +from avalon import io from openpype.lib import version_up from openpype.hosts.fusion import api from openpype.hosts.fusion.api import lib +from openpype.lib.avalon_context import get_workdir_from_session log = logging.getLogger("Update Slap Comp") @@ -44,16 +45,6 @@ def _format_version_folder(folder): return version_folder -def _get_work_folder(session): - """Convenience function to get the work folder path of the current asset""" - - # Get new filename, create path based on asset and work template - template_work = self._project["config"]["template"]["work"] - work_path = pipeline._format_work_template(template_work, session) - - return os.path.normpath(work_path) - - def _get_fusion_instance(): fusion = getattr(sys.modules["__main__"], "fusion", None) if fusion is None: @@ -72,7 +63,7 @@ def _format_filepath(session): asset = session["AVALON_ASSET"] # Save updated slap comp - work_path = _get_work_folder(session) + work_path = get_workdir_from_session(session) walk_to_dir = os.path.join(work_path, "scenes", "slapcomp") slapcomp_dir = os.path.abspath(walk_to_dir) @@ -112,7 +103,7 @@ def _update_savers(comp, session): None """ - new_work = _get_work_folder(session) + new_work = get_workdir_from_session(session) renders = os.path.join(new_work, "renders") version_folder = _format_version_folder(renders) renders_version = os.path.join(renders, version_folder) diff --git a/openpype/hosts/fusion/scripts/set_rendermode.py b/openpype/hosts/fusion/scripts/set_rendermode.py index 77a2d8e945..f0638e4fe3 100644 --- a/openpype/hosts/fusion/scripts/set_rendermode.py +++ b/openpype/hosts/fusion/scripts/set_rendermode.py @@ -1,5 +1,5 @@ from Qt import QtWidgets -from avalon.vendor import qtawesome +import qtawesome from openpype.hosts.fusion.api import get_current_comp diff --git a/openpype/hosts/fusion/utility_scripts/switch_ui.py b/openpype/hosts/fusion/utility_scripts/switch_ui.py index fe324d9a41..d9eeae25ea 100644 --- a/openpype/hosts/fusion/utility_scripts/switch_ui.py +++ b/openpype/hosts/fusion/utility_scripts/switch_ui.py @@ -5,11 +5,12 @@ import logging from Qt import QtWidgets, QtCore import avalon.api -from avalon import io, pipeline -from avalon.vendor import qtawesome as qta +from avalon import io +import qtawesome as qta from openpype import style from openpype.hosts.fusion import api +from openpype.lib.avalon_context import get_workdir_from_session log = logging.getLogger("Fusion Switch Shot") @@ -123,7 +124,7 @@ class App(QtWidgets.QWidget): def _on_open_from_dir(self): - start_dir = self._get_context_directory() + start_dir = get_workdir_from_session() comp_file, _ = QtWidgets.QFileDialog.getOpenFileName( self, "Choose comp", start_dir) @@ -157,17 +158,6 @@ class App(QtWidgets.QWidget): import colorbleed.scripts.fusion_switch_shot as switch_shot switch_shot.switch(asset_name=asset, filepath=file_name, new=True) - def _get_context_directory(self): - - project = io.find_one({"type": "project", - "name": avalon.api.Session["AVALON_PROJECT"]}, - projection={"config": True}) - - template = project["config"]["template"]["work"] - dir = pipeline._format_work_template(template, avalon.api.Session) - - return dir - def collect_slap_comps(self, directory): items = glob.glob("{}/*.comp".format(directory)) return items diff --git a/openpype/hosts/harmony/api/README.md b/openpype/hosts/harmony/api/README.md index a8d182736a..e8d354e1e6 100644 
--- a/openpype/hosts/harmony/api/README.md +++ b/openpype/hosts/harmony/api/README.md @@ -575,7 +575,7 @@ replace_files = """function %s_replace_files(args) """ % (signature, signature) -class ImageSequenceLoader(api.Loader): +class ImageSequenceLoader(load.LoaderPlugin): """Load images Stores the imported asset in a container named after the asset. """ diff --git a/openpype/hosts/harmony/api/TB_sceneOpened.js b/openpype/hosts/harmony/api/TB_sceneOpened.js index 5a3fe9ce82..6a403fa65e 100644 --- a/openpype/hosts/harmony/api/TB_sceneOpened.js +++ b/openpype/hosts/harmony/api/TB_sceneOpened.js @@ -272,8 +272,8 @@ function Client() { app.avalonClient.send( { - 'module': 'avalon.api', - 'method': 'emit', + 'module': 'openpype.lib', + 'method': 'emit_event', 'args': ['application.launched'] }, false); }; diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index 17d2870876..f967da15ca 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -9,6 +9,12 @@ import avalon.api from avalon.pipeline import AVALON_CONTAINER_ID from openpype import lib +from openpype.lib import register_event_callback +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) import openpype.hosts.harmony import openpype.hosts.harmony.api as harmony @@ -129,7 +135,7 @@ def check_inventory(): harmony.send({"function": "PypeHarmony.message", "args": msg}) -def application_launch(): +def application_launch(event): """Event that is executed after Harmony is launched.""" # FIXME: This is breaking server <-> client communication. # It is now moved so it it manually called. @@ -178,8 +184,8 @@ def install(): pyblish.api.register_host("harmony") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) log.info(PUBLISH_PATH) # Register callbacks. @@ -187,13 +193,13 @@ def install(): "instanceToggled", on_pyblish_instance_toggled ) - avalon.api.on("application.launched", application_launch) + register_event_callback("application.launched", application_launch) def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH) + deregister_loader_plugin_path(LOAD_PATH) + avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) def on_pyblish_instance_toggled(instance, old_value, new_value): diff --git a/openpype/hosts/harmony/api/plugin.py b/openpype/hosts/harmony/api/plugin.py index d6d61a547a..c55d200d30 100644 --- a/openpype/hosts/harmony/api/plugin.py +++ b/openpype/hosts/harmony/api/plugin.py @@ -1,9 +1,8 @@ -import avalon.api -from openpype.api import PypeCreatorMixin +from openpype.pipeline import LegacyCreator import openpype.hosts.harmony.api as harmony -class Creator(PypeCreatorMixin, avalon.api.Creator): +class Creator(LegacyCreator): """Creator plugin to create instances in Harmony. 
By default a Composite node is created to support any number of nodes in diff --git a/openpype/hosts/harmony/plugins/load/load_audio.py b/openpype/hosts/harmony/plugins/load/load_audio.py index 57ea8ae312..e18a6de097 100644 --- a/openpype/hosts/harmony/plugins/load/load_audio.py +++ b/openpype/hosts/harmony/plugins/load/load_audio.py @@ -1,4 +1,7 @@ -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony sig = harmony.signature() @@ -29,7 +32,7 @@ function %s(args) """ % (sig, sig) -class ImportAudioLoader(api.Loader): +class ImportAudioLoader(load.LoaderPlugin): """Import audio.""" families = ["shot", "audio"] @@ -37,7 +40,7 @@ class ImportAudioLoader(api.Loader): label = "Import Audio" def load(self, context, name=None, namespace=None, data=None): - wav_file = api.get_representation_path(context["representation"]) + wav_file = get_representation_path(context["representation"]) harmony.send( {"function": func, "args": [context["subset"]["name"], wav_file]} ) diff --git a/openpype/hosts/harmony/plugins/load/load_background.py b/openpype/hosts/harmony/plugins/load/load_background.py index 686d6b5b7b..9c01fe3cd8 100644 --- a/openpype/hosts/harmony/plugins/load/load_background.py +++ b/openpype/hosts/harmony/plugins/load/load_background.py @@ -1,7 +1,10 @@ import os import json -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony import openpype.lib @@ -226,7 +229,7 @@ replace_files """ -class BackgroundLoader(api.Loader): +class BackgroundLoader(load.LoaderPlugin): """Load images Stores the imported asset in a container named after the asset. """ @@ -278,7 +281,7 @@ class BackgroundLoader(api.Loader): def update(self, container, representation): - path = api.get_representation_path(representation) + path = get_representation_path(representation) with open(path) as json_file: data = json.load(json_file) @@ -297,7 +300,7 @@ class BackgroundLoader(api.Loader): bg_folder = os.path.dirname(path) - path = api.get_representation_path(representation) + path = get_representation_path(representation) print(container) diff --git a/openpype/hosts/harmony/plugins/load/load_imagesequence.py b/openpype/hosts/harmony/plugins/load/load_imagesequence.py index 310f9bdb61..18695438d5 100644 --- a/openpype/hosts/harmony/plugins/load/load_imagesequence.py +++ b/openpype/hosts/harmony/plugins/load/load_imagesequence.py @@ -6,12 +6,15 @@ from pathlib import Path import clique -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony import openpype.lib -class ImageSequenceLoader(api.Loader): +class ImageSequenceLoader(load.LoaderPlugin): """Load image sequences. Stores the imported asset in a container named after the asset. 
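# The Harmony loaders above all follow the same migration: subclass
# openpype.pipeline.load.LoaderPlugin instead of avalon.api.Loader and resolve
# files through openpype.pipeline.get_representation_path instead of
# api.get_representation_path. A minimal sketch of the resulting plugin shape;
# the class name, families, representations and method bodies below are
# illustrative assumptions only, not part of this changeset.
from openpype.pipeline import (
    load,
    get_representation_path,
)


class ExampleLoader(load.LoaderPlugin):
    """Illustrative loader using the new openpype.pipeline imports."""

    families = ["example"]
    representations = ["*"]

    def load(self, context, name=None, namespace=None, data=None):
        # resolve the representation document to an actual path on disk
        path = get_representation_path(context["representation"])
        self.log.info("Loading %s", path)

    def update(self, container, representation):
        # the same helper is used when switching to another version
        path = get_representation_path(representation)
        self.log.info("Updating to %s", path)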
@@ -79,7 +82,7 @@ class ImageSequenceLoader(api.Loader): self_name = self.__class__.__name__ node = container.get("nodes").pop() - path = api.get_representation_path(representation) + path = get_representation_path(representation) collections, remainder = clique.assemble( os.listdir(os.path.dirname(path)) ) diff --git a/openpype/hosts/harmony/plugins/load/load_palette.py b/openpype/hosts/harmony/plugins/load/load_palette.py index 2e0f70d135..1da3e61e1b 100644 --- a/openpype/hosts/harmony/plugins/load/load_palette.py +++ b/openpype/hosts/harmony/plugins/load/load_palette.py @@ -1,11 +1,14 @@ import os import shutil -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony -class ImportPaletteLoader(api.Loader): +class ImportPaletteLoader(load.LoaderPlugin): """Import palettes.""" families = ["palette", "harmony.palette"] @@ -31,7 +34,7 @@ class ImportPaletteLoader(api.Loader): scene_path = harmony.send( {"function": "scene.currentProjectPath"} )["result"] - src = api.get_representation_path(representation) + src = get_representation_path(representation) dst = os.path.join( scene_path, "palette-library", diff --git a/openpype/hosts/harmony/plugins/load/load_template.py b/openpype/hosts/harmony/plugins/load/load_template.py index 112e613ae6..c6dc9d913b 100644 --- a/openpype/hosts/harmony/plugins/load/load_template.py +++ b/openpype/hosts/harmony/plugins/load/load_template.py @@ -6,12 +6,15 @@ import os import shutil import uuid -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony import openpype.lib -class TemplateLoader(api.Loader): +class TemplateLoader(load.LoaderPlugin): """Load Harmony template as container. .. todo:: @@ -38,7 +41,7 @@ class TemplateLoader(api.Loader): # Load template. self_name = self.__class__.__name__ temp_dir = tempfile.mkdtemp() - zip_file = api.get_representation_path(context["representation"]) + zip_file = get_representation_path(context["representation"]) template_path = os.path.join(temp_dir, "temp.tpl") with zipfile.ZipFile(zip_file, "r") as zip_ref: zip_ref.extractall(template_path) diff --git a/openpype/hosts/harmony/plugins/load/load_template_workfile.py b/openpype/hosts/harmony/plugins/load/load_template_workfile.py index c21b8194b1..2b84a43b35 100644 --- a/openpype/hosts/harmony/plugins/load/load_template_workfile.py +++ b/openpype/hosts/harmony/plugins/load/load_template_workfile.py @@ -3,11 +3,14 @@ import zipfile import os import shutil -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony -class ImportTemplateLoader(api.Loader): +class ImportTemplateLoader(load.LoaderPlugin): """Import templates.""" families = ["harmony.template", "workfile"] @@ -17,7 +20,7 @@ class ImportTemplateLoader(api.Loader): def load(self, context, name=None, namespace=None, data=None): # Import template. 
temp_dir = tempfile.mkdtemp() - zip_file = api.get_representation_path(context["representation"]) + zip_file = get_representation_path(context["representation"]) template_path = os.path.join(temp_dir, "temp.tpl") with zipfile.ZipFile(zip_file, "r") as zip_ref: zip_ref.extractall(template_path) diff --git a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py index 85237094e4..35b123f97d 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/openpype/hosts/harmony/plugins/publish/collect_farm_render.py @@ -5,6 +5,7 @@ from pathlib import Path import attr from avalon import api +from openpype.lib import get_formatted_current_time import openpype.lib.abstract_collect_render import openpype.hosts.harmony.api as harmony from openpype.lib.abstract_collect_render import RenderInstance @@ -138,7 +139,7 @@ class CollectFarmRender(openpype.lib.abstract_collect_render. render_instance = HarmonyRenderInstance( version=version, - time=api.time(), + time=get_formatted_current_time(), source=context.data["currentFile"], label=node.split("/")[1], subset=subset_name, diff --git a/openpype/hosts/harmony/plugins/publish/help/validate_audio.xml b/openpype/hosts/harmony/plugins/publish/help/validate_audio.xml new file mode 100644 index 0000000000..e9a183c675 --- /dev/null +++ b/openpype/hosts/harmony/plugins/publish/help/validate_audio.xml @@ -0,0 +1,15 @@ + + + +Missing audio file + +## Cannot locate linked audio file + +Audio file at {audio_url} cannot be found. + +### How to repair? + +Copy audio file to the highlighted location or remove audio link in the workfile. + + + \ No newline at end of file diff --git a/openpype/hosts/harmony/plugins/publish/help/validate_instances.xml b/openpype/hosts/harmony/plugins/publish/help/validate_instances.xml new file mode 100644 index 0000000000..3b040e8ea8 --- /dev/null +++ b/openpype/hosts/harmony/plugins/publish/help/validate_instances.xml @@ -0,0 +1,25 @@ + + + +Subset context + +## Invalid subset context + +Asset name found '{found}' in subsets, expected '{expected}'. + +### How to repair? + +You can fix this with `Repair` button on the right. This will use '{expected}' asset name and overwrite '{found}' asset name in scene metadata. + +After that restart `Publish` with a `Reload button`. + +If this is unwanted, close workfile and open again, that way different asset value would be used for context information. + + +### __Detailed Info__ (optional) + +This might happen if you are reuse old workfile and open it in different context. +(Eg. you created subset "renderCompositingDefault" from asset "Robot' in "your_project_Robot_compositing.aep", now you opened this workfile in a context "Sloth" but existing subset for "Robot" asset stayed in the workfile.) + + + \ No newline at end of file diff --git a/openpype/hosts/harmony/plugins/publish/help/validate_scene_settings.xml b/openpype/hosts/harmony/plugins/publish/help/validate_scene_settings.xml new file mode 100644 index 0000000000..36fa90456e --- /dev/null +++ b/openpype/hosts/harmony/plugins/publish/help/validate_scene_settings.xml @@ -0,0 +1,35 @@ + + + +Scene setting + +## Invalid scene setting found + +One of the settings in a scene doesn't match to asset settings in database. + +{invalid_setting_str} + +### How to repair? + +Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there. 
+ + +### __Detailed Info__ (optional) + +This error is shown when, for example, the resolution in the scene doesn't match the resolution set on the asset in the database. +Either the value in the database or the one in the scene is wrong. + + + +Scene file doesn't exist + +## Scene file doesn't exist + +Collected scene {scene_url} doesn't exist. + +### How to repair? + +Re-save the file and start the publish from the beginning again. + + + \ No newline at end of file diff --git a/openpype/hosts/harmony/plugins/publish/validate_audio.py index d183dadb91..cb6b2307cd 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_audio.py +++ b/openpype/hosts/harmony/plugins/publish/validate_audio.py @@ -4,6 +4,8 @@ import pyblish.api import openpype.hosts.harmony.api as harmony +from openpype.pipeline import PublishXmlValidationError + class ValidateAudio(pyblish.api.InstancePlugin): """Ensures that there is an audio file in the scene. @@ -42,4 +44,9 @@ class ValidateAudio(pyblish.api.InstancePlugin): msg = "You are missing audio file:\n{}".format(audio_path) - assert os.path.isfile(audio_path), msg + formatting_data = { + "audio_url": audio_path + } + if not os.path.isfile(audio_path): + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/hosts/harmony/plugins/publish/validate_instances.py index 1ba65573fc..373ef94cc3 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_instances.py +++ b/openpype/hosts/harmony/plugins/publish/validate_instances.py @@ -2,6 +2,7 @@ import os import pyblish.api import openpype.api +from openpype.pipeline import PublishXmlValidationError import openpype.hosts.harmony.api as harmony @@ -45,4 +46,11 @@ class ValidateInstance(pyblish.api.InstancePlugin): "Instance asset is not the same as current asset:" f"\nInstance: {instance_asset}\nCurrent: {current_asset}" ) - assert instance_asset == current_asset, msg + + formatting_data = { + "found": instance_asset, + "expected": current_asset + } + if instance_asset != current_asset: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py index efd9bbb212..4c3a6c4465 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py @@ -7,7 +7,7 @@ import re import pyblish.api import openpype.hosts.harmony.api as harmony -import openpype.hosts.harmony +from openpype.pipeline import PublishXmlValidationError class ValidateSceneSettingsRepair(pyblish.api.Action): @@ -19,12 +19,12 @@ class ValidateSceneSettingsRepair(pyblish.api.Action): def process(self, context, plugin): """Repair action entry point.""" - expected = openpype.hosts.harmony.api.get_asset_settings() + expected = harmony.get_asset_settings() asset_settings = _update_frames(dict.copy(expected)) asset_settings["frameStart"] = 1 asset_settings["frameEnd"] = asset_settings["frameEnd"] + \ asset_settings["handleEnd"] - openpype.hosts.harmony.api.set_scene_settings(asset_settings) + harmony.set_scene_settings(asset_settings) if not os.path.exists(context.data["scenePath"]): self.log.info("correcting scene name") scene_dir = os.path.dirname(context.data["currentFile"]) @@ -55,7 +55,7 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): def process(self, instance): 
"""Plugin entry point.""" - expected_settings = openpype.hosts.harmony.api.get_asset_settings() + expected_settings = harmony.get_asset_settings() self.log.info("scene settings from DB:".format(expected_settings)) expected_settings = _update_frames(dict.copy(expected_settings)) @@ -102,13 +102,13 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): self.log.debug("current scene settings {}".format(current_settings)) invalid_settings = [] + invalid_keys = set() for key, value in expected_settings.items(): if value != current_settings[key]: - invalid_settings.append({ - "name": key, - "expected": value, - "current": current_settings[key] - }) + invalid_settings.append( + "{} expected: {} found: {}".format(key, value, + current_settings[key])) + invalid_keys.add(key) if ((expected_settings["handleStart"] or expected_settings["handleEnd"]) @@ -120,10 +120,30 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): msg = "Found invalid settings:\n{}".format( json.dumps(invalid_settings, sort_keys=True, indent=4) ) - assert not invalid_settings, msg - assert os.path.exists(instance.context.data.get("scenePath")), ( - "Scene file not found (saved under wrong name)" - ) + + if invalid_settings: + invalid_keys_str = ",".join(invalid_keys) + break_str = "
" + invalid_setting_str = "Found invalid settings:
{}".\ + format(break_str.join(invalid_settings)) + + formatting_data = { + "invalid_setting_str": invalid_setting_str, + "invalid_keys_str": invalid_keys_str + } + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + scene_url = instance.context.data.get("scenePath") + if not os.path.exists(scene_url): + msg = "Scene file {} not found (saved under wrong name)".format( + scene_url + ) + formatting_data = { + "scene_url": scene_url + } + raise PublishXmlValidationError(self, msg, key="file_not_found", + formatting_data=formatting_data) def _update_frames(expected_settings): diff --git a/openpype/hosts/hiero/api/events.py b/openpype/hosts/hiero/api/events.py index 7563503593..9439199933 100644 --- a/openpype/hosts/hiero/api/events.py +++ b/openpype/hosts/hiero/api/events.py @@ -1,12 +1,12 @@ import os import hiero.core.events -import avalon.api as avalon from openpype.api import Logger from .lib import ( sync_avalon_data_to_workfile, launch_workfiles_app, selection_changed_timeline, - before_project_save + before_project_save, + register_event_callback ) from .tags import add_tags_to_workfile from .menu import update_menu_task_label @@ -126,5 +126,5 @@ def register_events(): """ # if task changed then change notext of hiero - avalon.on("taskChanged", update_menu_task_label) + register_event_callback("taskChanged", update_menu_task_label) log.info("Installed event callback for 'taskChanged'..") diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py index 306bef87ca..de20b86f30 100644 --- a/openpype/hosts/hiero/api/menu.py +++ b/openpype/hosts/hiero/api/menu.py @@ -14,7 +14,7 @@ self = sys.modules[__name__] self._change_context_menu = None -def update_menu_task_label(*args): +def update_menu_task_label(): """Update the task label in Avalon menu to current session""" object_name = self._change_context_menu diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index cbcaf23994..eff126c0b6 100644 --- a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -9,6 +9,11 @@ from avalon import api as avalon from avalon import schema from pyblish import api as pyblish from openpype.api import Logger +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) from openpype.tools.utils import host_tools from . 
import lib, menu, events @@ -44,8 +49,8 @@ def install(): log.info("Registering Hiero plug-ins..") pyblish.register_host("hiero") pyblish.register_plugin_path(PUBLISH_PATH) - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) - avalon.register_plugin_path(avalon.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + avalon.register_plugin_path(LegacyCreator, CREATE_PATH) avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) # register callback for switching publishable @@ -66,8 +71,8 @@ def uninstall(): log.info("Deregistering Hiero plug-ins..") pyblish.deregister_host("hiero") pyblish.deregister_plugin_path(PUBLISH_PATH) - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) - avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH) + deregister_loader_plugin_path(LOAD_PATH) + avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) # register callback for switching publishable pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index 3506af2d6a..54e66bf99a 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -1,13 +1,16 @@ -import re import os -import hiero -from Qt import QtWidgets, QtCore -from avalon.vendor import qargparse -import avalon.api as avalon -import openpype.api as openpype -from . import lib +import re from copy import deepcopy +import hiero + +from Qt import QtWidgets, QtCore +import qargparse + +import openpype.api as openpype +from openpype.pipeline import LoaderPlugin, LegacyCreator +from . import lib + log = openpype.Logger().get_logger(__name__) @@ -303,7 +306,7 @@ def get_reference_node_parents(ref): return parents -class SequenceLoader(avalon.Loader): +class SequenceLoader(LoaderPlugin): """A basic SequenceLoader for Resolve This will implement the basic behavior for a loader to inherit from that @@ -589,7 +592,7 @@ class ClipLoader: return track_item -class Creator(openpype.Creator): +class Creator(LegacyCreator): """Creator class wrapper """ clip_color = "Purple" diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py index b905dd4431..d3908695a2 100644 --- a/openpype/hosts/hiero/plugins/load/load_clip.py +++ b/openpype/hosts/hiero/plugins/load/load_clip.py @@ -1,4 +1,5 @@ -from avalon import io, api +from avalon import io +from openpype.pipeline import get_representation_path import openpype.hosts.hiero.api as phiero # from openpype.hosts.hiero.api import plugin, lib # reload(lib) @@ -112,7 +113,7 @@ class LoadClip(phiero.SequenceLoader): version_name = version.get("name", None) colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") clip = track_item.source() # reconnect media to new path diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py index 1c08e72d65..7d4e58efb7 100644 --- a/openpype/hosts/houdini/api/pipeline.py +++ b/openpype/hosts/houdini/api/pipeline.py @@ -11,11 +11,17 @@ import avalon.api from avalon.pipeline import AVALON_CONTAINER_ID from avalon.lib import find_submodule +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, +) import openpype.hosts.houdini from openpype.hosts.houdini.api import lib from openpype.lib import ( - any_outdated + register_event_callback, + 
emit_event, + any_outdated, ) from .lib import get_asset_fps @@ -47,15 +53,15 @@ def install(): pyblish.api.register_host("hpython") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) log.info("Installing callbacks ... ") - # avalon.on("init", on_init) - avalon.api.before("save", before_save) - avalon.api.on("save", on_save) - avalon.api.on("open", on_open) - avalon.api.on("new", on_new) + # register_event_callback("init", on_init) + register_event_callback("before.save", before_save) + register_event_callback("save", on_save) + register_event_callback("open", on_open) + register_event_callback("new", on_new) pyblish.api.register_callback( "instanceToggled", on_pyblish_instance_toggled @@ -101,13 +107,13 @@ def _register_callbacks(): def on_file_event_callback(event): if event == hou.hipFileEventType.AfterLoad: - avalon.api.emit("open", [event]) + emit_event("open") elif event == hou.hipFileEventType.AfterSave: - avalon.api.emit("save", [event]) + emit_event("save") elif event == hou.hipFileEventType.BeforeSave: - avalon.api.emit("before_save", [event]) + emit_event("before.save") elif event == hou.hipFileEventType.AfterClear: - avalon.api.emit("new", [event]) + emit_event("new") def get_main_window(): @@ -229,11 +235,11 @@ def ls(): yield data -def before_save(*args): +def before_save(): return lib.validate_fps() -def on_save(*args): +def on_save(): log.info("Running callback on save..") @@ -242,7 +248,7 @@ def on_save(*args): lib.set_id(node, new_id, overwrite=False) -def on_open(*args): +def on_open(): if not hou.isUIAvailable(): log.debug("Batch mode detected, ignoring `on_open` callbacks..") @@ -279,7 +285,7 @@ def on_open(*args): dialog.show() -def on_new(_): +def on_new(): """Set project resolution and fps when create a new file""" if hou.hipFile.isLoadingHipFile(): diff --git a/openpype/hosts/houdini/api/plugin.py b/openpype/hosts/houdini/api/plugin.py index 4967d01d43..2bbb65aa05 100644 --- a/openpype/hosts/houdini/api/plugin.py +++ b/openpype/hosts/houdini/api/plugin.py @@ -2,11 +2,12 @@ """Houdini specific Avalon/Pyblish plugin definitions.""" import sys import six -import avalon.api -from avalon.api import CreatorError import hou -from openpype.api import PypeCreatorMixin +from openpype.pipeline import ( + CreatorError, + LegacyCreator +) from .lib import imprint @@ -14,7 +15,7 @@ class OpenPypeCreatorError(CreatorError): pass -class Creator(PypeCreatorMixin, avalon.api.Creator): +class Creator(LegacyCreator): """Creator plugin to create instances in Houdini To support the wide range of node types for render output (Alembic, VDB, diff --git a/openpype/hosts/houdini/plugins/load/actions.py b/openpype/hosts/houdini/plugins/load/actions.py index acdb998c16..63d74c39a5 100644 --- a/openpype/hosts/houdini/plugins/load/actions.py +++ b/openpype/hosts/houdini/plugins/load/actions.py @@ -2,10 +2,10 @@ """ -from avalon import api +from openpype.pipeline import load -class SetFrameRangeLoader(api.Loader): +class SetFrameRangeLoader(load.LoaderPlugin): """Set Houdini frame range""" families = [ @@ -43,7 +43,7 @@ class SetFrameRangeLoader(api.Loader): hou.playbar.setPlaybackRange(start, end) -class SetFrameRangeWithHandlesLoader(api.Loader): +class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): """Set Maya frame range including pre- and post-handles""" 
families = [ diff --git a/openpype/hosts/houdini/plugins/load/load_alembic.py b/openpype/hosts/houdini/plugins/load/load_alembic.py index eaab81f396..0214229d5a 100644 --- a/openpype/hosts/houdini/plugins/load/load_alembic.py +++ b/openpype/hosts/houdini/plugins/load/load_alembic.py @@ -1,10 +1,12 @@ import os -from avalon import api - +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.houdini.api import pipeline -class AbcLoader(api.Loader): +class AbcLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["model", "animation", "pointcache", "gpuCache"] @@ -90,7 +92,7 @@ class AbcLoader(api.Loader): return # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") alembic_node.setParms({"fileName": file_path}) diff --git a/openpype/hosts/houdini/plugins/load/load_ass.py b/openpype/hosts/houdini/plugins/load/load_ass.py index 8c272044ec..0144bbaefd 100644 --- a/openpype/hosts/houdini/plugins/load/load_ass.py +++ b/openpype/hosts/houdini/plugins/load/load_ass.py @@ -1,11 +1,15 @@ import os -from avalon import api -from avalon.houdini import pipeline import clique +from openpype.pipeline import ( + load, + get_representation_path, +) + +from openpype.hosts.houdini.api import pipeline -class AssLoader(api.Loader): +class AssLoader(load.LoaderPlugin): """Load .ass with Arnold Procedural""" families = ["ass"] @@ -88,7 +92,7 @@ class AssLoader(api.Loader): def update(self, container, representation): # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") procedural = container["node"] diff --git a/openpype/hosts/houdini/plugins/load/load_camera.py b/openpype/hosts/houdini/plugins/load/load_camera.py index 8916d3b9b7..ef57d115da 100644 --- a/openpype/hosts/houdini/plugins/load/load_camera.py +++ b/openpype/hosts/houdini/plugins/load/load_camera.py @@ -1,4 +1,7 @@ -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.houdini.api import pipeline @@ -74,7 +77,7 @@ def transfer_non_default_values(src, dest, ignore=None): dest_parm.setFromParm(parm) -class CameraLoader(api.Loader): +class CameraLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["camera"] @@ -129,7 +132,7 @@ class CameraLoader(api.Loader): node = container["node"] # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") # Update attributes diff --git a/openpype/hosts/houdini/plugins/load/load_hda.py b/openpype/hosts/houdini/plugins/load/load_hda.py index f5f2fb7481..2438570c6e 100644 --- a/openpype/hosts/houdini/plugins/load/load_hda.py +++ b/openpype/hosts/houdini/plugins/load/load_hda.py @@ -1,10 +1,13 @@ # -*- coding: utf-8 -*- -from avalon import api - +import os +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.houdini.api import pipeline -class HdaLoader(api.Loader): +class HdaLoader(load.LoaderPlugin): """Load Houdini Digital Asset file.""" families = ["hda"] @@ -15,7 +18,6 @@ class HdaLoader(api.Loader): color = "orange" def load(self, context, name=None, namespace=None, data=None): - import os import hou # Format file name, Houdini only 
wants forward slashes @@ -49,7 +51,7 @@ class HdaLoader(api.Loader): import hou hda_node = container["node"] - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") hou.hda.installFile(file_path) defs = hda_node.type().allInstalledDefinitions() diff --git a/openpype/hosts/houdini/plugins/load/load_image.py b/openpype/hosts/houdini/plugins/load/load_image.py index 39f583677b..bd9ea3eee3 100644 --- a/openpype/hosts/houdini/plugins/load/load_image.py +++ b/openpype/hosts/houdini/plugins/load/load_image.py @@ -1,6 +1,9 @@ import os -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.houdini.api import lib, pipeline import hou @@ -37,7 +40,7 @@ def get_image_avalon_container(): return image_container -class ImageLoader(api.Loader): +class ImageLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["colorbleed.imagesequence"] @@ -87,7 +90,7 @@ class ImageLoader(api.Loader): node = container["node"] # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") file_path = self._get_file_sequence(file_path) diff --git a/openpype/hosts/houdini/plugins/load/load_usd_layer.py b/openpype/hosts/houdini/plugins/load/load_usd_layer.py index 0d4378b480..d803e6abfe 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_layer.py +++ b/openpype/hosts/houdini/plugins/load/load_usd_layer.py @@ -1,8 +1,11 @@ -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.houdini.api import lib, pipeline -class USDSublayerLoader(api.Loader): +class USDSublayerLoader(load.LoaderPlugin): """Sublayer USD file in Solaris""" families = [ @@ -57,7 +60,7 @@ class USDSublayerLoader(api.Loader): node = container["node"] # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") # Update attributes diff --git a/openpype/hosts/houdini/plugins/load/load_usd_reference.py b/openpype/hosts/houdini/plugins/load/load_usd_reference.py index 0edd8d9af6..fdb443f4cf 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_reference.py +++ b/openpype/hosts/houdini/plugins/load/load_usd_reference.py @@ -1,8 +1,11 @@ -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.houdini.api import lib, pipeline -class USDReferenceLoader(api.Loader): +class USDReferenceLoader(load.LoaderPlugin): """Reference USD file in Solaris""" families = [ @@ -57,7 +60,7 @@ class USDReferenceLoader(api.Loader): node = container["node"] # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") # Update attributes diff --git a/openpype/hosts/houdini/plugins/load/load_vdb.py b/openpype/hosts/houdini/plugins/load/load_vdb.py index 40aa7a1d18..06bb9e45e4 100644 --- a/openpype/hosts/houdini/plugins/load/load_vdb.py +++ b/openpype/hosts/houdini/plugins/load/load_vdb.py @@ -1,11 +1,14 @@ import os import re -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.houdini.api import pipeline -class VdbLoader(api.Loader): +class 
VdbLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["vdbcache"] @@ -96,7 +99,7 @@ class VdbLoader(api.Loader): return # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = self.format_path(file_path) file_node.setParms({"fileName": file_path}) diff --git a/openpype/hosts/houdini/plugins/load/show_usdview.py b/openpype/hosts/houdini/plugins/load/show_usdview.py index f23974094e..8066615181 100644 --- a/openpype/hosts/houdini/plugins/load/show_usdview.py +++ b/openpype/hosts/houdini/plugins/load/show_usdview.py @@ -1,7 +1,7 @@ -from avalon import api +from openpype.pipeline import load -class ShowInUsdview(api.Loader): +class ShowInUsdview(load.LoaderPlugin): """Open USD file in usdview""" families = ["colorbleed.usd"] diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py index 645bd05d4b..3e842ae766 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py +++ b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py @@ -7,6 +7,7 @@ from collections import deque import pyblish.api import openpype.api +from openpype.pipeline import get_representation_path import openpype.hosts.houdini.api.usd as hou_usdlib from openpype.hosts.houdini.api.lib import render_rop @@ -308,7 +309,7 @@ class ExtractUSDLayered(openpype.api.Extractor): self.log.debug("No existing representation..") return False - old_file = api.get_representation_path(representation) + old_file = get_representation_path(representation) if not os.path.exists(old_file): return False diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 41c67a6209..376c033d46 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -17,10 +17,16 @@ import bson from maya import cmds, mel import maya.api.OpenMaya as om -from avalon import api, io, pipeline +from avalon import api, io from openpype import lib from openpype.api import get_anatomy_settings +from openpype.pipeline import ( + discover_loader_plugins, + loaders_from_representation, + get_representation_path, + load_container, +) from .commands import reset_frame_range @@ -1580,21 +1586,21 @@ def assign_look_by_version(nodes, version_id): log.info("Using look for the first time ..") # Load file - loaders = api.loaders_from_representation(api.discover(api.Loader), - representation_id) + _loaders = discover_loader_plugins() + loaders = loaders_from_representation(_loaders, representation_id) Loader = next((i for i in loaders if i.__name__ == "LookLoader"), None) if Loader is None: raise RuntimeError("Could not find LookLoader, this is a bug") # Reference the look file with maintained_selection(): - container_node = pipeline.load(Loader, look_representation) + container_node = load_container(Loader, look_representation) # Get container members shader_nodes = get_container_members(container_node) # Load relationships - shader_relation = api.get_representation_path(json_representation) + shader_relation = get_representation_path(json_representation) with open(shader_relation, "r") as f: relationships = json.load(f) @@ -1931,18 +1937,26 @@ def remove_other_uv_sets(mesh): cmds.removeMultiInstance(attr, b=True) -def get_id_from_history(node): +def get_id_from_sibling(node, history_only=True): """Return first node id in the history chain that matches this node. 
The nodes in history must be of the exact same node type and must be parented under the same parent. + Optionally, if no matching node is found from the history, all the + siblings of the node that are of the same type are checked. + Additionally to having the same parent, the sibling must be marked as + 'intermediate object'. + Args: - node (str): node to retrieve the + node (str): node to retrieve the history from + history_only (bool): if True and if nothing found in history, + look for an 'intermediate object' in all the node's siblings + of same type Returns: - str or None: The id from the node in history or None when no id found - on any valid nodes in the history. + str or None: The id from the sibling node or None when no id found + on any valid nodes in the history or siblings. """ @@ -1971,6 +1985,45 @@ def get_id_from_history(node): if _id: return _id + if not history_only: + # Get siblings of same type + similar_nodes = cmds.listRelatives(parent, + type=node_type, + fullPath=True) + similar_nodes = cmds.ls(similar_nodes, exactType=node_type, long=True) + + # Exclude itself + similar_nodes = [x for x in similar_nodes if x != node] + + # Get all unique ids from siblings in order since + # we consistently take the first one found + sibling_ids = OrderedDict() + for similar_node in similar_nodes: + # Check if "intermediate object" + if not cmds.getAttr(similar_node + ".intermediateObject"): + continue + + _id = get_id(similar_node) + if not _id: + continue + + if _id in sibling_ids: + sibling_ids[_id].append(similar_node) + else: + sibling_ids[_id] = [similar_node] + + if sibling_ids: + first_id, found_nodes = next(iter(sibling_ids.items())) + + # Log a warning if we've found multiple unique ids + if len(sibling_ids) > 1: + log.warning(("Found more than 1 intermediate shape with" + " unique id for '{}'. Using id of first" + " found: '{}'".format(node, found_nodes[0]))) + + return first_id + + # Project settings def set_scene_fps(fps, update=True): diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index 1b3bb9feb3..5cdc3ff4fd 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -2,7 +2,6 @@ import os import sys import errno import logging -import contextlib from maya import utils, cmds, OpenMaya import maya.api.OpenMaya as om @@ -15,8 +14,17 @@ from avalon.pipeline import AVALON_CONTAINER_ID import openpype.hosts.maya from openpype.tools.utils import host_tools -from openpype.lib import any_outdated +from openpype.lib import ( + any_outdated, + register_event_callback, + emit_event +) from openpype.lib.path_tools import HostDirmap +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) from openpype.hosts.maya.lib import copy_workspace_mel from . import menu, lib @@ -49,13 +57,13 @@ def install(): pyblish.api.register_host("mayapy") pyblish.api.register_host("maya") - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) log.info(PUBLISH_PATH) log.info("Installing callbacks ... ") - avalon.api.on("init", on_init) + register_event_callback("init", on_init) # Callbacks below are not required for headless mode, the `init` however # is important to load referenced Alembics correctly at rendertime. 
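The `get_id_from_sibling` helper added above keeps the old history-only lookup as its default and only falls back to intermediate-object siblings when `history_only=False`, which is how the rig validators later in this diff call it. A minimal usage sketch, not part of the diff, assuming a running Maya session with OpenPype importable; the shape name is a placeholder:

```python
# Minimal sketch of the new sibling fallback; not part of the diff.
# Assumes a Maya session and a placeholder shape name.
from openpype.hosts.maya.api import lib

shape = "pSphereShape1"  # placeholder

# Default: behaves like the old get_id_from_history (history only).
history_id = lib.get_id_from_sibling(shape)

# With history_only=False the lookup additionally checks intermediate-object
# siblings of the same type under the same parent.
sibling_id = lib.get_id_from_sibling(shape, history_only=False)

if sibling_id and sibling_id != lib.get_id(shape):
    # Same repair step the validators use: copy the found id onto the node.
    lib.set_id(shape, sibling_id, overwrite=True)
```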
@@ -69,12 +77,12 @@ def install(): menu.install() - avalon.api.on("save", on_save) - avalon.api.on("open", on_open) - avalon.api.on("new", on_new) - avalon.api.before("save", on_before_save) - avalon.api.on("taskChanged", on_task_changed) - avalon.api.on("before.workfile.save", before_workfile_save) + register_event_callback("save", on_save) + register_event_callback("open", on_open) + register_event_callback("new", on_new) + register_event_callback("before.save", on_before_save) + register_event_callback("taskChanged", on_task_changed) + register_event_callback("workfile.save.before", before_workfile_save) def _set_project(): @@ -137,7 +145,7 @@ def _register_callbacks(): def _on_maya_initialized(*args): - avalon.api.emit("init", args) + emit_event("init") if cmds.about(batch=True): log.warning("Running batch mode ...") @@ -148,15 +156,15 @@ def _on_maya_initialized(*args): def _on_scene_new(*args): - avalon.api.emit("new", args) + emit_event("new") def _on_scene_save(*args): - avalon.api.emit("save", args) + emit_event("save") def _on_scene_open(*args): - avalon.api.emit("open", args) + emit_event("open") def _before_scene_save(return_code, client_data): @@ -166,7 +174,10 @@ def _before_scene_save(return_code, client_data): # in order to block the operation. OpenMaya.MScriptUtil.setBool(return_code, True) - avalon.api.emit("before_save", [return_code, client_data]) + emit_event( + "before.save", + {"return_code": return_code} + ) def uninstall(): @@ -175,8 +186,8 @@ def uninstall(): pyblish.api.deregister_host("mayapy") pyblish.api.deregister_host("maya") - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH) + deregister_loader_plugin_path(LOAD_PATH) + avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) avalon.api.deregister_plugin_path( avalon.api.InventoryAction, INVENTORY_PATH ) @@ -343,7 +354,7 @@ def containerise(name, return container -def on_init(_): +def on_init(): log.info("Running callback on init..") def safe_deferred(fn): @@ -384,12 +395,12 @@ def on_init(_): safe_deferred(override_toolbox_ui) -def on_before_save(return_code, _): +def on_before_save(): """Run validation for scene's FPS prior to saving""" return lib.validate_fps() -def on_save(_): +def on_save(): """Automatically add IDs to new nodes Any transform of a mesh, without an existing ID, is given one @@ -407,7 +418,7 @@ def on_save(_): lib.set_id(node, new_id, overwrite=False) -def on_open(_): +def on_open(): """On scene open let's assume the containers have changed.""" from Qt import QtWidgets @@ -455,7 +466,7 @@ def on_open(_): dialog.show() -def on_new(_): +def on_new(): """Set project resolution and fps when create a new file""" log.info("Running callback on new..") with lib.suspended_refresh(): @@ -471,7 +482,7 @@ def on_new(_): lib.set_context_settings() -def on_task_changed(*args): +def on_task_changed(): """Wrapped function of app initialize and maya's on task changed""" # Run menu.update_menu_task_label() @@ -509,7 +520,7 @@ def on_task_changed(*args): def before_workfile_save(event): - workdir_path = event.workdir_path + workdir_path = event["workdir_path"] if workdir_path: copy_workspace_mel(workdir_path) diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index bdb8fcf13a..84379bc145 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -2,9 +2,14 @@ import os from maya import cmds -from avalon import api -from avalon.vendor import qargparse -from 
openpype.api import PypeCreatorMixin +import qargparse + +from avalon.pipeline import AVALON_CONTAINER_ID +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, + get_representation_path, +) from .pipeline import containerise from . import lib @@ -77,7 +82,7 @@ def get_reference_node_parents(ref): return parents -class Creator(PypeCreatorMixin, api.Creator): +class Creator(LegacyCreator): defaults = ['Main'] def process(self): @@ -93,7 +98,7 @@ class Creator(PypeCreatorMixin, api.Creator): return instance -class Loader(api.Loader): +class Loader(LoaderPlugin): hosts = ["maya"] @@ -168,16 +173,18 @@ class ReferenceLoader(Loader): return ref_node = get_reference_node(nodes, self.log) - loaded_containers.append(containerise( + container = containerise( name=name, namespace=namespace, nodes=[ref_node], context=context, loader=self.__class__.__name__ - )) - + ) + loaded_containers.append(container) + self._organize_containers(nodes, container) c += 1 namespace = None + return loaded_containers def process_reference(self, context, name, namespace, data): @@ -190,7 +197,7 @@ class ReferenceLoader(Loader): node = container["objectName"] - path = api.get_representation_path(representation) + path = get_representation_path(representation) # Get reference node from container members members = get_container_members(node) @@ -243,6 +250,8 @@ class ReferenceLoader(Loader): self.log.warning("Ignoring file read error:\n%s", exc) + self._organize_containers(content, container["objectName"]) + # Reapply alembic settings. if representation["name"] == "abc" and alembic_data: alembic_nodes = cmds.ls( @@ -280,7 +289,6 @@ class ReferenceLoader(Loader): to remove from scene. """ - from maya import cmds node = container["objectName"] @@ -310,3 +318,14 @@ class ReferenceLoader(Loader): deleteNamespaceContent=True) except RuntimeError: pass + + @staticmethod + def _organize_containers(nodes, container): + # type: (list, str) -> None + """Put containers in loaded data to correct hierarchy.""" + for node in nodes: + id_attr = "{}.id".format(node) + if not cmds.attributeQuery("id", node=node, exists=True): + continue + if cmds.getAttr(id_attr) == AVALON_CONTAINER_ID: + cmds.sets(node, forceElement=container) diff --git a/openpype/hosts/maya/api/setdress.py b/openpype/hosts/maya/api/setdress.py index 1a7c3933a1..96a9700b88 100644 --- a/openpype/hosts/maya/api/setdress.py +++ b/openpype/hosts/maya/api/setdress.py @@ -8,7 +8,15 @@ import copy import six from maya import cmds -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + discover_loader_plugins, + loaders_from_representation, + load_container, + update_container, + remove_container, + get_representation_path, +) from openpype.hosts.maya.api.lib import ( matrix_equals, unique_namespace @@ -120,12 +128,13 @@ def load_package(filepath, name, namespace=None): root = "{}:{}".format(namespace, name) containers = [] - all_loaders = api.discover(api.Loader) + all_loaders = discover_loader_plugins() for representation_id, instances in data.items(): # Find the compatible loaders - loaders = api.loaders_from_representation(all_loaders, - representation_id) + loaders = loaders_from_representation( + all_loaders, representation_id + ) for instance in instances: container = _add(instance=instance, @@ -180,9 +189,11 @@ def _add(instance, representation_id, loaders, namespace, root="|"): instance['loader'], instance) raise RuntimeError("Loader is missing.") - container = api.load(Loader, - representation_id, - 
namespace=instance['namespace']) + container = load_container( + Loader, + representation_id, + namespace=instance['namespace'] + ) # Get the root from the loaded container loaded_root = get_container_transforms({"objectName": container}, @@ -320,13 +331,13 @@ def update_package(set_container, representation): "type": "representation" }) - current_file = api.get_representation_path(current_representation) + current_file = get_representation_path(current_representation) assert current_file.endswith(".json") with open(current_file, "r") as fp: current_data = json.load(fp) # Load the new package data - new_file = api.get_representation_path(representation) + new_file = get_representation_path(representation) assert new_file.endswith(".json") with open(new_file, "r") as fp: new_data = json.load(fp) @@ -460,12 +471,12 @@ def update_scene(set_container, containers, current_data, new_data, new_file): # considered as new element and added afterwards. processed_containers.pop() processed_namespaces.remove(container_ns) - api.remove(container) + remove_container(container) continue # Check whether the conversion can be done by the Loader. # They *must* use the same asset, subset and Loader for - # `api.update` to make sense. + # `update_container` to make sense. old = io.find_one({ "_id": io.ObjectId(representation_current) }) @@ -479,20 +490,21 @@ def update_scene(set_container, containers, current_data, new_data, new_file): continue new_version = new["context"]["version"] - api.update(container, version=new_version) + update_container(container, version=new_version) else: # Remove this container because it's not in the new data log.warning("Removing content: %s", container_ns) - api.remove(container) + remove_container(container) # Add new assets - all_loaders = api.discover(api.Loader) + all_loaders = discover_loader_plugins() for representation_id, instances in new_data.items(): # Find the compatible loaders - loaders = api.loaders_from_representation(all_loaders, - representation_id) + loaders = loaders_from_representation( + all_loaders, representation_id + ) for instance in instances: # Already processed in update functionality @@ -517,7 +529,7 @@ def update_scene(set_container, containers, current_data, new_data, new_file): def compare_representations(old, new): """Check if the old representation given can be updated - Due to limitations of the `api.update` function we cannot allow + Due to limitations of the `update_container` function we cannot allow differences in the following data: * Representation name (extension) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 743ec26778..9002ae3876 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -19,9 +19,9 @@ from openpype.api import ( get_project_settings, get_asset) from openpype.modules import ModulesManager +from openpype.pipeline import CreatorError from avalon.api import Session -from avalon.api import CreatorError class CreateRender(plugin.Creator): diff --git a/openpype/hosts/maya/plugins/create/create_vrayscene.py b/openpype/hosts/maya/plugins/create/create_vrayscene.py index f2096d902e..fa9c59e016 100644 --- a/openpype/hosts/maya/plugins/create/create_vrayscene.py +++ b/openpype/hosts/maya/plugins/create/create_vrayscene.py @@ -19,10 +19,10 @@ from openpype.api import ( get_project_settings ) +from openpype.pipeline import CreatorError from openpype.modules import ModulesManager from 
avalon.api import Session -from avalon.api import CreatorError class CreateVRayScene(plugin.Creator): diff --git a/openpype/hosts/maya/plugins/inventory/import_modelrender.py b/openpype/hosts/maya/plugins/inventory/import_modelrender.py index 119edccb7a..c5d3d0c8f4 100644 --- a/openpype/hosts/maya/plugins/inventory/import_modelrender.py +++ b/openpype/hosts/maya/plugins/inventory/import_modelrender.py @@ -1,5 +1,9 @@ import json -from avalon import api, io, pipeline +from avalon import api, io +from openpype.pipeline import ( + get_representation_context, + get_representation_path_from_context, +) from openpype.hosts.maya.api.lib import ( maintained_selection, apply_shaders @@ -73,11 +77,11 @@ class ImportModelRender(api.InventoryAction): "name": self.look_data_type, }) - context = pipeline.get_representation_context(look_repr["_id"]) - maya_file = pipeline.get_representation_path_from_context(context) + context = get_representation_context(look_repr["_id"]) + maya_file = get_representation_path_from_context(context) - context = pipeline.get_representation_context(json_repr["_id"]) - json_file = pipeline.get_representation_path_from_context(context) + context = get_representation_context(json_repr["_id"]) + json_file = get_representation_path_from_context(context) # Import the look file with maintained_selection(): diff --git a/openpype/hosts/maya/plugins/load/actions.py b/openpype/hosts/maya/plugins/load/actions.py index 1cb63c8a7a..483ad32402 100644 --- a/openpype/hosts/maya/plugins/load/actions.py +++ b/openpype/hosts/maya/plugins/load/actions.py @@ -2,14 +2,14 @@ """ -from avalon import api +from openpype.pipeline import load from openpype.hosts.maya.api.lib import ( maintained_selection, unique_namespace ) -class SetFrameRangeLoader(api.Loader): +class SetFrameRangeLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", @@ -43,7 +43,7 @@ class SetFrameRangeLoader(api.Loader): animationEndTime=end) -class SetFrameRangeWithHandlesLoader(api.Loader): +class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", @@ -81,7 +81,7 @@ class SetFrameRangeWithHandlesLoader(api.Loader): animationEndTime=end) -class ImportMayaLoader(api.Loader): +class ImportMayaLoader(load.LoaderPlugin): """Import action for Maya (unmanaged) Warning: diff --git a/openpype/hosts/maya/plugins/load/load_ass.py b/openpype/hosts/maya/plugins/load/load_ass.py index 18b34d2233..18de4df3b1 100644 --- a/openpype/hosts/maya/plugins/load/load_ass.py +++ b/openpype/hosts/maya/plugins/load/load_ass.py @@ -1,8 +1,11 @@ import os import clique -from avalon import api from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api.plugin import get_reference_node from openpype.hosts.maya.api.lib import ( @@ -106,7 +109,7 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): node = container["objectName"] representation["context"].pop("frame", None) - path = api.get_representation_path(representation) + path = get_representation_path(representation) print(path) # path = self.fname print(self.fname) @@ -164,7 +167,7 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): type="string") -class AssStandinLoader(api.Loader): +class AssStandinLoader(load.LoaderPlugin): """Load .ASS file as standin""" families = ["ass"] @@ -240,7 +243,7 @@ 
class AssStandinLoader(api.Loader): import pymel.core as pm - path = api.get_representation_path(representation) + path = get_representation_path(representation) files_in_path = os.listdir(os.path.split(path)[0]) sequence = 0 diff --git a/openpype/hosts/maya/plugins/load/load_assembly.py b/openpype/hosts/maya/plugins/load/load_assembly.py index 0151da7253..902f38695c 100644 --- a/openpype/hosts/maya/plugins/load/load_assembly.py +++ b/openpype/hosts/maya/plugins/load/load_assembly.py @@ -1,7 +1,10 @@ -from avalon import api +from openpype.pipeline import ( + load, + remove_container +) -class AssemblyLoader(api.Loader): +class AssemblyLoader(load.LoaderPlugin): families = ["assembly"] representations = ["json"] @@ -48,13 +51,11 @@ class AssemblyLoader(api.Loader): def update(self, container, representation): from openpype import setdress - return setdress.update_package(container, - representation) + return setdress.update_package(container, representation) def remove(self, container): """Remove all sub containers""" - from avalon import api from openpype import setdress import maya.cmds as cmds @@ -63,7 +64,7 @@ class AssemblyLoader(api.Loader): for member_container in member_containers: self.log.info("Removing container %s", member_container['objectName']) - api.remove(member_container) + remove_container(member_container) # Remove alembic hierarchy reference # TODO: Check whether removing all contained references is safe enough diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py index 99f1f7c172..d8844ffea6 100644 --- a/openpype/hosts/maya/plugins/load/load_audio.py +++ b/openpype/hosts/maya/plugins/load/load_audio.py @@ -1,10 +1,14 @@ from maya import cmds, mel -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api.pipeline import containerise from openpype.hosts.maya.api.lib import unique_namespace -class AudioLoader(api.Loader): +class AudioLoader(load.LoaderPlugin): """Specific loader of audio.""" families = ["audio"] @@ -51,7 +55,7 @@ class AudioLoader(api.Loader): assert audio_node is not None, "Audio node not found." 
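The Maya loaders around this point all converge on the same `update()` shape after the migration: resolve the new file with `get_representation_path`, push it to the managed node, then rewrite the container's `representation` attribute. A condensed sketch of that pattern with a hypothetical loader; the family, node type and attribute names are placeholders, not a loader from this diff:

```python
# Condensed sketch of the common post-migration update() pattern.
# Family, node type and attribute names are placeholders.
from maya import cmds

from openpype.pipeline import (
    load,
    get_representation_path,
)


class ExampleFileLoader(load.LoaderPlugin):
    """Hypothetical loader illustrating the shared update() shape."""

    families = ["example"]
    representations = ["abc"]

    def update(self, container, representation):
        members = cmds.sets(container["objectName"], query=True) or []
        file_nodes = cmds.ls(members, type="file")

        # Resolve the path through openpype.pipeline instead of avalon.api.
        path = get_representation_path(representation)

        for node in file_nodes:
            cmds.setAttr("{}.fileTextureName".format(node),
                         path, type="string")

        # Keep the container pointing at the new representation.
        cmds.setAttr(container["objectName"] + ".representation",
                     str(representation["_id"]),
                     type="string")
```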
- path = api.get_representation_path(representation) + path = get_representation_path(representation) audio_node.filename.set(path) cmds.setAttr( container["objectName"] + ".representation", diff --git a/openpype/hosts/maya/plugins/load/load_gpucache.py b/openpype/hosts/maya/plugins/load/load_gpucache.py index 2e0b7bb810..591e568e4c 100644 --- a/openpype/hosts/maya/plugins/load/load_gpucache.py +++ b/openpype/hosts/maya/plugins/load/load_gpucache.py @@ -1,9 +1,13 @@ import os -from avalon import api + +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.api import get_project_settings -class GpuCacheLoader(api.Loader): +class GpuCacheLoader(load.LoaderPlugin): """Load model Alembic as gpuCache""" families = ["model"] @@ -73,7 +77,7 @@ class GpuCacheLoader(api.Loader): import maya.cmds as cmds - path = api.get_representation_path(representation) + path = get_representation_path(representation) # Update the cache members = cmds.sets(container['objectName'], query=True) diff --git a/openpype/hosts/maya/plugins/load/load_image_plane.py b/openpype/hosts/maya/plugins/load/load_image_plane.py index 8e33f51389..b250986489 100644 --- a/openpype/hosts/maya/plugins/load/load_image_plane.py +++ b/openpype/hosts/maya/plugins/load/load_image_plane.py @@ -1,6 +1,10 @@ from Qt import QtWidgets, QtCore -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api.pipeline import containerise from openpype.hosts.maya.api.lib import unique_namespace @@ -74,7 +78,7 @@ class CameraWindow(QtWidgets.QDialog): self.close() -class ImagePlaneLoader(api.Loader): +class ImagePlaneLoader(load.LoaderPlugin): """Specific loader of plate for image planes on selected camera.""" families = ["image", "plate", "render"] @@ -203,7 +207,7 @@ class ImagePlaneLoader(api.Loader): assert image_plane_shape is not None, "Image plane not found." - path = api.get_representation_path(representation) + path = get_representation_path(representation) image_plane_shape.imageName.set(path) cmds.setAttr( container["objectName"] + ".representation", diff --git a/openpype/hosts/maya/plugins/load/load_look.py b/openpype/hosts/maya/plugins/load/load_look.py index 96c1ecbb20..8f02ed59b8 100644 --- a/openpype/hosts/maya/plugins/load/load_look.py +++ b/openpype/hosts/maya/plugins/load/load_look.py @@ -5,7 +5,8 @@ from collections import defaultdict from Qt import QtWidgets -from avalon import api, io +from avalon import io +from openpype.pipeline import get_representation_path import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api import lib from openpype.widgets.message_window import ScrollMessageBox @@ -77,7 +78,7 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): }) # Load relationships - shader_relation = api.get_representation_path(json_representation) + shader_relation = get_representation_path(json_representation) with open(shader_relation, "r") as f: json_data = json.load(f) diff --git a/openpype/hosts/maya/plugins/load/load_matchmove.py b/openpype/hosts/maya/plugins/load/load_matchmove.py index abc702cde8..ee3332bd09 100644 --- a/openpype/hosts/maya/plugins/load/load_matchmove.py +++ b/openpype/hosts/maya/plugins/load/load_matchmove.py @@ -1,8 +1,8 @@ -from avalon import api from maya import mel +from openpype.pipeline import load -class MatchmoveLoader(api.Loader): +class MatchmoveLoader(load.LoaderPlugin): """ This will run matchmove script to create track in scene. 
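For the loader and setdress changes above, the removed avalon entry points map one-to-one onto `openpype.pipeline`. The sketch below lists the correspondences used throughout this diff and exercises a few of them; the representation id is a placeholder:

```python
# Correspondence between the removed avalon.api calls and the
# openpype.pipeline functions used in this diff. The representation id
# below is a placeholder.
from openpype.pipeline import (
    discover_loader_plugins,      # was api.discover(api.Loader)
    loaders_from_representation,  # was api.loaders_from_representation
    load_container,               # was api.load / pipeline.load
    update_container,             # was api.update
    remove_container,             # was api.remove
    get_representation_path,      # was api.get_representation_path
)

representation_id = "000000000000000000000000"  # placeholder ObjectId string

all_loaders = discover_loader_plugins()
loaders = loaders_from_representation(all_loaders, representation_id)
Loader = next((p for p in loaders if p.__name__ == "LookLoader"), None)
if Loader is not None:
    container = load_container(Loader, representation_id)
    # Later lifecycle calls follow the same renames:
    # update_container(container, version=2)
    # remove_container(container)
```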
diff --git a/openpype/hosts/maya/plugins/load/load_redshift_proxy.py b/openpype/hosts/maya/plugins/load/load_redshift_proxy.py index fd2ae0f1d3..d93a9f02a2 100644 --- a/openpype/hosts/maya/plugins/load/load_redshift_proxy.py +++ b/openpype/hosts/maya/plugins/load/load_redshift_proxy.py @@ -5,8 +5,11 @@ import clique import maya.cmds as cmds -from avalon import api from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api.lib import ( namespaced, maintained_selection, @@ -15,7 +18,7 @@ from openpype.hosts.maya.api.lib import ( from openpype.hosts.maya.api.pipeline import containerise -class RedshiftProxyLoader(api.Loader): +class RedshiftProxyLoader(load.LoaderPlugin): """Load Redshift proxy""" families = ["redshiftproxy"] @@ -78,7 +81,7 @@ class RedshiftProxyLoader(api.Loader): rs_meshes = cmds.ls(members, type="RedshiftProxyMesh") assert rs_meshes, "Cannot find RedshiftProxyMesh in container" - filename = api.get_representation_path(representation) + filename = get_representation_path(representation) for rs_mesh in rs_meshes: cmds.setAttr("{}.fileName".format(rs_mesh), diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index 0565b0b95c..04a25f6493 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ b/openpype/hosts/maya/plugins/load/load_reference.py @@ -1,8 +1,10 @@ import os from maya import cmds from avalon import api + from openpype.api import get_project_settings from openpype.lib import get_creator_by_name +from openpype.pipeline import legacy_create import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api.lib import maintained_selection @@ -119,10 +121,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): if family == "rig": self._post_process_rig(name, namespace, context, options) else: - if "translate" in options: cmds.setAttr(group_name + ".t", *options["translate"]) - return new_nodes def switch(self, container, representation): @@ -151,7 +151,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): creator_plugin = get_creator_by_name(self.animation_creator_name) with maintained_selection(): cmds.select([output, controls] + roots, noExpand=True) - api.create( + legacy_create( creator_plugin, name=namespace, asset=asset, diff --git a/openpype/hosts/maya/plugins/load/load_rendersetup.py b/openpype/hosts/maya/plugins/load/load_rendersetup.py index efeff2f193..7a2d8b1002 100644 --- a/openpype/hosts/maya/plugins/load/load_rendersetup.py +++ b/openpype/hosts/maya/plugins/load/load_rendersetup.py @@ -7,10 +7,13 @@ instance. """ import json -import six import sys +import six -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api import lib from openpype.hosts.maya.api.pipeline import containerise @@ -18,7 +21,7 @@ from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup -class RenderSetupLoader(api.Loader): +class RenderSetupLoader(load.LoaderPlugin): """Load json preset for RenderSetup overwriting current one.""" families = ["rendersetup"] @@ -87,7 +90,7 @@ class RenderSetupLoader(api.Loader): "Render setup setting will be overwritten by new version. 
All " "setting specified by user not included in loaded version " "will be lost.") - path = api.get_representation_path(representation) + path = get_representation_path(representation) with open(path, "r") as file: try: renderSetup.instance().decode( diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py b/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py index 3e1d67ae9a..70bd9d22e2 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py +++ b/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py @@ -1,9 +1,10 @@ import os -from avalon import api + from openpype.api import get_project_settings +from openpype.pipeline import load -class LoadVDBtoRedShift(api.Loader): +class LoadVDBtoRedShift(load.LoaderPlugin): """Load OpenVDB in a Redshift Volume Shape""" families = ["vdbcache"] diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py index 099c020093..4f14235bfb 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py +++ b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py @@ -1,6 +1,10 @@ import os -from avalon import api + from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) from maya import cmds @@ -69,7 +73,7 @@ def _fix_duplicate_vvg_callbacks(): matched.add(callback) -class LoadVDBtoVRay(api.Loader): +class LoadVDBtoVRay(load.LoaderPlugin): families = ["vdbcache"] representations = ["vdb"] @@ -174,7 +178,7 @@ class LoadVDBtoVRay(api.Loader): fname = files[0] else: # Sequence - from avalon.vendor import clique + import clique # todo: check support for negative frames as input collections, remainder = clique.assemble(files) assert len(collections) == 1, ( @@ -252,7 +256,7 @@ class LoadVDBtoVRay(api.Loader): def update(self, container, representation): - path = api.get_representation_path(representation) + path = get_representation_path(representation) # Find VRayVolumeGrid members = cmds.sets(container['objectName'], query=True) diff --git a/openpype/hosts/maya/plugins/load/load_vrayproxy.py b/openpype/hosts/maya/plugins/load/load_vrayproxy.py index ac2fe635b3..5b79b1efb3 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayproxy.py +++ b/openpype/hosts/maya/plugins/load/load_vrayproxy.py @@ -9,8 +9,12 @@ import os import maya.cmds as cmds -from avalon import api, io +from avalon import io from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api.lib import ( maintained_selection, namespaced, @@ -19,7 +23,7 @@ from openpype.hosts.maya.api.lib import ( from openpype.hosts.maya.api.pipeline import containerise -class VRayProxyLoader(api.Loader): +class VRayProxyLoader(load.LoaderPlugin): """Load VRay Proxy with Alembic or VrayMesh.""" families = ["vrayproxy", "model", "pointcache", "animation"] @@ -100,7 +104,10 @@ class VRayProxyLoader(api.Loader): assert vraymeshes, "Cannot find VRayMesh in container" # get all representations for this version - filename = self._get_abc(representation["parent"]) or api.get_representation_path(representation) # noqa: E501 + filename = ( + self._get_abc(representation["parent"]) + or get_representation_path(representation) + ) for vray_mesh in vraymeshes: cmds.setAttr("{}.fileName".format(vray_mesh), @@ -185,7 +192,7 @@ class VRayProxyLoader(api.Loader): if abc_rep: self.log.debug("Found, we'll link alembic to vray proxy.") - file_name = api.get_representation_path(abc_rep) + file_name = 
get_representation_path(abc_rep) self.log.debug("File: {}".format(self.fname)) return file_name diff --git a/openpype/hosts/maya/plugins/load/load_vrayscene.py b/openpype/hosts/maya/plugins/load/load_vrayscene.py index dfe2b85edc..61132088cc 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayscene.py +++ b/openpype/hosts/maya/plugins/load/load_vrayscene.py @@ -1,8 +1,11 @@ # -*- coding: utf-8 -*- import os import maya.cmds as cmds # noqa -from avalon import api from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api.lib import ( maintained_selection, namespaced, @@ -11,7 +14,7 @@ from openpype.hosts.maya.api.lib import ( from openpype.hosts.maya.api.pipeline import containerise -class VRaySceneLoader(api.Loader): +class VRaySceneLoader(load.LoaderPlugin): """Load Vray scene""" families = ["vrayscene_layer"] @@ -78,7 +81,7 @@ class VRaySceneLoader(api.Loader): vraymeshes = cmds.ls(members, type="VRayScene") assert vraymeshes, "Cannot find VRayScene in container" - filename = api.get_representation_path(representation) + filename = get_representation_path(representation) for vray_mesh in vraymeshes: cmds.setAttr("{}.FilePath".format(vray_mesh), diff --git a/openpype/hosts/maya/plugins/load/load_yeti_cache.py b/openpype/hosts/maya/plugins/load/load_yeti_cache.py index dfe75173ac..c64e1c540b 100644 --- a/openpype/hosts/maya/plugins/load/load_yeti_cache.py +++ b/openpype/hosts/maya/plugins/load/load_yeti_cache.py @@ -7,13 +7,17 @@ from pprint import pprint from maya import cmds -from avalon import api, io +from avalon import io from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api import lib from openpype.hosts.maya.api.pipeline import containerise -class YetiCacheLoader(api.Loader): +class YetiCacheLoader(load.LoaderPlugin): families = ["yeticache", "yetiRig"] representations = ["fur"] @@ -121,8 +125,8 @@ class YetiCacheLoader(api.Loader): "cannot find fursettings representation" ) - settings_fname = api.get_representation_path(fur_settings) - path = api.get_representation_path(representation) + settings_fname = get_representation_path(fur_settings) + path = get_representation_path(representation) # Get all node data with open(settings_fname, "r") as fp: settings = json.load(fp) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index d99e81573b..a525b562f3 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -50,6 +50,7 @@ import maya.app.renderSetup.model.renderSetup as renderSetup import pyblish.api from avalon import api +from openpype.lib import get_formatted_current_time from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products # noqa: E501 from openpype.hosts.maya.api import lib @@ -328,7 +329,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "family": "renderlayer", "families": ["renderlayer"], "asset": asset, - "time": api.time(), + "time": get_formatted_current_time(), "author": context.data["user"], # Add source to allow tracing back to the scene from # which was submitted originally diff --git a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py index c1e5d388af..327fc836dc 100644 --- a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py +++ 
b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py @@ -7,6 +7,7 @@ from maya import cmds import pyblish.api from avalon import api +from openpype.lib import get_formatted_current_time from openpype.hosts.maya.api import lib @@ -117,7 +118,7 @@ class CollectVrayScene(pyblish.api.InstancePlugin): "family": "vrayscene_layer", "families": ["vrayscene_layer"], "asset": api.Session["AVALON_ASSET"], - "time": api.time(), + "time": get_formatted_current_time(), "author": context.data["user"], # Add source to allow tracing back to the scene from # which was submitted originally diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index a9a2a7b60c..a8893072d0 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -4,7 +4,6 @@ import os import sys import json import tempfile -import platform import contextlib import subprocess from collections import OrderedDict @@ -64,10 +63,6 @@ def maketx(source, destination, *args): maketx_path = get_oiio_tools_path("maketx") - if platform.system().lower() == "windows": - # Ensure .exe extension - maketx_path += ".exe" - if not os.path.exists(maketx_path): print( "OIIO tool not found in {}".format(maketx_path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py index 9c432cbc67..389995d30c 100644 --- a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py +++ b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py @@ -6,6 +6,7 @@ from maya import cmds import openpype.api from openpype.hosts.maya.api.lib import maintained_selection +from avalon.pipeline import AVALON_CONTAINER_ID class ExtractMayaSceneRaw(openpype.api.Extractor): @@ -57,10 +58,16 @@ class ExtractMayaSceneRaw(openpype.api.Extractor): else: members = instance[:] + selection = members + if set(self.add_for_families).intersection( + set(instance.data.get("families", []))) or \ + instance.data.get("family") in self.add_for_families: + selection += self._get_loaded_containers(members) + # Perform extraction self.log.info("Performing extraction ...") with maintained_selection(): - cmds.select(members, noExpand=True) + cmds.select(selection, noExpand=True) cmds.file(path, force=True, typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 @@ -83,3 +90,33 @@ class ExtractMayaSceneRaw(openpype.api.Extractor): instance.data["representations"].append(representation) self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) + + @staticmethod + def _get_loaded_containers(members): + # type: (list) -> list + refs_to_include = { + cmds.referenceQuery(node, referenceNode=True) + for node in members + if cmds.referenceQuery(node, isNodeReferenced=True) + } + + members_with_refs = refs_to_include.union(members) + + obj_sets = cmds.ls("*.id", long=True, type="objectSet", recursive=True, + objectsOnly=True) + + loaded_containers = [] + for obj_set in obj_sets: + + if not cmds.attributeQuery("id", node=obj_set, exists=True): + continue + + id_attr = "{}.id".format(obj_set) + if cmds.getAttr(id_attr) != AVALON_CONTAINER_ID: + continue + + set_content = set(cmds.sets(obj_set, query=True)) + if set_content.intersection(members_with_refs): + loaded_containers.append(obj_set) + + return loaded_containers diff --git a/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py 
b/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py index 00f0d38775..05d63f1d56 100644 --- a/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py @@ -32,8 +32,8 @@ class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin): # if a deformer has been created on the shape invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Nodes found with non-related " - "asset IDs: {0}".format(invalid)) + raise RuntimeError("Nodes found with mismatching " + "IDs: {0}".format(invalid)) @classmethod def get_invalid(cls, instance): @@ -65,7 +65,7 @@ class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin): invalid.append(node) continue - history_id = lib.get_id_from_history(node) + history_id = lib.get_id_from_sibling(node) if history_id is not None and node_id != history_id: invalid.append(node) @@ -76,7 +76,7 @@ class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin): for node in cls.get_invalid(instance): # Get the original id from history - history_id = lib.get_id_from_history(node) + history_id = lib.get_id_from_sibling(node) if not history_id: cls.log.error("Could not find ID in history for '%s'", node) continue diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py index a4d4d2bcc2..0324be9fc9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py @@ -48,7 +48,7 @@ class ValidateNodeIdsDeformedShape(pyblish.api.InstancePlugin): invalid = [] for shape in shapes: - history_id = lib.get_id_from_history(shape) + history_id = lib.get_id_from_sibling(shape) if history_id: current_id = lib.get_id(shape) if current_id != history_id: @@ -61,7 +61,7 @@ class ValidateNodeIdsDeformedShape(pyblish.api.InstancePlugin): for node in cls.get_invalid(instance): # Get the original id from history - history_id = lib.get_id_from_history(node) + history_id = lib.get_id_from_sibling(node) if not history_id: cls.log.error("Could not find ID in history for '%s'", node) continue diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py b/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py index e2090080f6..cc3723a6e1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py @@ -24,6 +24,7 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): openpype.hosts.maya.api.action.SelectInvalidAction, openpype.api.RepairAction ] + allow_history_only = False def process(self, instance): """Process all meshes""" @@ -32,8 +33,8 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): # if a deformer has been created on the shape invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Nodes found with non-related " - "asset IDs: {0}".format(invalid)) + raise RuntimeError("Nodes found with mismatching " + "IDs: {0}".format(invalid)) @classmethod def get_invalid(cls, instance): @@ -51,10 +52,13 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): noIntermediate=True) for shape in shapes: - history_id = lib.get_id_from_history(shape) - if history_id: + sibling_id = lib.get_id_from_sibling( + shape, + history_only=cls.allow_history_only + ) + if sibling_id: current_id = 
lib.get_id(shape) - if current_id != history_id: + if current_id != sibling_id: invalid.append(shape) return invalid @@ -63,10 +67,13 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): def repair(cls, instance): for node in cls.get_invalid(instance): - # Get the original id from history - history_id = lib.get_id_from_history(node) - if not history_id: - cls.log.error("Could not find ID in history for '%s'", node) + # Get the original id from sibling + sibling_id = lib.get_id_from_sibling( + node, + history_only=cls.allow_history_only + ) + if not sibling_id: + cls.log.error("Could not find ID in siblings for '%s'", node) continue - lib.set_id(node, history_id, overwrite=True) + lib.set_id(node, sibling_id, overwrite=True) diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py index 8c6c9ca55b..fd2e16b8d3 100644 --- a/openpype/hosts/nuke/api/pipeline.py +++ b/openpype/hosts/nuke/api/pipeline.py @@ -14,6 +14,12 @@ from openpype.api import ( BuildWorkfile, get_current_project_settings ) +from openpype.lib import register_event_callback +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) from openpype.tools.utils import host_tools from .command import viewer_update_and_undo_stop @@ -97,13 +103,13 @@ def install(): log.info("Registering Nuke plug-ins..") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) # Register Avalon event for workfiles loading. 
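The Maya and Nuke pipeline hunks around here also swap `avalon.api.on`/`emit` for `register_event_callback`/`emit_event` from `openpype.lib`, and callbacks now receive either nothing or a dict-like event instead of positional arguments. A small sketch of the new convention; the handler bodies and the emitted payload are placeholders, not OpenPype code:

```python
# Sketch of the new event helpers as used in these pipeline hunks;
# handler bodies and the emitted payload are placeholders.
from openpype.lib import register_event_callback, emit_event


def before_workfile_save(event):
    # Payload is read from the event mapping, not from positional args.
    workdir_path = event["workdir_path"]
    if workdir_path:
        print("Workfile will be saved into:", workdir_path)


def on_open():
    # Handlers that ignored their old positional args now take none.
    print("Scene opened")


register_event_callback("workfile.save.before", before_workfile_save)
register_event_callback("open", on_open)

# Emitting with a data payload mirrors the _before_scene_save change:
emit_event("workfile.save.before", {"workdir_path": "/tmp/work"})
```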
- avalon.api.on("workio.open_file", check_inventory_versions) - avalon.api.on("taskChanged", change_context_label) + register_event_callback("workio.open_file", check_inventory_versions) + register_event_callback("taskChanged", change_context_label) pyblish.api.register_callback( "instanceToggled", on_pyblish_instance_toggled) @@ -123,8 +129,8 @@ def uninstall(): log.info("Deregistering Nuke plug-ins..") pyblish.deregister_host("nuke") pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH) + deregister_loader_plugin_path(LOAD_PATH) + avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) pyblish.api.deregister_callback( "instanceToggled", on_pyblish_instance_toggled) @@ -226,7 +232,7 @@ def _uninstall_menu(): menu.removeItem(item.name()) -def change_context_label(*args): +def change_context_label(): menubar = nuke.menu("Nuke") menu = menubar.findItem(MENU_LABEL) diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index fd754203d4..d0bb45a05d 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -4,11 +4,10 @@ import string import nuke -import avalon.api - -from openpype.api import ( - get_current_project_settings, - PypeCreatorMixin +from openpype.api import get_current_project_settings +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, ) from .lib import ( Knobby, @@ -20,7 +19,7 @@ from .lib import ( ) -class OpenPypeCreator(PypeCreatorMixin, avalon.api.Creator): +class OpenPypeCreator(LegacyCreator): """Pype Nuke Creator class wrapper""" node_color = "0xdfea5dff" @@ -87,7 +86,7 @@ def get_review_presets_config(): return [str(name) for name, _prop in outputs.items()] -class NukeLoader(avalon.api.Loader): +class NukeLoader(LoaderPlugin): container_id_knob = "containerId" container_id = None @@ -152,6 +151,7 @@ class ExporterReview(object): """ data = None + publish_on_farm = False def __init__(self, klass, @@ -210,6 +210,9 @@ class ExporterReview(object): if self.multiple_presets: repre["outputName"] = self.name + if self.publish_on_farm: + repre["tags"].append("publish_on_farm") + self.data["representations"].append(repre) def get_view_input_process_node(self): @@ -446,6 +449,9 @@ class ExporterReviewMov(ExporterReview): return path def generate_mov(self, farm=False, **kwargs): + self.publish_on_farm = farm + reformat_node_add = kwargs["reformat_node_add"] + reformat_node_config = kwargs["reformat_node_config"] bake_viewer_process = kwargs["bake_viewer_process"] bake_viewer_input_process_node = kwargs[ "bake_viewer_input_process"] @@ -483,6 +489,30 @@ class ExporterReviewMov(ExporterReview): self.previous_node = r_node self.log.debug("Read... `{}`".format(self._temp_nodes[subset])) + # add reformat node + if reformat_node_add: + # append reformated tag + add_tags.append("reformated") + + rf_node = nuke.createNode("Reformat") + for kn_conf in reformat_node_config: + _type = kn_conf["type"] + k_name = str(kn_conf["name"]) + k_value = kn_conf["value"] + + # to remove unicode as nuke doesn't like it + if _type == "string": + k_value = str(kn_conf["value"]) + + rf_node[k_name].setValue(k_value) + + # connect + rf_node.setInput(0, self.previous_node) + self._temp_nodes[subset].append(rf_node) + self.previous_node = rf_node + self.log.debug( + "Reformat... 
`{}`".format(self._temp_nodes[subset])) + # only create colorspace baking if toggled on if bake_viewer_process: if bake_viewer_input_process_node: @@ -537,7 +567,7 @@ class ExporterReviewMov(ExporterReview): # ---------- end nodes creation # ---------- render or save to nk - if farm: + if self.publish_on_farm: nuke.scriptSave() path_nk = self.save_file() self.data.update({ @@ -547,11 +577,12 @@ class ExporterReviewMov(ExporterReview): }) else: self.render(write_node.name()) - # ---------- generate representation data - self.get_representation_data( - tags=["review", "delete"] + add_tags, - range=True - ) + + # ---------- generate representation data + self.get_representation_data( + tags=["review", "delete"] + add_tags, + range=True + ) self.log.debug("Representation... `{}`".format(self.data)) diff --git a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py index 49405fd213..5f834be557 100644 --- a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py +++ b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py @@ -1,4 +1,4 @@ -from avalon import api, style +from avalon import api from openpype.api import Logger from openpype.hosts.nuke.api.lib import set_avalon_knob_data @@ -7,7 +7,7 @@ class RepairOldLoaders(api.InventoryAction): label = "Repair Old Loaders" icon = "gears" - color = style.colors.alert + color = "#cc0000" log = Logger.get_logger(__name__) diff --git a/openpype/hosts/nuke/plugins/load/actions.py b/openpype/hosts/nuke/plugins/load/actions.py index 07dcf2d8e1..81840b3a38 100644 --- a/openpype/hosts/nuke/plugins/load/actions.py +++ b/openpype/hosts/nuke/plugins/load/actions.py @@ -2,13 +2,13 @@ """ -from avalon import api from openpype.api import Logger +from openpype.pipeline import load log = Logger().get_logger(__name__) -class SetFrameRangeLoader(api.Loader): +class SetFrameRangeLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", @@ -42,7 +42,7 @@ class SetFrameRangeLoader(api.Loader): lib.update_frame_range(start, end) -class SetFrameRangeWithHandlesLoader(api.Loader): +class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index 6619cfb414..36cec6f4c5 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -1,7 +1,11 @@ -from avalon import api, style, io +from avalon import io import nuke import nukescripts +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import ( find_free_space_to_paste_nodes, maintained_selection, @@ -14,7 +18,7 @@ from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop from openpype.hosts.nuke.api import containerise, update_container -class LoadBackdropNodes(api.Loader): +class LoadBackdropNodes(load.LoaderPlugin): """Loading Published Backdrop nodes (workfile, nukenodes)""" representations = ["nk"] @@ -23,7 +27,7 @@ class LoadBackdropNodes(api.Loader): label = "Iport Nuke Nodes" order = 0 icon = "eye" - color = style.colors.light + color = "white" node_color = "0x7533c1ff" def load(self, context, name, namespace, data): @@ -191,7 +195,7 @@ class LoadBackdropNodes(api.Loader): # get corresponding node GN = nuke.toNode(container['objectName']) - file = 
api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") context = representation["context"] name = container['name'] version_data = version.get("data", {}) diff --git a/openpype/hosts/nuke/plugins/load/load_camera_abc.py b/openpype/hosts/nuke/plugins/load/load_camera_abc.py index 9610940619..fb5f7f8ede 100644 --- a/openpype/hosts/nuke/plugins/load/load_camera_abc.py +++ b/openpype/hosts/nuke/plugins/load/load_camera_abc.py @@ -1,6 +1,10 @@ import nuke -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api import ( containerise, update_container, @@ -11,7 +15,7 @@ from openpype.hosts.nuke.api.lib import ( ) -class AlembicCameraLoader(api.Loader): +class AlembicCameraLoader(load.LoaderPlugin): """ This will load alembic camera into script. """ @@ -127,7 +131,7 @@ class AlembicCameraLoader(api.Loader): data_imprint.update({k: version_data[k]}) # getting file path - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") with maintained_selection(): camera_node = nuke.toNode(object_name) diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py index 21b7a6a816..9b0588feac 100644 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ b/openpype/hosts/nuke/plugins/load/load_clip.py @@ -1,7 +1,8 @@ import nuke -from avalon.vendor import qargparse -from avalon import api, io +import qargparse +from avalon import io +from openpype.pipeline import get_representation_path from openpype.hosts.nuke.api.lib import ( get_imageio_input_colorspace, maintained_selection @@ -41,6 +42,9 @@ class LoadClip(plugin.NukeLoader): icon = "file-video-o" color = "white" + # Loaded from settings + _representations = [] + script_start = int(nuke.root()["first_frame"].value()) # option gui @@ -97,7 +101,7 @@ class LoadClip(plugin.NukeLoader): last += self.handle_end if not is_sequence: - duration = last - first + 1 + duration = last - first first = 1 last = first + duration elif "#" not in file: @@ -186,7 +190,7 @@ class LoadClip(plugin.NukeLoader): is_sequence = len(representation["files"]) > 1 read_node = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") start_at_workfile = bool("start at" in read_node['frame_mode'].value()) @@ -212,7 +216,7 @@ class LoadClip(plugin.NukeLoader): last += self.handle_end if not is_sequence: - duration = last - first + 1 + duration = last - first first = 1 last = first + duration elif "#" not in file: diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py index f636c6b510..68c3952942 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects.py +++ b/openpype/hosts/nuke/plugins/load/load_effects.py @@ -1,7 +1,13 @@ import json from collections import OrderedDict import nuke -from avalon import api, style, io + +from avalon import io + +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api import ( containerise, update_container, @@ -9,7 +15,7 @@ from openpype.hosts.nuke.api import ( ) -class LoadEffects(api.Loader): +class LoadEffects(load.LoaderPlugin): """Loading colorspace soft effect exported from nukestudio""" representations = ["effectJson"] @@ -18,7 +24,7 @@ 
class LoadEffects(api.Loader): label = "Load Effects - nodes" order = 0 icon = "cc" - color = style.colors.light + color = "white" ignore_attr = ["useLifetime"] @@ -149,7 +155,7 @@ class LoadEffects(api.Loader): # get corresponding node GN = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") name = container['name'] version_data = version.get("data", {}) vname = version.get("name", None) diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py index 990bce54f1..9c4fd4c2c6 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py @@ -3,7 +3,12 @@ from collections import OrderedDict import nuke -from avalon import api, style, io +from avalon import io + +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api import lib from openpype.hosts.nuke.api import ( containerise, @@ -12,7 +17,7 @@ from openpype.hosts.nuke.api import ( ) -class LoadEffectsInputProcess(api.Loader): +class LoadEffectsInputProcess(load.LoaderPlugin): """Loading colorspace soft effect exported from nukestudio""" representations = ["effectJson"] @@ -21,7 +26,7 @@ class LoadEffectsInputProcess(api.Loader): label = "Load Effects - Input Process" order = 0 icon = "eye" - color = style.colors.alert + color = "#cc0000" ignore_attr = ["useLifetime"] def load(self, context, name, namespace, data): @@ -156,7 +161,7 @@ class LoadEffectsInputProcess(api.Loader): # get corresponding node GN = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") name = container['name'] version_data = version.get("data", {}) vname = version.get("name", None) diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo.py b/openpype/hosts/nuke/plugins/load/load_gizmo.py index 659977d789..6f2b191be9 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo.py @@ -1,5 +1,11 @@ import nuke -from avalon import api, style, io + +from avalon import io + +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import ( maintained_selection, get_avalon_knob_data, @@ -12,7 +18,7 @@ from openpype.hosts.nuke.api import ( ) -class LoadGizmo(api.Loader): +class LoadGizmo(load.LoaderPlugin): """Loading nuke Gizmo""" representations = ["gizmo"] @@ -21,7 +27,7 @@ class LoadGizmo(api.Loader): label = "Load Gizmo" order = 0 icon = "dropbox" - color = style.colors.light + color = "white" node_color = "0x75338eff" def load(self, context, name, namespace, data): @@ -103,7 +109,7 @@ class LoadGizmo(api.Loader): # get corresponding node GN = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") name = container['name'] version_data = version.get("data", {}) vname = version.get("name", None) diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py index 240bfd467d..87bebce15b 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -1,5 +1,11 @@ -from avalon import api, style, io import nuke + +from avalon import io + +from 
openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import ( maintained_selection, create_backdrop, @@ -13,7 +19,7 @@ from openpype.hosts.nuke.api import ( ) -class LoadGizmoInputProcess(api.Loader): +class LoadGizmoInputProcess(load.LoaderPlugin): """Loading colorspace soft effect exported from nukestudio""" representations = ["gizmo"] @@ -22,7 +28,7 @@ class LoadGizmoInputProcess(api.Loader): label = "Load Gizmo - Input Process" order = 0 icon = "eye" - color = style.colors.alert + color = "#cc0000" node_color = "0x7533c1ff" def load(self, context, name, namespace, data): @@ -109,7 +115,7 @@ class LoadGizmoInputProcess(api.Loader): # get corresponding node GN = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") name = container['name'] version_data = version.get("data", {}) vname = version.get("name", None) diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py index d36226b139..9a175a0cba 100644 --- a/openpype/hosts/nuke/plugins/load/load_image.py +++ b/openpype/hosts/nuke/plugins/load/load_image.py @@ -1,9 +1,12 @@ -import re import nuke -from avalon.vendor import qargparse -from avalon import api, io +import qargparse +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import ( get_imageio_input_colorspace ) @@ -14,7 +17,7 @@ from openpype.hosts.nuke.api import ( ) -class LoadImage(api.Loader): +class LoadImage(load.LoaderPlugin): """Load still image into Nuke""" families = [ @@ -33,6 +36,9 @@ class LoadImage(api.Loader): icon = "image" color = "white" + # Loaded from settings + _representations = [] + node_name_template = "{class_name}_{ext}" options = [ @@ -162,7 +168,7 @@ class LoadImage(api.Loader): repr_cont = representation["context"] - file = api.get_representation_path(representation) + file = get_representation_path(representation) if not file: repr_id = representation["_id"] diff --git a/openpype/hosts/nuke/plugins/load/load_matchmove.py b/openpype/hosts/nuke/plugins/load/load_matchmove.py index 60d5dc026f..f5a90706c7 100644 --- a/openpype/hosts/nuke/plugins/load/load_matchmove.py +++ b/openpype/hosts/nuke/plugins/load/load_matchmove.py @@ -1,8 +1,8 @@ -from avalon import api import nuke +from openpype.pipeline import load -class MatchmoveLoader(api.Loader): +class MatchmoveLoader(load.LoaderPlugin): """ This will run matchmove script to create track in script. """ diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py index 2b52bbf00f..e445beca05 100644 --- a/openpype/hosts/nuke/plugins/load/load_model.py +++ b/openpype/hosts/nuke/plugins/load/load_model.py @@ -1,5 +1,9 @@ import nuke -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import maintained_selection from openpype.hosts.nuke.api import ( containerise, @@ -8,7 +12,7 @@ from openpype.hosts.nuke.api import ( ) -class AlembicModelLoader(api.Loader): +class AlembicModelLoader(load.LoaderPlugin): """ This will load alembic model into script. 
""" @@ -124,7 +128,7 @@ class AlembicModelLoader(api.Loader): data_imprint.update({k: version_data[k]}) # getting file path - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") with maintained_selection(): model_node = nuke.toNode(object_name) diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py index aa48b631c5..779f101682 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py @@ -1,5 +1,11 @@ import nuke -from avalon import api, style, io + +from avalon import io + +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import get_avalon_knob_data from openpype.hosts.nuke.api import ( containerise, @@ -8,7 +14,7 @@ from openpype.hosts.nuke.api import ( ) -class LinkAsGroup(api.Loader): +class LinkAsGroup(load.LoaderPlugin): """Copy the published file to be pasted at the desired location""" representations = ["nk"] @@ -17,7 +23,7 @@ class LinkAsGroup(api.Loader): label = "Load Precomp" order = 0 icon = "file" - color = style.colors.alert + color = "#cc0000" def load(self, context, name, namespace, data): # for k, v in context.items(): @@ -108,7 +114,7 @@ class LinkAsGroup(api.Loader): """ node = nuke.toNode(container['objectName']) - root = api.get_representation_path(representation).replace("\\", "/") + root = get_representation_path(representation).replace("\\", "/") # Get start frame from version data version = io.find_one({ diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py index 5bbc88266a..544b9e04da 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py @@ -1,4 +1,5 @@ import os +import re import pyblish.api import openpype from openpype.hosts.nuke.api import plugin @@ -25,6 +26,7 @@ class ExtractReviewDataMov(openpype.api.Extractor): def process(self, instance): families = instance.data["families"] task_type = instance.context.data["taskType"] + subset = instance.data["subset"] self.log.info("Creating staging dir...") if "representations" not in instance.data: @@ -46,6 +48,7 @@ class ExtractReviewDataMov(openpype.api.Extractor): for o_name, o_data in self.outputs.items(): f_families = o_data["filter"]["families"] f_task_types = o_data["filter"]["task_types"] + f_subsets = o_data["filter"]["sebsets"] # test if family found in context test_families = any([ @@ -69,11 +72,25 @@ class ExtractReviewDataMov(openpype.api.Extractor): bool(not f_task_types) ]) + # test subsets from filter + test_subsets = any([ + # check if any of subset filter inputs + # converted to regex patern is not found in subset + # we keep strict case sensitivity + bool(next(( + s for s in f_subsets + if re.search(re.compile(s), subset) + ), None)), + # but if no subsets were set then make this acuntable too + bool(not f_subsets) + ]) + # we need all filters to be positive for this # preset to be activated test_all = all([ test_families, - test_task_types + test_task_types, + test_subsets ]) # if it is not positive then skip this preset @@ -113,13 +130,22 @@ class ExtractReviewDataMov(openpype.api.Extractor): }) else: data = exporter.generate_mov(**o_data) - generated_repres.extend(data["representations"]) - self.log.info(generated_repres) + # add 
representation generated by exporter + generated_repres.extend(data["representations"]) + self.log.debug( + "__ generated_repres: {}".format(generated_repres)) if generated_repres: # assign to representations instance.data["representations"] += generated_repres + else: + instance.data["families"].remove("review") + self.log.info(( + "Removing `review` from families. " + "Not available baking profile." + )) + self.log.debug(instance.data["families"]) self.log.debug( "_ representations: {}".format( diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py index 50e5f995f4..e917a28046 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py +++ b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py @@ -48,8 +48,13 @@ class ExtractSlateFrame(openpype.api.Extractor): self.log.info( "StagingDir `{0}`...".format(instance.data["stagingDir"])) + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + frame_length = int( - instance.data["frameEnd"] - instance.data["frameStart"] + 1 + (frame_end - frame_start + 1) + (handle_start + handle_end) ) temporary_nodes = [] diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py index 97ddef0a59..29c706f302 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py @@ -80,7 +80,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): # Add all nodes in group instances. if node.Class() == "Group": # only alter families for render family - if "write" in families_ak.lower(): + if families_ak and "write" in families_ak.lower(): target = node["render"].value() if target == "Use existing frames": # Local rendering diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py index 189f28f7c6..85e98db7ed 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py @@ -3,8 +3,9 @@ import re from pprint import pformat import nuke import pyblish.api +from avalon import io import openpype.api as pype -from avalon import io, api +from openpype.pipeline import get_representation_path @pyblish.api.log @@ -182,7 +183,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): if repre_doc: instance.data["audio"] = [{ "offset": 0, - "filename": api.get_representation_path(repre_doc) + "filename": get_representation_path(repre_doc) }] self.log.debug("instance.data: {}".format(pformat(instance.data))) diff --git a/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py index 22a9b3678e..2bf1ff81f8 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py +++ b/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py @@ -1,12 +1,16 @@ import os -import toml import nuke +import toml import pyblish.api -from avalon import api from bson.objectid import ObjectId +from openpype.pipeline import ( + discover_loader_plugins, + load_container, +) + class RepairReadLegacyAction(pyblish.api.Action): @@ -49,13 +53,13 @@ class RepairReadLegacyAction(pyblish.api.Action): loader_name = "LoadMov" loader_plugin = None - for Loader in api.discover(api.Loader): + for Loader in discover_loader_plugins(): if 
Loader.__name__ != loader_name: continue loader_plugin = Loader - api.load( + load_container( Loader=loader_plugin, representation=ObjectId(data["representation"]) ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py index a73bed8edd..08f09f8097 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py @@ -34,9 +34,9 @@ class ValidateWriteLegacy(pyblish.api.InstancePlugin): # test if render in family test knob # and only one item should be available assert len(family_test) == 1, msg + " > More avalon attributes" - assert "render" in node[family_test[0]].value(), msg + \ + assert "render" in node[family_test[0]].value() \ + or "still" in node[family_test[0]].value(), msg + \ " > Not correct family" - # test if `file` knob in node, this way old # non-group-node write could be detected assert "file" not in node.knobs(), msg + \ @@ -74,6 +74,8 @@ class ValidateWriteLegacy(pyblish.api.InstancePlugin): Create_name = "CreateWriteRender" elif family == "prerender": Create_name = "CreateWritePrerender" + elif family == "still": + Create_name = "CreateWriteStill" # get appropriate plugin class creator_plugin = None diff --git a/openpype/hosts/photoshop/api/README.md b/openpype/hosts/photoshop/api/README.md index b958f53803..80792a4da0 100644 --- a/openpype/hosts/photoshop/api/README.md +++ b/openpype/hosts/photoshop/api/README.md @@ -195,11 +195,12 @@ class ExtractImage(openpype.api.Extractor): #### Loader Plugin ```python from avalon import api, photoshop +from openpype.pipeline import load, get_representation_path stub = photoshop.stub() -class ImageLoader(api.Loader): +class ImageLoader(load.LoaderPlugin): """Load images Stores the imported asset in a container named after the asset. 
@@ -227,7 +228,7 @@ class ImageLoader(api.Loader): with photoshop.maintained_selection(): stub.replace_smart_object( - layer, api.get_representation_path(representation) + layer, get_representation_path(representation) ) stub.imprint( @@ -245,7 +246,7 @@ https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/ Add --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome then localhost:8078 (port set in `photoshop\extension\.debug`) -Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01 +Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01 Or install CEF client from https://github.com/Adobe-CEP/CEP-Resources/tree/master/CEP_9.x ## Resources diff --git a/openpype/hosts/photoshop/api/__init__.py b/openpype/hosts/photoshop/api/__init__.py index 4cc2aa2c78..17ea957066 100644 --- a/openpype/hosts/photoshop/api/__init__.py +++ b/openpype/hosts/photoshop/api/__init__.py @@ -16,7 +16,6 @@ from .pipeline import ( ) from .plugin import ( PhotoshopLoader, - Creator, get_unique_layer_name ) from .workio import ( @@ -42,11 +41,11 @@ __all__ = [ "list_instances", "remove_instance", "install", + "uninstall", "containerise", # Plugin "PhotoshopLoader", - "Creator", "get_unique_layer_name", # workfiles diff --git a/openpype/hosts/photoshop/api/launch_logic.py b/openpype/hosts/photoshop/api/launch_logic.py index 112cd8fe3f..0021905cb5 100644 --- a/openpype/hosts/photoshop/api/launch_logic.py +++ b/openpype/hosts/photoshop/api/launch_logic.py @@ -14,7 +14,7 @@ from openpype.api import Logger from openpype.tools.utils import host_tools from avalon import api -from avalon.tools.webserver.app import WebServerTool +from openpype.tools.adobe_webserver.app import WebServerTool from .ws_stub import PhotoshopServerStub diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py index 25983f2471..e814e1ca4d 100644 --- a/openpype/hosts/photoshop/api/pipeline.py +++ b/openpype/hosts/photoshop/api/pipeline.py @@ -1,5 +1,4 @@ import os -import sys from Qt import QtWidgets import pyblish.api @@ -7,6 +6,12 @@ import avalon.api from avalon import pipeline, io from openpype.api import Logger +from openpype.lib import register_event_callback +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) import openpype.hosts.photoshop from . 
import lib @@ -67,21 +72,21 @@ def install(): pyblish.api.register_host("photoshop") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) log.info(PUBLISH_PATH) pyblish.api.register_callback( "instanceToggled", on_pyblish_instance_toggled ) - avalon.api.on("application.launched", on_application_launch) + register_event_callback("application.launched", on_application_launch) def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH) + deregister_loader_plugin_path(LOAD_PATH) + avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) def ls(): diff --git a/openpype/hosts/photoshop/api/plugin.py b/openpype/hosts/photoshop/api/plugin.py index e0db67de2c..c80e6bbd06 100644 --- a/openpype/hosts/photoshop/api/plugin.py +++ b/openpype/hosts/photoshop/api/plugin.py @@ -1,6 +1,6 @@ import re -import avalon.api +from openpype.pipeline import LoaderPlugin from .launch_logic import stub @@ -29,41 +29,7 @@ def get_unique_layer_name(layers, asset_name, subset_name): return "{}_{:0>3d}".format(name, occurrences + 1) -class PhotoshopLoader(avalon.api.Loader): +class PhotoshopLoader(LoaderPlugin): @staticmethod def get_stub(): return stub() - - -class Creator(avalon.api.Creator): - """Creator plugin to create instances in Photoshop - - A LayerSet is created to support any number of layers in an instance. If - the selection is used, these layers will be added to the LayerSet. - """ - - def process(self): - # Photoshop can have multiple LayerSets with the same name, which does - # not work with Avalon. - msg = "Instance with name \"{}\" already exists.".format(self.name) - stub = lib.stub() # only after Photoshop is up - for layer in stub.get_layers(): - if self.name.lower() == layer.Name.lower(): - msg = QtWidgets.QMessageBox() - msg.setIcon(QtWidgets.QMessageBox.Warning) - msg.setText(msg) - msg.exec_() - return False - - # Store selection because adding a group will change selection. - with lib.maintained_selection(): - - # Add selection to group. - if (self.options or {}).get("useSelection"): - group = stub.group_selected_layers(self.name) - else: - group = stub.create_group(self.name) - - stub.imprint(group, self.data) - - return group diff --git a/openpype/hosts/photoshop/api/ws_stub.py b/openpype/hosts/photoshop/api/ws_stub.py index d4406d17b9..64d89f5420 100644 --- a/openpype/hosts/photoshop/api/ws_stub.py +++ b/openpype/hosts/photoshop/api/ws_stub.py @@ -2,12 +2,11 @@ Stub handling connection from server to client. Used anywhere solution is calling client methods. 
""" -import sys import json import attr from wsrpc_aiohttp import WebSocketAsync -from avalon.tools.webserver.app import WebServerTool +from openpype.tools.adobe_webserver.app import WebServerTool @attr.s diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index 344a53f47e..a001b5f171 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -1,9 +1,9 @@ from Qt import QtWidgets -import openpype.api +from openpype.pipeline import create from openpype.hosts.photoshop import api as photoshop -class CreateImage(openpype.api.Creator): +class CreateImage(create.LegacyCreator): """Image folder for publish.""" name = "imageDefault" diff --git a/openpype/hosts/photoshop/plugins/load/load_image.py b/openpype/hosts/photoshop/plugins/load/load_image.py index 3b1cfe9636..0a9421b8f2 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image.py +++ b/openpype/hosts/photoshop/plugins/load/load_image.py @@ -1,6 +1,6 @@ import re -from avalon import api +from openpype.pipeline import get_representation_path from openpype.hosts.photoshop import api as photoshop from openpype.hosts.photoshop.api import get_unique_layer_name @@ -54,7 +54,7 @@ class ImageLoader(photoshop.PhotoshopLoader): else: # switching version - keep same name layer_name = container["namespace"] - path = api.get_representation_path(representation) + path = get_representation_path(representation) with photoshop.maintained_selection(): stub.replace_smart_object( layer, path, layer_name diff --git a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py index 6627aded51..5f39121ae1 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py +++ b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py @@ -1,8 +1,8 @@ import os -from avalon.pipeline import get_representation_path_from_context -from avalon.vendor import qargparse +import qargparse +from openpype.pipeline import get_representation_path_from_context from openpype.hosts.photoshop import api as photoshop from openpype.hosts.photoshop.api import get_unique_layer_name @@ -92,4 +92,3 @@ class ImageFromSequenceLoader(photoshop.PhotoshopLoader): def remove(self, container): """No update possible, not containerized.""" pass - diff --git a/openpype/hosts/photoshop/plugins/load/load_reference.py b/openpype/hosts/photoshop/plugins/load/load_reference.py index 60142d4a1f..f5f0545d39 100644 --- a/openpype/hosts/photoshop/plugins/load/load_reference.py +++ b/openpype/hosts/photoshop/plugins/load/load_reference.py @@ -1,7 +1,6 @@ import re -from avalon import api - +from openpype.pipeline import get_representation_path from openpype.hosts.photoshop import api as photoshop from openpype.hosts.photoshop.api import get_unique_layer_name @@ -55,7 +54,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader): else: # switching version - keep same name layer_name = container["namespace"] - path = api.get_representation_path(representation) + path = get_representation_path(representation) with photoshop.maintained_selection(): stub.replace_smart_object( layer, path, layer_name diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py index 2dc5136c8a..fa309e3503 100644 --- a/openpype/hosts/resolve/api/pipeline.py +++ b/openpype/hosts/resolve/api/pipeline.py @@ -9,6 +9,11 @@ from avalon import schema from 
avalon.pipeline import AVALON_CONTAINER_ID from pyblish import api as pyblish from openpype.api import Logger +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) from . import lib from . import PLUGINS_DIR from openpype.tools.utils import host_tools @@ -41,8 +46,8 @@ def install(): pyblish.register_plugin_path(PUBLISH_PATH) log.info("Registering DaVinci Resovle plug-ins..") - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) - avalon.register_plugin_path(avalon.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + avalon.register_plugin_path(LegacyCreator, CREATE_PATH) avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) # register callback for switching publishable @@ -66,8 +71,8 @@ def uninstall(): pyblish.deregister_plugin_path(PUBLISH_PATH) log.info("Deregistering DaVinci Resovle plug-ins..") - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) - avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH) + deregister_loader_plugin_path(LOAD_PATH) + avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH) # register callback for switching publishable diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py index 8612cf82ec..8e1436021c 100644 --- a/openpype/hosts/resolve/api/plugin.py +++ b/openpype/hosts/resolve/api/plugin.py @@ -1,13 +1,17 @@ import re import uuid -from avalon import api -import openpype.api as pype -from openpype.hosts import resolve -from avalon.vendor import qargparse -from . import lib +import qargparse from Qt import QtWidgets, QtCore +import openpype.api as pype +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) +from openpype.hosts import resolve +from . 
import lib + class CreatorWidget(QtWidgets.QDialog): @@ -289,7 +293,7 @@ class ClipLoader: """ Initialize object Arguments: - cls (avalon.api.Loader): plugin object + cls (openpype.pipeline.load.LoaderPlugin): plugin object context (dict): loader plugin context options (dict)[optional]: possible keys: projectBinPath: "path/to/binItem" @@ -445,7 +449,7 @@ class ClipLoader: return timeline_item -class TimelineItemLoader(api.Loader): +class TimelineItemLoader(LoaderPlugin): """A basic SequenceLoader for Resolve This will implement the basic behavior for a loader to inherit from that @@ -493,7 +497,7 @@ class TimelineItemLoader(api.Loader): pass -class Creator(pype.PypeCreatorMixin, api.Creator): +class Creator(LegacyCreator): """Creator class wrapper """ marker_color = "Purple" diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py index e20384ee6c..71850d95f6 100644 --- a/openpype/hosts/resolve/plugins/load/load_clip.py +++ b/openpype/hosts/resolve/plugins/load/load_clip.py @@ -1,11 +1,14 @@ -from avalon import io, api -from openpype.hosts import resolve from copy import deepcopy from importlib import reload + +from avalon import io +from openpype.hosts import resolve +from openpype.pipeline import get_representation_path from openpype.hosts.resolve.api import lib, plugin reload(plugin) reload(lib) + class LoadClip(resolve.TimelineItemLoader): """Load a subset to timeline as clip @@ -99,7 +102,7 @@ class LoadClip(resolve.TimelineItemLoader): version_name = version.get("name", None) colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) - self.fname = api.get_representation_path(representation) + self.fname = get_representation_path(representation) context["version"] = {"data": version_data} loader = resolve.ClipLoader(self, context) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py index 596a8ccfd2..c1c48ec72d 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py @@ -3,9 +3,10 @@ import re import pyblish.api import json -from avalon.api import format_template_with_optional_keys - -from openpype.lib import prepare_template_data +from openpype.lib import ( + prepare_template_data, + StringTemplate, +) class CollectTextures(pyblish.api.ContextPlugin): @@ -81,14 +82,10 @@ class CollectTextures(pyblish.api.ContextPlugin): parsed_subset = instance.data["subset"].replace( instance.data["family"], '') - fill_pairs = { + explicit_data = { "subset": parsed_subset } - fill_pairs = prepare_template_data(fill_pairs) - workfile_subset = format_template_with_optional_keys( - fill_pairs, self.workfile_subset_template) - processed_instance = False for repre in instance.data["representations"]: ext = repre["ext"].replace('.', '') @@ -102,6 +99,22 @@ class CollectTextures(pyblish.api.ContextPlugin): if ext in self.main_workfile_extensions or \ ext in self.other_workfile_extensions: + formatting_data = self._get_parsed_groups( + repre_file, + self.input_naming_patterns["workfile"], + self.input_naming_groups["workfile"], + self.color_space + ) + self.log.info("Parsed groups from workfile " + "name '{}': {}".format(repre_file, + formatting_data)) + + formatting_data.update(explicit_data) + fill_pairs = prepare_template_data(formatting_data) + workfile_subset = StringTemplate.format_strict_template( + 
self.workfile_subset_template, fill_pairs + ) + asset_build = self._get_asset_build( repre_file, self.input_naming_patterns["workfile"], @@ -148,11 +161,23 @@ class CollectTextures(pyblish.api.ContextPlugin): resource_files[workfile_subset].append(item) if ext in self.texture_extensions: + formatting_data = self._get_parsed_groups( + repre_file, + self.input_naming_patterns["textures"], + self.input_naming_groups["textures"], + self.color_space + ) + + self.log.info("Parsed groups from texture " + "name '{}': {}".format(repre_file, + formatting_data)) + c_space = self._get_color_space( repre_file, self.color_space ) + # optional value channel = self._get_channel_name( repre_file, self.input_naming_patterns["textures"], @@ -160,6 +185,7 @@ class CollectTextures(pyblish.api.ContextPlugin): self.color_space ) + # optional value shader = self._get_shader_name( repre_file, self.input_naming_patterns["textures"], @@ -167,16 +193,19 @@ class CollectTextures(pyblish.api.ContextPlugin): self.color_space ) - formatting_data = { + explicit_data = { "color_space": c_space or '', # None throws exception "channel": channel or '', "shader": shader or '', "subset": parsed_subset or '' } + formatting_data.update(explicit_data) + fill_pairs = prepare_template_data(formatting_data) - subset = format_template_with_optional_keys( - fill_pairs, self.texture_subset_template) + subset = StringTemplate.format_strict_template( + self.texture_subset_template, fill_pairs + ) asset_build = self._get_asset_build( repre_file, @@ -243,6 +272,13 @@ class CollectTextures(pyblish.api.ContextPlugin): for asset_build, version, subset, family in asset_builds: if not main_version: main_version = version + + try: + version_int = int(version or main_version or 1) + except ValueError: + self.log.error("Parsed version {} is not " + "an number".format(version)) + new_instance = context.create_instance(subset) new_instance.data.update( { @@ -251,7 +287,7 @@ class CollectTextures(pyblish.api.ContextPlugin): "label": subset, "name": subset, "family": family, - "version": int(version or main_version or 1), + "version": version_int, "asset_build": asset_build # remove in validator } ) @@ -320,13 +356,14 @@ class CollectTextures(pyblish.api.ContextPlugin): """ asset_name = "NOT_AVAIL" - return self._parse(name, input_naming_patterns, input_naming_groups, - color_spaces, 'asset') or asset_name + return (self._parse_key(name, input_naming_patterns, + input_naming_groups, color_spaces, 'asset') or + asset_name) def _get_version(self, name, input_naming_patterns, input_naming_groups, color_spaces): - found = self._parse(name, input_naming_patterns, input_naming_groups, - color_spaces, 'version') + found = self._parse_key(name, input_naming_patterns, + input_naming_groups, color_spaces, 'version') if found: return found.replace('v', '') @@ -336,8 +373,8 @@ class CollectTextures(pyblish.api.ContextPlugin): def _get_udim(self, name, input_naming_patterns, input_naming_groups, color_spaces): """Parses from 'name' udim value.""" - found = self._parse(name, input_naming_patterns, input_naming_groups, - color_spaces, 'udim') + found = self._parse_key(name, input_naming_patterns, + input_naming_groups, color_spaces, 'udim') if found: return found @@ -375,12 +412,15 @@ class CollectTextures(pyblish.api.ContextPlugin): Unknown format of channel name and color spaces >> cs are known list - 'color_space' used as a placeholder """ - found = self._parse(name, input_naming_patterns, input_naming_groups, - color_spaces, 'shader') - if found: - return found + 
found = None + try: + found = self._parse_key(name, input_naming_patterns, + input_naming_groups, color_spaces, + 'shader') + except ValueError: + self.log.warning("Didn't find shader in {}".format(name)) - self.log.warning("Didn't find shader in {}".format(name)) + return found def _get_channel_name(self, name, input_naming_patterns, input_naming_groups, color_spaces): @@ -389,15 +429,18 @@ class CollectTextures(pyblish.api.ContextPlugin): Unknown format of channel name and color spaces >> cs are known list - 'color_space' used as a placeholder """ - found = self._parse(name, input_naming_patterns, input_naming_groups, - color_spaces, 'channel') - if found: - return found + found = None + try: + found = self._parse_key(name, input_naming_patterns, + input_naming_groups, color_spaces, + 'channel') + except ValueError: + self.log.warning("Didn't find channel in {}".format(name)) - self.log.warning("Didn't find channel in {}".format(name)) + return found - def _parse(self, name, input_naming_patterns, input_naming_groups, - color_spaces, key): + def _parse_key(self, name, input_naming_patterns, input_naming_groups, + color_spaces, key): """Universal way to parse 'name' with configurable regex groups. Args: @@ -411,23 +454,47 @@ class CollectTextures(pyblish.api.ContextPlugin): Raises: ValueError - if broken 'input_naming_groups' """ + parsed_groups = self._get_parsed_groups(name, + input_naming_patterns, + input_naming_groups, + color_spaces) + + try: + parsed_value = parsed_groups[key] + return parsed_value + except (IndexError, KeyError): + msg = ("'Textures group positions' must " + + "have '{}' key".format(key)) + raise ValueError(msg) + + def _get_parsed_groups(self, name, input_naming_patterns, + input_naming_groups, color_spaces): + """Universal way to parse 'name' with configurable regex groups. + + Args: + name (str): workfile name or texture name + input_naming_patterns (list): + [workfile_pattern] or [texture_pattern] + input_naming_groups (list) + ordinal position of regex groups matching to input_naming.. 
+ color_spaces (list) - predefined color spaces + + Returns: + (dict) {group_name:parsed_value} + """ for input_pattern in input_naming_patterns: for cs in color_spaces: pattern = input_pattern.replace('{color_space}', cs) regex_result = re.findall(pattern, name) if regex_result: - idx = list(input_naming_groups).index(key) - if idx < 0: - msg = "input_naming_groups must " +\ - "have '{}' key".format(key) - raise ValueError(msg) + if len(regex_result[0]) == len(input_naming_groups): + return dict(zip(input_naming_groups, regex_result[0])) + else: + self.log.warning("No of parsed groups doesn't match " + "no of group labels") - try: - parsed_value = regex_result[0][idx] - return parsed_value - except IndexError: - self.log.warning("Wrong index, probably " - "wrong name {}".format(name)) + raise ValueError("Name '{}' cannot be parsed by any " + "'{}' patterns".format(name, input_naming_patterns)) def _update_representations(self, upd_representations): """Frames dont have sense for textures, add collected udims instead.""" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py index c18de5bc1c..f327895b83 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py @@ -2,6 +2,9 @@ import os import pyblish.api import openpype.api +from openpype.lib import ( + get_ffmpeg_tool_path, +) from pprint import pformat @@ -27,7 +30,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): instance.data["representations"] = list() # get ffmpet path - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") # get staging dir staging_dir = self.staging_dir(instance) @@ -44,7 +47,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): clip_trimed_path = os.path.join( staging_dir, instance.data["name"] + ext) # # check video file metadata - # input_data = plib.ffprobe_streams(video_file_path)[0] + # input_data = plib.get_ffprobe_streams(video_file_path)[0] # self.log.debug(f"__ input_data: `{input_data}`") start = float(instance.data["clipInH"]) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_editorial_resources.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_editorial_resources.xml new file mode 100644 index 0000000000..803de6bf11 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_editorial_resources.xml @@ -0,0 +1,17 @@ + + + +Missing source video file + +## No attached video file found + +Process expects presence of source video file with same name prefix as an editorial file in same folder. +(example `simple_editorial_setup_Layer1.edl` expects `simple_editorial_setup.mp4` in same folder) + + +### How to repair? + +Copy source video file to the folder next to `.edl` file. (On a disk, do not put it into Standalone Publisher.) + + + diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_frame_ranges.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_frame_ranges.xml new file mode 100644 index 0000000000..933df1c7c5 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_frame_ranges.xml @@ -0,0 +1,15 @@ + + + +Invalid frame range + +## Invalid frame range + +Expected duration or '{duration}' frames set in database, workfile contains only '{found}' frames. 
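The `_get_parsed_groups` helper added to `CollectTextures` above drives all of the name parsing: each configured pattern has its `{color_space}` placeholder substituted, the regex is matched against the file name, and the capture groups are zipped with the configured group labels into a dict. A standalone sketch of that idea, using a made-up pattern and file name rather than anything from the project settings:

```python
# Standalone illustration of the parsing approach in _get_parsed_groups.
# The pattern, group labels and file name below are invented for the example.
import re


def parse_groups(name, patterns, group_names, color_spaces):
    """Return {group_label: captured value} for the first pattern that matches."""
    for pattern in patterns:
        for color_space in color_spaces:
            # '{color_space}' acts as a placeholder inside the configured regex.
            resolved = pattern.replace("{color_space}", color_space)
            result = re.findall(resolved, name)
            if result and len(result[0]) == len(group_names):
                return dict(zip(group_names, result[0]))
    raise ValueError("Name '{}' cannot be parsed by any pattern".format(name))


print(parse_groups(
    "chair_diffuse_ACEScg_v001.1001.exr",
    [r"^(\w+)_(\w+)_{color_space}_v(\d{3})\.(\d{4})\.exr$"],
    ["asset", "channel", "version", "udim"],
    ["ACEScg", "sRGB"],
))
# {'asset': 'chair', 'channel': 'diffuse', 'version': '001', 'udim': '1001'}
```

Because the result is a plain dict, the same output can feed `prepare_template_data` and the subset templates used earlier in the hunk.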
+ +### How to repair? + +Modify configuration in the database or tweak frame range in the workfile. + + + \ No newline at end of file diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_shot_duplicates.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_shot_duplicates.xml new file mode 100644 index 0000000000..77b8727162 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_shot_duplicates.xml @@ -0,0 +1,15 @@ + + + +Duplicate shots + +## Duplicate shot names + +Process contains duplicated shot names '{duplicates_str}'. + +### How to repair? + +Remove shot duplicates. + + + \ No newline at end of file diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_sources.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_sources.xml new file mode 100644 index 0000000000..d527d2173e --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_sources.xml @@ -0,0 +1,16 @@ + + + +Files not found + +## Source files not found + +Process contains duplicated shot names: +'{files_not_found}' + +### How to repair? + +Add missing files or run Publish again to collect new publishable files. + + + \ No newline at end of file diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_task_existence.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_task_existence.xml new file mode 100644 index 0000000000..a943f560d0 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_task_existence.xml @@ -0,0 +1,16 @@ + + + +Task not found + +## Task not found in database + +Process contains tasks that don't exist in database: +'{task_not_found}' + +### How to repair? + +Remove set task or add task into database into proper place. + + + \ No newline at end of file diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_batch.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_batch.xml new file mode 100644 index 0000000000..a645df8d02 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_batch.xml @@ -0,0 +1,15 @@ + + + +No texture files found + +## Batch doesn't contain texture files + +Batch must contain at least one texture file. + +### How to repair? + +Add texture file to the batch or check name if it follows naming convention to match texture files to the batch. + + + \ No newline at end of file diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_has_workfile.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_has_workfile.xml new file mode 100644 index 0000000000..077987a96d --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_has_workfile.xml @@ -0,0 +1,15 @@ + + + +No workfile found + +## Batch should contain workfile + +It is expected that published contains workfile that served as a source for textures. + +### How to repair? + +Add workfile to the batch, or disable this validator if you do not want workfile published. 
+ + + \ No newline at end of file diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_name.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_name.xml new file mode 100644 index 0000000000..2610917736 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_name.xml @@ -0,0 +1,32 @@ + + + +Asset name not found + +## Couldn't parse asset name from a file + +Unable to parse asset name from '{file_name}'. File name doesn't match configured naming convention. + +### How to repair? + +Check Settings: project_settings/standalonepublisher/publish/CollectTextures for naming convention. + + +### __Detailed Info__ (optional) + +This error happens when parsing cannot figure out name of asset texture files belong under. + + + +Missing keys + +## Texture file name is missing some required keys + +Texture '{file_name}' is missing values for {missing_str} keys. + +### How to repair? + +Fix name of texture file and Publish again. + + + diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_versions.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_versions.xml new file mode 100644 index 0000000000..1e536e604f --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_versions.xml @@ -0,0 +1,35 @@ + + + +Texture version + +## Texture version mismatch with workfile + +Workfile '{file_name}' version doesn't match with '{version}' of a texture. + +### How to repair? + +Rename either workfile or texture to contain matching versions + + +### __Detailed Info__ (optional) + +This might happen if you are trying to publish textures for older version of workfile (or the other way). +(Eg. publishing 'workfile_v001' and 'texture_file_v002') + + + +Too many versions + +## Too many versions published at same time + +It is currently expected to publish only batch with single version. + +Found {found} versions. + +### How to repair? + +Please remove files with different version and split publishing into multiple steps. + + + diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_workfiles.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_workfiles.xml new file mode 100644 index 0000000000..8187eb0bc8 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_workfiles.xml @@ -0,0 +1,23 @@ + + + +No secondary workfile + +## No secondary workfile found + +Current process expects that primary workfile (for example with a extension '{extension}') will contain also 'secondary' workfile. + +Secondary workfile for '{file_name}' wasn't found. + +### How to repair? + +Attach secondary workfile or disable this validator and Publish again. + + +### __Detailed Info__ (optional) + +This process was implemented for a possible use case of first workfile coming from Mari, secondary workfile for textures from Substance. +Publish should contain both if primary workfile is present. 
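The help XML files added above exist to back the `PublishXmlValidationError` raises in the validators that follow: the exception carries a `formatting_data` dict whose keys fill the `{duration}`, `{files_not_found}`, `{missing_str}`-style placeholders in the artist-facing text. A hedged sketch of the pattern, where the plugin name, the `sources` key and the error id passed as `key` are assumptions for illustration only:

```python
# Hedged sketch of how the validators below pair with their help XML files.
# "ValidateExampleSources", instance.data["sources"] and key="missing_files"
# are illustrative assumptions, not taken from the repository.
import os

import pyblish.api

import openpype.api
from openpype.pipeline import PublishXmlValidationError


class ValidateExampleSources(pyblish.api.InstancePlugin):
    """Fail with an artist-readable report when source files are missing."""

    order = openpype.api.ValidateContentsOrder
    label = "Validate Example Sources"

    def process(self, instance):
        # Collect every problem first, then raise once with all of them.
        missing = [
            path for path in instance.data.get("sources", [])
            if not os.path.exists(path)
        ]
        if not missing:
            return

        msg = "Files not found: {}".format(", ".join(missing))
        raise PublishXmlValidationError(
            self,
            msg,
            # 'key' selects one error block when the XML describes several
            # failure modes, as validate_texture_name.py does further below.
            key="missing_files",
            formatting_data={"files_not_found": ", ".join(missing)},
        )
```

The same pairing repeats in every validator changed below: the Python side decides when to fail, and the XML file owns the wording the artist actually sees.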
+ + + diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py index 6759b87ceb..afb828474d 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py @@ -1,5 +1,6 @@ import pyblish.api import openpype.api +from openpype.pipeline import PublishXmlValidationError class ValidateEditorialResources(pyblish.api.InstancePlugin): @@ -19,5 +20,7 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin): f"Instance: {instance}, Families: " f"{[instance.data['family']] + instance.data['families']}") check_file = instance.data["editorialSourcePath"] - msg = f"Missing \"{check_file}\"." - assert check_file, msg + msg = "Missing source video file." + + if not check_file: + raise PublishXmlValidationError(self, msg) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py index 943cb73b98..005157af62 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py @@ -1,8 +1,10 @@ import re import pyblish.api + import openpype.api from openpype import lib +from openpype.pipeline import PublishXmlValidationError class ValidateFrameRange(pyblish.api.InstancePlugin): @@ -48,9 +50,15 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): files = [files] frames = len(files) - err_msg = "Frame duration from DB:'{}' ". format(int(duration)) +\ - " doesn't match number of files:'{}'".format(frames) +\ - " Please change frame range for Asset or limit no. of files" - assert frames == duration, err_msg + msg = "Frame duration from DB:'{}' ". format(int(duration)) +\ + " doesn't match number of files:'{}'".format(frames) +\ + " Please change frame range for Asset or limit no. of files" - self.log.debug("Valid ranges {} - {}".format(int(duration), frames)) + formatting_data = {"duration": duration, + "found": frames} + if frames != duration: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + self.log.debug("Valid ranges expected '{}' - found '{}'". 
+ format(int(duration), frames)) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py index 85ec9379ce..fe655f6b74 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py @@ -1,6 +1,7 @@ import pyblish.api -import openpype.api +import openpype.api +from openpype.pipeline import PublishXmlValidationError class ValidateShotDuplicates(pyblish.api.ContextPlugin): """Validating no duplicate names are in context.""" @@ -20,4 +21,8 @@ class ValidateShotDuplicates(pyblish.api.ContextPlugin): shot_names.append(name) msg = "There are duplicate shot names:\n{}".format(duplicate_names) - assert not duplicate_names, msg + + formatting_data = {"duplicates_str": ','.join(duplicate_names)} + if duplicate_names: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py index eec675e97f..316f58988f 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py @@ -1,8 +1,10 @@ -import pyblish.api -import openpype.api - import os +import pyblish.api + +import openpype.api +from openpype.pipeline import PublishXmlValidationError + class ValidateSources(pyblish.api.InstancePlugin): """Validates source files. @@ -11,7 +13,6 @@ class ValidateSources(pyblish.api.InstancePlugin): got deleted between starting of SP and now. """ - order = openpype.api.ValidateContentsOrder label = "Check source files" @@ -22,6 +23,7 @@ class ValidateSources(pyblish.api.InstancePlugin): def process(self, instance): self.log.info("instance {}".format(instance.data)) + missing_files = set() for repre in instance.data.get("representations") or []: files = [] if isinstance(repre["files"], str): @@ -34,4 +36,10 @@ class ValidateSources(pyblish.api.InstancePlugin): file_name) if not os.path.exists(source_file): - raise ValueError("File {} not found".format(source_file)) + missing_files.add(source_file) + + msg = "Files '{}' not found".format(','.join(missing_files)) + formatting_data = {"files_not_found": ' - {}'.join(missing_files)} + if missing_files: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py index e3b2ae1646..825092c81b 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py @@ -1,6 +1,8 @@ import pyblish.api from avalon import io +from openpype.pipeline import PublishXmlValidationError + class ValidateTaskExistence(pyblish.api.ContextPlugin): """Validating tasks on instances are filled and existing.""" @@ -53,4 +55,9 @@ class ValidateTaskExistence(pyblish.api.ContextPlugin): "Asset: \"{}\" Task: \"{}\"".format(*missing_pair) ) - raise AssertionError(msg.format("\n".join(pair_msgs))) + msg = msg.format("\n".join(pair_msgs)) + + formatting_data = {"task_not_found": ' - {}'.join(pair_msgs)} + if pair_msgs: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git 
a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py index d592a4a059..d66fb257bb 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py @@ -1,6 +1,8 @@ import pyblish.api import openpype.api +from openpype.pipeline import PublishXmlValidationError + class ValidateTextureBatch(pyblish.api.InstancePlugin): """Validates that some texture files are present.""" @@ -15,8 +17,10 @@ class ValidateTextureBatch(pyblish.api.InstancePlugin): present = False for instance in instance.context: if instance.data["family"] == "textures": - self.log.info("Some textures present.") + self.log.info("At least some textures present.") return - assert present, "No textures found in published batch!" + msg = "No textures found in published batch!" + if not present: + raise PublishXmlValidationError(self, msg) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py index 7cd540668c..0e67464f59 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py @@ -1,5 +1,7 @@ import pyblish.api + import openpype.api +from openpype.pipeline import PublishXmlValidationError class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin): @@ -17,4 +19,6 @@ class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin): def process(self, instance): wfile = instance.data["versionData"].get("workfile") - assert wfile, "Textures are missing attached workfile" + msg = "Textures are missing attached workfile" + if not wfile: + raise PublishXmlValidationError(self, msg) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py index 4bafe81020..751ad917ca 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py @@ -1,6 +1,7 @@ import pyblish.api -import openpype.api +import openpype.api +from openpype.pipeline import PublishXmlValidationError class ValidateTextureBatchNaming(pyblish.api.InstancePlugin): """Validates that all instances had properly formatted name.""" @@ -19,9 +20,13 @@ class ValidateTextureBatchNaming(pyblish.api.InstancePlugin): msg = "Couldn't find asset name in '{}'\n".format(file_name) + \ "File name doesn't follow configured pattern.\n" + \ "Please rename the file." 
- assert "NOT_AVAIL" not in instance.data["asset_build"], msg - instance.data.pop("asset_build") + formatting_data = {"file_name": file_name} + if "NOT_AVAIL" in instance.data["asset_build"]: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + instance.data.pop("asset_build") # not needed anymore if instance.data["family"] == "textures": file_name = instance.data["representations"][0]["files"][0] @@ -47,4 +52,10 @@ class ValidateTextureBatchNaming(pyblish.api.InstancePlugin): "Name of the texture file doesn't match expected pattern.\n" + \ "Please rename file(s) {}".format(file_name) - assert not missing_key_values, msg + missing_str = ','.join(["'{}'".format(key) + for key in missing_key_values]) + formatting_data = {"file_name": file_name, + "missing_str": missing_str} + if missing_key_values: + raise PublishXmlValidationError(self, msg, key="missing_values", + formatting_data=formatting_data) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py index 90d0e8e512..84d9def895 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py @@ -1,5 +1,7 @@ import pyblish.api + import openpype.api +from openpype.pipeline import PublishXmlValidationError class ValidateTextureBatchVersions(pyblish.api.InstancePlugin): @@ -25,14 +27,21 @@ class ValidateTextureBatchVersions(pyblish.api.InstancePlugin): self.log.info("No workfile present for textures") return - msg = "Not matching version: texture v{:03d} - workfile {}" - assert version_str in wfile, \ + if version_str not in wfile: + msg = "Not matching version: texture v{:03d} - workfile {}" msg.format( instance.data["version"], wfile ) + raise PublishXmlValidationError(self, msg) present_versions = set() for instance in instance.context: present_versions.add(instance.data["version"]) - assert len(present_versions) == 1, "Too many versions in a batch!" + if len(present_versions) != 1: + msg = "Too many versions in a batch!" + found = ','.join(["'{}'".format(val) for val in present_versions]) + formatting_data = {"found": found} + + raise PublishXmlValidationError(self, msg, key="too_many", + formatting_data=formatting_data) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py index 25bb5aea4a..fa492a80d8 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py @@ -1,11 +1,13 @@ import pyblish.api + import openpype.api +from openpype.pipeline import PublishXmlValidationError class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin): """Validates that textures workfile has collected resources (optional). - Collected recourses means secondary workfiles (in most cases). + Collected resources means secondary workfiles (in most cases). 
""" label = "Validate Texture Workfile Has Resources" @@ -24,6 +26,13 @@ class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin): self.log.warning("Only secondary workfile present!") return - msg = "No secondary workfiles present for workfile {}".\ - format(instance.data["name"]) - assert instance.data.get("resources"), msg + if not instance.data.get("resources"): + msg = "No secondary workfile present for workfile '{}'". \ + format(instance.data["name"]) + ext = self.main_workfile_extensions[0] + formatting_data = {"file_name": instance.data["name"], + "extension": ext} + + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data + ) diff --git a/openpype/hosts/testhost/plugins/publish/collect_context.py b/openpype/hosts/testhost/plugins/publish/collect_context.py index bbb8477cdf..0ab98fb84b 100644 --- a/openpype/hosts/testhost/plugins/publish/collect_context.py +++ b/openpype/hosts/testhost/plugins/publish/collect_context.py @@ -19,7 +19,7 @@ class CollectContextDataTestHost( hosts = ["testhost"] @classmethod - def get_instance_attr_defs(cls): + def get_attribute_defs(cls): return [ attribute_definitions.BoolDef( "test_bool", diff --git a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py b/openpype/hosts/testhost/plugins/publish/collect_instance_1.py index 979ab83f11..3c035eccb6 100644 --- a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py +++ b/openpype/hosts/testhost/plugins/publish/collect_instance_1.py @@ -20,7 +20,7 @@ class CollectInstanceOneTestHost( hosts = ["testhost"] @classmethod - def get_instance_attr_defs(cls): + def get_attribute_defs(cls): return [ attribute_definitions.NumberDef( "version", diff --git a/openpype/hosts/tvpaint/api/communication_server.py b/openpype/hosts/tvpaint/api/communication_server.py index e9c5f4c73e..65cb9aa2f3 100644 --- a/openpype/hosts/tvpaint/api/communication_server.py +++ b/openpype/hosts/tvpaint/api/communication_server.py @@ -21,7 +21,7 @@ from aiohttp_json_rpc.protocol import ( ) from aiohttp_json_rpc.exceptions import RpcError -from avalon import api +from openpype.lib import emit_event from openpype.hosts.tvpaint.tvpaint_plugin import get_plugin_files_path log = logging.getLogger(__name__) @@ -754,7 +754,7 @@ class BaseCommunicator: self._on_client_connect() - api.emit("application.launched") + emit_event("application.launched") def _on_client_connect(self): self._initial_textfile_write() @@ -938,5 +938,5 @@ class QtCommunicator(BaseCommunicator): def _exit(self, *args, **kwargs): super()._exit(*args, **kwargs) - api.emit("application.exit") + emit_event("application.exit") self.qt_app.exit(self.exit_code) diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index 74eb41892c..46c9d3a1dd 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -14,6 +14,12 @@ from avalon.pipeline import AVALON_CONTAINER_ID from openpype.hosts import tvpaint from openpype.api import get_current_project_settings +from openpype.lib import register_event_callback +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) from .lib import ( execute_george, @@ -75,8 +81,8 @@ def install(): pyblish.api.register_host("tvpaint") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH) + register_loader_plugin_path(LOAD_PATH) + 
avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) registered_callbacks = ( pyblish.api.registered_callbacks().get("instanceToggled") or [] @@ -84,8 +90,8 @@ def install(): if on_instance_toggle not in registered_callbacks: pyblish.api.register_callback("instanceToggled", on_instance_toggle) - avalon.api.on("application.launched", initial_launch) - avalon.api.on("application.exit", application_exit) + register_event_callback("application.launched", initial_launch) + register_event_callback("application.exit", application_exit) def uninstall(): @@ -97,8 +103,8 @@ def uninstall(): log.info("OpenPype - Uninstalling TVPaint integration") pyblish.api.deregister_host("tvpaint") pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH) + deregister_loader_plugin_path(LOAD_PATH) + avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) def containerise( diff --git a/openpype/hosts/tvpaint/api/plugin.py b/openpype/hosts/tvpaint/api/plugin.py index af80c9eae2..15ad8905e0 100644 --- a/openpype/hosts/tvpaint/api/plugin.py +++ b/openpype/hosts/tvpaint/api/plugin.py @@ -1,16 +1,17 @@ import re import uuid -import avalon.api - -from openpype.api import PypeCreatorMixin +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) from openpype.hosts.tvpaint.api import ( pipeline, lib ) -class Creator(PypeCreatorMixin, avalon.api.Creator): +class Creator(LegacyCreator): def __init__(self, *args, **kwargs): super(Creator, self).__init__(*args, **kwargs) # Add unified identifier created with `uuid` module @@ -74,7 +75,7 @@ class Creator(PypeCreatorMixin, avalon.api.Creator): self.write_instances(data) -class Loader(avalon.api.Loader): +class Loader(LoaderPlugin): hosts = ["tvpaint"] @staticmethod diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py index 40a7d15990..c1af9632b1 100644 --- a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py +++ b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py @@ -1,5 +1,4 @@ -from avalon.api import CreatorError - +from openpype.pipeline import CreatorError from openpype.lib import prepare_template_data from openpype.hosts.tvpaint.api import ( plugin, diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py index af962052fc..a7f717ccec 100644 --- a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py +++ b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py @@ -1,4 +1,4 @@ -from avalon.api import CreatorError +from openpype.pipeline import CreatorError from openpype.lib import prepare_template_data from openpype.hosts.tvpaint.api import ( plugin, diff --git a/openpype/hosts/tvpaint/plugins/load/load_image.py b/openpype/hosts/tvpaint/plugins/load/load_image.py index 7dba1e3619..f861d0119e 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_image.py +++ b/openpype/hosts/tvpaint/plugins/load/load_image.py @@ -1,4 +1,4 @@ -from avalon.vendor import qargparse +import qargparse from openpype.hosts.tvpaint.api import lib, plugin diff --git a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py index 0a85e5dc76..5e4e3965d2 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py +++ b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py @@ -1,6 +1,6 
@@ import collections +import qargparse from avalon.pipeline import get_representation_context -from avalon.vendor import qargparse from openpype.hosts.tvpaint.api import lib, pipeline, plugin diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py index 33e2a76cc9..d224cfc390 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_workfile.py +++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py @@ -1,10 +1,11 @@ -import getpass import os from avalon import api, io from openpype.lib import ( + StringTemplate, get_workfile_template_key_from_context, - get_workdir_data + get_workdir_data, + get_last_workfile_with_version, ) from openpype.api import Anatomy from openpype.hosts.tvpaint.api import lib, pipeline, plugin @@ -67,9 +68,8 @@ class LoadWorkfile(plugin.Loader): data = get_workdir_data(project_doc, asset_doc, task_name, host_name) data["root"] = anatomy.roots - data["user"] = getpass.getuser() - template = anatomy.templates[template_key]["file"] + file_template = anatomy.templates[template_key]["file"] # Define saving file extension if current_file: @@ -81,11 +81,12 @@ class LoadWorkfile(plugin.Loader): data["ext"] = extension - work_root = api.format_template_with_optional_keys( - data, anatomy.templates[template_key]["folder"] + folder_template = anatomy.templates[template_key]["folder"] + work_root = StringTemplate.format_strict_template( + folder_template, data ) - version = api.last_workfile_with_version( - work_root, template, data, host.file_extensions() + version = get_last_workfile_with_version( + work_root, file_template, data, host.file_extensions() )[1] if version is None: @@ -95,8 +96,8 @@ class LoadWorkfile(plugin.Loader): data["version"] = version - path = os.path.join( - work_root, - api.format_template_with_optional_keys(data, template) + filename = StringTemplate.format_strict_template( + file_template, data ) + path = os.path.join(work_root, filename) host.save_file(path) diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_asset_name.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_asset_name.xml new file mode 100644 index 0000000000..33a9ca4247 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_asset_name.xml @@ -0,0 +1,22 @@ + + + +Subset context +## Invalid subset context + +Context of the given subset doesn't match your current scene. + +### How to repair? + +Yout can fix this with "Repair" button on the right. This will use '{expected_asset}' asset name and overwrite '{found_asset}' asset name in scene metadata. + +After that restart publishing with Reload button. + + +### How could this happen? + +The subset was created in different scene with different context +or the scene file was copy pasted from different context. + + + diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml new file mode 100644 index 0000000000..5d798544c0 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml @@ -0,0 +1,22 @@ + + + +Layer names +## Duplicated layer names + +Can't determine which layers should be published because there are duplicated layer names in the scene. + +### Duplicated layer names + +{layer_names} + +*Check layer names for all subsets in list on left side.* + +### How to repair? + +Hide/rename/remove layers that should not be published. 
+ +If all of them should be published then you have duplicated subset names in the scene. In that case you have to recreate them and use a different variant name. + + + diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml new file mode 100644 index 0000000000..e7be735888 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml @@ -0,0 +1,20 @@ + + + +Layers visibility +## No layers are visible + +Layer visibility was changed during publishing, which caused all layers for subset "{instance_name}" to be hidden. + +### Layer names for **{instance_name}** + +{layer_names} + +*Check layer names for all subsets in the list on the left side.* + +### How to repair? + +Reset publishing and do not change the visibility of layers after hitting the publish button. + + + diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_marks.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_marks.xml new file mode 100644 index 0000000000..f0e01ebaa7 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_marks.xml @@ -0,0 +1,21 @@ + + + +Frame range +## Invalid render frame range + +The scene frame range which will be rendered is defined by MarkIn and MarkOut. Expected frame range is {expected_frame_range} and current frame range is {current_frame_range}. + +It is also required that MarkIn and MarkOut are enabled in the scene. Their color is highlighted on the timeline when they are enabled. + +- MarkIn is {mark_in_enable_state} +- MarkOut is {mark_out_enable_state} + +### How to repair? + +You can fix this with the "Repair" button on the right. That will change MarkOut to {expected_mark_out}. + +Or you can manually modify MarkIn and MarkOut in the scene timeline. + + + diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_missing_layer_names.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_missing_layer_names.xml new file mode 100644 index 0000000000..e96e7c5044 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_missing_layer_names.xml @@ -0,0 +1,18 @@ + + + +Missing layers +## Missing layers for render pass + +Render pass subset "{instance_name}" has stored layer names that belong to its rendering scope, but the layers were not found in the scene. + +### Missing layer names + +{layer_names} + +### How to repair? + +Find the layers that belong to subset {instance_name} and rename them back to the expected layer names, or remove the subset and create a new one with the right layers. + + + diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_render_pass_group.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_render_pass_group.xml new file mode 100644 index 0000000000..df7bdf36e5 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_render_pass_group.xml @@ -0,0 +1,14 @@ + + + +Render pass group +## Invalid group of Render Pass layers + +Layers of Render Pass {instance_name} belong to a Render Group which is defined by the TVPaint color group {expected_group}, but the layers are not in that group. + +### How to repair? + +Change the color group to {expected_group} on layers {layer_names}. 
+ + + diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_scene_settings.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_scene_settings.xml new file mode 100644 index 0000000000..f741c71456 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_scene_settings.xml @@ -0,0 +1,26 @@ + + + +Scene settings +## Invalid scene settings + +Scene settings do not match the expected values. + +**FPS** +- Expected value: {expected_fps} +- Current value: {current_fps} + +**Resolution** +- Expected value: {expected_width}x{expected_height} +- Current value: {current_width}x{current_height} + +**Pixel ratio** +- Expected value: {expected_pixel_ratio} +- Current value: {current_pixel_ratio} + +### How to repair? + +FPS and Pixel ratio can be modified in the scene settings. Wrong resolution can be fixed by changing the resolution of the scene, but due to TVPaint limitations you may need to create a new scene. + + + diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_start_frame.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_start_frame.xml new file mode 100644 index 0000000000..9052abf66c --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_start_frame.xml @@ -0,0 +1,14 @@ + + + +First frame +## MarkIn is not set to 0 + +MarkIn in your scene must start from frame index 0 but MarkIn is set to {current_start_frame}. + +### How to repair? + +You can modify MarkIn manually or hit the "Repair" button on the right which will change MarkIn to 0 (does not change MarkOut). + + + diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml new file mode 100644 index 0000000000..7397f6ef0b --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml @@ -0,0 +1,19 @@ + + + +Missing metadata +## Your scene is missing context metadata + +Your scene does not contain metadata about {missing_metadata}. + +### How to repair? + +Resave the scene using the Workfiles tool or hit the "Repair" button on the right. + + +### How could this happen? + +You're using a scene file that was not created using the Workfiles tool. + + + diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml new file mode 100644 index 0000000000..c4ffafc8b5 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml @@ -0,0 +1,24 @@ + + + +Project name +## Your scene is from a different project + +It is not possible to publish into project "{workfile_project_name}" when TVPaint was opened with project "{env_project_name}" in context. + +### How to repair? + +If the workfile belongs to project "{env_project_name}" then use the Workfiles tool to resave it. + +Otherwise close TVPaint and launch it again from the project you want to publish in. + + +### How could this happen? + +You've opened a workfile from a different project. You've opened TVPaint on a task from "{env_project_name}" and then opened TVPaint again on a task from "{workfile_project_name}" without closing TVPaint. Because only one TVPaint instance can run at a time, the project didn't change. + +### Why is it important? +Because the project may affect how TVPaint works or change publishing behavior, it is dangerous to allow changing the project context this way. For example, publishing would not run as expected. 
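For orientation, here is a minimal sketch of how these help XML files appear to be consumed by the validators later in this diff: a plugin raises PublishXmlValidationError and passes formatting_data whose keys fill the {placeholder} tokens in the matching XML. The plugin name, the asset values, and the pairing of plugin to XML file by name are illustrative assumptions, not part of this changeset.

```python
import pyblish.api
from openpype.pipeline import PublishXmlValidationError


class ValidateExampleAssetName(pyblish.api.InstancePlugin):
    """Hypothetical validator showing the help-XML pattern."""

    label = "Validate Example Asset Name"
    order = pyblish.api.ValidatorOrder

    def process(self, instance):
        expected_asset = "sh010"  # would come from the publish context
        found_asset = instance.data.get("asset")
        if found_asset == expected_asset:
            return

        # Keys in 'formatting_data' match the {expected_asset} and
        # {found_asset} placeholders used by the help XML above.
        raise PublishXmlValidationError(
            self,
            "Instance asset name does not match the context.",
            formatting_data={
                "expected_asset": expected_asset,
                "found_asset": found_asset,
            }
        )
```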
+ + + diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py index 0fdeba0a21..70816f9f18 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py @@ -1,4 +1,5 @@ import pyblish.api +from openpype.pipeline import PublishXmlValidationError from openpype.hosts.tvpaint.api import pipeline @@ -27,7 +28,7 @@ class FixAssetNames(pyblish.api.Action): pipeline._write_instances(new_instance_items) -class ValidateMissingLayers(pyblish.api.ContextPlugin): +class ValidateAssetNames(pyblish.api.ContextPlugin): """Validate assset name present on instance. Asset name on instance should be the same as context's. @@ -48,8 +49,18 @@ class ValidateMissingLayers(pyblish.api.ContextPlugin): instance_label = ( instance.data.get("label") or instance.data["name"] ) - raise AssertionError(( - "Different asset name on instance then context's." - " Instance \"{}\" has asset name: \"{}\"" - " Context asset name is: \"{}\"" - ).format(instance_label, asset_name, context_asset_name)) + + raise PublishXmlValidationError( + self, + ( + "Different asset name on instance then context's." + " Instance \"{}\" has asset name: \"{}\"" + " Context asset name is: \"{}\"" + ).format( + instance_label, asset_name, context_asset_name + ), + formatting_data={ + "expected_asset": context_asset_name, + "found_asset": asset_name + } + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py b/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py index efccf19ef9..9f61bdbcd0 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py @@ -1,4 +1,5 @@ import pyblish.api +from openpype.pipeline import PublishXmlValidationError class ValidateLayersGroup(pyblish.api.InstancePlugin): @@ -30,14 +31,20 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin): "\"{}\"".format(layer_name) for layer_name in duplicated_layer_names ]) - - # Raise an error - raise AssertionError( + detail_lines = [ + "- {}".format(layer_name) + for layer_name in set(duplicated_layer_names) + ] + raise PublishXmlValidationError( + self, ( "Layers have duplicated names for instance {}." # Description what's wrong " There are layers with same name and one of them is marked" " for publishing so it is not possible to know which should" " be published. Please look for layers with names: {}" - ).format(instance.data["label"], layers_msg) + ).format(instance.data["label"], layers_msg), + formatting_data={ + "layer_names": "
".join(detail_lines) + } ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py index 74ef34169e..7ea0587b8f 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py @@ -1,6 +1,8 @@ import pyblish.api +from openpype.pipeline import PublishXmlValidationError +# TODO @iLLiCiTiT add repair action to disable instances? class ValidateLayersVisiblity(pyblish.api.InstancePlugin): """Validate existence of renderPass layers.""" @@ -9,8 +11,26 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin): families = ["review", "renderPass", "renderLayer"] def process(self, instance): + layer_names = set() for layer in instance.data["layers"]: + layer_names.add(layer["name"]) if layer["visible"]: return - raise AssertionError("All layers of instance are not visible.") + instance_label = ( + instance.data.get("label") or instance.data["name"] + ) + + raise PublishXmlValidationError( + self, + "All layers of instance \"{}\" are not visible.".format( + instance_label + ), + formatting_data={ + "instance_name": instance_label, + "layer_names": "
".join([ + "- {}".format(layer_name) + for layer_name in layer_names + ]) + } + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py index f45247ceac..d1f299e006 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py @@ -1,6 +1,7 @@ import json import pyblish.api +from openpype.pipeline import PublishXmlValidationError from openpype.hosts.tvpaint.api import lib @@ -73,9 +74,34 @@ class ValidateMarks(pyblish.api.ContextPlugin): "expected": expected_data[k] } - if invalid: - raise AssertionError( - "Marks does not match database:\n{}".format( - json.dumps(invalid, sort_keys=True, indent=4) - ) - ) + # Validation ends + if not invalid: + return + + current_frame_range = ( + (current_data["markOut"] - current_data["markIn"]) + 1 + ) + expected_frame_range = ( + (expected_data["markOut"] - expected_data["markIn"]) + 1 + ) + mark_in_enable_state = "disabled" + if current_data["markInState"]: + mark_in_enable_state = "enabled" + + mark_out_enable_state = "disabled" + if current_data["markOutState"]: + mark_out_enable_state = "enabled" + + raise PublishXmlValidationError( + self, + "Marks does not match database:\n{}".format( + json.dumps(invalid, sort_keys=True, indent=4) + ), + formatting_data={ + "current_frame_range": str(current_frame_range), + "expected_frame_range": str(expected_frame_range), + "mark_in_enable_state": mark_in_enable_state, + "mark_out_enable_state": mark_out_enable_state, + "expected_mark_out": expected_data["markOut"] + } + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py b/openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py index db9d354fcd..294ce6cf4f 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py @@ -1,4 +1,5 @@ import pyblish.api +from openpype.pipeline import PublishXmlValidationError class ValidateMissingLayers(pyblish.api.InstancePlugin): @@ -30,13 +31,25 @@ class ValidateMissingLayers(pyblish.api.InstancePlugin): "\"{}\"".format(layer_name) for layer_name in missing_layer_names ]) + instance_label = ( + instance.data.get("label") or instance.data["name"] + ) + description_layer_names = "
".join([ + "- {}".format(layer_name) + for layer_name in missing_layer_names + ]) # Raise an error - raise AssertionError( + raise PublishXmlValidationError( + self, ( "Layers were not found by name for instance \"{}\"." # Description what's wrong " Layer names marked for publishing are not available" " in layers list. Missing layer names: {}" - ).format(instance.data["label"], layers_msg) + ).format(instance.data["label"], layers_msg), + formatting_data={ + "instance_name": instance_label, + "layer_names": description_layer_names + } ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_project_settings.py b/openpype/hosts/tvpaint/plugins/publish/validate_project_settings.py deleted file mode 100644 index 84c03a9857..0000000000 --- a/openpype/hosts/tvpaint/plugins/publish/validate_project_settings.py +++ /dev/null @@ -1,34 +0,0 @@ -import json - -import pyblish.api - - -class ValidateProjectSettings(pyblish.api.ContextPlugin): - """Validate project settings against database. - """ - - label = "Validate Project Settings" - order = pyblish.api.ValidatorOrder - optional = True - - def process(self, context): - scene_data = { - "fps": context.data.get("sceneFps"), - "resolutionWidth": context.data.get("sceneWidth"), - "resolutionHeight": context.data.get("sceneHeight"), - "pixelAspect": context.data.get("scenePixelAspect") - } - invalid = {} - for k in scene_data.keys(): - expected_value = context.data["assetEntity"]["data"][k] - if scene_data[k] != expected_value: - invalid[k] = { - "current": scene_data[k], "expected": expected_value - } - - if invalid: - raise AssertionError( - "Project settings does not match database:\n{}".format( - json.dumps(invalid, sort_keys=True, indent=4) - ) - ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py b/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py index 5047b8d729..0fbfca6c56 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py @@ -1,5 +1,6 @@ import collections import pyblish.api +from openpype.pipeline import PublishXmlValidationError class ValidateLayersGroup(pyblish.api.InstancePlugin): @@ -26,11 +27,13 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin): layer_names = instance.data["layer_names"] # Check if all layers from render pass are in right group invalid_layers_by_group_id = collections.defaultdict(list) + invalid_layer_names = set() for layer_name in layer_names: layer = layers_by_name.get(layer_name) _group_id = layer["group_id"] if _group_id != group_id: invalid_layers_by_group_id[_group_id].append(layer) + invalid_layer_names.add(layer_name) # Everything is OK and skip exception if not invalid_layers_by_group_id: @@ -61,16 +64,27 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin): ) # Raise an error - raise AssertionError(( - # Short message - "Layers in wrong group." - # Description what's wrong - " Layers from render pass \"{}\" must be in group {} (id: {})." - # Detailed message - " Layers in wrong group: {}" - ).format( - instance.data["label"], - correct_group["name"], - correct_group["group_id"], - " | ".join(per_group_msgs) - )) + raise PublishXmlValidationError( + self, + ( + # Short message + "Layers in wrong group." + # Description what's wrong + " Layers from render pass \"{}\" must be in group {} (id: {})." 
+ # Detailed message + " Layers in wrong group: {}" + ).format( + instance.data["label"], + correct_group["name"], + correct_group["group_id"], + " | ".join(per_group_msgs) + ), + formatting_data={ + "instance_name": ( + instance.data.get("label") or instance.data["name"] + ), + "expected_group": correct_group["name"], + "layer_names": ", ".join(invalid_layer_names) + + } + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py new file mode 100644 index 0000000000..d235215ac9 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py @@ -0,0 +1,49 @@ +import json + +import pyblish.api +from openpype.pipeline import PublishXmlValidationError + + +# TODO @iLliCiTiT add fix action for fps +class ValidateProjectSettings(pyblish.api.ContextPlugin): + """Validate scene settings against database.""" + + label = "Validate Scene Settings" + order = pyblish.api.ValidatorOrder + optional = True + + def process(self, context): + expected_data = context.data["assetEntity"]["data"] + scene_data = { + "fps": context.data.get("sceneFps"), + "resolutionWidth": context.data.get("sceneWidth"), + "resolutionHeight": context.data.get("sceneHeight"), + "pixelAspect": context.data.get("scenePixelAspect") + } + invalid = {} + for k in scene_data.keys(): + expected_value = expected_data[k] + if scene_data[k] != expected_value: + invalid[k] = { + "current": scene_data[k], "expected": expected_value + } + + if not invalid: + return + + raise PublishXmlValidationError( + self, + "Scene settings do not match the database:\n{}".format( + json.dumps(invalid, sort_keys=True, indent=4) + ), + formatting_data={ + "expected_fps": expected_data["fps"], + "current_fps": scene_data["fps"], + "expected_width": expected_data["resolutionWidth"], + "expected_height": expected_data["resolutionHeight"], + "current_width": scene_data["resolutionWidth"], + "current_height": scene_data["resolutionHeight"], + "expected_pixel_ratio": expected_data["pixelAspect"], + "current_pixel_ratio": scene_data["pixelAspect"] + } + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py index e2f8386757..ddc738c6ed 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py @@ -1,4 +1,5 @@ import pyblish.api +from openpype.pipeline import PublishXmlValidationError from openpype.hosts.tvpaint.api import lib @@ -24,4 +25,13 @@ class ValidateStartFrame(pyblish.api.ContextPlugin): def process(self, context): start_frame = lib.execute_george("tv_startframe") - assert int(start_frame) == 0, "Start frame has to be frame 0." 
+ if int(start_frame) == 0: + return + + raise PublishXmlValidationError( + self, + "Start frame has to be frame 0.", + formatting_data={ + "current_start_frame": start_frame + } + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py index 48fbeedb59..eac345f395 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py @@ -1,4 +1,5 @@ import pyblish.api +from openpype.pipeline import PublishXmlValidationError from openpype.hosts.tvpaint.api import save_file @@ -42,8 +43,12 @@ class ValidateWorkfileMetadata(pyblish.api.ContextPlugin): missing_keys.append(key) if missing_keys: - raise AssertionError( + raise PublishXmlValidationError( + self, "Current workfile is missing metadata about {}.".format( ", ".join(missing_keys) - ) + ), + formatting_data={ + "missing_metadata": ", ".join(missing_keys) + } ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py index cc664d8030..0f25f2f7be 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py @@ -1,5 +1,6 @@ import os import pyblish.api +from openpype.pipeline import PublishXmlValidationError class ValidateWorkfileProjectName(pyblish.api.ContextPlugin): @@ -31,15 +32,23 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin): return # Raise an error - raise AssertionError(( - # Short message - "Workfile from different Project ({})." - # Description what's wrong - " It is not possible to publish when TVPaint was launched in" - "context of different project. Current context project is \"{}\"." - " Launch TVPaint in context of project \"{}\" and then publish." - ).format( - workfile_project_name, - env_project_name, - workfile_project_name, - )) + raise PublishXmlValidationError( + self, + ( + # Short message + "Workfile from different Project ({})." + # Description what's wrong + " It is not possible to publish when TVPaint was launched in" + " context of different project. Current context project is" + " \"{}\". Launch TVPaint in context of project \"{}\"" + " and then publish." 
+ ).format( + workfile_project_name, + env_project_name, + workfile_project_name, + ), + formatting_data={ + "workfile_project_name": workfile_project_name, + "expected_project_name": env_project_name + } + ) diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py index ad64d56e9e..9ec11b942d 100644 --- a/openpype/hosts/unreal/api/pipeline.py +++ b/openpype/hosts/unreal/api/pipeline.py @@ -7,6 +7,11 @@ import pyblish.api from avalon.pipeline import AVALON_CONTAINER_ID from avalon import api +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, +) from openpype.tools.utils import host_tools import openpype.hosts.unreal @@ -43,8 +48,8 @@ def install(): print("-=" * 40) logger.info("installing OpenPype for Unreal") pyblish.api.register_plugin_path(str(PUBLISH_PATH)) - api.register_plugin_path(api.Loader, str(LOAD_PATH)) - api.register_plugin_path(api.Creator, str(CREATE_PATH)) + register_loader_plugin_path(str(LOAD_PATH)) + api.register_plugin_path(LegacyCreator, str(CREATE_PATH)) _register_callbacks() _register_events() @@ -52,8 +57,8 @@ def install(): def uninstall(): """Uninstall Unreal configuration for Avalon.""" pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) - api.deregister_plugin_path(api.Loader, str(LOAD_PATH)) - api.deregister_plugin_path(api.Creator, str(CREATE_PATH)) + deregister_loader_plugin_path(str(LOAD_PATH)) + api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH)) def _register_callbacks(): @@ -70,7 +75,7 @@ def _register_events(): pass -class Creator(api.Creator): +class Creator(LegacyCreator): hosts = ["unreal"] asset_types = [] diff --git a/openpype/hosts/unreal/api/plugin.py b/openpype/hosts/unreal/api/plugin.py index 278b296379..d8d2f2420d 100644 --- a/openpype/hosts/unreal/api/plugin.py +++ b/openpype/hosts/unreal/api/plugin.py @@ -1,16 +1,18 @@ # -*- coding: utf-8 -*- from abc import ABC -import openpype.api -import avalon.api +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) -class Creator(openpype.api.Creator): +class Creator(LegacyCreator): """This serves as skeleton for future OpenPype specific functionality""" defaults = ['Main'] maintain_selection = False -class Loader(avalon.api.Loader, ABC): +class Loader(LoaderPlugin, ABC): """This serves as skeleton for future OpenPype specific functionality""" pass diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py index 027e9f4cd3..3508fe5ed7 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py @@ -2,7 +2,8 @@ """Loader for published alembics.""" import os -from avalon import api, pipeline +from avalon import pipeline +from openpype.pipeline import get_representation_path from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -140,7 +141,7 @@ class PointCacheAlembicLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = self.get_task(source_path, destination_path, name, True) diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py index 0236bab138..180942de51 100644 
--- a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py @@ -2,7 +2,8 @@ """Load Skeletal Mesh alembics.""" import os -from avalon import api, pipeline +from avalon import pipeline +from openpype.pipeline import get_representation_path from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -104,7 +105,7 @@ class SkeletalMeshAlembicLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = unreal.AssetImportTask() diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py index 3bcc8b476f..4e00af1d97 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py @@ -2,7 +2,8 @@ """Loader for Static Mesh alembics.""" import os -from avalon import api, pipeline +from avalon import pipeline +from openpype.pipeline import get_representation_path from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -123,7 +124,7 @@ class StaticMeshAlembicLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = self.get_task(source_path, destination_path, name, True) diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py index c1f7942ef0..ebfce75ca9 100644 --- a/openpype/hosts/unreal/plugins/load/load_animation.py +++ b/openpype/hosts/unreal/plugins/load/load_animation.py @@ -3,12 +3,8 @@ import os import json -import unreal -from unreal import EditorAssetLibrary -from unreal import MovieSceneSkeletalAnimationTrack -from unreal import MovieSceneSkeletalAnimationSection - -from avalon import api, pipeline +from avalon import pipeline +from openpype.pipeline import get_representation_path from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -223,7 +219,7 @@ class AnimationFBXLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = unreal.AssetImportTask() diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/openpype/hosts/unreal/plugins/load/load_layout.py index d99224042a..5a82ad6df6 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout.py +++ b/openpype/hosts/unreal/plugins/load/load_layout.py @@ -12,7 +12,13 @@ from unreal import AssetToolsHelpers from unreal import FBXImportType from unreal import MathLibrary as umath -from avalon import api, io, pipeline +from avalon.pipeline import AVALON_CONTAINER_ID +from openpype.pipeline import ( + discover_loader_plugins, + loaders_from_representation, + load_container, + get_representation_path, +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -308,7 +314,7 
@@ class LayoutLoader(plugin.Loader): with open(lib_path, "r") as fp: data = json.load(fp) - all_loaders = api.discover(api.Loader) + all_loaders = discover_loader_plugins() if not loaded: loaded = [] @@ -339,7 +345,7 @@ class LayoutLoader(plugin.Loader): loaded.append(reference) family = element.get('family') - loaders = api.loaders_from_representation( + loaders = loaders_from_representation( all_loaders, reference) loader = None @@ -356,7 +362,7 @@ class LayoutLoader(plugin.Loader): "asset_dir": asset_dir } - assets = api.load( + assets = load_container( loader, reference, namespace=instance_name, @@ -641,7 +647,7 @@ class LayoutLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -667,9 +673,9 @@ class LayoutLoader(plugin.Loader): def update(self, container, representation): ar = unreal.AssetRegistryHelpers.get_asset_registry() - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] - lib_path = Path(api.get_representation_path(representation)) + lib_path = Path(get_representation_path(representation)) self._remove_actors(destination_path) diff --git a/openpype/hosts/unreal/plugins/load/load_rig.py b/openpype/hosts/unreal/plugins/load/load_rig.py index a7ecb0ef7d..3d5616364c 100644 --- a/openpype/hosts/unreal/plugins/load/load_rig.py +++ b/openpype/hosts/unreal/plugins/load/load_rig.py @@ -2,7 +2,8 @@ """Load Skeletal Meshes form FBX.""" import os -from avalon import api, pipeline +from avalon import pipeline +from openpype.pipeline import get_representation_path from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -124,7 +125,7 @@ class SkeletalMeshFBXLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = unreal.AssetImportTask() diff --git a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py index c8a6964ffb..587fc83a77 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py @@ -2,7 +2,8 @@ """Load Static meshes form FBX.""" import os -from avalon import api, pipeline +from avalon import pipeline +from openpype.pipeline import get_representation_path from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -118,7 +119,7 @@ class StaticMeshFBXLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = self.get_task(source_path, destination_path, name, True) diff --git a/openpype/hosts/webpublisher/api/__init__.py b/openpype/hosts/webpublisher/api/__init__.py index e40d46d662..dbeb628073 100644 --- a/openpype/hosts/webpublisher/api/__init__.py +++ b/openpype/hosts/webpublisher/api/__init__.py @@ -12,30 +12,19 @@ HOST_DIR = os.path.dirname(os.path.abspath( openpype.hosts.webpublisher.__file__)) PLUGINS_DIR = 
os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") - - -def application_launch(): - pass def install(): print("Installing Pype config...") pyblish.register_plugin_path(PUBLISH_PATH) - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) - avalon.register_plugin_path(avalon.Creator, CREATE_PATH) log.info(PUBLISH_PATH) io.install() - avalon.on("application.launched", application_launch) def uninstall(): pyblish.deregister_plugin_path(PUBLISH_PATH) - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) - avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH) # to have required methods for interface diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py index abad14106f..65cef14703 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py @@ -10,14 +10,22 @@ Provides: import os import clique import tempfile +import math + from avalon import io import pyblish.api -from openpype.lib import prepare_template_data +from openpype.lib import ( + prepare_template_data, + get_asset, + get_ffprobe_streams, + convert_ffprobe_fps_value, +) from openpype.lib.plugin_tools import ( parse_json, get_subset_name_with_asset_doc ) + class CollectPublishedFiles(pyblish.api.ContextPlugin): """ This collector will try to find json files in provided @@ -49,10 +57,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): self.log.info("task_sub:: {}".format(task_subfolders)) asset_name = context.data["asset"] - asset_doc = io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset() task_name = context.data["task"] task_type = context.data["taskType"] project_name = context.data["project_name"] @@ -97,11 +102,27 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): instance.data["frameEnd"] = \ instance.data["representations"][0]["frameEnd"] else: - instance.data["frameStart"] = 0 - instance.data["frameEnd"] = 1 + frame_start = asset_doc["data"]["frameStart"] + instance.data["frameStart"] = frame_start + instance.data["frameEnd"] = asset_doc["data"]["frameEnd"] instance.data["representations"] = self._get_single_repre( task_dir, task_data["files"], tags ) + file_url = os.path.join(task_dir, task_data["files"][0]) + no_of_frames = self._get_number_of_frames(file_url) + if no_of_frames: + try: + frame_end = int(frame_start) + math.ceil(no_of_frames) + instance.data["frameEnd"] = math.ceil(frame_end) - 1 + self.log.debug("frameEnd:: {}".format( + instance.data["frameEnd"])) + except ValueError: + self.log.warning("Unable to count frames " + "duration {}".format(no_of_frames)) + + # raise ValueError("STOP") + instance.data["handleStart"] = asset_doc["data"]["handleStart"] + instance.data["handleEnd"] = asset_doc["data"]["handleEnd"] self.log.info("instance.data:: {}".format(instance.data)) @@ -127,7 +148,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): return [repre_data] def _process_sequence(self, files, task_dir, tags): - """Prepare reprentations for sequence of files.""" + """Prepare representation for sequence of files.""" collections, remainder = clique.assemble(files) assert len(collections) == 1, \ "Too many collections in {}".format(files) @@ -188,6 +209,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): msg = "No family 
found for combination of " +\ "task_type: {}, is_sequence:{}, extension: {}".format( task_type, is_sequence, extension) + found_family = "render" assert found_family, msg return (found_family, @@ -243,3 +265,43 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): return version[0].get("version") or 0 else: return 0 + + def _get_number_of_frames(self, file_url): + """Return duration in frames""" + try: + streams = get_ffprobe_streams(file_url, self.log) + except Exception as exc: + raise AssertionError(( + "FFprobe couldn't read information about input file: \"{}\"." + " Error message: {}" + ).format(file_url, str(exc))) + + first_video_stream = None + for stream in streams: + if "width" in stream and "height" in stream: + first_video_stream = stream + break + + if first_video_stream: + nb_frames = stream.get("nb_frames") + if nb_frames: + try: + return int(nb_frames) + except ValueError: + self.log.warning( + "nb_frames {} not convertible".format(nb_frames)) + + duration = stream.get("duration") + frame_rate = convert_ffprobe_fps_value( + stream.get("r_frame_rate", '0/0') + ) + self.log.debug("duration:: {} frame_rate:: {}".format( + duration, frame_rate)) + try: + return float(duration) * float(frame_rate) + except ValueError: + self.log.warning( + "{} or {} cannot be converted".format(duration, + frame_rate)) + + self.log.warning("Cannot get number of frames") diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index 6a24f30455..1ebafbb2d2 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -16,6 +16,19 @@ sys.path.insert(0, python_version_dir) site.addsitedir(python_version_dir) +from .events import ( + emit_event, + register_event_callback +) + +from .vendor_bin_utils import ( + find_executable, + get_vendor_bin_path, + get_oiio_tools_path, + get_ffmpeg_tool_path, + is_oiio_supported +) + from .env_tools import ( env_value_to_bool, get_paths_from_environ, @@ -55,14 +68,9 @@ from .anatomy import ( Anatomy ) -from .config import get_datetime_data - -from .vendor_bin_utils import ( - get_vendor_bin_path, - get_oiio_tools_path, - get_ffmpeg_tool_path, - ffprobe_streams, - is_oiio_supported +from .config import ( + get_datetime_data, + get_formatted_current_time ) from .python_module_tools import ( @@ -81,7 +89,12 @@ from .profiles_filtering import ( from .transcoding import ( get_transcode_temp_directory, should_convert_for_ffmpeg, - convert_for_ffmpeg + convert_for_ffmpeg, + get_ffprobe_data, + get_ffprobe_streams, + get_ffmpeg_codec_args, + get_ffmpeg_format_args, + convert_ffprobe_fps_value, ) from .avalon_context import ( CURRENT_DOC_SCHEMAS, @@ -101,6 +114,8 @@ from .avalon_context import ( get_workdir_data, get_workdir, get_workdir_with_workdir_data, + get_last_workfile_with_version, + get_last_workfile, create_workfile_doc, save_workfile_data_to_doc, @@ -159,6 +174,7 @@ from .plugin_tools import ( ) from .path_tools import ( + create_hard_link, version_up, get_version_from_path, get_last_version_from_path, @@ -193,6 +209,10 @@ from .openpype_version import ( terminal = Terminal __all__ = [ + "emit_event", + "register_event_callback", + + "find_executable", "get_openpype_execute_args", "get_pype_execute_args", "get_linux_launcher_args", @@ -211,7 +231,6 @@ __all__ = [ "get_vendor_bin_path", "get_oiio_tools_path", "get_ffmpeg_tool_path", - "ffprobe_streams", "is_oiio_supported", "import_filepath", @@ -223,6 +242,11 @@ __all__ = [ "get_transcode_temp_directory", "should_convert_for_ffmpeg", "convert_for_ffmpeg", + "get_ffprobe_data", + 
"get_ffprobe_streams", + "get_ffmpeg_codec_args", + "get_ffmpeg_format_args", + "convert_ffprobe_fps_value", "CURRENT_DOC_SCHEMAS", "PROJECT_NAME_ALLOWED_SYMBOLS", @@ -241,6 +265,8 @@ __all__ = [ "get_workdir_data", "get_workdir", "get_workdir_with_workdir_data", + "get_last_workfile_with_version", + "get_last_workfile", "create_workfile_doc", "save_workfile_data_to_doc", @@ -290,6 +316,7 @@ __all__ = [ "get_unique_layer_name", "get_background_layers", + "create_hard_link", "version_up", "get_version_from_path", "get_last_version_from_path", @@ -306,6 +333,7 @@ __all__ = [ "Anatomy", "get_datetime_data", + "get_formatted_current_time", "PypeLogger", "get_default_components", diff --git a/openpype/lib/abstract_collect_render.py b/openpype/lib/abstract_collect_render.py index 3839aad45d..7c768e280c 100644 --- a/openpype/lib/abstract_collect_render.py +++ b/openpype/lib/abstract_collect_render.py @@ -26,7 +26,7 @@ class RenderInstance(object): # metadata version = attr.ib() # instance version - time = attr.ib() # time of instance creation (avalon.api.time()) + time = attr.ib() # time of instance creation (get_formatted_current_time) source = attr.ib() # path to source scene file label = attr.ib() # label to show in GUI subset = attr.ib() # subset name diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index 5b32df066f..557c016d74 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -7,7 +7,6 @@ import platform import collections import inspect import subprocess -import distutils.spawn from abc import ABCMeta, abstractmethod import six @@ -29,15 +28,18 @@ from .local_settings import get_openpype_username from .avalon_context import ( get_workdir_data, get_workdir_with_workdir_data, - get_workfile_template_key + get_workfile_template_key, + get_last_workfile ) from .python_module_tools import ( modules_from_path, classes_from_module ) -from .execute import get_linux_launcher_args - +from .execute import ( + find_executable, + get_linux_launcher_args +) _logger = None @@ -647,7 +649,7 @@ class ApplicationExecutable: def _realpath(self): """Check if path is valid executable path.""" # Check for executable in PATH - result = distutils.spawn.find_executable(self.executable_path) + result = find_executable(self.executable_path) if result is not None: return result @@ -1608,7 +1610,7 @@ def _prepare_last_workfile(data, workdir): "ext": extensions[0] }) - last_workfile_path = avalon.api.last_workfile( + last_workfile_path = get_last_workfile( workdir, file_template, workdir_data, extensions, True ) diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 1e8d21852b..8e9fff5f67 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -15,6 +15,8 @@ from openpype.settings import ( ) from .anatomy import Anatomy from .profiles_filtering import filter_profiles +from .events import emit_event +from .path_templates import StringTemplate # avalon module is not imported at the top # - may not be in path at the time of pype.lib initialization @@ -644,6 +646,165 @@ def get_workdir( ) +def template_data_from_session(session=None): + """ Return dictionary with template from session keys. + + Args: + session (dict, Optional): The Session to use. If not provided use the + currently active global Session. + Returns: + dict: All available data from session. 
+ """ + from avalon import io + import avalon.api + + if session is None: + session = avalon.api.Session + + project_name = session["AVALON_PROJECT"] + project_doc = io._database[project_name].find_one({"type": "project"}) + asset_doc = io._database[project_name].find_one({ + "type": "asset", + "name": session["AVALON_ASSET"] + }) + task_name = session["AVALON_TASK"] + host_name = session["AVALON_APP"] + return get_workdir_data(project_doc, asset_doc, task_name, host_name) + + +def compute_session_changes( + session, task=None, asset=None, app=None, template_key=None +): + """Compute the changes for a Session object on asset, task or app switch + + This does *NOT* update the Session object, but returns the changes + required for a valid update of the Session. + + Args: + session (dict): The initial session to compute changes to. + This is required for computing the full Work Directory, as that + also depends on the values that haven't changed. + task (str, Optional): Name of task to switch to. + asset (str or dict, Optional): Name of asset to switch to. + You can also directly provide the Asset dictionary as returned + from the database to avoid an additional query. (optimization) + app (str, Optional): Name of app to switch to. + + Returns: + dict: The required changes in the Session dictionary. + + """ + changes = dict() + + # If no changes, return directly + if not any([task, asset, app]): + return changes + + # Get asset document and asset + asset_document = None + asset_tasks = None + if isinstance(asset, dict): + # Assume asset database document + asset_document = asset + asset_tasks = asset_document.get("data", {}).get("tasks") + asset = asset["name"] + + if not asset_document or not asset_tasks: + from avalon import io + + # Assume asset name + asset_document = io.find_one( + { + "name": asset, + "type": "asset" + }, + {"data.tasks": True} + ) + assert asset_document, "Asset must exist" + + # Detect any changes compared session + mapping = { + "AVALON_ASSET": asset, + "AVALON_TASK": task, + "AVALON_APP": app, + } + changes = { + key: value + for key, value in mapping.items() + if value and value != session.get(key) + } + if not changes: + return changes + + # Compute work directory (with the temporary changed session so far) + _session = session.copy() + _session.update(changes) + + changes["AVALON_WORKDIR"] = get_workdir_from_session(_session) + + return changes + + +def get_workdir_from_session(session=None, template_key=None): + import avalon.api + + if session is None: + session = avalon.api.Session + project_name = session["AVALON_PROJECT"] + host_name = session["AVALON_APP"] + anatomy = Anatomy(project_name) + template_data = template_data_from_session(session) + anatomy_filled = anatomy.format(template_data) + + if not template_key: + task_type = template_data["task"]["type"] + template_key = get_workfile_template_key( + task_type, + host_name, + project_name=project_name + ) + return anatomy_filled[template_key]["folder"] + + +def update_current_task(task=None, asset=None, app=None, template_key=None): + """Update active Session to a new task work area. + + This updates the live Session to a different `asset`, `task` or `app`. + + Args: + task (str): The task to set. + asset (str): The asset to set. + app (str): The app to set. + + Returns: + dict: The changed key, values in the current Session. 
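A short usage sketch for the session helpers added above; the asset and task names are made up, and a live avalon Session with the usual AVALON_* keys is assumed.

```python
import avalon.api
from openpype.lib.avalon_context import (
    compute_session_changes,
    update_current_task,
)

# Hypothetical context switch; any existing asset/task would do.
changes = compute_session_changes(
    avalon.api.Session, task="animation", asset="sh010"
)
# Only keys that differ from the current Session are returned,
# plus the recomputed AVALON_WORKDIR.
print(changes.get("AVALON_WORKDIR"))

# Applying the switch also updates os.environ and emits a
# "taskChanged" event for callbacks registered via
# register_event_callback.
update_current_task(task="animation", asset="sh010")
```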
+ + """ + import avalon.api + + changes = compute_session_changes( + avalon.api.Session, + task=task, + asset=asset, + app=app, + template_key=template_key + ) + + # Update the Session and environments. Pop from environments all keys with + # value set to None. + for key, value in changes.items(): + avalon.api.Session[key] = value + if value is None: + os.environ.pop(key, None) + else: + os.environ[key] = value + + # Emit session change + emit_event("taskChanged", changes.copy()) + + return changes + + @with_avalon def get_workfile_doc(asset_id, task_name, filename, dbcon=None): """Return workfile document for entered context. @@ -820,6 +981,8 @@ class BuildWorkfile: ... }] """ + from openpype.pipeline import discover_loader_plugins + # Get current asset name and entity current_asset_name = avalon.io.Session["AVALON_ASSET"] current_asset_entity = avalon.io.find_one({ @@ -836,7 +999,7 @@ class BuildWorkfile: # Prepare available loaders loaders_by_name = {} - for loader in avalon.api.discover(avalon.api.Loader): + for loader in discover_loader_plugins(): loader_name = loader.__name__ if loader_name in loaders_by_name: raise KeyError( @@ -1230,6 +1393,11 @@ class BuildWorkfile: Returns: (list) Objects of loaded containers. """ + from openpype.pipeline import ( + IncompatibleLoaderError, + load_container, + ) + loaded_containers = [] # Get subset id order from build presets. @@ -1291,7 +1459,7 @@ class BuildWorkfile: if not loader: continue try: - container = avalon.api.load( + container = load_container( loader, repre["_id"], name=subset_name @@ -1300,7 +1468,7 @@ class BuildWorkfile: is_loaded = True except Exception as exc: - if exc == avalon.pipeline.IncompatibleLoaderError: + if exc == IncompatibleLoaderError: self.log.info(( "Loader `{}` is not compatible with" " representation `{}`" @@ -1434,11 +1602,13 @@ def get_creator_by_name(creator_name, case_sensitive=False): Returns: Creator: Return first matching plugin or `None`. """ + from openpype.pipeline import LegacyCreator + # Lower input creator name if is not case sensitive if not case_sensitive: creator_name = creator_name.lower() - for creator_plugin in avalon.api.discover(avalon.api.Creator): + for creator_plugin in avalon.api.discover(LegacyCreator): _creator_name = creator_plugin.__name__ # Lower creator plugin name if is not case sensitive @@ -1566,8 +1736,6 @@ def get_custom_workfile_template_by_context( context. (Existence of formatted path is not validated.) """ - from openpype.lib import filter_profiles - if anatomy is None: anatomy = Anatomy(project_doc["name"]) @@ -1590,7 +1758,9 @@ def get_custom_workfile_template_by_context( # there are some anatomy template strings if matching_item: template = matching_item["path"][platform.system().lower()] - return template.format(**anatomy_context_data) + return StringTemplate.format_strict_template( + template, anatomy_context_data + ) return None @@ -1678,3 +1848,124 @@ def get_custom_workfile_template(template_profiles): io.Session["AVALON_TASK"], io ) + + +def get_last_workfile_with_version( + workdir, file_template, fill_data, extensions +): + """Return last workfile version. + + Args: + workdir(str): Path to dir where workfiles are stored. + file_template(str): Template of file name. + fill_data(dict): Data for filling template. + extensions(list, tuple): All allowed file extensions of workfile. + + Returns: + tuple: Last workfile with version if there is any otherwise + returns (None, None). 
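A minimal sketch of calling get_last_workfile_with_version directly; the workdir, file template and fill data are hypothetical placeholders for values that would normally come from Anatomy, as in the LoadWorkfile plugin earlier in this diff.

```python
from openpype.lib import get_last_workfile_with_version

workdir = "/projects/demo/sh010/work/animation"   # hypothetical
file_template = "{asset}_v{version:0>3}{ext}"     # hypothetical
fill_data = {"asset": "sh010"}

filename, version = get_last_workfile_with_version(
    workdir, file_template, fill_data, [".tvpp"]
)
if version is None:
    # No existing workfile matched the template in that directory.
    version = 1
```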
+ """ + if not os.path.exists(workdir): + return None, None + + # Fast match on extension + filenames = [ + filename + for filename in os.listdir(workdir) + if os.path.splitext(filename)[1] in extensions + ] + + # Build template without optionals, version to digits only regex + # and comment to any definable value. + _ext = [] + for ext in extensions: + if not ext.startswith("."): + ext = "." + ext + # Escape dot for regex + ext = "\\" + ext + _ext.append(ext) + ext_expression = "(?:" + "|".join(_ext) + ")" + + # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end + file_template = re.sub(r"\.?{ext}", ext_expression, file_template) + # Replace optional keys with optional content regex + file_template = re.sub(r"<.*?>", r".*?", file_template) + # Replace `{version}` with group regex + file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) + file_template = re.sub(r"{comment.*?}", r".+?", file_template) + file_template = StringTemplate.format_strict_template( + file_template, fill_data + ) + + # Match with ignore case on Windows due to the Windows + # OS not being case-sensitive. This avoids later running + # into the error that the file did exist if it existed + # with a different upper/lower-case. + kwargs = {} + if platform.system().lower() == "windows": + kwargs["flags"] = re.IGNORECASE + + # Get highest version among existing matching files + version = None + output_filenames = [] + for filename in sorted(filenames): + match = re.match(file_template, filename, **kwargs) + if not match: + continue + + file_version = int(match.group(1)) + if version is None or file_version > version: + output_filenames[:] = [] + version = file_version + + if file_version == version: + output_filenames.append(filename) + + output_filename = None + if output_filenames: + if len(output_filenames) == 1: + output_filename = output_filenames[0] + else: + last_time = None + for _output_filename in output_filenames: + full_path = os.path.join(workdir, _output_filename) + mod_time = os.path.getmtime(full_path) + if last_time is None or last_time < mod_time: + output_filename = _output_filename + last_time = mod_time + + return output_filename, version + + +def get_last_workfile( + workdir, file_template, fill_data, extensions, full_path=False +): + """Return last workfile filename. + + Returns file with version 1 if there is not workfile yet. + + Args: + workdir(str): Path to dir where workfiles are stored. + file_template(str): Template of file name. + fill_data(dict): Data for filling template. + extensions(list, tuple): All allowed file extensions of workfile. + full_path(bool): Full path to file is returned if set to True. + + Returns: + str: Last or first workfile as filename of full path to filename. 
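And the convenience variant with full_path=True, which joins the result with the work directory and falls back to a version 1 filename when nothing exists yet (same hypothetical values as above).

```python
from openpype.lib import get_last_workfile

last_path = get_last_workfile(
    "/projects/demo/sh010/work/animation",   # hypothetical workdir
    "{asset}_v{version:0>3}{ext}",           # hypothetical template
    {"asset": "sh010"},
    [".tvpp"],
    full_path=True
)
```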
+ """ + filename, version = get_last_workfile_with_version( + workdir, file_template, fill_data, extensions + ) + if filename is None: + data = copy.deepcopy(fill_data) + data["version"] = 1 + data.pop("comment", None) + if not data.get("ext"): + data["ext"] = extensions[0] + filename = StringTemplate.format_strict_template(file_template, data) + + if full_path: + return os.path.normpath(os.path.join(workdir, filename)) + + return filename diff --git a/openpype/lib/config.py b/openpype/lib/config.py index ba394cfd56..57e8efa57d 100644 --- a/openpype/lib/config.py +++ b/openpype/lib/config.py @@ -74,3 +74,9 @@ def get_datetime_data(datetime_obj=None): "S": str(int(seconds)), "SS": str(seconds), } + + +def get_formatted_current_time(): + return datetime.datetime.now().strftime( + "%Y%m%dT%H%M%SZ" + ) diff --git a/openpype/lib/delivery.py b/openpype/lib/delivery.py index a61603fa05..ffcfe9fa4d 100644 --- a/openpype/lib/delivery.py +++ b/openpype/lib/delivery.py @@ -5,19 +5,30 @@ import glob import clique import collections +from .path_templates import ( + StringTemplate, + TemplateUnsolved, +) + def collect_frames(files): """ Returns dict of source path and its frame, if from sequence - Uses clique as most precise solution + Uses clique as most precise solution, used when anatomy template that + created files is not known. + + Assumption is that frames are separated by '.', negative frames are not + allowed. Args: - files(list): list of source paths + files(list) or (set with single value): list of source paths Returns: (dict): {'/asset/subset_v001.0001.png': '0001', ....} """ - collections, remainder = clique.assemble(files, minimum_items=1) + patterns = [clique.PATTERNS["frames"]] + collections, remainder = clique.assemble(files, minimum_items=1, + patterns=patterns) sources_and_frames = {} if collections: @@ -46,8 +57,6 @@ def sizeof_fmt(num, suffix='B'): def path_from_representation(representation, anatomy): - from avalon import pipeline # safer importing - try: template = representation["data"]["template"] @@ -57,12 +66,10 @@ def path_from_representation(representation, anatomy): try: context = representation["context"] context["root"] = anatomy.roots - path = pipeline.format_template_with_optional_keys( - context, template - ) - path = os.path.normpath(path.replace("/", "\\")) + path = StringTemplate.format_strict_template(template, context) + return os.path.normpath(path) - except KeyError: + except TemplateUnsolved: # Template references unavailable data return None @@ -71,15 +78,14 @@ def path_from_representation(representation, anatomy): def copy_file(src_path, dst_path): """Hardlink file if possible(to save space), copy if not""" - from avalon.vendor import filelink # safer importing + from openpype.lib import create_hard_link # safer importing if os.path.exists(dst_path): return try: - filelink.create( + create_hard_link( src_path, - dst_path, - filelink.HARDLINK + dst_path ) except OSError: shutil.copyfile(src_path, dst_path) diff --git a/openpype/lib/events.py b/openpype/lib/events.py new file mode 100644 index 0000000000..7bec6ee30d --- /dev/null +++ b/openpype/lib/events.py @@ -0,0 +1,268 @@ +"""Events holding data about specific event.""" +import os +import re +import inspect +import logging +import weakref +from uuid import uuid4 +try: + from weakref import WeakMethod +except Exception: + from openpype.lib.python_2_comp import WeakMethod + + +class EventCallback(object): + """Callback registered to a topic. + + The callback function is registered to a topic. 
Topic is a string which + may contain '*' that will be handled as "any characters". + + # Examples: + - "workfile.save" Callback will be triggered if the event topic is + exactly "workfile.save" . + - "workfile.*" Callback will be triggered an event topic starts with + "workfile." so "workfile.save" and "workfile.open" + will trigger the callback. + - "*" Callback will listen to all events. + + Callback can be function or method. In both cases it should expect one + or none arguments. When 1 argument is expected then the processed 'Event' + object is passed in. + + The registered callbacks don't keep function in memory so it is not + possible to store lambda function as callback. + + Args: + topic(str): Topic which will be listened. + func(func): Callback to a topic. + + Raises: + TypeError: When passed function is not a callable object. + """ + + def __init__(self, topic, func): + self._log = None + self._topic = topic + # Replace '*' with any character regex and escape rest of text + # - when callback is registered for '*' topic it will receive all + # events + # - it is possible to register to a partial topis 'my.event.*' + # - it will receive all matching event topics + # e.g. 'my.event.start' and 'my.event.end' + topic_regex_str = "^{}$".format( + ".+".join( + re.escape(part) + for part in topic.split("*") + ) + ) + topic_regex = re.compile(topic_regex_str) + self._topic_regex = topic_regex + + # Convert callback into references + # - deleted functions won't cause crashes + if inspect.ismethod(func): + func_ref = WeakMethod(func) + elif callable(func): + func_ref = weakref.ref(func) + else: + raise TypeError(( + "Registered callback is not callable. \"{}\"" + ).format(str(func))) + + # Collect additional data about function + # - name + # - path + # - if expect argument or not + func_name = func.__name__ + func_path = os.path.abspath(inspect.getfile(func)) + if hasattr(inspect, "signature"): + sig = inspect.signature(func) + expect_args = len(sig.parameters) > 0 + else: + expect_args = len(inspect.getargspec(func)[0]) > 0 + + self._func_ref = func_ref + self._func_name = func_name + self._func_path = func_path + self._expect_args = expect_args + self._ref_valid = func_ref is not None + self._enabled = True + + def __repr__(self): + return "< {} - {} > {}".format( + self.__class__.__name__, self._func_name, self._func_path + ) + + @property + def log(self): + if self._log is None: + self._log = logging.getLogger(self.__class__.__name__) + return self._log + + @property + def is_ref_valid(self): + return self._ref_valid + + def validate_ref(self): + if not self._ref_valid: + return + + callback = self._func_ref() + if not callback: + self._ref_valid = False + + @property + def enabled(self): + """Is callback enabled.""" + return self._enabled + + def set_enabled(self, enabled): + """Change if callback is enabled.""" + self._enabled = enabled + + def deregister(self): + """Calling this funcion will cause that callback will be removed.""" + # Fake reference + self._ref_valid = False + + def topic_matches(self, topic): + """Check if event topic matches callback's topic.""" + return self._topic_regex.match(topic) + + def process_event(self, event): + """Process event. + + Args: + event(Event): Event that was triggered. 
+ """ + + # Skip if callback is not enabled or has invalid reference + if not self._ref_valid or not self._enabled: + return + + # Get reference + callback = self._func_ref() + # Check if reference is valid or callback's topic matches the event + if not callback: + # Change state if is invalid so the callback is removed + self._ref_valid = False + + elif self.topic_matches(event.topic): + # Try execute callback + try: + if self._expect_args: + callback(event) + else: + callback() + + except Exception: + self.log.warning( + "Failed to execute event callback {}".format( + str(repr(self)) + ), + exc_info=True + ) + + +# Inherit from 'object' for Python 2 hosts +class Event(object): + """Base event object. + + Can be used for any event because is not specific. Only required argument + is topic which defines why event is happening and may be used for + filtering. + + Arg: + topic (str): Identifier of event. + data (Any): Data specific for event. Dictionary is recommended. + source (str): Identifier of source. + """ + _data = {} + + def __init__(self, topic, data=None, source=None): + self._id = str(uuid4()) + self._topic = topic + if data is None: + data = {} + self._data = data + self._source = source + + def __getitem__(self, key): + return self._data[key] + + def get(self, key, *args, **kwargs): + return self._data.get(key, *args, **kwargs) + + @property + def id(self): + return self._id + + @property + def source(self): + return self._source + + @property + def data(self): + return self._data + + @property + def topic(self): + return self._topic + + def emit(self): + """Emit event and trigger callbacks.""" + StoredCallbacks.emit_event(self) + + +class StoredCallbacks: + _registered_callbacks = [] + + @classmethod + def add_callback(cls, topic, callback): + callback = EventCallback(topic, callback) + cls._registered_callbacks.append(callback) + return callback + + @classmethod + def emit_event(cls, event): + invalid_callbacks = [] + for callback in cls._registered_callbacks: + callback.process_event(event) + if not callback.is_ref_valid: + invalid_callbacks.append(callback) + + for callback in invalid_callbacks: + cls._registered_callbacks.remove(callback) + + +def register_event_callback(topic, callback): + """Add callback that will be executed on specific topic. + + Args: + topic(str): Topic on which will callback be triggered. + callback(function): Callback that will be triggered when a topic + is triggered. Callback should expect none or 1 argument where + `Event` object is passed. + + Returns: + EventCallback: Object wrapping the callback. It can be used to + enable/disable listening to a topic or remove the callback from + the topic completely. + """ + return StoredCallbacks.add_callback(topic, callback) + + +def emit_event(topic, data=None, source=None): + """Emit event with topic and data. + + Arg: + topic(str): Event's topic. + data(dict): Event's additional data. Optional. + source(str): Who emitted the topic. Optional. + + Returns: + Event: Object of event that was emitted. 
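A short usage sketch of the event API defined in this new module; the topic name and payload below are invented for illustration.

# Illustrative only; the "workfile.save" topic and its payload are made up.
from openpype.lib.events import register_event_callback, emit_event

def on_workfile_event(event):
    # Receives the Event object: 'event.topic' is e.g. "workfile.save" and
    # 'event.data' is the dictionary passed to 'emit_event'.
    print(event.topic, event.data.get("path"))

# "workfile.*" matches "workfile.save", "workfile.open", ... while "*" would
# match every emitted topic.
callback = register_event_callback("workfile.*", on_workfile_event)

emit_event("workfile.save", {"path": "/tmp/scene_v001.hip"}, source="example")

# Only weak references are kept, so an inline lambda would be collected right
# away; use named functions or methods. The returned wrapper can be muted or
# dropped later:
callback.set_enabled(False)
callback.deregister()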
+ """ + event = Event(topic, data, source) + event.emit() + return event diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py index f2eb97c5f5..c3e35772f3 100644 --- a/openpype/lib/execute.py +++ b/openpype/lib/execute.py @@ -4,9 +4,9 @@ import subprocess import platform import json import tempfile -import distutils.spawn from .log import PypeLogger as Logger +from .vendor_bin_utils import find_executable # MSDN process creation flag (Windows only) CREATE_NO_WINDOW = 0x08000000 @@ -341,7 +341,7 @@ def get_linux_launcher_args(*args): os.path.dirname(openpype_executable), filename ) - executable_path = distutils.spawn.find_executable(new_executable) + executable_path = find_executable(new_executable) if executable_path is None: return None launch_args = [executable_path] diff --git a/openpype/lib/log.py b/openpype/lib/log.py index a42faef008..98a3bae8e6 100644 --- a/openpype/lib/log.py +++ b/openpype/lib/log.py @@ -227,7 +227,7 @@ class PypeLogger: logger = logging.getLogger(name or "__main__") - if cls.pype_debug > 1: + if cls.pype_debug > 0: logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py index 62bfdf774a..14e5fe59f8 100644 --- a/openpype/lib/path_templates.py +++ b/openpype/lib/path_templates.py @@ -187,6 +187,16 @@ class StringTemplate(object): result.validate() return result + @classmethod + def format_template(cls, template, data): + objected_template = cls(template) + return objected_template.format(data) + + @classmethod + def format_strict_template(cls, template, data): + objected_template = cls(template) + return objected_template.format_strict(data) + @staticmethod def find_optional_parts(parts): new_parts = [] diff --git a/openpype/lib/path_tools.py b/openpype/lib/path_tools.py index d6c32ad9e8..851bc872fb 100644 --- a/openpype/lib/path_tools.py +++ b/openpype/lib/path_tools.py @@ -4,9 +4,9 @@ import abc import json import logging import six +import platform from openpype.settings import get_project_settings -from openpype.settings.lib import get_site_local_overrides from .anatomy import Anatomy from .profiles_filtering import filter_profiles @@ -14,6 +14,42 @@ from .profiles_filtering import filter_profiles log = logging.getLogger(__name__) +def create_hard_link(src_path, dst_path): + """Create hardlink of file. + + Args: + src_path(str): Full path to a file which is used as source for + hardlink. + dst_path(str): Full path to a file where a link of source will be + added. + """ + # Use `os.link` if is available + # - should be for all platforms with newer python versions + if hasattr(os, "link"): + os.link(src_path, dst_path) + return + + # Windows implementation of hardlinks + # - used in Python 2 + if platform.system().lower() == "windows": + import ctypes + from ctypes.wintypes import BOOL + CreateHardLink = ctypes.windll.kernel32.CreateHardLinkW + CreateHardLink.argtypes = [ + ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p + ] + CreateHardLink.restype = BOOL + + res = CreateHardLink(dst_path, src_path, None) + if res == 0: + raise ctypes.WinError() + return + # Raises not implemented error if gets here + raise NotImplementedError( + "Implementation of hardlink for current environment is missing." 
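The new classmethods wrap format()/format_strict() so callers do not have to instantiate StringTemplate themselves. Below is a hedged sketch of the expected difference between them; the template and data are invented examples.

# Illustrative sketch; template and data are hypothetical.
from openpype.lib.path_templates import StringTemplate, TemplateUnsolved

template = "{project[code]}_{asset}_v{version:0>3}.{ext}"
data = {"project": {"code": "prj"}, "asset": "sh010", "version": 2, "ext": "ma"}

# Non-strict formatting always returns a result object.
result = StringTemplate.format_template(template, data)
print(str(result), result.solved)   # "prj_sh010_v002.ma" True

# The strict variant validates the result and raises when keys are missing,
# which is what the delivery and delete-old-versions code in this patch
# relies on.
try:
    StringTemplate.format_strict_template(template, {"asset": "sh010"})
except TemplateUnsolved:
    print("template could not be fully solved")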
+ ) + + def _rreplace(s, a, b, n=1): """Replace a with b in string s from right side n times.""" return b.join(s.rsplit(a, n)) diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index 183aad939a..f11ba56865 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -280,6 +280,7 @@ def set_plugin_attributes_from_settings( project_name (str): Name of project for which settings will be loaded. Value from environment `AVALON_PROJECT` is used if not entered. """ + from openpype.pipeline import LegacyCreator, LoaderPlugin # determine host application to use for finding presets if host_name is None: @@ -289,11 +290,11 @@ def set_plugin_attributes_from_settings( project_name = os.environ.get("AVALON_PROJECT") # map plugin superclass to preset json. Currently supported is load and - # create (avalon.api.Loader and avalon.api.Creator) + # create (LoaderPlugin and LegacyCreator) plugin_type = None - if superclass.__name__.split(".")[-1] in ("Loader", "SubsetLoader"): + if superclass is LoaderPlugin or issubclass(superclass, LoaderPlugin): plugin_type = "load" - elif superclass.__name__.split(".")[-1] == "Creator": + elif superclass is LegacyCreator or issubclass(superclass, LegacyCreator): plugin_type = "create" if not host_name or not project_name or plugin_type is None: diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index e89fa6331e..6bab6a8160 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -1,15 +1,18 @@ import os import re import logging +import json import collections import tempfile +import subprocess import xml.etree.ElementTree from .execute import run_subprocess from .vendor_bin_utils import ( + get_ffmpeg_tool_path, get_oiio_tools_path, - is_oiio_supported + is_oiio_supported, ) # Max length of string that is supported by ffmpeg @@ -90,7 +93,7 @@ class RationalToInt: if len(parts) != 1: bottom = float(parts[1]) - self._value = top / bottom + self._value = float(top) / float(bottom) self._string_value = string_value @property @@ -170,6 +173,23 @@ def convert_value_by_type_name(value_type, value, logger=None): if value_type == "rational2i": return RationalToInt(value) + if value_type == "vector": + parts = [part.strip() for part in value.split(",")] + output = [] + for part in parts: + if part == "-nan": + output.append(None) + continue + try: + part = float(part) + except ValueError: + pass + output.append(part) + return output + + if value_type == "timecode": + return value + # Array of other types is converted to list re_result = ARRAY_TYPE_REGEX.findall(value_type) if re_result: @@ -466,3 +486,290 @@ def convert_for_ffmpeg( logger.debug("Conversion command: {}".format(" ".join(oiio_cmd))) run_subprocess(oiio_cmd, logger=logger) + + +# FFMPEG functions +def get_ffprobe_data(path_to_file, logger=None): + """Load data about entered filepath via ffprobe. 
+ + Args: + path_to_file (str): absolute path + logger (logging.Logger): injected logger, if empty new is created + """ + if not logger: + logger = logging.getLogger(__name__) + logger.info( + "Getting information about input \"{}\".".format(path_to_file) + ) + args = [ + get_ffmpeg_tool_path("ffprobe"), + "-hide_banner", + "-loglevel", "fatal", + "-show_error", + "-show_format", + "-show_streams", + "-show_programs", + "-show_chapters", + "-show_private_data", + "-print_format", "json", + path_to_file + ] + + logger.debug("FFprobe command: {}".format( + subprocess.list2cmdline(args) + )) + popen = subprocess.Popen( + args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + + popen_stdout, popen_stderr = popen.communicate() + if popen_stdout: + logger.debug("FFprobe stdout:\n{}".format( + popen_stdout.decode("utf-8") + )) + + if popen_stderr: + logger.warning("FFprobe stderr:\n{}".format( + popen_stderr.decode("utf-8") + )) + + return json.loads(popen_stdout) + + +def get_ffprobe_streams(path_to_file, logger=None): + """Load streams from entered filepath via ffprobe. + + Args: + path_to_file (str): absolute path + logger (logging.Logger): injected logger, if empty new is created + """ + return get_ffprobe_data(path_to_file, logger)["streams"] + + +def get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd=None): + """Copy format from input metadata for output. + + Args: + ffprobe_data(dict): Data received from ffprobe. + source_ffmpeg_cmd(str): Command that created input if available. + """ + input_format = ffprobe_data.get("format") or {} + if input_format.get("format_name") == "mxf": + return _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd) + return [] + + +def _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd): + input_format = ffprobe_data["format"] + format_tags = input_format.get("tags") or {} + product_name = format_tags.get("product_name") or "" + output = [] + if "opatom" in product_name.lower(): + output.extend(["-f", "mxf_opatom"]) + return output + + +def get_ffmpeg_codec_args(ffprobe_data, source_ffmpeg_cmd=None, logger=None): + """Copy codec from input metadata for output. + + Args: + ffprobe_data(dict): Data received from ffprobe. + source_ffmpeg_cmd(str): Command that created input if available. + """ + if logger is None: + logger = logging.getLogger(__name__) + + video_stream = None + no_audio_stream = None + for stream in ffprobe_data["streams"]: + codec_type = stream["codec_type"] + if codec_type == "video": + video_stream = stream + break + elif no_audio_stream is None and codec_type != "audio": + no_audio_stream = stream + + if video_stream is None: + if no_audio_stream is None: + logger.warning( + "Couldn't find stream that is not an audio file." + ) + return [] + logger.info( + "Didn't find video stream. Using first non audio stream." 
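Taken together, the ffprobe helpers above are meant to let a re-encode keep the source container and codec settings. A usage sketch follows; the paths and the way the final command is assembled are illustrative only, not code from this patch.

# Illustrative only; file paths are hypothetical.
from openpype.lib.transcoding import (
    get_ffprobe_data,
    get_ffmpeg_format_args,
    get_ffmpeg_codec_args,
)
from openpype.lib.vendor_bin_utils import get_ffmpeg_tool_path

src = "/tmp/input.mov"    # hypothetical
dst = "/tmp/output.mov"   # hypothetical

ffprobe_data = get_ffprobe_data(src)
args = [get_ffmpeg_tool_path("ffmpeg"), "-i", src]
# e.g. ["-f", "mxf_opatom"] for OpAtom MXF sources, [] otherwise
args.extend(get_ffmpeg_format_args(ffprobe_data))
# e.g. ["-codec:v", "prores_ks", "-pix_fmt", "yuv422p10le", "-profile:v", "hq"]
args.extend(get_ffmpeg_codec_args(ffprobe_data))
args.append(dst)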
+ ) + video_stream = no_audio_stream + + codec_name = video_stream.get("codec_name") + # Codec "prores" + if codec_name == "prores": + return _ffmpeg_prores_codec_args(video_stream, source_ffmpeg_cmd) + + # Codec "h264" + if codec_name == "h264": + return _ffmpeg_h264_codec_args(video_stream, source_ffmpeg_cmd) + + # Coded DNxHD + if codec_name == "dnxhd": + return _ffmpeg_dnxhd_codec_args(video_stream, source_ffmpeg_cmd) + + output = [] + if codec_name: + output.extend(["-codec:v", codec_name]) + + bit_rate = video_stream.get("bit_rate") + if bit_rate: + output.extend(["-b:v", bit_rate]) + + pix_fmt = video_stream.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + output.extend(["-g", "1"]) + + return output + + +def _ffmpeg_prores_codec_args(stream_data, source_ffmpeg_cmd): + output = [] + + tags = stream_data.get("tags") or {} + encoder = tags.get("encoder") or "" + if encoder.endswith("prores_ks"): + codec_name = "prores_ks" + + elif encoder.endswith("prores_aw"): + codec_name = "prores_aw" + + else: + codec_name = "prores" + + output.extend(["-codec:v", codec_name]) + + pix_fmt = stream_data.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + # Rest of arguments is prores_kw specific + if codec_name == "prores_ks": + codec_tag_to_profile_map = { + "apco": "proxy", + "apcs": "lt", + "apcn": "standard", + "apch": "hq", + "ap4h": "4444", + "ap4x": "4444xq" + } + codec_tag_str = stream_data.get("codec_tag_string") + if codec_tag_str: + profile = codec_tag_to_profile_map.get(codec_tag_str) + if profile: + output.extend(["-profile:v", profile]) + + return output + + +def _ffmpeg_h264_codec_args(stream_data, source_ffmpeg_cmd): + output = ["-codec:v", "h264"] + + # Use arguments from source if are available source arguments + if source_ffmpeg_cmd: + copy_args = ( + "-crf", + "-b:v", "-vb", + "-minrate", "-minrate:", + "-maxrate", "-maxrate:", + "-bufsize", "-bufsize:" + ) + args = source_ffmpeg_cmd.split(" ") + for idx, arg in enumerate(args): + if arg in copy_args: + output.extend([arg, args[idx + 1]]) + + pix_fmt = stream_data.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + output.extend(["-intra"]) + output.extend(["-g", "1"]) + + return output + + +def _ffmpeg_dnxhd_codec_args(stream_data, source_ffmpeg_cmd): + output = ["-codec:v", "dnxhd"] + + # Use source profile (profiles in metadata are not usable in args directly) + profile = stream_data.get("profile") or "" + # Lower profile and replace space with underscore + cleaned_profile = profile.lower().replace(" ", "_") + + # TODO validate this statement + # Looks like using 'dnxhd' profile must have set bit rate and in that case + # should be used bitrate from source. 
+ # - related attributes 'bit_rate_defined', 'bit_rate_must_be_defined' + bit_rate_must_be_defined = True + dnx_profiles = { + "dnxhd", + "dnxhr_lb", + "dnxhr_sq", + "dnxhr_hq", + "dnxhr_hqx", + "dnxhr_444" + } + if cleaned_profile in dnx_profiles: + if cleaned_profile != "dnxhd": + bit_rate_must_be_defined = False + output.extend(["-profile:v", cleaned_profile]) + + pix_fmt = stream_data.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + # Use arguments from source if are available source arguments + bit_rate_defined = False + if source_ffmpeg_cmd: + # Define bitrate arguments + bit_rate_args = ("-b:v", "-vb",) + # Seprate the two variables in case something else should be copied + # from source command + copy_args = [] + copy_args.extend(bit_rate_args) + + args = source_ffmpeg_cmd.split(" ") + for idx, arg in enumerate(args): + if arg in copy_args: + if arg in bit_rate_args: + bit_rate_defined = True + output.extend([arg, args[idx + 1]]) + + # Add bitrate if needed + if bit_rate_must_be_defined and not bit_rate_defined: + src_bit_rate = stream_data.get("bit_rate") + if src_bit_rate: + output.extend(["-b:v", src_bit_rate]) + + output.extend(["-g", "1"]) + return output + + +def convert_ffprobe_fps_value(str_value): + """Returns (str) value of fps from ffprobe frame format (120/1)""" + if str_value == "0/0": + print("WARNING: Source has \"r_frame_rate\" value set to \"0/0\".") + return "Unknown" + + items = str_value.split("/") + if len(items) == 1: + fps = float(items[0]) + + elif len(items) == 2: + fps = float(items[0]) / float(items[1]) + + # Check if fps is integer or float number + if int(fps) == fps: + fps = int(fps) + + return str(fps) diff --git a/openpype/lib/vendor_bin_utils.py b/openpype/lib/vendor_bin_utils.py index 4c2cf93dfa..23e28ea304 100644 --- a/openpype/lib/vendor_bin_utils.py +++ b/openpype/lib/vendor_bin_utils.py @@ -1,11 +1,87 @@ import os import logging -import json import platform -import subprocess -import distutils -log = logging.getLogger("FFmpeg utils") +log = logging.getLogger("Vendor utils") + + +def is_file_executable(filepath): + """Filepath lead to executable file. + + Args: + filepath(str): Full path to file. + """ + if not filepath: + return False + + if os.path.isfile(filepath): + if os.access(filepath, os.X_OK): + return True + + log.info( + "Filepath is not available for execution \"{}\"".format(filepath) + ) + return False + + +def find_executable(executable): + """Find full path to executable. + + Also tries additional extensions if passed executable does not contain one. + + Paths where it is looked for executable is defined by 'PATH' environment + variable, 'os.confstr("CS_PATH")' or 'os.defpath'. + + Args: + executable(str): Name of executable with or without extension. Can be + path to file. + + Returns: + str: Full path to executable with extension (is file). + None: When the executable was not found. 
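Elsewhere in this patch find_executable replaces distutils.spawn.find_executable; a small sketch of the assumed behaviour, with example executable names.

# Sketch of expected usage; executable names are examples only.
from openpype.lib.vendor_bin_utils import find_executable, is_file_executable

# Searches PATH (falling back to CS_PATH / os.defpath) and, on Windows, also
# tries extensions from PATHEXT, so "ffmpeg" can resolve to "ffmpeg.exe".
ffmpeg_path = find_executable("ffmpeg")
if ffmpeg_path is None:
    print("ffmpeg not found on PATH")
elif is_file_executable(ffmpeg_path):
    print("using", ffmpeg_path)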
+ """ + # Skip if passed path is file + if is_file_executable(executable): + return executable + + low_platform = platform.system().lower() + _, ext = os.path.splitext(executable) + + # Prepare variants for which it will be looked + variants = [executable] + # Add other extension variants only if passed executable does not have one + if not ext: + if low_platform == "windows": + exts = [".exe", ".ps1", ".bat"] + for ext in os.getenv("PATHEXT", "").split(os.pathsep): + ext = ext.lower() + if ext and ext not in exts: + exts.append(ext) + else: + exts = [".sh"] + + for ext in exts: + variant = executable + ext + if is_file_executable(variant): + return variant + variants.append(variant) + + # Get paths where to look for executable + path_str = os.environ.get("PATH", None) + if path_str is None: + if hasattr(os, "confstr"): + path_str = os.confstr("CS_PATH") + elif hasattr(os, "defpath"): + path_str = os.defpath + + if path_str: + paths = path_str.split(os.pathsep) + for path in paths: + for variant in variants: + filepath = os.path.abspath(os.path.join(path, variant)) + if is_file_executable(filepath): + return filepath + return None def get_vendor_bin_path(bin_app): @@ -41,11 +117,7 @@ def get_oiio_tools_path(tool="oiiotool"): Default is "oiiotool". """ oiio_dir = get_vendor_bin_path("oiio") - if platform.system().lower() == "windows" and not tool.lower().endswith( - ".exe" - ): - tool = "{}.exe".format(tool) - return os.path.join(oiio_dir, tool) + return find_executable(os.path.join(oiio_dir, tool)) def get_ffmpeg_tool_path(tool="ffmpeg"): @@ -61,57 +133,7 @@ def get_ffmpeg_tool_path(tool="ffmpeg"): ffmpeg_dir = get_vendor_bin_path("ffmpeg") if platform.system().lower() == "windows": ffmpeg_dir = os.path.join(ffmpeg_dir, "bin") - return os.path.join(ffmpeg_dir, tool) - - -def ffprobe_streams(path_to_file, logger=None): - """Load streams from entered filepath via ffprobe. 
- - Args: - path_to_file (str): absolute path - logger (logging.getLogger): injected logger, if empty new is created - - """ - if not logger: - logger = log - logger.info( - "Getting information about input \"{}\".".format(path_to_file) - ) - args = [ - get_ffmpeg_tool_path("ffprobe"), - "-hide_banner", - "-loglevel", "fatal", - "-show_error", - "-show_format", - "-show_streams", - "-show_programs", - "-show_chapters", - "-show_private_data", - "-print_format", "json", - path_to_file - ] - - logger.debug("FFprobe command: {}".format( - subprocess.list2cmdline(args) - )) - popen = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - - popen_stdout, popen_stderr = popen.communicate() - if popen_stdout: - logger.debug("FFprobe stdout:\n{}".format( - popen_stdout.decode("utf-8") - )) - - if popen_stderr: - logger.warning("FFprobe stderr:\n{}".format( - popen_stderr.decode("utf-8") - )) - - return json.loads(popen_stdout)["streams"] + return find_executable(os.path.join(ffmpeg_dir, tool)) def is_oiio_supported(): @@ -122,7 +144,7 @@ def is_oiio_supported(): """ loaded_path = oiio_path = get_oiio_tools_path() if oiio_path: - oiio_path = distutils.spawn.find_executable(oiio_path) + oiio_path = find_executable(oiio_path) if not oiio_path: log.debug("OIIOTool is not configured or not present at {}".format( diff --git a/openpype/modules/base.py b/openpype/modules/base.py index c7078475df..175957ae39 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -61,6 +61,7 @@ class _ModuleClass(object): def __init__(self, name): # Call setattr on super class super(_ModuleClass, self).__setattr__("name", name) + super(_ModuleClass, self).__setattr__("__name__", name) # Where modules and interfaces are stored super(_ModuleClass, self).__setattr__("__attributes__", dict()) @@ -72,7 +73,7 @@ class _ModuleClass(object): if attr_name not in self.__attributes__: if attr_name in ("__path__", "__file__"): return None - raise ImportError("No module named {}.{}".format( + raise AttributeError("'{}' has not attribute '{}'".format( self.name, attr_name )) return self.__attributes__[attr_name] diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index 2918b54d4a..c499c14d40 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -6,6 +6,7 @@ import pyblish.api from avalon import api from openpype.lib import env_value_to_bool +from openpype.lib.delivery import collect_frames from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo @@ -102,24 +103,18 @@ class AfterEffectsSubmitDeadline( def get_plugin_info(self): deadline_plugin_info = DeadlinePluginInfo() - context = self._instance.context - script_path = context.data["currentFile"] render_path = self._instance.data["expectedFiles"][0] - if len(self._instance.data["expectedFiles"]) > 1: + file_name, frame = list(collect_frames([render_path]).items())[0] + if frame: # replace frame ('000001') with Deadline's required '[#######]' # expects filename in format project_asset_subset_version.FRAME.ext render_dir = os.path.dirname(render_path) file_name = os.path.basename(render_path) - arr = file_name.split('.') - assert len(arr) == 3, \ - "Unable to parse frames from {}".format(file_name) - hashed = '[{}]'.format(len(arr[1]) * 
"#") - - render_path = os.path.join(render_dir, - '{}.{}.{}'.format(arr[0], hashed, - arr[2])) + hashed = '[{}]'.format(len(frame) * "#") + file_name = file_name.replace(frame, hashed) + render_path = os.path.join(render_dir, file_name) deadline_plugin_info.Comp = self._instance.data["comp_name"] deadline_plugin_info.Version = self._instance.data["app_version"] diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index c7a14791e4..fad4d14ea0 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -13,6 +13,8 @@ from avalon import api, io import pyblish.api +from openpype.pipeline import get_representation_path + def get_resources(version, extension=None): """Get the files from the specific version.""" @@ -23,7 +25,7 @@ def get_resources(version, extension=None): representation = io.find_one(query) assert representation, "This is a bug" - directory = api.get_representation_path(representation) + directory = get_representation_path(representation) print("Source: ", directory) resources = sorted( [ @@ -234,6 +236,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): environment["OPENPYPE_MONGO"] = mongo_url args = [ + "--headless", 'publish', roothless_metadata_path, "--targets", "deadline", @@ -516,7 +519,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ representations = [] collections, remainders = clique.assemble(exp_files) - bake_renders = instance.get("bakingNukeScripts", []) # create representation for every collected sequento ce for collection in collections: @@ -534,9 +536,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): preview = True break - if bake_renders: - preview = False - # toggle preview on if multipart is on if instance.get("multipartExr", False): preview = True @@ -608,17 +607,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "fps": instance.get("fps"), "tags": ["review"] }) - self._solve_families(instance, True) + self._solve_families(instance, True) - if (bake_renders - and remainder in bake_renders[0]["bakeRenderPath"]): - rep.update({ - "fps": instance.get("fps"), - "tags": ["review", "delete"] - }) - # solve families with `preview` attributes - self._solve_families(instance, True) - representations.append(rep) + already_there = False + for repre in instance.get("representations", []): + # might be added explicitly before by publish_on_farm + already_there = repre.get("files") == rep["files"] + if already_there: + self.log.debug("repre {} already_there".format(repre)) + break + + if not already_there: + representations.append(rep) return representations diff --git a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py index d49e314179..c2426e0d78 100644 --- a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py +++ b/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py @@ -107,6 +107,10 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin): explicitly and manually changed the frame list on the Deadline job. 
""" + # no frames in file name at all, eg 'renderCompositingMain.withLut.mov' + if not frame_placeholder: + return set([file_name_template]) + real_expected_rendered = set() src_padding_exp = "%0{}d".format(len(frame_placeholder)) for frames in frame_list: @@ -130,14 +134,13 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin): # There might be cases where clique was unable to collect # collections in `collect_frames` - thus we capture that case - if frame is None: - self.log.warning("Unable to detect frame from filename: " - "{}".format(file_name)) - continue + if frame is not None: + frame_placeholder = "#" * len(frame) - frame_placeholder = "#" * len(frame) - file_name_template = os.path.basename( - file_name.replace(frame, frame_placeholder)) + file_name_template = os.path.basename( + file_name.replace(frame, frame_placeholder)) + else: + file_name_template = file_name break return file_name_template, frame_placeholder diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index 82c2494e7a..eeb1f7744c 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -46,6 +46,7 @@ def inject_openpype_environment(deadlinePlugin): args = [ openpype_app, + "--headless", 'extractenvironments', export_url ] diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py index cf864b03d3..9fca1b5391 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py @@ -5,8 +5,9 @@ Todo: Currently we support only EXRs with their data window set. """ import os +import re import subprocess -from xml.dom import minidom +import xml.etree.ElementTree from System.IO import Path @@ -15,17 +16,220 @@ from Deadline.Scripting import ( FileUtils, RepositoryUtils, SystemUtils) -INT_KEYS = { - "x", "y", "height", "width", "full_x", "full_y", - "full_width", "full_height", "full_depth", "full_z", - "tile_width", "tile_height", "tile_depth", "deep", "depth", - "nchannels", "z_channel", "alpha_channel", "subimages" +STRING_TAGS = { + "format" } -LIST_KEYS = { - "channelnames" +INT_TAGS = { + "x", "y", "z", + "width", "height", "depth", + "full_x", "full_y", "full_z", + "full_width", "full_height", "full_depth", + "tile_width", "tile_height", "tile_depth", + "nchannels", + "alpha_channel", + "z_channel", + "deep", + "subimages", } +XML_CHAR_REF_REGEX_HEX = re.compile(r"&#x?[0-9a-fA-F]+;") + +# Regex to parse array attributes +ARRAY_TYPE_REGEX = re.compile(r"^(int|float|string)\[\d+\]$") + + +def convert_value_by_type_name(value_type, value): + """Convert value to proper type based on type name. + + In some cases value types have custom python class. 
+ """ + + # Simple types + if value_type == "string": + return value + + if value_type == "int": + return int(value) + + if value_type == "float": + return float(value) + + # Vectors will probably have more types + if value_type == "vec2f": + return [float(item) for item in value.split(",")] + + # Matrix should be always have square size of element 3x3, 4x4 + # - are returned as list of lists + if value_type == "matrix": + output = [] + current_index = -1 + parts = value.split(",") + parts_len = len(parts) + if parts_len == 1: + divisor = 1 + elif parts_len == 4: + divisor = 2 + elif parts_len == 9: + divisor == 3 + elif parts_len == 16: + divisor = 4 + else: + print("Unknown matrix resolution {}. Value: \"{}\"".format( + parts_len, value + )) + for part in parts: + output.append(float(part)) + return output + + for idx, item in enumerate(parts): + list_index = idx % divisor + if list_index > current_index: + current_index = list_index + output.append([]) + output[list_index].append(float(item)) + return output + + if value_type == "rational2i": + parts = value.split("/") + top = float(parts[0]) + bottom = 1.0 + if len(parts) != 1: + bottom = float(parts[1]) + return float(top) / float(bottom) + + if value_type == "vector": + parts = [part.strip() for part in value.split(",")] + output = [] + for part in parts: + if part == "-nan": + output.append(None) + continue + try: + part = float(part) + except ValueError: + pass + output.append(part) + return output + + if value_type == "timecode": + return value + + # Array of other types is converted to list + re_result = ARRAY_TYPE_REGEX.findall(value_type) + if re_result: + array_type = re_result[0] + output = [] + for item in value.split(","): + output.append( + convert_value_by_type_name(array_type, item) + ) + return output + + print(( + "MISSING IMPLEMENTATION:" + " Unknown attrib type \"{}\". Value: {}" + ).format(value_type, value)) + return value + + +def parse_oiio_xml_output(xml_string): + """Parse xml output from OIIO info command.""" + output = {} + if not xml_string: + return output + + # Fix values with ampresand (lazy fix) + # - oiiotool exports invalid xml which ElementTree can't handle + # e.g. "" + # WARNING: this will affect even valid character entities. If you need + # those values correctly, this must take care of valid character ranges. + # See https://github.com/pypeclub/OpenPype/pull/2729 + matches = XML_CHAR_REF_REGEX_HEX.findall(xml_string) + for match in matches: + new_value = match.replace("&", "&") + xml_string = xml_string.replace(match, new_value) + + tree = xml.etree.ElementTree.fromstring(xml_string) + attribs = {} + output["attribs"] = attribs + for child in tree: + tag_name = child.tag + if tag_name == "attrib": + attrib_def = child.attrib + value = convert_value_by_type_name( + attrib_def["type"], child.text + ) + + attribs[attrib_def["name"]] = value + continue + + # Channels are stored as tex on each child + if tag_name == "channelnames": + value = [] + for channel in child: + value.append(channel.text) + + # Convert known integer type tags to int + elif tag_name in INT_TAGS: + value = int(child.text) + + # Keep value of known string tags + elif tag_name in STRING_TAGS: + value = child.text + + # Keep value as text for unknown tags + # - feel free to add more tags + else: + value = child.text + print(( + "MISSING IMPLEMENTATION:" + " Unknown tag \"{}\". 
Value \"{}\"" + ).format(tag_name, value)) + + output[child.tag] = value + + return output + + +def info_about_input(oiiotool_path, filepath): + args = [ + oiiotool_path, + "--info", + "-v", + "-i:infoformat=xml", + filepath + ] + popen = subprocess.Popen(args, stdout=subprocess.PIPE) + _stdout, _stderr = popen.communicate() + output = "" + if _stdout: + output += _stdout.decode("utf-8") + + if _stderr: + output += _stderr.decode("utf-8") + + output = output.replace("\r\n", "\n") + xml_started = False + lines = [] + for line in output.split("\n"): + if not xml_started: + if not line.startswith("<"): + continue + xml_started = True + if xml_started: + lines.append(line) + + if not xml_started: + raise ValueError( + "Failed to read input file \"{}\".\nOutput:\n{}".format( + filepath, output + ) + ) + xml_text = "\n".join(lines) + return parse_oiio_xml_output(xml_text) + + def GetDeadlinePlugin(): # noqa: N802 """Helper.""" return OpenPypeTileAssembler() @@ -218,8 +422,9 @@ class OpenPypeTileAssembler(DeadlinePlugin): # Create new image with output resolution, and with same type and # channels as input + oiiotool_path = self.render_executable() first_tile_path = tile_info[0]["filepath"] - first_tile_info = self.info_about_input(first_tile_path) + first_tile_info = info_about_input(oiiotool_path, first_tile_path) create_arg_template = "--create{} {}x{} {}" image_type = "" @@ -236,7 +441,7 @@ class OpenPypeTileAssembler(DeadlinePlugin): for tile in tile_info: path = tile["filepath"] pos_x = tile["pos_x"] - tile_height = self.info_about_input(path)["height"] + tile_height = info_about_input(oiiotool_path, path)["height"] if self.renderer == "vray": pos_y = tile["pos_y"] else: @@ -326,47 +531,3 @@ class OpenPypeTileAssembler(DeadlinePlugin): ffmpeg_args.append("\"{}\"".format(output_path)) return ffmpeg_args - - def info_about_input(self, input_path): - args = [self.render_executable(), "--info:format=xml", input_path] - popen = subprocess.Popen( - " ".join(args), - shell=True, - stdout=subprocess.PIPE - ) - popen_output = popen.communicate()[0].replace(b"\r\n", b"") - - xmldoc = minidom.parseString(popen_output) - image_spec = None - for main_child in xmldoc.childNodes: - if main_child.nodeName.lower() == "imagespec": - image_spec = main_child - break - - info = {} - if not image_spec: - return info - - def child_check(node): - if len(node.childNodes) != 1: - self.FailRender(( - "Implementation BUG. 
Node {} has more children than 1" - ).format(node.nodeName)) - - for child in image_spec.childNodes: - if child.nodeName in LIST_KEYS: - values = [] - for node in child.childNodes: - child_check(node) - values.append(node.childNodes[0].nodeValue) - - info[child.nodeName] = values - - elif child.nodeName in INT_KEYS: - child_check(child) - info[child.nodeName] = int(child.childNodes[0].nodeValue) - - else: - child_check(child) - info[child.nodeName] = child.childNodes[0].nodeValue - return info diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index eea6436b53..46c333c4c4 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -199,8 +199,10 @@ class SyncToAvalonEvent(BaseEvent): if proj: ftrack_id = proj["data"].get("ftrackId") if ftrack_id is None: - ftrack_id = self._update_project_ftrack_id() - proj["data"]["ftrackId"] = ftrack_id + self.handle_missing_ftrack_id(proj) + ftrack_id = proj["data"]["ftrackId"] + self._avalon_ents_by_ftrack_id[ftrack_id] = proj + self._avalon_ents_by_ftrack_id[ftrack_id] = proj for ent in ents: ftrack_id = ent["data"].get("ftrackId") @@ -209,15 +211,78 @@ class SyncToAvalonEvent(BaseEvent): self._avalon_ents_by_ftrack_id[ftrack_id] = ent return self._avalon_ents_by_ftrack_id - def _update_project_ftrack_id(self): - ftrack_id = self.cur_project["id"] + def handle_missing_ftrack_id(self, doc): + # TODO handling of missing ftrack id is primarily issue of editorial + # publishing it would be better to find out what causes that + # ftrack id is removed during the publishing + ftrack_id = doc["data"].get("ftrackId") + if ftrack_id is not None: + return + if doc["type"] == "project": + ftrack_id = self.cur_project["id"] + + self.dbcon.update_one( + {"type": "project"}, + {"$set": { + "data.ftrackId": ftrack_id, + "data.entityType": self.cur_project.entity_type + }} + ) + + doc["data"]["ftrackId"] = ftrack_id + doc["data"]["entityType"] = self.cur_project.entity_type + self.log.info("Updated ftrack id of project \"{}\"".format( + self.cur_project["full_name"] + )) + return + + if doc["type"] != "asset": + return + + doc_parents = doc.get("data", {}).get("parents") + if doc_parents is None: + return + + entities = self.process_session.query(( + "select id, link from TypedContext" + " where project_id is \"{}\" and name is \"{}\"" + ).format(self.cur_project["id"], doc["name"])).all() + self.log.info("Entities: {}".format(str(entities))) + matching_entity = None + for entity in entities: + parents = [] + for item in entity["link"]: + if item["id"] == entity["id"]: + break + low_type = item["type"].lower() + if low_type == "typedcontext": + parents.append(item["name"]) + if doc_parents == parents: + matching_entity = entity + break + + if matching_entity is None: + return + + ftrack_id = matching_entity["id"] self.dbcon.update_one( - {"type": "project"}, - {"$set": {"data.ftrackId": ftrack_id}} + {"_id": doc["_id"]}, + {"$set": { + "data.ftrackId": ftrack_id, + "data.entityType": matching_entity.entity_type + }} ) + doc["data"]["ftrackId"] = ftrack_id + doc["data"]["entityType"] = matching_entity.entity_type - return ftrack_id + entity_path_items = [] + for item in entity["link"]: + entity_path_items.append(item["name"]) + self.log.info("Updated ftrack id of entity \"{}\"".format( + "/".join(entity_path_items) + )) + self._avalon_ents_by_ftrack_id[ftrack_id] = 
doc @property def avalon_subsets_by_parents(self): @@ -857,7 +922,14 @@ class SyncToAvalonEvent(BaseEvent): if vis_par is None: vis_par = proj["_id"] parent_ent = self.avalon_ents_by_id[vis_par] - parent_ftrack_id = parent_ent["data"]["ftrackId"] + + parent_ftrack_id = parent_ent["data"].get("ftrackId") + if parent_ftrack_id is None: + self.handle_missing_ftrack_id(parent_ent) + parent_ftrack_id = parent_ent["data"].get("ftrackId") + if parent_ftrack_id is None: + continue + parent_ftrack_ent = self.ftrack_ents_by_id.get( parent_ftrack_id ) @@ -2128,7 +2200,13 @@ class SyncToAvalonEvent(BaseEvent): vis_par = avalon_ent["parent"] parent_ent = self.avalon_ents_by_id[vis_par] - parent_ftrack_id = parent_ent["data"]["ftrackId"] + parent_ftrack_id = parent_ent["data"].get("ftrackId") + if parent_ftrack_id is None: + self.handle_missing_ftrack_id(parent_ent) + parent_ftrack_id = parent_ent["data"].get("ftrackId") + if parent_ftrack_id is None: + continue + if parent_ftrack_id not in entities_dict: entities_dict[parent_ftrack_id] = { "children": [], diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py index efc1e76775..96243c8c36 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py +++ b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py @@ -87,8 +87,8 @@ class UserAssigmentEvent(BaseEvent): if not user_id: return None, None - task = session.query('Task where id is "{}"'.format(task_id)).one() - user = session.query('User where id is "{}"'.format(user_id)).one() + task = session.query('Task where id is "{}"'.format(task_id)).first() + user = session.query('User where id is "{}"'.format(user_id)).first() return task, user diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py index c66d1819ac..1b694e25f1 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py @@ -5,11 +5,11 @@ import uuid import clique from pymongo import UpdateOne -from openpype_modules.ftrack.lib import BaseAction, statics_icon from avalon.api import AvalonMongoDB -from openpype.api import Anatomy -import avalon.pipeline +from openpype.api import Anatomy +from openpype.lib import StringTemplate, TemplateUnsolved +from openpype_modules.ftrack.lib import BaseAction, statics_icon class DeleteOldVersions(BaseAction): @@ -563,18 +563,16 @@ class DeleteOldVersions(BaseAction): try: context = representation["context"] context["root"] = anatomy.roots - path = avalon.pipeline.format_template_with_optional_keys( - context, template - ) + path = StringTemplate.format_strict_template(template, context) if "frame" in context: context["frame"] = self.sequence_splitter sequence_path = os.path.normpath( - avalon.pipeline.format_template_with_optional_keys( + StringTemplate.format_strict_template( context, template ) ) - except KeyError: + except (KeyError, TemplateUnsolved): # Template references unavailable data return (None, None) diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py new file mode 100644 index 0000000000..3888379e04 --- /dev/null +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -0,0 +1,494 @@ +import os +import sys +import 
json +import collections +import tempfile +import datetime + +import ftrack_api + +from avalon.api import AvalonMongoDB +from openpype.api import get_project_settings +from openpype.lib import ( + get_workfile_template_key, + get_workdir_data, + Anatomy, + StringTemplate, +) +from openpype_modules.ftrack.lib import BaseAction, statics_icon +from openpype_modules.ftrack.lib.avalon_sync import create_chunks + +NOT_SYNCHRONIZED_TITLE = "Not synchronized" + + +class FillWorkfileAttributeAction(BaseAction): + """Action fill work filename into custom attribute on tasks. + + Prerequirements are that the project is synchronized so it is possible to + access project anatomy and project/asset documents. Tasks that are not + synchronized are skipped too. + """ + + identifier = "fill.workfile.attr" + label = "OpenPype Admin" + variant = "- Fill workfile attribute" + description = "Precalculate and fill workfile name into a custom attribute" + icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") + + settings_key = "fill_workfile_attribute" + + def discover(self, session, entities, event): + """ Validate selection. """ + is_valid = False + for ent in event["data"]["selection"]: + # Ignore entities that are not tasks or projects + if ent["entityType"].lower() in ["show", "task"]: + is_valid = True + break + + if is_valid: + is_valid = self.valid_roles(session, entities, event) + return is_valid + + def launch(self, session, entities, event): + # Separate entities and get project entity + project_entity = None + for entity in entities: + if project_entity is None: + project_entity = self.get_project_from_entity(entity) + break + + if not project_entity: + return { + "message": ( + "Couldn't find project entity." + " Could be an issue with permissions." + ), + "success": False + } + + # Get project settings and check if custom attribute where workfile + # should be set is defined. 
+ project_name = project_entity["full_name"] + project_settings = get_project_settings(project_name) + custom_attribute_key = ( + project_settings + .get("ftrack", {}) + .get("user_handlers", {}) + .get(self.settings_key, {}) + .get("custom_attribute_key") + ) + if not custom_attribute_key: + return { + "success": False, + "message": "Custom attribute key is not set in settings" + } + + # Try to find the custom attribute + # - get Task type object id + task_obj_type = session.query( + "select id from ObjectType where name is \"Task\"" + ).one() + # - get text custom attribute type + text_type = session.query( + "select id from CustomAttributeType where name is \"text\"" + ).one() + # - find the attribute + attr_conf = session.query( + ( + "select id, key from CustomAttributeConfiguration" + " where object_type_id is \"{}\"" + " and type_id is \"{}\"" + " and key is \"{}\"" + ).format( + task_obj_type["id"], text_type["id"], custom_attribute_key + ) + ).first() + if not attr_conf: + return { + "success": False, + "message": ( + "Could not find Task (text) Custom attribute \"{}\"" + ).format(custom_attribute_key) + } + + # Store report information + report = collections.defaultdict(list) + user_entity = session.query( + "User where id is {}".format(event["source"]["user"]["id"]) + ).one() + job_entity = session.create("Job", { + "user": user_entity, + "status": "running", + "data": json.dumps({ + "description": "(0/3) Fill of workfiles started" + }) + }) + session.commit() + + try: + self.in_job_process( + session, + entities, + job_entity, + project_entity, + project_settings, + attr_conf, + report + ) + except Exception: + self.log.error( + "Fill of workfiles to custom attribute failed", exc_info=True + ) + session.rollback() + + description = "Fill of workfiles Failed (Download traceback)" + self.add_traceback_to_job( + job_entity, session, sys.exc_info(), description + ) + return { + "message": ( + "Fill of workfiles failed." + " Check job for more information" + ), + "success": False + } + + job_entity["status"] = "done" + job_entity["data"] = json.dumps({ + "description": "Fill of workfiles completed." + }) + session.commit() + if report: + temp_obj = tempfile.NamedTemporaryFile( + mode="w", + prefix="openpype_ftrack_", + suffix=".json", + delete=False + ) + temp_obj.close() + temp_filepath = temp_obj.name + with open(temp_filepath, "w") as temp_file: + json.dump(report, temp_file) + + component_name = "{}_{}".format( + "FillWorkfilesReport", + datetime.datetime.now().strftime("%y-%m-%d-%H%M") + ) + self.add_file_component_to_job( + job_entity, session, temp_filepath, component_name + ) + # Delete temp file + os.remove(temp_filepath) + self._show_report(event, report, project_name) + return { + "message": ( + "Fill of workfiles finished with few issues." + " Check job for more information" + ), + "success": True + } + + return { + "success": True, + "message": "Finished with filling of work filenames" + } + + def _show_report(self, event, report, project_name): + items = [] + title = "Fill workfiles report ({}):".format(project_name) + + for subtitle, lines in report.items(): + if items: + items.append({ + "type": "label", + "value": "---" + }) + items.append({ + "type": "label", + "value": "# {}".format(subtitle) + }) + items.append({ + "type": "label", + "value": '

<p>{}</p>'.format("<br>
".join(lines)) + }) + + self.show_interface( + items=items, + title=title, + event=event + ) + + def in_job_process( + self, + session, + entities, + job_entity, + project_entity, + project_settings, + attr_conf, + report + ): + task_entities = [] + other_entities = [] + project_selected = False + for entity in entities: + ent_type_low = entity.entity_type.lower() + if ent_type_low == "project": + project_selected = True + break + + elif ent_type_low == "task": + task_entities.append(entity) + else: + other_entities.append(entity) + + project_name = project_entity["full_name"] + + # Find matchin asset documents and map them by ftrack task entities + # - result stored to 'asset_docs_with_task_entities' is list with + # tuple `(asset document, [task entitis, ...])` + dbcon = AvalonMongoDB() + dbcon.Session["AVALON_PROJECT"] = project_name + # Quety all asset documents + asset_docs = list(dbcon.find({"type": "asset"})) + job_entity["data"] = json.dumps({ + "description": "(1/3) Asset documents queried." + }) + session.commit() + + # When project is selected then we can query whole project + if project_selected: + asset_docs_with_task_entities = self._get_asset_docs_for_project( + session, project_entity, asset_docs, report + ) + + else: + asset_docs_with_task_entities = self._get_tasks_for_selection( + session, other_entities, task_entities, asset_docs, report + ) + + job_entity["data"] = json.dumps({ + "description": "(2/3) Queried related task entities." + }) + session.commit() + + # Keep placeholders in the template unfilled + host_name = "{app}" + extension = "{ext}" + project_doc = dbcon.find_one({"type": "project"}) + project_settings = get_project_settings(project_name) + anatomy = Anatomy(project_name) + templates_by_key = {} + + operations = [] + for asset_doc, task_entities in asset_docs_with_task_entities: + for task_entity in task_entities: + workfile_data = get_workdir_data( + project_doc, asset_doc, task_entity["name"], host_name + ) + # Use version 1 for each workfile + workfile_data["version"] = 1 + workfile_data["ext"] = extension + + task_type = workfile_data["task"]["type"] + template_key = get_workfile_template_key( + task_type, host_name, project_settings=project_settings + ) + if template_key in templates_by_key: + template = templates_by_key[template_key] + else: + template = StringTemplate( + anatomy.templates[template_key]["file"] + ) + templates_by_key[template_key] = template + + result = template.format(workfile_data) + if not result.solved: + # TODO report + pass + else: + table_values = collections.OrderedDict(( + ("configuration_id", attr_conf["id"]), + ("entity_id", task_entity["id"]) + )) + operations.append( + ftrack_api.operation.UpdateEntityOperation( + "ContextCustomAttributeValue", + table_values, + "value", + ftrack_api.symbol.NOT_SET, + str(result) + ) + ) + + if operations: + for sub_operations in create_chunks(operations, 50): + for op in sub_operations: + session.recorded_operations.push(op) + session.commit() + + job_entity["data"] = json.dumps({ + "description": "(3/3) Set custom attribute values." 
+ }) + session.commit() + + def _get_entity_path(self, entity): + path_items = [] + for item in entity["link"]: + if item["type"].lower() != "project": + path_items.append(item["name"]) + return "/".join(path_items) + + def _get_asset_docs_for_project( + self, session, project_entity, asset_docs, report + ): + asset_docs_task_names = {} + + for asset_doc in asset_docs: + asset_data = asset_doc["data"] + ftrack_id = asset_data.get("ftrackId") + if not ftrack_id: + hierarchy = list(asset_data.get("parents") or []) + hierarchy.append(asset_doc["name"]) + path = "/".join(hierarchy) + report[NOT_SYNCHRONIZED_TITLE].append(path) + continue + + asset_tasks = asset_data.get("tasks") or {} + asset_docs_task_names[ftrack_id] = ( + asset_doc, list(asset_tasks.keys()) + ) + + task_entities = session.query(( + "select id, name, parent_id, link from Task where project_id is {}" + ).format(project_entity["id"])).all() + task_entities_by_parent_id = collections.defaultdict(list) + for task_entity in task_entities: + parent_id = task_entity["parent_id"] + task_entities_by_parent_id[parent_id].append(task_entity) + + output = [] + for ftrack_id, item in asset_docs_task_names.items(): + asset_doc, task_names = item + valid_task_entities = [] + for task_entity in task_entities_by_parent_id[ftrack_id]: + if task_entity["name"] in task_names: + valid_task_entities.append(task_entity) + else: + path = self._get_entity_path(task_entity) + report[NOT_SYNCHRONIZED_TITLE].append(path) + + if valid_task_entities: + output.append((asset_doc, valid_task_entities)) + + return output + + def _get_tasks_for_selection( + self, session, other_entities, task_entities, asset_docs, report + ): + all_tasks = object() + asset_docs_by_ftrack_id = {} + asset_docs_by_parent_id = collections.defaultdict(list) + for asset_doc in asset_docs: + asset_data = asset_doc["data"] + ftrack_id = asset_data.get("ftrackId") + parent_id = asset_data.get("visualParent") + asset_docs_by_parent_id[parent_id].append(asset_doc) + if ftrack_id: + asset_docs_by_ftrack_id[ftrack_id] = asset_doc + + missing_doc_ftrack_ids = {} + all_tasks_ids = set() + task_names_by_ftrack_id = collections.defaultdict(list) + for other_entity in other_entities: + ftrack_id = other_entity["id"] + if ftrack_id not in asset_docs_by_ftrack_id: + missing_doc_ftrack_ids[ftrack_id] = None + continue + all_tasks_ids.add(ftrack_id) + task_names_by_ftrack_id[ftrack_id] = all_tasks + + for task_entity in task_entities: + parent_id = task_entity["parent_id"] + if parent_id not in asset_docs_by_ftrack_id: + missing_doc_ftrack_ids[parent_id] = None + continue + + if all_tasks_ids not in all_tasks_ids: + task_names_by_ftrack_id[ftrack_id].append(task_entity["name"]) + + ftrack_ids = set() + asset_doc_with_task_names_by_id = {} + for ftrack_id, task_names in task_names_by_ftrack_id.items(): + asset_doc = asset_docs_by_ftrack_id[ftrack_id] + asset_data = asset_doc["data"] + asset_tasks = asset_data.get("tasks") or {} + + if task_names is all_tasks: + task_names = list(asset_tasks.keys()) + else: + new_task_names = [] + for task_name in task_names: + if task_name in asset_tasks: + new_task_names.append(task_name) + continue + + if ftrack_id not in missing_doc_ftrack_ids: + missing_doc_ftrack_ids[ftrack_id] = [] + if missing_doc_ftrack_ids[ftrack_id] is not None: + missing_doc_ftrack_ids[ftrack_id].append(task_name) + + task_names = new_task_names + + if task_names: + ftrack_ids.add(ftrack_id) + asset_doc_with_task_names_by_id[ftrack_id] = ( + asset_doc, task_names + ) + + task_entities = 
session.query(( + "select id, name, parent_id from Task where parent_id in ({})" + ).format(self.join_query_keys(ftrack_ids))).all() + task_entitiy_by_parent_id = collections.defaultdict(list) + for task_entity in task_entities: + parent_id = task_entity["parent_id"] + task_entitiy_by_parent_id[parent_id].append(task_entity) + + output = [] + for ftrack_id, item in asset_doc_with_task_names_by_id.items(): + asset_doc, task_names = item + valid_task_entities = [] + for task_entity in task_entitiy_by_parent_id[ftrack_id]: + if task_entity["name"] in task_names: + valid_task_entities.append(task_entity) + else: + if ftrack_id not in missing_doc_ftrack_ids: + missing_doc_ftrack_ids[ftrack_id] = [] + if missing_doc_ftrack_ids[ftrack_id] is not None: + missing_doc_ftrack_ids[ftrack_id].append(task_name) + if valid_task_entities: + output.append((asset_doc, valid_task_entities)) + + # Store report information about not synchronized entities + if missing_doc_ftrack_ids: + missing_entities = session.query( + "select id, link from TypedContext where id in ({})".format( + self.join_query_keys(missing_doc_ftrack_ids.keys()) + ) + ).all() + for missing_entity in missing_entities: + path = self._get_entity_path(missing_entity) + task_names = missing_doc_ftrack_ids[missing_entity["id"]] + if task_names is None: + report[NOT_SYNCHRONIZED_TITLE].append(path) + else: + for task_name in task_names: + task_path = "/".join([path, task_name]) + report[NOT_SYNCHRONIZED_TITLE].append(task_path) + + return output + + +def register(session): + FillWorkfileAttributeAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py index 71d790f7e7..bdb0eaf250 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_rv.py +++ b/openpype/modules/ftrack/event_handlers_user/action_rv.py @@ -3,9 +3,10 @@ import subprocess import traceback import json -from openpype_modules.ftrack.lib import BaseAction, statics_icon import ftrack_api from avalon import io, api +from openpype.pipeline import get_representation_path +from openpype_modules.ftrack.lib import BaseAction, statics_icon class RVAction(BaseAction): @@ -307,7 +308,7 @@ class RVAction(BaseAction): "name": "preview" } ) - paths.append(api.get_representation_path(representation)) + paths.append(get_representation_path(representation)) return paths diff --git a/openpype/modules/ftrack/lib/ftrack_event_handler.py b/openpype/modules/ftrack/lib/ftrack_event_handler.py index af565c5421..0a70b0e301 100644 --- a/openpype/modules/ftrack/lib/ftrack_event_handler.py +++ b/openpype/modules/ftrack/lib/ftrack_event_handler.py @@ -44,7 +44,7 @@ class BaseEvent(BaseHandler): return self._get_entities( event, session, - ignore=['socialfeed', 'socialnotification'] + ignore=['socialfeed', 'socialnotification', 'team'] ) def get_project_name_from_event(self, session, event, project_id): diff --git a/openpype/modules/ftrack/plugins/publish/collect_username.py b/openpype/modules/ftrack/plugins/publish/collect_username.py index 84d7f60a3f..a9b746ea51 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_username.py +++ b/openpype/modules/ftrack/plugins/publish/collect_username.py @@ -23,8 +23,11 @@ class CollectUsername(pyblish.api.ContextPlugin): Expects "pype.club" user created on Ftrack and FTRACK_BOT_API_KEY env var set up. 
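The order change just below (from `CollectorOrder - 0.488` to `CollectorOrder + 0.0015`) relies on pyblish running plugins in ascending `order`. A minimal illustrative sketch (not part of the diff itself), assuming pyblish is installed; the plugin names and data values are hypothetical:

```python
# Illustrative sketch only: pyblish runs plugins in ascending `order`, so a
# collector at CollectorOrder + 0.0015 executes after the default collectors,
# while one at CollectorOrder - 0.499 runs near the very start.
import pyblish.api


class CollectSomethingEarly(pyblish.api.ContextPlugin):
    order = pyblish.api.CollectorOrder - 0.499  # runs before other collectors

    def process(self, context):
        context.data["time"] = "2022-03-17T12:00:00"


class CollectSomethingLate(pyblish.api.ContextPlugin):
    order = pyblish.api.CollectorOrder + 0.0015  # runs after default collectors

    def process(self, context):
        context.data["user"] = "artist@pype.club"


plugins = [CollectSomethingLate, CollectSomethingEarly]
# Ascending sort by `order` approximates the execution order pyblish uses.
for plugin in sorted(plugins, key=lambda p: p.order):
    print(plugin.__name__, plugin.order)
```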
+ Resets `context.data["user"] to correctly populate `version.author` and + `representation.context.username` + """ - order = pyblish.api.CollectorOrder - 0.488 + order = pyblish.api.CollectorOrder + 0.0015 label = "Collect ftrack username" hosts = ["webpublisher", "photoshop"] targets = ["remotepublish", "filespublish", "tvpaint_worker"] @@ -65,3 +68,4 @@ class CollectUsername(pyblish.api.ContextPlugin): if '@' in burnin_name: burnin_name = burnin_name[:burnin_name.index('@')] os.environ["WEBPUBLISH_OPENPYPE_USERNAME"] = burnin_name + context.data["user"] = burnin_name diff --git a/openpype/modules/log_viewer/tray/widgets.py b/openpype/modules/log_viewer/tray/widgets.py index 5a67780413..ff77405de5 100644 --- a/openpype/modules/log_viewer/tray/widgets.py +++ b/openpype/modules/log_viewer/tray/widgets.py @@ -1,5 +1,5 @@ from Qt import QtCore, QtWidgets -from avalon.vendor import qtawesome +import qtawesome from .models import LogModel, LogsFilterProxy diff --git a/openpype/modules/sync_server/tray/models.py b/openpype/modules/sync_server/tray/models.py index 80f41992cb..7241cc3472 100644 --- a/openpype/modules/sync_server/tray/models.py +++ b/openpype/modules/sync_server/tray/models.py @@ -4,9 +4,9 @@ from bson.objectid import ObjectId from Qt import QtCore from Qt.QtCore import Qt +import qtawesome from openpype.tools.utils.delegates import pretty_timestamp -from avalon.vendor import qtawesome from openpype.lib import PypeLogger from openpype.api import get_local_site_id diff --git a/openpype/modules/sync_server/tray/widgets.py b/openpype/modules/sync_server/tray/widgets.py index 18487b3d11..6aae9562cf 100644 --- a/openpype/modules/sync_server/tray/widgets.py +++ b/openpype/modules/sync_server/tray/widgets.py @@ -5,6 +5,7 @@ from functools import partial from Qt import QtWidgets, QtCore, QtGui from Qt.QtCore import Qt +import qtawesome from openpype.tools.settings import style @@ -12,7 +13,6 @@ from openpype.api import get_local_site_id from openpype.lib import PypeLogger from openpype.tools.utils.delegates import pretty_timestamp -from avalon.vendor import qtawesome from .models import ( SyncRepresentationSummaryModel, diff --git a/openpype/pipeline/__init__.py b/openpype/pipeline/__init__.py index e968df4011..26970e4edc 100644 --- a/openpype/pipeline/__init__.py +++ b/openpype/pipeline/__init__.py @@ -4,11 +4,39 @@ from .create import ( BaseCreator, Creator, AutoCreator, - CreatedInstance + CreatedInstance, + + CreatorError, + + LegacyCreator, + legacy_create, +) + +from .load import ( + HeroVersionType, + IncompatibleLoaderError, + LoaderPlugin, + SubsetLoaderPlugin, + + discover_loader_plugins, + register_loader_plugin, + deregister_loader_plugin_path, + register_loader_plugin_path, + deregister_loader_plugin, + + load_container, + remove_container, + update_container, + switch_container, + + loaders_from_representation, + get_representation_path, + get_repres_contexts, ) from .publish import ( PublishValidationError, + PublishXmlValidationError, KnownPublishError, OpenPypePyblishPluginMixin ) @@ -17,12 +45,42 @@ from .publish import ( __all__ = ( "attribute_definitions", + # --- Create --- "BaseCreator", "Creator", "AutoCreator", "CreatedInstance", + "CreatorError", + + # - legacy creation + "LegacyCreator", + "legacy_create", + + # --- Load --- + "HeroVersionType", + "IncompatibleLoaderError", + "LoaderPlugin", + "SubsetLoaderPlugin", + + "discover_loader_plugins", + "register_loader_plugin", + "deregister_loader_plugin_path", + "register_loader_plugin_path", + 
"deregister_loader_plugin", + + "load_container", + "remove_container", + "update_container", + "switch_container", + + "loaders_from_representation", + "get_representation_path", + "get_repres_contexts", + + # --- Publish --- "PublishValidationError", + "PublishXmlValidationError", "KnownPublishError", "OpenPypePyblishPluginMixin" ) diff --git a/openpype/pipeline/create/__init__.py b/openpype/pipeline/create/__init__.py index 948b719851..9571f56b8f 100644 --- a/openpype/pipeline/create/__init__.py +++ b/openpype/pipeline/create/__init__.py @@ -14,6 +14,11 @@ from .context import ( CreateContext ) +from .legacy_create import ( + LegacyCreator, + legacy_create, +) + __all__ = ( "SUBSET_NAME_ALLOWED_SYMBOLS", @@ -25,5 +30,8 @@ __all__ = ( "AutoCreator", "CreatedInstance", - "CreateContext" + "CreateContext", + + "LegacyCreator", + "legacy_create", ) diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index e11d32091f..c2757a4502 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -399,15 +399,6 @@ class CreatedInstance: self._data["active"] = data.get("active", True) self._data["creator_identifier"] = creator.identifier - # QUESTION handle version of instance here or in creator? - version = None - if not new: - version = data.get("version") - - if version is None: - version = 1 - self._data["version"] = version - # Pop from source data all keys that are defined in `_data` before # this moment and through their values away # - they should be the same and if are not then should not change @@ -1014,12 +1005,14 @@ class CreateContext: if not instances: return - task_names_by_asset_name = collections.defaultdict(set) + task_names_by_asset_name = {} for instance in instances: task_name = instance.get("task") asset_name = instance.get("asset") - if asset_name and task_name: - task_names_by_asset_name[asset_name].add(task_name) + if asset_name: + task_names_by_asset_name[asset_name] = set() + if task_name: + task_names_by_asset_name[asset_name].add(task_name) asset_names = [ asset_name diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py new file mode 100644 index 0000000000..cf6629047e --- /dev/null +++ b/openpype/pipeline/create/legacy_create.py @@ -0,0 +1,157 @@ +"""Create workflow moved from avalon-core repository. + +Renamed classes and functions +- 'Creator' -> 'LegacyCreator' +- 'create' -> 'legacy_create' +""" + +import logging +import collections + +from openpype.lib import get_subset_name + + +class LegacyCreator(object): + """Determine how assets are created""" + label = None + family = None + defaults = None + maintain_selection = True + + dynamic_subset_keys = [] + + log = logging.getLogger("LegacyCreator") + log.propagate = True + + def __init__(self, name, asset, options=None, data=None): + self.name = name # For backwards compatibility + self.options = options + + # Default data + self.data = collections.OrderedDict() + self.data["id"] = "pyblish.avalon.instance" + self.data["family"] = self.family + self.data["asset"] = asset + self.data["subset"] = name + self.data["active"] = True + + self.data.update(data or {}) + + def process(self): + pass + + @classmethod + def get_dynamic_data( + cls, variant, task_name, asset_id, project_name, host_name + ): + """Return dynamic data for current Creator plugin. + + By default return keys from `dynamic_subset_keys` attribute as mapping + to keep formatted template unchanged. 
+ + ``` + dynamic_subset_keys = ["my_key"] + --- + output = { + "my_key": "{my_key}" + } + ``` + + Dynamic keys may override default Creator keys (family, task, asset, + ...) but do it wisely if you need. + + All of keys will be converted into 3 variants unchanged, capitalized + and all upper letters. Because of that are all keys lowered. + + This method can be modified to prefill some values just keep in mind it + is class method. + + Returns: + dict: Fill data for subset name template. + """ + dynamic_data = {} + for key in cls.dynamic_subset_keys: + key = key.lower() + dynamic_data[key] = "{" + key + "}" + return dynamic_data + + @classmethod + def get_subset_name( + cls, variant, task_name, asset_id, project_name, host_name=None + ): + """Return subset name created with entered arguments. + + Logic extracted from Creator tool. This method should give ability + to get subset name without the tool. + + TODO: Maybe change `variant` variable. + + By default is output concatenated family with user text. + + Args: + variant (str): What is entered by user in creator tool. + task_name (str): Context's task name. + asset_id (ObjectId): Mongo ID of context's asset. + project_name (str): Context's project name. + host_name (str): Name of host. + + Returns: + str: Formatted subset name with entered arguments. Should match + config's logic. + """ + + dynamic_data = cls.get_dynamic_data( + variant, task_name, asset_id, project_name, host_name + ) + + return get_subset_name( + cls.family, + variant, + task_name, + asset_id, + project_name, + host_name, + dynamic_data=dynamic_data + ) + + +def legacy_create(Creator, name, asset, options=None, data=None): + """Create a new instance + + Associate nodes with a subset and family. These nodes are later + validated, according to their `family`, and integrated into the + shared environment, relative their `subset`. + + Data relative each family, along with default data, are imprinted + into the resulting objectSet. This data is later used by extractors + and finally asset browsers to help identify the origin of the asset. 
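As a rough usage sketch of the moved API (not part of the diff itself), assuming an OpenPype environment is importable; the creator subclass, asset and subset names below are made up:

```python
# Illustrative sketch only: constructing a LegacyCreator subclass directly
# shows the default data it imprints on a new instance. Names are hypothetical.
from openpype.pipeline import LegacyCreator, legacy_create


class CreateModel(LegacyCreator):
    """Hypothetical creator producing a 'model' family instance."""
    label = "Model"
    family = "model"
    defaults = ["Main"]


plugin = CreateModel("modelMain", "characterA", data={"variant": "Main"})
print(dict(plugin.data))
# -> {'id': 'pyblish.avalon.instance', 'family': 'model',
#     'asset': 'characterA', 'subset': 'modelMain', 'active': True,
#     'variant': 'Main'}

# Inside a host integration the same class would be triggered through
# legacy_create(), which wraps plugin.process() in the host's
# maintained_selection() when `maintain_selection` is True:
# instance = legacy_create(CreateModel, "modelMain", "characterA")
```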
+ + Arguments: + Creator (Creator): Class of creator + name (str): Name of subset + asset (str): Name of asset + options (dict, optional): Additional options from GUI + data (dict, optional): Additional data from GUI + + Raises: + NameError on `subset` already exists + KeyError on invalid dynamic property + RuntimeError on host error + + Returns: + Name of instance + + """ + from avalon.api import registered_host + host = registered_host() + plugin = Creator(name, asset, options, data) + + if plugin.maintain_selection is True: + with host.maintained_selection(): + print("Running %s with maintained selection" % plugin) + instance = plugin.process() + return instance + + print("Running %s" % plugin) + instance = plugin.process() + return instance diff --git a/openpype/pipeline/lib/__init__.py b/openpype/pipeline/lib/__init__.py index ed38889c66..f762c4205d 100644 --- a/openpype/pipeline/lib/__init__.py +++ b/openpype/pipeline/lib/__init__.py @@ -1,8 +1,3 @@ -from .events import ( - BaseEvent, - BeforeWorkfileSave -) - from .attribute_definitions import ( AbtractAttrDef, @@ -20,9 +15,6 @@ from .attribute_definitions import ( __all__ = ( - "BaseEvent", - "BeforeWorkfileSave", - "AbtractAttrDef", "UIDef", diff --git a/openpype/pipeline/lib/events.py b/openpype/pipeline/lib/events.py deleted file mode 100644 index 05dea20e8c..0000000000 --- a/openpype/pipeline/lib/events.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Events holding data about specific event.""" - - -# Inherit from 'object' for Python 2 hosts -class BaseEvent(object): - """Base event object. - - Can be used to anything because data are not much specific. Only required - argument is topic which defines why event is happening and may be used for - filtering. - - Arg: - topic (str): Identifier of event. - data (Any): Data specific for event. Dictionary is recommended. - """ - _data = {} - - def __init__(self, topic, data=None): - self._topic = topic - if data is None: - data = {} - self._data = data - - @property - def data(self): - return self._data - - @property - def topic(self): - return self._topic - - @classmethod - def emit(cls, *args, **kwargs): - """Create object of event and emit. - - Args: - Same args as '__init__' expects which may be class specific. 
- """ - from avalon import pipeline - - obj = cls(*args, **kwargs) - pipeline.emit(obj.topic, [obj]) - return obj - - -class BeforeWorkfileSave(BaseEvent): - """Before workfile changes event data.""" - def __init__(self, filename, workdir): - super(BeforeWorkfileSave, self).__init__("before.workfile.save") - self.filename = filename - self.workdir_path = workdir diff --git a/openpype/pipeline/load/__init__.py b/openpype/pipeline/load/__init__.py new file mode 100644 index 0000000000..6e7612d4c1 --- /dev/null +++ b/openpype/pipeline/load/__init__.py @@ -0,0 +1,78 @@ +from .utils import ( + HeroVersionType, + IncompatibleLoaderError, + + get_repres_contexts, + get_subset_contexts, + get_representation_context, + + load_with_repre_context, + load_with_subset_context, + load_with_subset_contexts, + + load_container, + remove_container, + update_container, + switch_container, + + get_loader_identifier, + + get_representation_path_from_context, + get_representation_path, + + is_compatible_loader, + + loaders_from_repre_context, + loaders_from_representation, +) + +from .plugins import ( + LoaderPlugin, + SubsetLoaderPlugin, + + discover_loader_plugins, + register_loader_plugin, + deregister_loader_plugin_path, + register_loader_plugin_path, + deregister_loader_plugin, +) + + +__all__ = ( + # utils.py + "HeroVersionType", + "IncompatibleLoaderError", + + "get_repres_contexts", + "get_subset_contexts", + "get_representation_context", + + "load_with_repre_context", + "load_with_subset_context", + "load_with_subset_contexts", + + "load_container", + "remove_container", + "update_container", + "switch_container", + + "get_loader_identifier", + + "get_representation_path_from_context", + "get_representation_path", + + "is_compatible_loader", + + "loaders_from_repre_context", + "loaders_from_representation", + + # plugins.py + "LoaderPlugin", + "SubsetLoaderPlugin", + + "discover_loader_plugins", + "register_loader_plugin", + "deregister_loader_plugin_path", + "register_loader_plugin_path", + "deregister_loader_plugin", +) diff --git a/openpype/pipeline/load/plugins.py b/openpype/pipeline/load/plugins.py new file mode 100644 index 0000000000..601ad3b258 --- /dev/null +++ b/openpype/pipeline/load/plugins.py @@ -0,0 +1,130 @@ +import logging + +from .utils import get_representation_path_from_context + + +class LoaderPlugin(list): + """Load representation into host application + + Arguments: + context (dict): avalon-core:context-1.0 + name (str, optional): Use pre-defined name + namespace (str, optional): Use pre-defined namespace + + .. 
versionadded:: 4.0 + This class was introduced + + """ + + families = list() + representations = list() + order = 0 + is_multiple_contexts_compatible = False + + options = [] + + log = logging.getLogger("SubsetLoader") + log.propagate = True + + def __init__(self, context): + self.fname = self.filepath_from_context(context) + + @classmethod + def get_representations(cls): + return cls.representations + + def filepath_from_context(self, context): + return get_representation_path_from_context(context) + + def load(self, context, name=None, namespace=None, options=None): + """Load asset via database + + Arguments: + context (dict): Full parenthood of representation to load + name (str, optional): Use pre-defined name + namespace (str, optional): Use pre-defined namespace + options (dict, optional): Additional settings dictionary + + """ + raise NotImplementedError("Loader.load() must be " + "implemented by subclass") + + def update(self, container, representation): + """Update `container` to `representation` + + Arguments: + container (avalon-core:container-1.0): Container to update, + from `host.ls()`. + representation (dict): Update the container to this representation. + + """ + raise NotImplementedError("Loader.update() must be " + "implemented by subclass") + + def remove(self, container): + """Remove a container + + Arguments: + container (avalon-core:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted + + """ + + raise NotImplementedError("Loader.remove() must be " + "implemented by subclass") + + @classmethod + def get_options(cls, contexts): + """ + Returns static (cls) options or could collect from 'contexts'. + + Args: + contexts (list): of repre or subset contexts + Returns: + (list) + """ + return cls.options or [] + + +class SubsetLoaderPlugin(LoaderPlugin): + """Load subset into host application + Arguments: + context (dict): avalon-core:context-1.0 + name (str, optional): Use pre-defined name + namespace (str, optional): Use pre-defined namespace + """ + + def __init__(self, context): + pass + + +def discover_loader_plugins(): + import avalon.api + + return avalon.api.discover(LoaderPlugin) + + +def register_loader_plugin(plugin): + import avalon.api + + return avalon.api.register_plugin(LoaderPlugin, plugin) + + +def deregister_loader_plugin_path(path): + import avalon.api + + avalon.api.deregister_plugin_path(LoaderPlugin, path) + + +def register_loader_plugin_path(path): + import avalon.api + + return avalon.api.register_plugin_path(LoaderPlugin, path) + + +def deregister_loader_plugin(plugin): + import avalon.api + avalon.api.deregister_plugin(LoaderPlugin, plugin) diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py new file mode 100644 index 0000000000..6d32c11cd7 --- /dev/null +++ b/openpype/pipeline/load/utils.py @@ -0,0 +1,707 @@ +import os +import platform +import copy +import getpass +import logging +import inspect +import numbers + +import six + +from avalon import io, schema +from avalon.api import Session, registered_root + +from openpype.lib import Anatomy + +log = logging.getLogger(__name__) + + +class HeroVersionType(object): + def __init__(self, version): + assert isinstance(version, numbers.Integral), ( + "Version is not an integer. 
\"{}\" {}".format( + version, str(type(version)) + ) + ) + self.version = version + + def __str__(self): + return str(self.version) + + def __int__(self): + return int(self.version) + + def __format__(self, format_spec): + return self.version.__format__(format_spec) + + +class IncompatibleLoaderError(ValueError): + """Error when Loader is incompatible with a representation.""" + pass + + +def get_repres_contexts(representation_ids, dbcon=None): + """Return parenthood context for representation. + + Args: + representation_ids (list): The representation ids. + dbcon (AvalonMongoDB): Mongo connection object. `avalon.io` used when + not entered. + + Returns: + dict: The full representation context by representation id. + keys are repre_id, value is dictionary with full: + asset_doc + version_doc + subset_doc + repre_doc + + """ + if not dbcon: + dbcon = io + + contexts = {} + if not representation_ids: + return contexts + + _representation_ids = [] + for repre_id in representation_ids: + if isinstance(repre_id, six.string_types): + repre_id = io.ObjectId(repre_id) + _representation_ids.append(repre_id) + + repre_docs = dbcon.find({ + "type": "representation", + "_id": {"$in": _representation_ids} + }) + repre_docs_by_id = {} + version_ids = set() + for repre_doc in repre_docs: + version_ids.add(repre_doc["parent"]) + repre_docs_by_id[repre_doc["_id"]] = repre_doc + + version_docs = dbcon.find({ + "type": {"$in": ["version", "hero_version"]}, + "_id": {"$in": list(version_ids)} + }) + + version_docs_by_id = {} + hero_version_docs = [] + versions_for_hero = set() + subset_ids = set() + for version_doc in version_docs: + if version_doc["type"] == "hero_version": + hero_version_docs.append(version_doc) + versions_for_hero.add(version_doc["version_id"]) + version_docs_by_id[version_doc["_id"]] = version_doc + subset_ids.add(version_doc["parent"]) + + if versions_for_hero: + _version_docs = dbcon.find({ + "type": "version", + "_id": {"$in": list(versions_for_hero)} + }) + _version_data_by_id = { + version_doc["_id"]: version_doc["data"] + for version_doc in _version_docs + } + + for hero_version_doc in hero_version_docs: + hero_version_id = hero_version_doc["_id"] + version_id = hero_version_doc["version_id"] + version_data = copy.deepcopy(_version_data_by_id[version_id]) + version_docs_by_id[hero_version_id]["data"] = version_data + + subset_docs = dbcon.find({ + "type": "subset", + "_id": {"$in": list(subset_ids)} + }) + subset_docs_by_id = {} + asset_ids = set() + for subset_doc in subset_docs: + subset_docs_by_id[subset_doc["_id"]] = subset_doc + asset_ids.add(subset_doc["parent"]) + + asset_docs = dbcon.find({ + "type": "asset", + "_id": {"$in": list(asset_ids)} + }) + asset_docs_by_id = { + asset_doc["_id"]: asset_doc + for asset_doc in asset_docs + } + + project_doc = dbcon.find_one({"type": "project"}) + + for repre_id, repre_doc in repre_docs_by_id.items(): + version_doc = version_docs_by_id[repre_doc["parent"]] + subset_doc = subset_docs_by_id[version_doc["parent"]] + asset_doc = asset_docs_by_id[subset_doc["parent"]] + context = { + "project": { + "name": project_doc["name"], + "code": project_doc["data"].get("code") + }, + "asset": asset_doc, + "subset": subset_doc, + "version": version_doc, + "representation": repre_doc, + } + contexts[repre_id] = context + + return contexts + + +def get_subset_contexts(subset_ids, dbcon=None): + """Return parenthood context for subset. + + Provides context on subset granularity - less detail than + 'get_repre_contexts'. 
+ Args: + subset_ids (list): The subset ids. + dbcon (AvalonMongoDB): Mongo connection object. `avalon.io` used when + not entered. + Returns: + dict: The full representation context by representation id. + """ + if not dbcon: + dbcon = io + + contexts = {} + if not subset_ids: + return contexts + + _subset_ids = set() + for subset_id in subset_ids: + if isinstance(subset_id, six.string_types): + subset_id = io.ObjectId(subset_id) + _subset_ids.add(subset_id) + + subset_docs = dbcon.find({ + "type": "subset", + "_id": {"$in": list(_subset_ids)} + }) + subset_docs_by_id = {} + asset_ids = set() + for subset_doc in subset_docs: + subset_docs_by_id[subset_doc["_id"]] = subset_doc + asset_ids.add(subset_doc["parent"]) + + asset_docs = dbcon.find({ + "type": "asset", + "_id": {"$in": list(asset_ids)} + }) + asset_docs_by_id = { + asset_doc["_id"]: asset_doc + for asset_doc in asset_docs + } + + project_doc = dbcon.find_one({"type": "project"}) + + for subset_id, subset_doc in subset_docs_by_id.items(): + asset_doc = asset_docs_by_id[subset_doc["parent"]] + context = { + "project": { + "name": project_doc["name"], + "code": project_doc["data"].get("code") + }, + "asset": asset_doc, + "subset": subset_doc + } + contexts[subset_id] = context + + return contexts + + +def get_representation_context(representation): + """Return parenthood context for representation. + + Args: + representation (str or io.ObjectId or dict): The representation id + or full representation as returned by the database. + + Returns: + dict: The full representation context. + + """ + + assert representation is not None, "This is a bug" + + if isinstance(representation, (six.string_types, io.ObjectId)): + representation = io.find_one( + {"_id": io.ObjectId(str(representation))}) + + version, subset, asset, project = io.parenthood(representation) + + assert all([representation, version, subset, asset, project]), ( + "This is a bug" + ) + + context = { + "project": { + "name": project["name"], + "code": project["data"].get("code", '') + }, + "asset": asset, + "subset": subset, + "version": version, + "representation": representation, + } + + return context + + +def load_with_repre_context( + Loader, repre_context, namespace=None, name=None, options=None, **kwargs +): + + # Ensure the Loader is compatible for the representation + if not is_compatible_loader(Loader, repre_context): + raise IncompatibleLoaderError( + "Loader {} is incompatible with {}".format( + Loader.__name__, repre_context["subset"]["name"] + ) + ) + + # Ensure options is a dictionary when no explicit options provided + if options is None: + options = kwargs.get("data", dict()) # "data" for backward compat + + assert isinstance(options, dict), "Options must be a dictionary" + + # Fallback to subset when name is None + if name is None: + name = repre_context["subset"]["name"] + + log.info( + "Running '%s' on '%s'" % ( + Loader.__name__, repre_context["asset"]["name"] + ) + ) + + loader = Loader(repre_context) + return loader.load(repre_context, name, namespace, options) + + +def load_with_subset_context( + Loader, subset_context, namespace=None, name=None, options=None, **kwargs +): + + # Ensure options is a dictionary when no explicit options provided + if options is None: + options = kwargs.get("data", dict()) # "data" for backward compat + + assert isinstance(options, dict), "Options must be a dictionary" + + # Fallback to subset when name is None + if name is None: + name = subset_context["subset"]["name"] + + log.info( + "Running '%s' on '%s'" % ( + 
Loader.__name__, subset_context["asset"]["name"] + ) + ) + + loader = Loader(subset_context) + return loader.load(subset_context, name, namespace, options) + + +def load_with_subset_contexts( + Loader, subset_contexts, namespace=None, name=None, options=None, **kwargs +): + + # Ensure options is a dictionary when no explicit options provided + if options is None: + options = kwargs.get("data", dict()) # "data" for backward compat + + assert isinstance(options, dict), "Options must be a dictionary" + + # Fallback to subset when name is None + joined_subset_names = " | ".join( + context["subset"]["name"] + for context in subset_contexts + ) + if name is None: + name = joined_subset_names + + log.info( + "Running '{}' on '{}'".format(Loader.__name__, joined_subset_names) + ) + + loader = Loader(subset_contexts) + return loader.load(subset_contexts, name, namespace, options) + + +def load_container( + Loader, representation, namespace=None, name=None, options=None, **kwargs +): + """Use Loader to load a representation. + + Args: + Loader (Loader): The loader class to trigger. + representation (str or io.ObjectId or dict): The representation id + or full representation as returned by the database. + namespace (str, Optional): The namespace to assign. Defaults to None. + name (str, Optional): The name to assign. Defaults to subset name. + options (dict, Optional): Additional options to pass on to the loader. + + Returns: + The return of the `loader.load()` method. + + Raises: + IncompatibleLoaderError: When the loader is not compatible with + the representation. + + """ + + context = get_representation_context(representation) + return load_with_repre_context( + Loader, + context, + namespace=namespace, + name=name, + options=options, + **kwargs + ) + + +def get_loader_identifier(loader): + """Loader identifier from loader plugin or object. + + Identifier should be stored to container for future management. + """ + if not inspect.isclass(loader): + loader = loader.__class__ + return loader.__name__ + + +def _get_container_loader(container): + """Return the Loader corresponding to the container""" + from .plugins import discover_loader_plugins + + loader = container["loader"] + for Plugin in discover_loader_plugins(): + # TODO: Ensure the loader is valid + if get_loader_identifier(Plugin) == loader: + return Plugin + return None + + +def remove_container(container): + """Remove a container""" + + Loader = _get_container_loader(container) + if not Loader: + raise RuntimeError("Can't remove container. 
See log for details.") + + loader = Loader(get_representation_context(container["representation"])) + return loader.remove(container) + + +def update_container(container, version=-1): + """Update a container""" + + # Compute the different version from 'representation' + current_representation = io.find_one({ + "_id": io.ObjectId(container["representation"]) + }) + + assert current_representation is not None, "This is a bug" + + current_version, subset, asset, project = io.parenthood( + current_representation) + + if version == -1: + new_version = io.find_one({ + "type": "version", + "parent": subset["_id"] + }, sort=[("name", -1)]) + else: + if isinstance(version, HeroVersionType): + version_query = { + "parent": subset["_id"], + "type": "hero_version" + } + else: + version_query = { + "parent": subset["_id"], + "type": "version", + "name": version + } + new_version = io.find_one(version_query) + + assert new_version is not None, "This is a bug" + + new_representation = io.find_one({ + "type": "representation", + "parent": new_version["_id"], + "name": current_representation["name"] + }) + + assert new_representation is not None, "Representation wasn't found" + + path = get_representation_path(new_representation) + assert os.path.exists(path), "Path {} doesn't exist".format(path) + + # Run update on the Loader for this container + Loader = _get_container_loader(container) + if not Loader: + raise RuntimeError("Can't update container. See log for details.") + + loader = Loader(get_representation_context(container["representation"])) + return loader.update(container, new_representation) + + +def switch_container(container, representation, loader_plugin=None): + """Switch a container to representation + + Args: + container (dict): container information + representation (dict): representation data from document + + Returns: + function call + """ + + # Get the Loader for this container + if loader_plugin is None: + loader_plugin = _get_container_loader(container) + + if not loader_plugin: + raise RuntimeError("Can't switch container. See log for details.") + + if not hasattr(loader_plugin, "switch"): + # Backwards compatibility (classes without switch support + # might be better to just have "switch" raise NotImplementedError + # on the base class of Loader\ + raise RuntimeError("Loader '{}' does not support 'switch'".format( + loader_plugin.label + )) + + # Get the new representation to switch to + new_representation = io.find_one({ + "type": "representation", + "_id": representation["_id"], + }) + + new_context = get_representation_context(new_representation) + if not is_compatible_loader(loader_plugin, new_context): + raise AssertionError("Must be compatible Loader") + + loader = loader_plugin(new_context) + + return loader.switch(container, new_representation) + + +def get_representation_path_from_context(context): + """Preparation wrapper using only context as a argument""" + representation = context['representation'] + project_doc = context.get("project") + root = None + session_project = Session.get("AVALON_PROJECT") + if project_doc and project_doc["name"] != session_project: + anatomy = Anatomy(project_doc["name"]) + root = anatomy.roots + + return get_representation_path(representation, root) + + +def get_representation_path(representation, root=None, dbcon=None): + """Get filename from representation document + + There are three ways of getting the path from representation which are + tried in following sequence until successful. + 1. 
Get template from representation['data']['template'] and data from + representation['context']. Then format template with the data. + 2. Get template from project['config'] and format it with default data set + 3. Get representation['data']['path'] and use it directly + + Args: + representation(dict): representation document from the database + + Returns: + str: fullpath of the representation + + """ + + from openpype.lib import StringTemplate, TemplateUnsolved + + if dbcon is None: + dbcon = io + + if root is None: + root = registered_root() + + def path_from_represenation(): + try: + template = representation["data"]["template"] + except KeyError: + return None + + try: + context = representation["context"] + context["root"] = root + path = StringTemplate.format_strict_template( + template, context + ) + # Force replacing backslashes with forward slashed if not on + # windows + if platform.system().lower() != "windows": + path = path.replace("\\", "/") + except (TemplateUnsolved, KeyError): + # Template references unavailable data + return None + + if not path: + return path + + normalized_path = os.path.normpath(path) + if os.path.exists(normalized_path): + return normalized_path + return path + + def path_from_config(): + try: + version_, subset, asset, project = dbcon.parenthood(representation) + except ValueError: + log.debug( + "Representation %s wasn't found in database, " + "like a bug" % representation["name"] + ) + return None + + try: + template = project["config"]["template"]["publish"] + except KeyError: + log.debug( + "No template in project %s, " + "likely a bug" % project["name"] + ) + return None + + # default list() in get would not discover missing parents on asset + parents = asset.get("data", {}).get("parents") + if parents is not None: + hierarchy = "/".join(parents) + + # Cannot fail, required members only + data = { + "root": root, + "project": { + "name": project["name"], + "code": project.get("data", {}).get("code") + }, + "asset": asset["name"], + "silo": asset.get("silo"), + "hierarchy": hierarchy, + "subset": subset["name"], + "version": version_["name"], + "representation": representation["name"], + "family": representation.get("context", {}).get("family"), + "user": dbcon.Session.get("AVALON_USER", getpass.getuser()), + "app": dbcon.Session.get("AVALON_APP", ""), + "task": dbcon.Session.get("AVALON_TASK", "") + } + + try: + template_obj = StringTemplate(template) + path = str(template_obj.format(data)) + # Force replacing backslashes with forward slashed if not on + # windows + if platform.system().lower() != "windows": + path = path.replace("\\", "/") + + except KeyError as e: + log.debug("Template references unavailable data: %s" % e) + return None + + normalized_path = os.path.normpath(path) + if os.path.exists(normalized_path): + return normalized_path + return path + + def path_from_data(): + if "path" not in representation["data"]: + return None + + path = representation["data"]["path"] + # Force replacing backslashes with forward slashed if not on + # windows + if platform.system().lower() != "windows": + path = path.replace("\\", "/") + + if os.path.exists(path): + return os.path.normpath(path) + + dir_path, file_name = os.path.split(path) + if not os.path.exists(dir_path): + return + + base_name, ext = os.path.splitext(file_name) + file_name_items = None + if "#" in base_name: + file_name_items = [part for part in base_name.split("#") if part] + elif "%" in base_name: + file_name_items = base_name.split("%") + + if not file_name_items: + return + + 
filename_start = file_name_items[0] + + for _file in os.listdir(dir_path): + if _file.startswith(filename_start) and _file.endswith(ext): + return os.path.normpath(path) + + return ( + path_from_represenation() or + path_from_config() or + path_from_data() + ) + + +def is_compatible_loader(Loader, context): + """Return whether a loader is compatible with a context. + + This checks the version's families and the representation for the given + Loader. + + Returns: + bool + + """ + maj_version, _ = schema.get_schema_version(context["subset"]["schema"]) + if maj_version < 3: + families = context["version"]["data"].get("families", []) + else: + families = context["subset"]["data"]["families"] + + representation = context["representation"] + has_family = ( + "*" in Loader.families or any( + family in Loader.families for family in families + ) + ) + representations = Loader.get_representations() + has_representation = ( + "*" in representations or representation["name"] in representations + ) + return has_family and has_representation + + +def loaders_from_repre_context(loaders, repre_context): + """Return compatible loaders for by representaiton's context.""" + + return [ + loader + for loader in loaders + if is_compatible_loader(loader, repre_context) + ] + + +def loaders_from_representation(loaders, representation): + """Return all compatible loaders for a representation.""" + + context = get_representation_context(representation) + return loaders_from_repre_context(loaders, context) diff --git a/openpype/pipeline/publish/__init__.py b/openpype/pipeline/publish/__init__.py index ca958816fe..c2729a46ce 100644 --- a/openpype/pipeline/publish/__init__.py +++ b/openpype/pipeline/publish/__init__.py @@ -1,20 +1,26 @@ from .publish_plugins import ( PublishValidationError, + PublishXmlValidationError, KnownPublishError, - OpenPypePyblishPluginMixin + OpenPypePyblishPluginMixin, ) from .lib import ( DiscoverResult, - publish_plugins_discover + publish_plugins_discover, + load_help_content_from_plugin, + load_help_content_from_filepath, ) __all__ = ( "PublishValidationError", + "PublishXmlValidationError", "KnownPublishError", "OpenPypePyblishPluginMixin", "DiscoverResult", - "publish_plugins_discover" + "publish_plugins_discover", + "load_help_content_from_plugin", + "load_help_content_from_filepath", ) diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py index d3e4ec8a02..739b2c8806 100644 --- a/openpype/pipeline/publish/lib.py +++ b/openpype/pipeline/publish/lib.py @@ -1,6 +1,8 @@ import os import sys import types +import inspect +import xml.etree.ElementTree import six import pyblish.plugin @@ -28,6 +30,60 @@ class DiscoverResult: self.plugins[item] = value +class HelpContent: + def __init__(self, title, description, detail=None): + self.title = title + self.description = description + self.detail = detail + + +def load_help_content_from_filepath(filepath): + """Load help content from xml file. + Xml file may containt errors and warnings. 
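A sketch of the XML layout this loader expects, inferred from the parsing code below (not part of the diff itself); the ids, titles and descriptions are made up:

```python
# Illustrative sketch only: a help XML in the shape load_help_content_from_filepath
# parses, written to a temporary file and loaded back. Content is hypothetical.
import os
import tempfile

from openpype.pipeline.publish import load_help_content_from_filepath

HELP_XML = """<?xml version="1.0" encoding="UTF-8"?>
<root>
    <error id="main">
        <title>Subset name is invalid</title>
        <description>Subset "{subset}" does not match studio conventions.</description>
        <detail>Rename the instance and publish again.</detail>
    </error>
    <warning id="missing_thumbnail">
        <title>No thumbnail</title>
        <description>Instance has no thumbnail representation.</description>
    </warning>
</root>
"""

with tempfile.TemporaryDirectory() as tmp_dir:
    filepath = os.path.join(tmp_dir, "validate_subset_name.xml")
    with open(filepath, "w") as stream:
        stream.write(HELP_XML)

    content = load_help_content_from_filepath(filepath)
    print(content["errors"]["main"].title)                    # Subset name is invalid
    print(content["warnings"]["missing_thumbnail"].description)
```

In practice the companion helper resolves such a file from a `help` folder next to the plugin module (named after the plugin file), which is what `PublishXmlValidationError(plugin, message, key="main", formatting_data={...})` relies on.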
+ """ + errors = {} + warnings = {} + output = { + "errors": errors, + "warnings": warnings + } + if not os.path.exists(filepath): + return output + tree = xml.etree.ElementTree.parse(filepath) + root = tree.getroot() + for child in root: + child_id = child.attrib.get("id") + if child_id is None: + continue + + # Make sure ID is string + child_id = str(child_id) + + title = child.find("title").text + description = child.find("description").text + detail_node = child.find("detail") + detail = None + if detail_node is not None: + detail = detail_node.text + if child.tag == "error": + errors[child_id] = HelpContent(title, description, detail) + elif child.tag == "warning": + warnings[child_id] = HelpContent(title, description, detail) + return output + + +def load_help_content_from_plugin(plugin): + cls = plugin + if not inspect.isclass(plugin): + cls = plugin.__class__ + plugin_filepath = inspect.getfile(cls) + plugin_dir = os.path.dirname(plugin_filepath) + basename = os.path.splitext(os.path.basename(plugin_filepath))[0] + filename = basename + ".xml" + filepath = os.path.join(plugin_dir, "help", filename) + return load_help_content_from_filepath(filepath) + + def publish_plugins_discover(paths=None): """Find and return available pyblish plug-ins diff --git a/openpype/pipeline/publish/publish_plugins.py b/openpype/pipeline/publish/publish_plugins.py index b60b9f43a7..bce64ec709 100644 --- a/openpype/pipeline/publish/publish_plugins.py +++ b/openpype/pipeline/publish/publish_plugins.py @@ -1,3 +1,6 @@ +from .lib import load_help_content_from_plugin + + class PublishValidationError(Exception): """Validation error happened during publishing. @@ -12,13 +15,34 @@ class PublishValidationError(Exception): description(str): Detailed description of an error. It is possible to use Markdown syntax. """ - def __init__(self, message, title=None, description=None): + def __init__(self, message, title=None, description=None, detail=None): self.message = message self.title = title or "< Missing title >" self.description = description or message + self.detail = detail super(PublishValidationError, self).__init__(message) +class PublishXmlValidationError(PublishValidationError): + def __init__( + self, plugin, message, key=None, formatting_data=None + ): + if key is None: + key = "main" + + if not formatting_data: + formatting_data = {} + result = load_help_content_from_plugin(plugin) + content_obj = result["errors"][key] + description = content_obj.description.format(**formatting_data) + detail = content_obj.detail + if detail: + detail = detail.format(**formatting_data) + super(PublishXmlValidationError, self).__init__( + message, content_obj.title, description, detail + ) + + class KnownPublishError(Exception): """Publishing crashed because of known error. diff --git a/openpype/plugin.py b/openpype/plugin.py index 45c9a08209..3569936dac 100644 --- a/openpype/plugin.py +++ b/openpype/plugin.py @@ -3,79 +3,12 @@ import os import pyblish.api import avalon.api -from openpype.lib import get_subset_name - ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05 ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1 ValidateSceneOrder = pyblish.api.ValidatorOrder + 0.2 ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3 -class PypeCreatorMixin: - """Helper to override avalon's default class methods. - - Mixin class must be used as first in inheritance order to override methods. 
- """ - dynamic_subset_keys = [] - - @classmethod - def get_dynamic_data( - cls, variant, task_name, asset_id, project_name, host_name - ): - """Return dynamic data for current Creator plugin. - - By default return keys from `dynamic_subset_keys` attribute as mapping - to keep formatted template unchanged. - - ``` - dynamic_subset_keys = ["my_key"] - --- - output = { - "my_key": "{my_key}" - } - ``` - - Dynamic keys may override default Creator keys (family, task, asset, - ...) but do it wisely if you need. - - All of keys will be converted into 3 variants unchanged, capitalized - and all upper letters. Because of that are all keys lowered. - - This method can be modified to prefill some values just keep in mind it - is class method. - - Returns: - dict: Fill data for subset name template. - """ - dynamic_data = {} - for key in cls.dynamic_subset_keys: - key = key.lower() - dynamic_data[key] = "{" + key + "}" - return dynamic_data - - @classmethod - def get_subset_name( - cls, variant, task_name, asset_id, project_name, host_name=None - ): - dynamic_data = cls.get_dynamic_data( - variant, task_name, asset_id, project_name, host_name - ) - - return get_subset_name( - cls.family, - variant, - task_name, - asset_id, - project_name, - host_name, - dynamic_data=dynamic_data - ) - - -class Creator(PypeCreatorMixin, avalon.api.Creator): - pass - - class ContextPlugin(pyblish.api.ContextPlugin): def process(cls, *args, **kwargs): super(ContextPlugin, cls).process(cls, *args, **kwargs) diff --git a/openpype/plugins/load/add_site.py b/openpype/plugins/load/add_site.py index 09448d553c..95001691e2 100644 --- a/openpype/plugins/load/add_site.py +++ b/openpype/plugins/load/add_site.py @@ -1,8 +1,8 @@ -from avalon import api from openpype.modules import ModulesManager +from openpype.pipeline import load -class AddSyncSite(api.Loader): +class AddSyncSite(load.LoaderPlugin): """Add sync site to representation""" representations = ["*"] families = ["*"] diff --git a/openpype/plugins/load/copy_file.py b/openpype/plugins/load/copy_file.py index eaf5853035..60db094cfb 100644 --- a/openpype/plugins/load/copy_file.py +++ b/openpype/plugins/load/copy_file.py @@ -1,7 +1,8 @@ -from avalon import api, style +from openpype.style import get_default_entity_icon_color +from openpype.pipeline import load -class CopyFile(api.Loader): +class CopyFile(load.LoaderPlugin): """Copy the published file to be pasted at the desired location""" representations = ["*"] @@ -10,7 +11,7 @@ class CopyFile(api.Loader): label = "Copy File" order = 10 icon = "copy" - color = style.colors.default + color = get_default_entity_icon_color() def load(self, context, name=None, namespace=None, data=None): self.log.info("Added copy to clipboard: {0}".format(self.fname)) diff --git a/openpype/plugins/load/copy_file_path.py b/openpype/plugins/load/copy_file_path.py index 2041c79f6d..565d8d1ff1 100644 --- a/openpype/plugins/load/copy_file_path.py +++ b/openpype/plugins/load/copy_file_path.py @@ -1,9 +1,9 @@ import os -from avalon import api +from openpype.pipeline import load -class CopyFilePath(api.Loader): +class CopyFilePath(load.LoaderPlugin): """Copy published file path to clipboard""" representations = ["*"] families = ["*"] diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py index b2f2c88975..692acdec02 100644 --- a/openpype/plugins/load/delete_old_versions.py +++ b/openpype/plugins/load/delete_old_versions.py @@ -5,16 +5,17 @@ import uuid import clique from pymongo import UpdateOne import 
ftrack_api +import qargparse from Qt import QtWidgets, QtCore -from avalon import api, style -from avalon.vendor import qargparse from avalon.api import AvalonMongoDB -import avalon.pipeline +from openpype import style +from openpype.pipeline import load +from openpype.lib import StringTemplate from openpype.api import Anatomy -class DeleteOldVersions(api.SubsetLoader): +class DeleteOldVersions(load.SubsetLoaderPlugin): """Deletes specific number of old version""" is_multiple_contexts_compatible = True @@ -89,16 +90,12 @@ class DeleteOldVersions(api.SubsetLoader): try: context = representation["context"] context["root"] = anatomy.roots - path = avalon.pipeline.format_template_with_optional_keys( - context, template - ) + path = str(StringTemplate.format_template(template, context)) if "frame" in context: context["frame"] = self.sequence_splitter - sequence_path = os.path.normpath( - avalon.pipeline.format_template_with_optional_keys( - context, template - ) - ) + sequence_path = os.path.normpath(str( + StringTemplate.format_template(template, context) + )) except KeyError: # Template references unavailable data diff --git a/openpype/plugins/load/delivery.py b/openpype/plugins/load/delivery.py index 1037d6dc16..04080053e3 100644 --- a/openpype/plugins/load/delivery.py +++ b/openpype/plugins/load/delivery.py @@ -3,9 +3,9 @@ from collections import defaultdict from Qt import QtWidgets, QtCore, QtGui -from avalon import api from avalon.api import AvalonMongoDB +from openpype.pipeline import load from openpype.api import Anatomy, config from openpype import resources, style @@ -20,7 +20,7 @@ from openpype.lib.delivery import ( ) -class Delivery(api.SubsetLoader): +class Delivery(load.SubsetLoaderPlugin): """Export selected versions to folder structure from Template""" is_multiple_contexts_compatible = True diff --git a/openpype/plugins/load/open_djv.py b/openpype/plugins/load/open_djv.py index 4b0e8411c8..273c77c93f 100644 --- a/openpype/plugins/load/open_djv.py +++ b/openpype/plugins/load/open_djv.py @@ -1,6 +1,6 @@ import os -from avalon import api from openpype.api import ApplicationManager +from openpype.pipeline import load def existing_djv_path(): @@ -13,7 +13,8 @@ def existing_djv_path(): return djv_list -class OpenInDJV(api.Loader): + +class OpenInDJV(load.LoaderPlugin): """Open Image Sequence with system default""" djv_list = existing_djv_path() diff --git a/openpype/plugins/load/open_file.py b/openpype/plugins/load/open_file.py index 4133a64eb3..f21cd07c7f 100644 --- a/openpype/plugins/load/open_file.py +++ b/openpype/plugins/load/open_file.py @@ -2,7 +2,7 @@ import sys import os import subprocess -from avalon import api +from openpype.pipeline import load def open(filepath): @@ -15,7 +15,7 @@ def open(filepath): subprocess.call(('xdg-open', filepath)) -class Openfile(api.Loader): +class Openfile(load.LoaderPlugin): """Open Image Sequence with system default""" families = ["render2d"] diff --git a/openpype/plugins/load/remove_site.py b/openpype/plugins/load/remove_site.py index aedb5d1f2f..adffec9986 100644 --- a/openpype/plugins/load/remove_site.py +++ b/openpype/plugins/load/remove_site.py @@ -1,8 +1,8 @@ -from avalon import api from openpype.modules import ModulesManager +from openpype.pipeline import load -class RemoveSyncSite(api.Loader): +class RemoveSyncSite(load.LoaderPlugin): """Remove sync site and its files on representation""" representations = ["*"] families = ["*"] diff --git a/openpype/plugins/publish/collect_time.py b/openpype/plugins/publish/collect_time.py index 
e0adc7dfc3..7a005cc9cb 100644 --- a/openpype/plugins/publish/collect_time.py +++ b/openpype/plugins/publish/collect_time.py @@ -1,12 +1,12 @@ import pyblish.api -from avalon import api +from openpype.lib import get_formatted_current_time class CollectTime(pyblish.api.ContextPlugin): """Store global time at the time of publish""" label = "Collect Current Time" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.499 def process(self, context): - context.data["time"] = api.time() + context.data["time"] = get_formatted_current_time() diff --git a/openpype/plugins/publish/extract_jpeg_exr.py b/openpype/plugins/publish/extract_jpeg_exr.py index d80b7bb9c3..468ed96199 100644 --- a/openpype/plugins/publish/extract_jpeg_exr.py +++ b/openpype/plugins/publish/extract_jpeg_exr.py @@ -34,7 +34,12 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): self.log.info("subset {}".format(instance.data['subset'])) # skip crypto passes. - if 'crypto' in instance.data['subset']: + # TODO: This is just a quick fix and has its own side-effects - it is + # affecting every subset name with `crypto` in its name. + # This must be solved properly, maybe using tags on + # representation that can be determined much earlier and + # with better precision. + if 'crypto' in instance.data['subset'].lower(): self.log.info("Skipping crypto passes.") return diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index 5f286a53e6..3ecea1f8bd 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -13,13 +13,12 @@ import pyblish.api import openpype.api from openpype.lib import ( get_ffmpeg_tool_path, - ffprobe_streams, + get_ffprobe_streams, path_to_subprocess_arg, should_convert_for_ffmpeg, convert_for_ffmpeg, - get_transcode_temp_directory, get_transcode_temp_directory ) import speedcopy @@ -104,9 +103,10 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("Matching profile: \"{}\"".format(json.dumps(profile))) + subset_name = instance.data.get("subset") instance_families = self.families_from_instance(instance) - filtered_outputs = self.filter_outputs_by_families( - profile, instance_families + filtered_outputs = self.filter_output_defs( + profile, subset_name, instance_families ) # Store `filename_suffix` to save arguments profile_outputs = [] @@ -747,10 +747,14 @@ class ExtractReview(pyblish.api.InstancePlugin): collections = clique.assemble(files)[0] assert len(collections) == 1, "Multiple collections found." 
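The gap check revised just below walks every frame of the requested range instead of only comparing the collection's first and last index. A small sketch of the clique behaviour it relies on (not part of the diff itself); the file names are made up:

```python
# Illustrative sketch only: how clique groups a frame sequence and exposes the
# information the gap check uses. File names are hypothetical.
import clique

files = [
    "shot010_beauty.1001.exr",
    "shot010_beauty.1002.exr",
    "shot010_beauty.1004.exr",  # frame 1003 is missing
]
collections, remainder = clique.assemble(files)
col = collections[0]

print(sorted(col.indexes))          # [1001, 1002, 1004]
print(col.is_contiguous())          # False
print(sorted(col.holes().indexes))  # [1003]

# The updated check simply asks whether every frame of the requested range
# is present in `col.indexes`:
start_frame, end_frame = 1001, 1004
has_gap = any(
    frame not in col.indexes
    for frame in range(start_frame, end_frame + 1)
)
print(has_gap)  # True
```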
col = collections[0] - # do nothing if sequence is complete - if list(col.indexes)[0] == start_frame and \ - list(col.indexes)[-1] == end_frame and \ - col.is_contiguous(): + + # do nothing if no gap is found in input range + not_gap = True + for fr in range(start_frame, end_frame + 1): + if fr not in col.indexes: + not_gap = False + + if not_gap: return [] holes = col.holes() @@ -972,16 +976,12 @@ class ExtractReview(pyblish.api.InstancePlugin): def get_letterbox_filters( self, letter_box_def, - input_res_ratio, - output_res_ratio, - pixel_aspect, - scale_factor_by_width, - scale_factor_by_height + output_width, + output_height ): output = [] ratio = letter_box_def["ratio"] - state = letter_box_def["state"] fill_color = letter_box_def["fill_color"] f_red, f_green, f_blue, f_alpha = fill_color fill_color_hex = "{0:0>2X}{1:0>2X}{2:0>2X}".format( @@ -997,75 +997,129 @@ class ExtractReview(pyblish.api.InstancePlugin): ) line_color_alpha = float(l_alpha) / 255 - if input_res_ratio == output_res_ratio: - ratio /= pixel_aspect - elif input_res_ratio < output_res_ratio: - ratio /= scale_factor_by_width - else: - ratio /= scale_factor_by_height + # test ratios and define if pillar or letter boxes + output_ratio = float(output_width) / float(output_height) + self.log.debug("Output ratio: {} LetterBox ratio: {}".format( + output_ratio, ratio + )) + pillar = output_ratio > ratio + need_mask = format(output_ratio, ".3f") != format(ratio, ".3f") + if not need_mask: + return [] - if state == "letterbox": + if not pillar: if fill_color_alpha > 0: top_box = ( - "drawbox=0:0:iw:round((ih-(iw*(1/{})))/2):t=fill:c={}@{}" - ).format(ratio, fill_color_hex, fill_color_alpha) + "drawbox=0:0:{width}" + ":round(({height}-({width}/{ratio}))/2)" + ":t=fill:c={color}@{alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + color=fill_color_hex, + alpha=fill_color_alpha + ) bottom_box = ( - "drawbox=0:ih-round((ih-(iw*(1/{0})))/2)" - ":iw:round((ih-(iw*(1/{0})))/2):t=fill:c={1}@{2}" - ).format(ratio, fill_color_hex, fill_color_alpha) - + "drawbox=0" + ":{height}-round(({height}-({width}/{ratio}))/2)" + ":{width}" + ":round(({height}-({width}/{ratio}))/2)" + ":t=fill:c={color}@{alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + color=fill_color_hex, + alpha=fill_color_alpha + ) output.extend([top_box, bottom_box]) if line_color_alpha > 0 and line_thickness > 0: top_line = ( - "drawbox=0:round((ih-(iw*(1/{0})))/2)-{1}:iw:{1}:" - "t=fill:c={2}@{3}" + "drawbox=0" + ":round(({height}-({width}/{ratio}))/2)-{l_thick}" + ":{width}:{l_thick}:t=fill:c={l_color}@{l_alpha}" ).format( - ratio, line_thickness, line_color_hex, line_color_alpha + width=output_width, + height=output_height, + ratio=ratio, + l_thick=line_thickness, + l_color=line_color_hex, + l_alpha=line_color_alpha ) bottom_line = ( - "drawbox=0:ih-round((ih-(iw*(1/{})))/2)" - ":iw:{}:t=fill:c={}@{}" + "drawbox=0" + ":{height}-round(({height}-({width}/{ratio}))/2)" + ":{width}:{l_thick}:t=fill:c={l_color}@{l_alpha}" ).format( - ratio, line_thickness, line_color_hex, line_color_alpha + width=output_width, + height=output_height, + ratio=ratio, + l_thick=line_thickness, + l_color=line_color_hex, + l_alpha=line_color_alpha ) output.extend([top_line, bottom_line]) - elif state == "pillar": + else: if fill_color_alpha > 0: left_box = ( - "drawbox=0:0:round((iw-(ih*{}))/2):ih:t=fill:c={}@{}" - ).format(ratio, fill_color_hex, fill_color_alpha) + "drawbox=0:0" + ":round(({width}-({height}*{ratio}))/2)" + ":{height}" 
+ ":t=fill:c={color}@{alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + color=fill_color_hex, + alpha=fill_color_alpha + ) right_box = ( - "drawbox=iw-round((iw-(ih*{0}))/2))" - ":0:round((iw-(ih*{0}))/2):ih:t=fill:c={1}@{2}" - ).format(ratio, fill_color_hex, fill_color_alpha) - + "drawbox=" + "{width}-round(({width}-({height}*{ratio}))/2)" + ":0" + ":round(({width}-({height}*{ratio}))/2)" + ":{height}" + ":t=fill:c={color}@{alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + color=fill_color_hex, + alpha=fill_color_alpha + ) output.extend([left_box, right_box]) if line_color_alpha > 0 and line_thickness > 0: left_line = ( - "drawbox=round((iw-(ih*{}))/2):0:{}:ih:t=fill:c={}@{}" + "drawbox=round(({width}-({height}*{ratio}))/2)" + ":0:{l_thick}:{height}:t=fill:c={l_color}@{l_alpha}" ).format( - ratio, line_thickness, line_color_hex, line_color_alpha + width=output_width, + height=output_height, + ratio=ratio, + l_thick=line_thickness, + l_color=line_color_hex, + l_alpha=line_color_alpha ) right_line = ( - "drawbox=iw-round((iw-(ih*{}))/2))" - ":0:{}:ih:t=fill:c={}@{}" + "drawbox={width}-round(({width}-({height}*{ratio}))/2)" + ":0:{l_thick}:{height}:t=fill:c={l_color}@{l_alpha}" ).format( - ratio, line_thickness, line_color_hex, line_color_alpha + width=output_width, + height=output_height, + ratio=ratio, + l_thick=line_thickness, + l_color=line_color_hex, + l_alpha=line_color_alpha ) - output.extend([left_line, right_line]) - else: - raise ValueError( - "Letterbox state \"{}\" is not recognized".format(state) - ) - return output def rescaling_filters(self, temp_data, output_def, new_repre): @@ -1079,10 +1133,24 @@ class ExtractReview(pyblish.api.InstancePlugin): """ filters = [] + # if reformat input video file is already reforamted from upstream + reformat_in_baking = bool("reformated" in new_repre["tags"]) + self.log.debug("reformat_in_baking: `{}`".format(reformat_in_baking)) + + # Get instance data + pixel_aspect = temp_data["pixel_aspect"] + + if reformat_in_baking: + self.log.debug(( + "Using resolution from input. It is already " + "reformated from upstream process" + )) + pixel_aspect = 1 + # NOTE Skipped using instance's resolution full_input_path_single_file = temp_data["full_input_path_single_file"] try: - streams = ffprobe_streams( + streams = get_ffprobe_streams( full_input_path_single_file, self.log ) except Exception as exc: @@ -1096,12 +1164,26 @@ class ExtractReview(pyblish.api.InstancePlugin): # - there may be a better way (checking `codec_type`?) input_width = None input_height = None + output_width = None + output_height = None for stream in streams: if "width" in stream and "height" in stream: input_width = int(stream["width"]) input_height = int(stream["height"]) break + # Get instance data + pixel_aspect = temp_data["pixel_aspect"] + + if reformat_in_baking: + self.log.debug(( + "Using resolution from input. 
It is already " + "reformated from upstream process" + )) + pixel_aspect = 1 + output_width = input_width + output_height = input_height + # Raise exception of any stream didn't define input resolution if input_width is None: raise AssertionError(( @@ -1110,8 +1192,8 @@ class ExtractReview(pyblish.api.InstancePlugin): # NOTE Setting only one of `width` or `heigth` is not allowed # - settings value can't have None but has value of 0 - output_width = output_def.get("width") or None - output_height = output_def.get("height") or None + output_width = output_def.get("width") or output_width or None + output_height = output_def.get("height") or output_height or None # Overscal color overscan_color_value = "black" @@ -1141,12 +1223,6 @@ class ExtractReview(pyblish.api.InstancePlugin): output_width = input_width output_height = input_height - letter_box_def = output_def["letter_box"] - letter_box_enabled = letter_box_def["enabled"] - - # Get instance data - pixel_aspect = temp_data["pixel_aspect"] - # Make sure input width and height is not an odd number input_width_is_odd = bool(input_width % 2 != 0) input_height_is_odd = bool(input_height % 2 != 0) @@ -1205,6 +1281,9 @@ class ExtractReview(pyblish.api.InstancePlugin): "Output resolution is {}x{}".format(output_width, output_height) ) + letter_box_def = output_def["letter_box"] + letter_box_enabled = letter_box_def["enabled"] + # Skip processing if resolution is same as input's and letterbox is # not set if ( @@ -1248,25 +1327,6 @@ class ExtractReview(pyblish.api.InstancePlugin): "scale_factor_by_height: `{}`".format(scale_factor_by_height) ) - # letter_box - if letter_box_enabled: - filters.extend([ - "scale={}x{}:flags=lanczos".format( - output_width, output_height - ), - "setsar=1" - ]) - filters.extend( - self.get_letterbox_filters( - letter_box_def, - input_res_ratio, - output_res_ratio, - pixel_aspect, - scale_factor_by_width, - scale_factor_by_height - ) - ) - # scaling none square pixels and 1920 width if ( input_height != output_height @@ -1305,6 +1365,16 @@ class ExtractReview(pyblish.api.InstancePlugin): "setsar=1" ]) + # letter_box + if letter_box_enabled: + filters.extend( + self.get_letterbox_filters( + letter_box_def, + output_width, + output_height + ) + ) + new_repre["resolutionWidth"] = output_width new_repre["resolutionHeight"] = output_height @@ -1586,7 +1656,7 @@ class ExtractReview(pyblish.api.InstancePlugin): return True return False - def filter_outputs_by_families(self, profile, families): + def filter_output_defs(self, profile, subset_name, families): """Return outputs matching input instance families. Output definitions without families filter are marked as valid. 
@@ -1619,6 +1689,24 @@ class ExtractReview(pyblish.api.InstancePlugin): if not self.families_filter_validation(families, families_filters): continue + # Subsets name filters + subset_filters = [ + subset_filter + for subset_filter in output_filters.get("subsets", []) + # Skip empty strings + if subset_filter + ] + if subset_name and subset_filters: + match = False + for subset_filter in subset_filters: + compiled = re.compile(subset_filter) + if compiled.search(subset_name): + match = True + break + + if not match: + continue + filtered_outputs[filename_suffix] = output_def return filtered_outputs diff --git a/openpype/plugins/publish/extract_review_slate.py b/openpype/plugins/publish/extract_review_slate.py index 7002168cdb..505ae75169 100644 --- a/openpype/plugins/publish/extract_review_slate.py +++ b/openpype/plugins/publish/extract_review_slate.py @@ -1,7 +1,14 @@ import os import openpype.api -import openpype.lib import pyblish +from openpype.lib import ( + path_to_subprocess_arg, + get_ffmpeg_tool_path, + get_ffprobe_data, + get_ffprobe_streams, + get_ffmpeg_codec_args, + get_ffmpeg_format_args, +) class ExtractReviewSlate(openpype.api.Extractor): @@ -14,7 +21,7 @@ class ExtractReviewSlate(openpype.api.Extractor): families = ["slate", "review"] match = pyblish.api.Subset - hosts = ["nuke", "maya", "shell"] + hosts = ["nuke", "shell"] optional = True def process(self, instance): @@ -24,9 +31,9 @@ class ExtractReviewSlate(openpype.api.Extractor): suffix = "_slate" slate_path = inst_data.get("slateFrame") - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") - slate_streams = openpype.lib.ffprobe_streams(slate_path, self.log) + slate_streams = get_ffprobe_streams(slate_path, self.log) # Try to find first stream with defined 'width' and 'height' # - this is to avoid order of streams where audio can be as first # - there may be a better way (checking `codec_type`?)+ @@ -59,13 +66,44 @@ class ExtractReviewSlate(openpype.api.Extractor): if "slate-frame" not in p_tags: continue + # get repre file + stagingdir = repre["stagingDir"] + input_file = "{0}".format(repre["files"]) + input_path = os.path.join( + os.path.normpath(stagingdir), repre["files"]) + self.log.debug("__ input_path: {}".format(input_path)) + + video_streams = get_ffprobe_streams( + input_path, self.log + ) + + # Try to find first stream with defined 'width' and 'height' + # - this is to avoid order of streams where audio can be as first + # - there may be a better way (checking `codec_type`?) 
+ input_width = None + input_height = None + for stream in video_streams: + if "width" in stream and "height" in stream: + input_width = int(stream["width"]) + input_height = int(stream["height"]) + break + + # Raise exception of any stream didn't define input resolution + if input_width is None: + raise AssertionError(( + "FFprobe couldn't read resolution from input file: \"{}\"" + ).format(input_path)) + # values are set in ExtractReview if use_legacy_code: to_width = inst_data["reviewToWidth"] to_height = inst_data["reviewToHeight"] else: - to_width = repre["resolutionWidth"] - to_height = repre["resolutionHeight"] + to_width = input_width + to_height = input_height + + self.log.debug("to_width: `{}`".format(to_width)) + self.log.debug("to_height: `{}`".format(to_height)) # defining image ratios resolution_ratio = ( @@ -94,15 +132,9 @@ class ExtractReviewSlate(openpype.api.Extractor): _remove_at_end = [] - stagingdir = repre["stagingDir"] - input_file = "{0}".format(repre["files"]) - ext = os.path.splitext(input_file)[1] output_file = input_file.replace(ext, "") + suffix + ext - input_path = os.path.join( - os.path.normpath(stagingdir), repre["files"]) - self.log.debug("__ input_path: {}".format(input_path)) _remove_at_end.append(input_path) output_path = os.path.join( @@ -118,7 +150,7 @@ class ExtractReviewSlate(openpype.api.Extractor): else: input_args.extend(repre["outputDef"].get('input', [])) input_args.append("-loop 1 -i {}".format( - openpype.lib.path_to_subprocess_arg(slate_path) + path_to_subprocess_arg(slate_path) )) input_args.extend([ "-r {}".format(fps), @@ -132,7 +164,7 @@ class ExtractReviewSlate(openpype.api.Extractor): output_args.extend(repre["_profile"].get('output', [])) else: # Codecs are copied from source for whole input - codec_args = self.codec_args(repre) + codec_args = self._get_codec_args(repre) output_args.extend(codec_args) # make sure colors are correct @@ -191,12 +223,12 @@ class ExtractReviewSlate(openpype.api.Extractor): slate_v_path = slate_path.replace(".png", ext) output_args.append( - openpype.lib.path_to_subprocess_arg(slate_v_path) + path_to_subprocess_arg(slate_v_path) ) _remove_at_end.append(slate_v_path) slate_args = [ - openpype.lib.path_to_subprocess_arg(ffmpeg_path), + path_to_subprocess_arg(ffmpeg_path), " ".join(input_args), " ".join(output_args) ] @@ -306,7 +338,7 @@ class ExtractReviewSlate(openpype.api.Extractor): return vf_back - def codec_args(self, repre): + def _get_codec_args(self, repre): """Detect possible codec arguments from representation.""" codec_args = [] @@ -320,7 +352,7 @@ class ExtractReviewSlate(openpype.api.Extractor): try: # Get information about input file via ffprobe tool - streams = openpype.lib.ffprobe_streams(full_input_path, self.log) + ffprobe_data = get_ffprobe_data(full_input_path, self.log) except Exception: self.log.warning( "Could not get codec data from input.", @@ -328,29 +360,14 @@ class ExtractReviewSlate(openpype.api.Extractor): ) return codec_args - # Try to find first stream that is not an audio - no_audio_stream = None - for stream in streams: - if stream.get("codec_type") != "audio": - no_audio_stream = stream - break + source_ffmpeg_cmd = repre.get("ffmpeg_cmd") + codec_args.extend( + get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd) + ) + codec_args.extend( + get_ffmpeg_codec_args( + ffprobe_data, source_ffmpeg_cmd, logger=self.log + ) + ) - if no_audio_stream is None: - self.log.warning(( - "Couldn't find stream that is not an audio from file \"{}\"" - ).format(full_input_path)) - return 
codec_args - - codec_name = no_audio_stream.get("codec_name") - if codec_name: - codec_args.append("-codec:v {}".format(codec_name)) - - profile_name = no_audio_stream.get("profile") - if profile_name: - profile_name = profile_name.replace(" ", "_").lower() - codec_args.append("-profile:v {}".format(profile_name)) - - pix_fmt = no_audio_stream.get("pix_fmt") - if pix_fmt: - codec_args.append("-pix_fmt {}".format(pix_fmt)) return codec_args diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index ec836954e8..60245314f4 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -7,7 +7,7 @@ import shutil from pymongo import InsertOne, ReplaceOne import pyblish.api from avalon import api, io, schema -from avalon.vendor import filelink +from openpype.lib import create_hard_link class IntegrateHeroVersion(pyblish.api.InstancePlugin): @@ -518,7 +518,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): # First try hardlink and copy if paths are cross drive try: - filelink.create(src_path, dst_path, filelink.HARDLINK) + create_hard_link(src_path, dst_path) # Return when successful return diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py index 6e0940d459..6ca6125cb2 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_new.py @@ -12,13 +12,16 @@ import shutil from pymongo import DeleteOne, InsertOne import pyblish.api from avalon import io -from avalon.api import format_template_with_optional_keys -from avalon.vendor import filelink import openpype.api from datetime import datetime # from pype.modules import ModulesManager from openpype.lib.profiles_filtering import filter_profiles -from openpype.lib import prepare_template_data +from openpype.lib import ( + prepare_template_data, + create_hard_link, + StringTemplate, + TemplateUnsolved +) # this is needed until speedcopy for linux is fixed if sys.platform == "win32": @@ -192,11 +195,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "short": task_code } - else: + elif "task" in anatomy_data: # Just set 'task_name' variable to context task task_name = anatomy_data["task"]["name"] task_type = anatomy_data["task"]["type"] + else: + task_name = None + task_type = None + # Fill family in anatomy data anatomy_data["family"] = instance.data.get("family") @@ -730,7 +737,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.critical("An unexpected error occurred.") six.reraise(*sys.exc_info()) - filelink.create(src, dst, filelink.HARDLINK) + create_hard_link(src, dst) def get_subset(self, asset, instance): subset_name = instance.data["subset"] @@ -816,8 +823,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # - is there a chance that task name is not filled in anatomy # data? # - should we use context task in that case? 
- task_name = instance.data["anatomyData"]["task"]["name"] - task_type = instance.data["anatomyData"]["task"]["type"] + anatomy_data = instance.data["anatomyData"] + task_name = None + task_type = None + if "task" in anatomy_data: + task_name = anatomy_data["task"]["name"] + task_type = anatomy_data["task"]["type"] filtering_criteria = { "families": instance.data["family"], "hosts": instance.context.data["hostName"], @@ -844,9 +855,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): fill_pairs = prepare_template_data(fill_pairs) try: - filled_template = \ - format_template_with_optional_keys(fill_pairs, template) - except KeyError: + filled_template = StringTemplate.format_strict_template( + template, fill_pairs + ) + except (KeyError, TemplateUnsolved): keys = [] if fill_pairs: keys = fill_pairs.keys() diff --git a/openpype/resources/images/spinner-200.svg b/openpype/resources/images/spinner-200.svg new file mode 100644 index 0000000000..73d8ae2890 --- /dev/null +++ b/openpype/resources/images/spinner-200.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/openpype/scripts/fusion_switch_shot.py b/openpype/scripts/fusion_switch_shot.py index 26f5356336..6db8ff36a8 100644 --- a/openpype/scripts/fusion_switch_shot.py +++ b/openpype/scripts/fusion_switch_shot.py @@ -4,13 +4,15 @@ import sys import logging # Pipeline imports -from avalon import api, io, pipeline +from avalon import api, io import avalon.fusion # Config imports import openpype.lib as pype import openpype.hosts.fusion.lib as fusion_lib +from openpype.lib.avalon_context import get_workdir_from_session + log = logging.getLogger("Update Slap Comp") self = sys.modules[__name__] @@ -44,16 +46,6 @@ def _format_version_folder(folder): return version_folder -def _get_work_folder(session): - """Convenience function to get the work folder path of the current asset""" - - # Get new filename, create path based on asset and work template - template_work = self._project["config"]["template"]["work"] - work_path = pipeline._format_work_template(template_work, session) - - return os.path.normpath(work_path) - - def _get_fusion_instance(): fusion = getattr(sys.modules["__main__"], "fusion", None) if fusion is None: @@ -72,7 +64,7 @@ def _format_filepath(session): asset = session["AVALON_ASSET"] # Save updated slap comp - work_path = _get_work_folder(session) + work_path = get_workdir_from_session(session) walk_to_dir = os.path.join(work_path, "scenes", "slapcomp") slapcomp_dir = os.path.abspath(walk_to_dir) @@ -103,7 +95,7 @@ def _update_savers(comp, session): None """ - new_work = _get_work_folder(session) + new_work = get_workdir_from_session(session) renders = os.path.join(new_work, "renders") version_folder = _format_version_folder(renders) renders_version = os.path.join(renders, version_folder) diff --git a/openpype/scripts/otio_burnin.py b/openpype/scripts/otio_burnin.py index abf69645b7..1f57891b84 100644 --- a/openpype/scripts/otio_burnin.py +++ b/openpype/scripts/otio_burnin.py @@ -5,11 +5,17 @@ import subprocess import platform import json import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins -import openpype.lib + +from openpype.lib import ( + get_ffmpeg_tool_path, + get_ffmpeg_codec_args, + get_ffmpeg_format_args, + convert_ffprobe_fps_value, +) -ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") -ffprobe_path = openpype.lib.get_ffmpeg_tool_path("ffprobe") +ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") +ffprobe_path = get_ffmpeg_tool_path("ffprobe") FFMPEG = ( @@ -50,176 +56,6 @@ def _get_ffprobe_data(source): 
return json.loads(out) -def get_fps(str_value): - if str_value == "0/0": - print("WARNING: Source has \"r_frame_rate\" value set to \"0/0\".") - return "Unknown" - - items = str_value.split("/") - if len(items) == 1: - fps = float(items[0]) - - elif len(items) == 2: - fps = float(items[0]) / float(items[1]) - - # Check if fps is integer or float number - if int(fps) == fps: - fps = int(fps) - - return str(fps) - - -def _prores_codec_args(stream_data, source_ffmpeg_cmd): - output = [] - - tags = stream_data.get("tags") or {} - encoder = tags.get("encoder") or "" - if encoder.endswith("prores_ks"): - codec_name = "prores_ks" - - elif encoder.endswith("prores_aw"): - codec_name = "prores_aw" - - else: - codec_name = "prores" - - output.extend(["-codec:v", codec_name]) - - pix_fmt = stream_data.get("pix_fmt") - if pix_fmt: - output.extend(["-pix_fmt", pix_fmt]) - - # Rest of arguments is prores_kw specific - if codec_name == "prores_ks": - codec_tag_to_profile_map = { - "apco": "proxy", - "apcs": "lt", - "apcn": "standard", - "apch": "hq", - "ap4h": "4444", - "ap4x": "4444xq" - } - codec_tag_str = stream_data.get("codec_tag_string") - if codec_tag_str: - profile = codec_tag_to_profile_map.get(codec_tag_str) - if profile: - output.extend(["-profile:v", profile]) - - return output - - -def _h264_codec_args(stream_data, source_ffmpeg_cmd): - output = ["-codec:v", "h264"] - - # Use arguments from source if are available source arguments - if source_ffmpeg_cmd: - copy_args = ( - "-crf", - "-b:v", "-vb", - "-minrate", "-minrate:", - "-maxrate", "-maxrate:", - "-bufsize", "-bufsize:" - ) - args = source_ffmpeg_cmd.split(" ") - for idx, arg in enumerate(args): - if arg in copy_args: - output.extend([arg, args[idx + 1]]) - - pix_fmt = stream_data.get("pix_fmt") - if pix_fmt: - output.extend(["-pix_fmt", pix_fmt]) - - output.extend(["-intra"]) - output.extend(["-g", "1"]) - - return output - - -def _dnxhd_codec_args(stream_data, source_ffmpeg_cmd): - output = ["-codec:v", "dnxhd"] - - # Use source profile (profiles in metadata are not usable in args directly) - profile = stream_data.get("profile") or "" - # Lower profile and replace space with underscore - cleaned_profile = profile.lower().replace(" ", "_") - dnx_profiles = { - "dnxhd", - "dnxhr_lb", - "dnxhr_sq", - "dnxhr_hq", - "dnxhr_hqx", - "dnxhr_444" - } - if cleaned_profile in dnx_profiles: - output.extend(["-profile:v", cleaned_profile]) - - pix_fmt = stream_data.get("pix_fmt") - if pix_fmt: - output.extend(["-pix_fmt", pix_fmt]) - - # Use arguments from source if are available source arguments - if source_ffmpeg_cmd: - copy_args = ( - "-b:v", "-vb", - ) - args = source_ffmpeg_cmd.split(" ") - for idx, arg in enumerate(args): - if arg in copy_args: - output.extend([arg, args[idx + 1]]) - - output.extend(["-g", "1"]) - return output - - -def _mxf_format_args(ffprobe_data, source_ffmpeg_cmd): - input_format = ffprobe_data["format"] - format_tags = input_format.get("tags") or {} - product_name = format_tags.get("product_name") or "" - output = [] - if "opatom" in product_name.lower(): - output.extend(["-f", "mxf_opatom"]) - return output - - -def get_format_args(ffprobe_data, source_ffmpeg_cmd): - input_format = ffprobe_data.get("format") or {} - if input_format.get("format_name") == "mxf": - return _mxf_format_args(ffprobe_data, source_ffmpeg_cmd) - return [] - - -def get_codec_args(ffprobe_data, source_ffmpeg_cmd): - stream_data = ffprobe_data["streams"][0] - codec_name = stream_data.get("codec_name") - # Codec "prores" - if codec_name == 
"prores": - return _prores_codec_args(stream_data, source_ffmpeg_cmd) - - # Codec "h264" - if codec_name == "h264": - return _h264_codec_args(stream_data, source_ffmpeg_cmd) - - # Coded DNxHD - if codec_name == "dnxhd": - return _dnxhd_codec_args(stream_data, source_ffmpeg_cmd) - - output = [] - if codec_name: - output.extend(["-codec:v", codec_name]) - - bit_rate = stream_data.get("bit_rate") - if bit_rate: - output.extend(["-b:v", bit_rate]) - - pix_fmt = stream_data.get("pix_fmt") - if pix_fmt: - output.extend(["-pix_fmt", pix_fmt]) - - output.extend(["-g", "1"]) - - return output - - class ModifiedBurnins(ffmpeg_burnins.Burnins): ''' This is modification of OTIO FFmpeg Burnin adapter. @@ -610,7 +446,9 @@ def burnins_from_data( data["resolution_height"] = stream.get("height", MISSING_KEY_VALUE) if "fps" not in data: - data["fps"] = get_fps(stream.get("r_frame_rate", "0/0")) + data["fps"] = convert_ffprobe_fps_value( + stream.get("r_frame_rate", "0/0") + ) # Check frame start and add expression if is available if frame_start is not None: @@ -721,10 +559,10 @@ def burnins_from_data( else: ffmpeg_args.extend( - get_format_args(burnin.ffprobe_data, source_ffmpeg_cmd) + get_ffmpeg_format_args(burnin.ffprobe_data, source_ffmpeg_cmd) ) ffmpeg_args.extend( - get_codec_args(burnin.ffprobe_data, source_ffmpeg_cmd) + get_ffmpeg_codec_args(burnin.ffprobe_data, source_ffmpeg_cmd) ) # Use arguments from source if are available source arguments if source_ffmpeg_cmd: diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index 7aff6d0b30..5bb0a4022e 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -46,7 +46,7 @@ "enabled": true, "optional": false, "active": true, - "tile_assembler_plugin": "oiio", + "tile_assembler_plugin": "OpenPypeTileAssembler", "use_published": true, "asset_dependencies": true, "group": "none", diff --git a/openpype/settings/defaults/project_settings/flame.json b/openpype/settings/defaults/project_settings/flame.json index 6fb6f55528..c7188b10b5 100644 --- a/openpype/settings/defaults/project_settings/flame.json +++ b/openpype/settings/defaults/project_settings/flame.json @@ -27,6 +27,8 @@ "ext": "exr", "xml_preset_file": "OpenEXR (16-bit fp DWAA).xml", "xml_preset_dir": "", + "export_type": "File Sequence", + "ignore_comment_attrs": false, "colorspace_out": "ACES - ACEScg", "representation_add_range": true, "representation_tags": [] diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index 01831efad1..89bb41a164 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -193,6 +193,11 @@ "Administrator" ] }, + "fill_workfile_attribute": { + "enabled": false, + "custom_attribute_key": "", + "role_list": [] + }, "seed_project": { "enabled": true, "role_list": [ diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index f08bee8b2d..30a71b044a 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -87,7 +87,8 @@ "render", "review", "ftrack" - ] + ], + "subsets": [] }, "overscan_crop": "", "overscan_color": [ @@ -107,7 +108,6 @@ "letter_box": { "enabled": false, "ratio": 0.0, - "state": "letterbox", "fill_color": [ 0, 0, diff 
--git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index c25f416562..19d9a95595 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -385,6 +385,10 @@ "optional": true, "active": true }, + "ValidateRigOutSetNodeIds": { + "enabled": true, + "allow_history_only": false + }, "ValidateCameraAttributes": { "enabled": false, "optional": true, @@ -502,6 +506,12 @@ } } }, + "ExtractMayaSceneRaw": { + "enabled": true, + "add_for_families": [ + "layout" + ] + }, "ExtractCameraAlembic": { "enabled": true, "optional": true, diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 5a819e6904..6992fb6e3e 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -116,13 +116,42 @@ "baking": { "filter": { "task_types": [], - "families": [] + "families": [], + "sebsets": [] }, "extension": "mov", "viewer_process_override": "", "bake_viewer_process": true, "bake_viewer_input_process": true, - "add_tags": [] + "add_tags": [], + "reformat_node_add": false, + "reformat_node_config": [ + { + "type": "string", + "name": "type", + "value": "to format" + }, + { + "type": "string", + "name": "format", + "value": "HD_1080" + }, + { + "type": "string", + "name": "filter", + "value": "Lanczos6" + }, + { + "type": "bool", + "name": "black_outside", + "value": true + }, + { + "type": "bool", + "name": "pbb", + "value": false + } + ] } } }, diff --git a/openpype/settings/entities/base_entity.py b/openpype/settings/entities/base_entity.py index b5bc44640b..76700d605d 100644 --- a/openpype/settings/entities/base_entity.py +++ b/openpype/settings/entities/base_entity.py @@ -28,6 +28,10 @@ class BaseEntity: def __init__(self, schema_data, *args, **kwargs): self.schema_data = schema_data + tooltip = None + if schema_data: + tooltip = schema_data.get("tooltip") + self.tooltip = tooltip # Entity id self._id = uuid4() diff --git a/openpype/settings/entities/dict_conditional.py b/openpype/settings/entities/dict_conditional.py index 963fd406ed..19f326aea7 100644 --- a/openpype/settings/entities/dict_conditional.py +++ b/openpype/settings/entities/dict_conditional.py @@ -584,8 +584,9 @@ class DictConditionalEntity(ItemEntity): self.enum_entity.update_default_value(enum_value) for children_by_key in self.non_gui_children.values(): + value_copy = copy.deepcopy(value) for key, child_obj in children_by_key.items(): - child_value = value.get(key, NOT_SET) + child_value = value_copy.get(key, NOT_SET) child_obj.update_default_value(child_value) def update_studio_value(self, value): @@ -620,8 +621,9 @@ class DictConditionalEntity(ItemEntity): self.enum_entity.update_studio_value(enum_value) for children_by_key in self.non_gui_children.values(): + value_copy = copy.deepcopy(value) for key, child_obj in children_by_key.items(): - child_value = value.get(key, NOT_SET) + child_value = value_copy.get(key, NOT_SET) child_obj.update_studio_value(child_value) def update_project_value(self, value): @@ -656,8 +658,9 @@ class DictConditionalEntity(ItemEntity): self.enum_entity.update_project_value(enum_value) for children_by_key in self.non_gui_children.values(): + value_copy = copy.deepcopy(value) for key, child_obj in children_by_key.items(): - child_value = value.get(key, NOT_SET) + child_value = value_copy.get(key, NOT_SET) 
child_obj.update_project_value(child_value) def _discard_changes(self, on_change_trigger): diff --git a/openpype/settings/entities/schemas/README.md b/openpype/settings/entities/schemas/README.md index dd7601c017..fbfd699937 100644 --- a/openpype/settings/entities/schemas/README.md +++ b/openpype/settings/entities/schemas/README.md @@ -14,6 +14,7 @@ - this keys is not allowed for all inputs as they may have not reason for that - key is validated, can be only once in hierarchy but is not required - currently there are `system settings` and `project settings` +- all entities can have set `"tooltip"` key with description which will be shown in UI ## Inner schema - GUI schemas are huge json files, to be able to split whole configuration into multiple schema there's type `schema` diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index 788b1d8575..e6097a2b14 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -103,7 +103,7 @@ "DraftTileAssembler": "Draft Tile Assembler" }, { - "oiio": "Open Image IO" + "OpenPypeTileAssembler": "Open Image IO" } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json index dc88d11f61..e352f8b132 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json @@ -171,6 +171,35 @@ "label": "XML preset folder (optional)", "type": "text" }, + { + "key": "export_type", + "label": "Eport clip type", + "type": "enum", + "default": "File Sequence", + "enum_items": [ + { + "Movie": "Movie" + }, + { + "File Sequence": "File Sequence" + }, + { + "Sequence Publish": "Sequence Publish" + } + ] + + }, + { + "type": "separator" + }, + { + "type": "boolean", + "key": "ignore_comment_attrs", + "label": "Ignore attributes parsed from a segment comments" + }, + { + "type": "separator" + }, { "key": "colorspace_out", "label": "Output color (imageio)", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index 6d0e2693d4..cb59e9d67e 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -589,6 +589,34 @@ } ] }, + { + "type": "dict", + "key": "fill_workfile_attribute", + "label": "Fill workfile Custom attribute", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Custom attribute must be Text type added to Task entity type" + }, + { + "type": "text", + "key": "custom_attribute_key", + "label": "Custom attribute key" + }, + { + "type": "list", + "key": "role_list", + "label": "Roles", + "object_type": "text" + } + ] + }, { "type": "dict", "key": "seed_project", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json index 3bec19c3d0..9f142bad09 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json +++ 
b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json @@ -195,6 +195,9 @@ { "aces_1.1": "aces_1.1" }, + { + "aces_1.2": "aces_1.2" + }, { "custom": "custom" } @@ -457,7 +460,7 @@ { "type": "text", "key": "colourPolicy", - "label": "Colour Policy" + "label": "Colour Policy (name or path)" }, { "type": "text", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index e608e9ff63..12043d4205 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -291,6 +291,15 @@ "label": "Families", "type": "list", "object_type": "text" + }, + { + "type": "separator" + }, + { + "key": "subsets", + "label": "Subsets", + "type": "list", + "object_type": "text" } ] }, @@ -366,19 +375,6 @@ "minimum": 0, "maximum": 10000 }, - { - "key": "state", - "label": "Type", - "type": "enum", - "enum_items": [ - { - "letterbox": "Letterbox" - }, - { - "pillar": "Pillar" - } - ] - }, { "type": "color", "label": "Fill Color", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index bf7b8a22e7..2e5bc64e1c 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -514,6 +514,26 @@ "label": "Validate Rig Controllers" } ] + }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "ValidateRigOutSetNodeIds", + "label": "Validate Rig Out Set Node Ids", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "allow_history_only", + "label": "Allow history only" + } + ] } ] }, @@ -547,6 +567,30 @@ "type": "schema", "name": "schema_maya_capture" }, + { + "type": "dict", + "collapsible": true, + "key": "ExtractMayaSceneRaw", + "label": "Maya Scene (Raw)", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Add loaded instances to those published families:" + }, + { + "key": "add_for_families", + "label": "Families", + "type": "list", + "object_type": "text" + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json index 39390f355a..1636a8d700 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json @@ -195,6 +195,12 @@ "label": "Families", "type": "list", "object_type": "text" + }, + { + "key": "sebsets", + "label": "Subsets", + "type": "list", + "object_type": "text" } ] }, @@ -226,6 +232,121 @@ "label": "Add additional tags to representations", "type": "list", "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "boolean", + "key": "reformat_node_add", + "label": "Add Reformat Node", + "default": false + }, + { + "type": "collapsible-wrap", + "label": "Reformat Node Knobs", + "collapsible": true, + "collapsed": false, + "children": [ + { + 
"type": "list", + "key": "reformat_node_config", + "object_type": { + "type": "dict-conditional", + "enum_key": "type", + "enum_label": "Type", + "enum_children": [ + { + "key": "string", + "label": "String", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "text", + "key": "value", + "label": "Value" + } + ] + }, + { + "key": "bool", + "label": "Boolean", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "boolean", + "key": "value", + "label": "Value" + } + ] + }, + { + "key": "number", + "label": "Number", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "list-strict", + "key": "value", + "label": "Value", + "object_types": [ + { + "type": "number", + "key": "number", + "default": 1, + "decimal": 4 + } + ] + } + + ] + }, + { + "key": "list_numbers", + "label": "2 Numbers", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "list-strict", + "key": "value", + "label": "Value", + "object_types": [ + { + "type": "number", + "key": "x", + "default": 1, + "decimal": 4 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4 + } + ] + } + ] + } + ] + } + } + ] } ] } diff --git a/openpype/style/__init__.py b/openpype/style/__init__.py index ea88b342ee..b2a1a4ce6c 100644 --- a/openpype/style/__init__.py +++ b/openpype/style/__init__.py @@ -7,13 +7,19 @@ from openpype import resources from .color_defs import parse_color - -_STYLESHEET_CACHE = None -_FONT_IDS = None - current_dir = os.path.dirname(os.path.abspath(__file__)) +class _Cache: + stylesheet = None + font_ids = None + + tools_icon_color = None + default_entity_icon_color = None + disabled_entity_icon_color = None + deprecated_entity_font_color = None + + def get_style_image_path(image_name): # All filenames are lowered image_name = image_name.lower() @@ -125,21 +131,19 @@ def _load_font(): """Load and register fonts into Qt application.""" from Qt import QtGui - global _FONT_IDS - # Check if font ids are still loaded - if _FONT_IDS is not None: - for font_id in tuple(_FONT_IDS): + if _Cache.font_ids is not None: + for font_id in tuple(_Cache.font_ids): font_families = QtGui.QFontDatabase.applicationFontFamilies( font_id ) # Reset font if font id is not available if not font_families: - _FONT_IDS = None + _Cache.font_ids = None break - if _FONT_IDS is None: - _FONT_IDS = [] + if _Cache.font_ids is None: + _Cache.font_ids = [] fonts_dirpath = os.path.join(current_dir, "fonts") font_dirs = [] font_dirs.append(os.path.join(fonts_dirpath, "Noto_Sans")) @@ -157,7 +161,7 @@ def _load_font(): continue full_path = os.path.join(font_dir, filename) font_id = QtGui.QFontDatabase.addApplicationFont(full_path) - _FONT_IDS.append(font_id) + _Cache.font_ids.append(font_id) font_families = QtGui.QFontDatabase.applicationFontFamilies( font_id ) @@ -167,11 +171,11 @@ def _load_font(): def load_stylesheet(): """Load and return OpenPype Qt stylesheet.""" - global _STYLESHEET_CACHE - if _STYLESHEET_CACHE is None: - _STYLESHEET_CACHE = _load_stylesheet() + + if _Cache.stylesheet is None: + _Cache.stylesheet = _load_stylesheet() _load_font() - return _STYLESHEET_CACHE + return _Cache.stylesheet def get_app_icon_path(): @@ -182,3 +186,63 @@ def get_app_icon_path(): def app_icon_path(): # Backwards compatibility return get_app_icon_path() + + +def get_default_tools_icon_color(): + """Default color used in tool icons. + + Color must be possible to parse using QColor. 
+ + Returns: + str: Color as a string. + """ + if _Cache.tools_icon_color is None: + color_data = get_colors_data() + _Cache.tools_icon_color = color_data["icon-tools"] + return _Cache.tools_icon_color + + +def get_default_entity_icon_color(): + """Default color of entities icons. + + Color must be possible to parse using QColor. + + Returns: + str: Color as a string. + """ + if _Cache.default_entity_icon_color is None: + color_data = get_colors_data() + _Cache.default_entity_icon_color = color_data["icon-entity-default"] + return _Cache.default_entity_icon_color + + +def get_disabled_entity_icon_color(): + """Default color of entities icons. + + TODO: Find more suitable function name. + + Color must be possible to parse using QColor. + + Returns: + str: Color as a string. + """ + if _Cache.disabled_entity_icon_color is None: + color_data = get_colors_data() + _Cache.disabled_entity_icon_color = color_data["icon-entity-disabled"] + return _Cache.disabled_entity_icon_color + + +def get_deprecated_entity_font_color(): + """Font color for deprecated entities. + + Color must be possible to parse using QColor. + + Returns: + str: Color as a string. + """ + if _Cache.deprecated_entity_font_color is None: + color_data = get_colors_data() + _Cache.deprecated_entity_font_color = ( + color_data["font-entity-deprecated"] + ) + return _Cache.deprecated_entity_font_color diff --git a/openpype/style/data.json b/openpype/style/data.json index b8ccef8bbd..a76a77015b 100644 --- a/openpype/style/data.json +++ b/openpype/style/data.json @@ -56,6 +56,12 @@ "delete-btn-bg": "rgb(201, 54, 54)", "delete-btn-bg-disabled": "rgba(201, 54, 54, 64)", + "icon-tools": "#ffffff", + "icon-alert-tools": "#AA5050", + "icon-entity-default": "#bfccd6", + "icon-entity-disabled": "#808080", + "font-entity-deprecated": "#666666", + "tab-widget": { "bg": "#21252B", "bg-selected": "#434a56", diff --git a/openpype/style/style.css b/openpype/style/style.css index ba40b780ab..df83600973 100644 --- a/openpype/style/style.css +++ b/openpype/style/style.css @@ -836,6 +836,19 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { } /* New Create/Publish UI */ +#CreateDialogHelpButton { + background: rgba(255, 255, 255, 31); + border-top-right-radius: 0; + border-bottom-right-radius: 0; + font-size: 10pt; + font-weight: bold; + padding: 3px 3px 3px 3px; +} + +#CreateDialogHelpButton:hover { + background: rgba(255, 255, 255, 63); +} + #PublishLogConsole { font-family: "Noto Sans Mono"; } @@ -1266,6 +1279,12 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { font-size: 15pt; font-weight: 750; } +#ChooseProjectFrame { + border-radius: 10px; +} +#ChooseProjectView { + background: transparent; +} /* Globally used names */ #Separator { diff --git a/openpype/tests/test_avalon_plugin_presets.py b/openpype/tests/test_avalon_plugin_presets.py index ec21385d23..f1b1a94713 100644 --- a/openpype/tests/test_avalon_plugin_presets.py +++ b/openpype/tests/test_avalon_plugin_presets.py @@ -1,8 +1,9 @@ import avalon.api as api import openpype +from openpype.pipeline import LegacyCreator -class MyTestCreator(api.Creator): +class MyTestCreator(LegacyCreator): my_test_property = "A" @@ -26,8 +27,8 @@ def test_avalon_plugin_presets(monkeypatch, printer): openpype.install() api.register_host(Test()) - api.register_plugin(api.Creator, MyTestCreator) - plugins = api.discover(api.Creator) + api.register_plugin(LegacyCreator, MyTestCreator) + plugins = api.discover(LegacyCreator) printer("Test if we got our test plugin") assert 
MyTestCreator in plugins for p in plugins: diff --git a/openpype/tests/test_lib_restructuralization.py b/openpype/tests/test_lib_restructuralization.py index d0461e55fb..94080e550d 100644 --- a/openpype/tests/test_lib_restructuralization.py +++ b/openpype/tests/test_lib_restructuralization.py @@ -24,7 +24,7 @@ def test_backward_compatibility(printer): from openpype.lib import get_hierarchy from openpype.lib import get_linked_assets from openpype.lib import get_latest_version - from openpype.lib import ffprobe_streams + from openpype.lib import get_ffprobe_streams from openpype.hosts.fusion.lib import switch_item diff --git a/openpype/tools/adobe_webserver/app.py b/openpype/tools/adobe_webserver/app.py new file mode 100644 index 0000000000..b79d6c6c60 --- /dev/null +++ b/openpype/tools/adobe_webserver/app.py @@ -0,0 +1,237 @@ +"""This Webserver tool is python 3 specific. + +Don't import directly to avalon.tools or implementation of Python 2 hosts +would break. +""" +import os +import logging +import urllib +import threading +import asyncio +import socket + +from aiohttp import web + +from wsrpc_aiohttp import ( + WSRPCClient +) + +from avalon import api + +log = logging.getLogger(__name__) + + +class WebServerTool: + """ + Basic POC implementation of asychronic websocket RPC server. + Uses class in external_app_1.py to mimic implementation for single + external application. + 'test_client' folder contains two test implementations of client + """ + _instance = None + + def __init__(self): + WebServerTool._instance = self + + self.client = None + self.handlers = {} + self.on_stop_callbacks = [] + + port = None + host_name = "localhost" + websocket_url = os.getenv("WEBSOCKET_URL") + if websocket_url: + parsed = urllib.parse.urlparse(websocket_url) + port = parsed.port + host_name = parsed.netloc.split(":")[0] + if not port: + port = 8098 # fallback + + self.port = port + self.host_name = host_name + + self.app = web.Application() + + # add route with multiple methods for single "external app" + self.webserver_thread = WebServerThread(self, self.port) + + def add_route(self, *args, **kwargs): + self.app.router.add_route(*args, **kwargs) + + def add_static(self, *args, **kwargs): + self.app.router.add_static(*args, **kwargs) + + def start_server(self): + if self.webserver_thread and not self.webserver_thread.is_alive(): + self.webserver_thread.start() + + def stop_server(self): + self.stop() + + async def send_context_change(self, host): + """ + Calls running webserver to inform about context change + + Used when new PS/AE should be triggered, + but one already running, without + this publish would point to old context. + """ + client = WSRPCClient(os.getenv("WEBSOCKET_URL"), + loop=asyncio.get_event_loop()) + await client.connect() + + project = api.Session["AVALON_PROJECT"] + asset = api.Session["AVALON_ASSET"] + task = api.Session["AVALON_TASK"] + log.info("Sending context change to {}-{}-{}".format(project, + asset, + task)) + + await client.call('{}.set_context'.format(host), + project=project, asset=asset, task=task) + await client.close() + + def port_occupied(self, host_name, port): + """ + Check if 'url' is already occupied. + + This could mean, that app is already running and we are trying open it + again. In that case, use existing running webserver. + Check here is easier than capturing exception from thread. 
+ """ + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + result = True + try: + sock.bind((host_name, port)) + result = False + except: + print("Port is in use") + + return result + + def call(self, func): + log.debug("websocket.call {}".format(func)) + future = asyncio.run_coroutine_threadsafe( + func, + self.webserver_thread.loop + ) + result = future.result() + return result + + @staticmethod + def get_instance(): + if WebServerTool._instance is None: + WebServerTool() + return WebServerTool._instance + + @property + def is_running(self): + if not self.webserver_thread: + return False + return self.webserver_thread.is_running + + def stop(self): + if not self.is_running: + return + try: + log.debug("Stopping websocket server") + self.webserver_thread.is_running = False + self.webserver_thread.stop() + except Exception: + log.warning( + "Error has happened during Killing websocket server", + exc_info=True + ) + + def thread_stopped(self): + for callback in self.on_stop_callbacks: + callback() + + +class WebServerThread(threading.Thread): + """ Listener for websocket rpc requests. + + It would be probably better to "attach" this to main thread (as for + example Harmony needs to run something on main thread), but currently + it creates separate thread and separate asyncio event loop + """ + def __init__(self, module, port): + super(WebServerThread, self).__init__() + + self.is_running = False + self.port = port + self.module = module + self.loop = None + self.runner = None + self.site = None + self.tasks = [] + + def run(self): + self.is_running = True + + try: + log.info("Starting web server") + self.loop = asyncio.new_event_loop() # create new loop for thread + asyncio.set_event_loop(self.loop) + + self.loop.run_until_complete(self.start_server()) + + websocket_url = "ws://localhost:{}/ws".format(self.port) + + log.debug( + "Running Websocket server on URL: \"{}\"".format(websocket_url) + ) + + asyncio.ensure_future(self.check_shutdown(), loop=self.loop) + self.loop.run_forever() + except Exception: + self.is_running = False + log.warning( + "Websocket Server service has failed", exc_info=True + ) + raise + finally: + self.loop.close() # optional + + self.is_running = False + self.module.thread_stopped() + log.info("Websocket server stopped") + + async def start_server(self): + """ Starts runner and TCPsite """ + self.runner = web.AppRunner(self.module.app) + await self.runner.setup() + self.site = web.TCPSite(self.runner, 'localhost', self.port) + await self.site.start() + + def stop(self): + """Sets is_running flag to false, 'check_shutdown' shuts server down""" + self.is_running = False + + async def check_shutdown(self): + """ Future that is running and checks if server should be running + periodically. 
+ """ + while self.is_running: + while self.tasks: + task = self.tasks.pop(0) + log.debug("waiting for task {}".format(task)) + await task + log.debug("returned value {}".format(task.result)) + + await asyncio.sleep(0.5) + + log.debug("Starting shutdown") + await self.site.stop() + log.debug("Site stopped") + await self.runner.cleanup() + log.debug("Runner stopped") + tasks = [task for task in asyncio.all_tasks() if + task is not asyncio.current_task()] + list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks + results = await asyncio.gather(*tasks, return_exceptions=True) + log.debug(f'Finished awaiting cancelled tasks, results: {results}...') + await self.loop.shutdown_asyncgens() + # to really make sure everything else has time to stop + await asyncio.sleep(0.07) + self.loop.stop() diff --git a/openpype/tools/adobe_webserver/readme.txt b/openpype/tools/adobe_webserver/readme.txt new file mode 100644 index 0000000000..06cf140fc4 --- /dev/null +++ b/openpype/tools/adobe_webserver/readme.txt @@ -0,0 +1,12 @@ +Adobe webserver +--------------- +Aiohttp (Asyncio) based websocket server used for communication with host +applications, currently only for Adobe (but could be used for any non python +DCC which has websocket client). + +This webserver is started in spawned Python process that opens DCC during +its launch, waits for connection from DCC and handles communication going +forward. Server is closed before Python process is killed. + +(Different from `openpype/modules/webserver` as that one is running in Tray, +this one is running in spawn Python process.) \ No newline at end of file diff --git a/openpype/tools/assetcreator/__init__.py b/openpype/tools/assetcreator/__init__.py deleted file mode 100644 index 3b88ebe984..0000000000 --- a/openpype/tools/assetcreator/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ - -from .app import ( - show, - cli -) - -__all__ = [ - "show", - "cli", -] diff --git a/openpype/tools/assetcreator/__main__.py b/openpype/tools/assetcreator/__main__.py deleted file mode 100644 index d77bc585c5..0000000000 --- a/openpype/tools/assetcreator/__main__.py +++ /dev/null @@ -1,5 +0,0 @@ -from . import cli - -if __name__ == '__main__': - import sys - sys.exit(cli(sys.argv[1:])) diff --git a/openpype/tools/assetcreator/app.py b/openpype/tools/assetcreator/app.py deleted file mode 100644 index 1d332d647e..0000000000 --- a/openpype/tools/assetcreator/app.py +++ /dev/null @@ -1,652 +0,0 @@ -import os -import sys -from subprocess import Popen - -import ftrack_api -from Qt import QtWidgets, QtCore -from openpype.api import get_current_project_settings -from openpype.tools.utils.lib import qt_app_context -from avalon import io, api, style, schema -from . 
import widget, model - -module = sys.modules[__name__] -module.window = None - - -class Window(QtWidgets.QDialog): - """Asset creator interface - - """ - - def __init__(self, parent=None, context=None): - super(Window, self).__init__(parent) - self.context = context - project_name = io.active_project() - self.setWindowTitle("Asset creator ({0})".format(project_name)) - self.setFocusPolicy(QtCore.Qt.StrongFocus) - self.setAttribute(QtCore.Qt.WA_DeleteOnClose) - - # Validators - self.valid_parent = False - - self.session = None - - # assets widget - assets_widget = QtWidgets.QWidget() - assets_widget.setContentsMargins(0, 0, 0, 0) - assets_layout = QtWidgets.QVBoxLayout(assets_widget) - assets = widget.AssetWidget() - assets.view.setSelectionMode(assets.view.ExtendedSelection) - assets_layout.addWidget(assets) - - # Outlink - label_outlink = QtWidgets.QLabel("Outlink:") - input_outlink = QtWidgets.QLineEdit() - input_outlink.setReadOnly(True) - input_outlink.setStyleSheet("background-color: #333333;") - checkbox_outlink = QtWidgets.QCheckBox("Use outlink") - # Parent - label_parent = QtWidgets.QLabel("*Parent:") - input_parent = QtWidgets.QLineEdit() - input_parent.setReadOnly(True) - input_parent.setStyleSheet("background-color: #333333;") - - # Name - label_name = QtWidgets.QLabel("*Name:") - input_name = QtWidgets.QLineEdit() - input_name.setPlaceholderText("") - - # Asset Build - label_assetbuild = QtWidgets.QLabel("Asset Build:") - combo_assetbuilt = QtWidgets.QComboBox() - - # Task template - label_task_template = QtWidgets.QLabel("Task template:") - combo_task_template = QtWidgets.QComboBox() - - # Info widget - info_widget = QtWidgets.QWidget() - info_widget.setContentsMargins(10, 10, 10, 10) - info_layout = QtWidgets.QVBoxLayout(info_widget) - - # Inputs widget - inputs_widget = QtWidgets.QWidget() - inputs_widget.setContentsMargins(0, 0, 0, 0) - - inputs_layout = QtWidgets.QFormLayout(inputs_widget) - inputs_layout.addRow(label_outlink, input_outlink) - inputs_layout.addRow(None, checkbox_outlink) - inputs_layout.addRow(label_parent, input_parent) - inputs_layout.addRow(label_name, input_name) - inputs_layout.addRow(label_assetbuild, combo_assetbuilt) - inputs_layout.addRow(label_task_template, combo_task_template) - - # Add button - btns_widget = QtWidgets.QWidget() - btns_widget.setContentsMargins(0, 0, 0, 0) - btn_layout = QtWidgets.QHBoxLayout(btns_widget) - btn_create_asset = QtWidgets.QPushButton("Create asset") - btn_create_asset.setToolTip( - "Creates all necessary components for asset" - ) - checkbox_app = None - if self.context is not None: - checkbox_app = QtWidgets.QCheckBox("Open {}".format( - self.context.capitalize()) - ) - btn_layout.addWidget(checkbox_app) - btn_layout.addWidget(btn_create_asset) - - task_view = QtWidgets.QTreeView() - task_view.setIndentation(0) - task_model = model.TasksModel() - task_view.setModel(task_model) - - info_layout.addWidget(inputs_widget) - info_layout.addWidget(task_view) - info_layout.addWidget(btns_widget) - - # Body - body = QtWidgets.QSplitter() - body.setContentsMargins(0, 0, 0, 0) - body.setSizePolicy(QtWidgets.QSizePolicy.Expanding, - QtWidgets.QSizePolicy.Expanding) - body.setOrientation(QtCore.Qt.Horizontal) - body.addWidget(assets_widget) - body.addWidget(info_widget) - body.setStretchFactor(0, 100) - body.setStretchFactor(1, 150) - - # statusbar - message = QtWidgets.QLabel() - message.setFixedHeight(20) - - statusbar = QtWidgets.QWidget() - layout = QtWidgets.QHBoxLayout(statusbar) - layout.setContentsMargins(0, 0, 0, 0) 
- layout.addWidget(message) - - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(body) - layout.addWidget(statusbar) - - self.data = { - "label": { - "message": message, - }, - "view": { - "tasks": task_view - }, - "model": { - "assets": assets, - "tasks": task_model - }, - "inputs": { - "outlink": input_outlink, - "outlink_cb": checkbox_outlink, - "parent": input_parent, - "name": input_name, - "assetbuild": combo_assetbuilt, - "tasktemplate": combo_task_template, - "open_app": checkbox_app - }, - "buttons": { - "create_asset": btn_create_asset - } - } - - # signals - btn_create_asset.clicked.connect(self.create_asset) - assets.selection_changed.connect(self.on_asset_changed) - input_name.textChanged.connect(self.on_asset_name_change) - checkbox_outlink.toggled.connect(self.on_outlink_checkbox_change) - combo_task_template.currentTextChanged.connect( - self.on_task_template_changed - ) - if self.context is not None: - checkbox_app.toggled.connect(self.on_app_checkbox_change) - # on start - self.on_start() - - self.resize(600, 500) - - self.echo("Connected to project: {0}".format(project_name)) - - def open_app(self): - if self.context == 'maya': - Popen("maya") - else: - message = QtWidgets.QMessageBox(self) - message.setWindowTitle("App is not set") - message.setIcon(QtWidgets.QMessageBox.Critical) - message.show() - - def on_start(self): - project_name = io.Session['AVALON_PROJECT'] - project_query = 'Project where full_name is "{}"'.format(project_name) - if self.session is None: - session = ftrack_api.Session() - self.session = session - else: - session = self.session - ft_project = session.query(project_query).one() - schema_name = ft_project['project_schema']['name'] - # Load config - schemas_items = get_current_project_settings().get('ftrack', {}).get( - 'project_schemas', {} - ) - # Get info if it is silo project - self.silos = io.distinct("silo") - if self.silos and None in self.silos: - self.silos = None - - key = "default" - if schema_name in schemas_items: - key = schema_name - - self.config_data = schemas_items[key] - - # set outlink - input_outlink = self.data['inputs']['outlink'] - checkbox_outlink = self.data['inputs']['outlink_cb'] - outlink_text = io.Session.get('AVALON_ASSET', '') - checkbox_outlink.setChecked(True) - if outlink_text == '': - outlink_text = '< No context >' - checkbox_outlink.setChecked(False) - checkbox_outlink.hide() - input_outlink.setText(outlink_text) - - # load asset build types - self.load_assetbuild_types() - - # Load task templates - self.load_task_templates() - self.data["model"]["assets"].refresh() - self.on_asset_changed() - - def create_asset(self): - name_input = self.data['inputs']['name'] - name = name_input.text() - test_name = name.replace(' ', '') - error_message = None - message = QtWidgets.QMessageBox(self) - message.setWindowTitle("Some errors have occurred") - message.setIcon(QtWidgets.QMessageBox.Critical) - # TODO: show error messages on any error - if self.valid_parent is not True and test_name == '': - error_message = "Name is not set and Parent is not selected" - elif self.valid_parent is not True: - error_message = "Parent is not selected" - elif test_name == '': - error_message = "Name is not set" - - if error_message is not None: - message.setText(error_message) - message.show() - return - - test_name_exists = io.find({ - 'type': 'asset', - 'name': name - }) - existing_assets = [x for x in test_name_exists] - if len(existing_assets) > 0: - message.setText("Entered Asset name is occupied") - message.show() - return - 
- checkbox_app = self.data['inputs']['open_app'] - if checkbox_app is not None and checkbox_app.isChecked() is True: - task_view = self.data["view"]["tasks"] - task_model = self.data["model"]["tasks"] - try: - index = task_view.selectedIndexes()[0] - task_name = task_model.itemData(index)[0] - except Exception: - message.setText("Please select task") - message.show() - return - - # Get ftrack session - if self.session is None: - session = ftrack_api.Session() - self.session = session - else: - session = self.session - - # Get Ftrack project entity - project_name = io.Session['AVALON_PROJECT'] - project_query = 'Project where full_name is "{}"'.format(project_name) - try: - ft_project = session.query(project_query).one() - except Exception: - message.setText("Ftrack project was not found") - message.show() - return - - # Get Ftrack entity of parent - ft_parent = None - assets_model = self.data["model"]["assets"] - selected = assets_model.get_selected_assets() - parent = io.find_one({"_id": selected[0], "type": "asset"}) - asset_id = parent.get('data', {}).get('ftrackId', None) - asset_entity_type = parent.get('data', {}).get('entityType', None) - asset_query = '{} where id is "{}"' - if asset_id is not None and asset_entity_type is not None: - try: - ft_parent = session.query(asset_query.format( - asset_entity_type, asset_id) - ).one() - except Exception: - ft_parent = None - - if ft_parent is None: - ft_parent = self.get_ftrack_asset(parent, ft_project) - - if ft_parent is None: - message.setText("Parent's Ftrack entity was not found") - message.show() - return - - asset_build_combo = self.data['inputs']['assetbuild'] - asset_type_name = asset_build_combo.currentText() - asset_type_query = 'Type where name is "{}"'.format(asset_type_name) - try: - asset_type = session.query(asset_type_query).one() - except Exception: - message.setText("Selected Asset Build type does not exists") - message.show() - return - - for children in ft_parent['children']: - if children['name'] == name: - message.setText("Entered Asset name is occupied") - message.show() - return - - task_template_combo = self.data['inputs']['tasktemplate'] - task_template = task_template_combo.currentText() - tasks = [] - for template in self.config_data['task_templates']: - if template['name'] == task_template: - tasks = template['task_types'] - break - - available_task_types = [] - task_types = ft_project['project_schema']['_task_type_schema'] - for task_type in task_types['types']: - available_task_types.append(task_type['name']) - - not_possible_tasks = [] - for task in tasks: - if task not in available_task_types: - not_possible_tasks.append(task) - - if len(not_possible_tasks) != 0: - message.setText(( - "These Task types weren't found" - " in Ftrack project schema:\n{}").format( - ', '.join(not_possible_tasks)) - ) - message.show() - return - - # Create asset build - asset_build_data = { - 'name': name, - 'project_id': ft_project['id'], - 'parent_id': ft_parent['id'], - 'type': asset_type - } - - new_entity = session.create('AssetBuild', asset_build_data) - - task_data = { - 'project_id': ft_project['id'], - 'parent_id': new_entity['id'] - } - - for task in tasks: - type = session.query('Type where name is "{}"'.format(task)).one() - - task_data['type_id'] = type['id'] - task_data['name'] = task - session.create('Task', task_data) - - av_project = io.find_one({'type': 'project'}) - - hiearchy_items = [] - hiearchy_items.extend(self.get_avalon_parent(parent)) - hiearchy_items.append(parent['name']) - - hierarchy = 
os.path.sep.join(hiearchy_items) - new_asset_data = { - 'ftrackId': new_entity['id'], - 'entityType': new_entity.entity_type, - 'visualParent': parent['_id'], - 'tasks': tasks, - 'parents': hiearchy_items, - 'hierarchy': hierarchy - } - new_asset_info = { - 'parent': av_project['_id'], - 'name': name, - 'schema': "openpype:asset-3.0", - 'type': 'asset', - 'data': new_asset_data - } - - # Backwards compatibility (add silo from parent if is silo project) - if self.silos: - new_asset_info["silo"] = parent["silo"] - - try: - schema.validate(new_asset_info) - except Exception: - message.setText(( - 'Asset information are not valid' - ' to create asset in avalon database' - )) - message.show() - session.rollback() - return - io.insert_one(new_asset_info) - session.commit() - - outlink_cb = self.data['inputs']['outlink_cb'] - if outlink_cb.isChecked() is True: - outlink_input = self.data['inputs']['outlink'] - outlink_name = outlink_input.text() - outlink_asset = io.find_one({ - 'type': 'asset', - 'name': outlink_name - }) - outlink_ft_id = outlink_asset.get('data', {}).get('ftrackId', None) - outlink_entity_type = outlink_asset.get( - 'data', {} - ).get('entityType', None) - if outlink_ft_id is not None and outlink_entity_type is not None: - try: - outlink_entity = session.query(asset_query.format()).one() - except Exception: - outlink_entity = None - - if outlink_entity is None: - outlink_entity = self.get_ftrack_asset( - outlink_asset, ft_project - ) - - if outlink_entity is None: - message.setText("Outlink's Ftrack entity was not found") - message.show() - return - - link_data = { - 'from_id': new_entity['id'], - 'to_id': outlink_entity['id'] - } - session.create('TypedContextLink', link_data) - session.commit() - - if checkbox_app is not None and checkbox_app.isChecked() is True: - origin_asset = api.Session.get('AVALON_ASSET', None) - origin_task = api.Session.get('AVALON_TASK', None) - asset_name = name - task_view = self.data["view"]["tasks"] - task_model = self.data["model"]["tasks"] - try: - index = task_view.selectedIndexes()[0] - except Exception: - message.setText("No task is selected. 
App won't be launched") - message.show() - return - task_name = task_model.itemData(index)[0] - try: - api.update_current_task(task=task_name, asset=asset_name) - self.open_app() - - finally: - if origin_task is not None and origin_asset is not None: - api.update_current_task( - task=origin_task, asset=origin_asset - ) - - message.setWindowTitle("Asset Created") - message.setText("Asset Created successfully") - message.setIcon(QtWidgets.QMessageBox.Information) - message.show() - - def get_ftrack_asset(self, asset, ft_project): - parenthood = [] - parenthood.extend(self.get_avalon_parent(asset)) - parenthood.append(asset['name']) - parenthood = list(reversed(parenthood)) - output_entity = None - ft_entity = ft_project - index = len(parenthood) - 1 - while True: - name = parenthood[index] - found = False - for children in ft_entity['children']: - if children['name'] == name: - ft_entity = children - found = True - break - if found is False: - return None - if index == 0: - output_entity = ft_entity - break - index -= 1 - - return output_entity - - def get_avalon_parent(self, entity): - parent_id = entity['data']['visualParent'] - parents = [] - if parent_id is not None: - parent = io.find_one({'_id': parent_id}) - parents.extend(self.get_avalon_parent(parent)) - parents.append(parent['name']) - return parents - - def echo(self, message): - widget = self.data["label"]["message"] - widget.setText(str(message)) - - QtCore.QTimer.singleShot(5000, lambda: widget.setText("")) - - print(message) - - def load_task_templates(self): - templates = self.config_data.get('task_templates', []) - all_names = [] - for template in templates: - all_names.append(template['name']) - - tt_combobox = self.data['inputs']['tasktemplate'] - tt_combobox.clear() - tt_combobox.addItems(all_names) - - def load_assetbuild_types(self): - types = [] - schemas = self.config_data.get('schemas', []) - for _schema in schemas: - if _schema['object_type'] == 'Asset Build': - types = _schema['task_types'] - break - ab_combobox = self.data['inputs']['assetbuild'] - ab_combobox.clear() - ab_combobox.addItems(types) - - def on_app_checkbox_change(self): - task_model = self.data['model']['tasks'] - app_checkbox = self.data['inputs']['open_app'] - if app_checkbox.isChecked() is True: - task_model.selectable = True - else: - task_model.selectable = False - - def on_outlink_checkbox_change(self): - checkbox_outlink = self.data['inputs']['outlink_cb'] - outlink_input = self.data['inputs']['outlink'] - if checkbox_outlink.isChecked() is True: - outlink_text = io.Session['AVALON_ASSET'] - else: - outlink_text = '< Outlinks won\'t be set >' - - outlink_input.setText(outlink_text) - - def on_task_template_changed(self): - combobox = self.data['inputs']['tasktemplate'] - task_model = self.data['model']['tasks'] - name = combobox.currentText() - tasks = [] - for template in self.config_data['task_templates']: - if template['name'] == name: - tasks = template['task_types'] - break - task_model.set_tasks(tasks) - - def on_asset_changed(self): - """Callback on asset selection changed - - This updates the task view. - - """ - assets_model = self.data["model"]["assets"] - parent_input = self.data['inputs']['parent'] - selected = assets_model.get_selected_assets() - - self.valid_parent = False - if len(selected) > 1: - parent_input.setText('< Please select only one asset! 
>') - elif len(selected) == 1: - if isinstance(selected[0], io.ObjectId): - self.valid_parent = True - asset = io.find_one({"_id": selected[0], "type": "asset"}) - parent_input.setText(asset['name']) - else: - parent_input.setText('< Selected invalid parent(silo) >') - else: - parent_input.setText('< Nothing is selected >') - - self.creatability_check() - - def on_asset_name_change(self): - self.creatability_check() - - def creatability_check(self): - name_input = self.data['inputs']['name'] - name = str(name_input.text()).strip() - creatable = False - if name and self.valid_parent: - creatable = True - - self.data["buttons"]["create_asset"].setEnabled(creatable) - - - -def show(parent=None, debug=False, context=None): - """Display Loader GUI - - Arguments: - debug (bool, optional): Run loader in debug-mode, - defaults to False - - """ - - try: - module.window.close() - del module.window - except (RuntimeError, AttributeError): - pass - - if debug is True: - io.install() - - with qt_app_context(): - window = Window(parent, context) - window.setStyleSheet(style.load_stylesheet()) - window.show() - - module.window = window - - -def cli(args): - import argparse - parser = argparse.ArgumentParser() - parser.add_argument("project") - parser.add_argument("asset") - - args = parser.parse_args(args) - project = args.project - asset = args.asset - io.install() - - api.Session["AVALON_PROJECT"] = project - if asset != '': - api.Session["AVALON_ASSET"] = asset - - show() diff --git a/openpype/tools/assetcreator/model.py b/openpype/tools/assetcreator/model.py deleted file mode 100644 index f84541ca2a..0000000000 --- a/openpype/tools/assetcreator/model.py +++ /dev/null @@ -1,310 +0,0 @@ -import re -import logging - -from Qt import QtCore, QtWidgets -from avalon.vendor import qtawesome -from avalon import io -from avalon import style - -log = logging.getLogger(__name__) - - -class Item(dict): - """An item that can be represented in a tree view using `TreeModel`. - - The item can store data just like a regular dictionary. 
- - >>> data = {"name": "John", "score": 10} - >>> item = Item(data) - >>> assert item["name"] == "John" - - """ - - def __init__(self, data=None): - super(Item, self).__init__() - - self._children = list() - self._parent = None - - if data is not None: - assert isinstance(data, dict) - self.update(data) - - def childCount(self): - return len(self._children) - - def child(self, row): - - if row >= len(self._children): - log.warning("Invalid row as child: {0}".format(row)) - return - - return self._children[row] - - def children(self): - return self._children - - def parent(self): - return self._parent - - def row(self): - """ - Returns: - int: Index of this item under parent""" - if self._parent is not None: - siblings = self.parent().children() - return siblings.index(self) - - def add_child(self, child): - """Add a child to this item""" - child._parent = self - self._children.append(child) - - -class TreeModel(QtCore.QAbstractItemModel): - - Columns = list() - ItemRole = QtCore.Qt.UserRole + 1 - - def __init__(self, parent=None): - super(TreeModel, self).__init__(parent) - self._root_item = Item() - - def rowCount(self, parent): - if parent.isValid(): - item = parent.internalPointer() - else: - item = self._root_item - - return item.childCount() - - def columnCount(self, parent): - return len(self.Columns) - - def data(self, index, role): - - if not index.isValid(): - return None - - if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole: - - item = index.internalPointer() - column = index.column() - - key = self.Columns[column] - return item.get(key, None) - - if role == self.ItemRole: - return index.internalPointer() - - def setData(self, index, value, role=QtCore.Qt.EditRole): - """Change the data on the items. - - Returns: - bool: Whether the edit was successful - """ - - if index.isValid(): - if role == QtCore.Qt.EditRole: - - item = index.internalPointer() - column = index.column() - key = self.Columns[column] - item[key] = value - - # passing `list()` for PyQt5 (see PYSIDE-462) - self.dataChanged.emit(index, index, list()) - - # must return true if successful - return True - - return False - - def setColumns(self, keys): - assert isinstance(keys, (list, tuple)) - self.Columns = keys - - def headerData(self, section, orientation, role): - - if role == QtCore.Qt.DisplayRole: - if section < len(self.Columns): - return self.Columns[section] - - super(TreeModel, self).headerData(section, orientation, role) - - def flags(self, index): - flags = QtCore.Qt.ItemIsEnabled - - item = index.internalPointer() - if item.get("enabled", True): - flags |= QtCore.Qt.ItemIsSelectable - - return flags - - def parent(self, index): - - item = index.internalPointer() - parent_item = item.parent() - - # If it has no parents we return invalid - if parent_item == self._root_item or not parent_item: - return QtCore.QModelIndex() - - return self.createIndex(parent_item.row(), 0, parent_item) - - def index(self, row, column, parent): - """Return index for row/column under parent""" - - if not parent.isValid(): - parent_item = self._root_item - else: - parent_item = parent.internalPointer() - - child_item = parent_item.child(row) - if child_item: - return self.createIndex(row, column, child_item) - else: - return QtCore.QModelIndex() - - def add_child(self, item, parent=None): - if parent is None: - parent = self._root_item - - parent.add_child(item) - - def column_name(self, column): - """Return column key by index""" - - if column < len(self.Columns): - return self.Columns[column] - - def 
clear(self): - self.beginResetModel() - self._root_item = Item() - self.endResetModel() - - -class TasksModel(TreeModel): - """A model listing the tasks combined for a list of assets""" - - Columns = ["Tasks"] - - def __init__(self): - super(TasksModel, self).__init__() - self._num_assets = 0 - self._icons = { - "__default__": qtawesome.icon("fa.male", - color=style.colors.default), - "__no_task__": qtawesome.icon("fa.exclamation-circle", - color=style.colors.mid) - } - - self._get_task_icons() - - def _get_task_icons(self): - # Get the project configured icons from database - project = io.find_one({"type": "project"}) - tasks = project["config"].get("tasks", []) - for task in tasks: - icon_name = task.get("icon", None) - if icon_name: - icon = qtawesome.icon("fa.{}".format(icon_name), - color=style.colors.default) - self._icons[task["name"]] = icon - - def set_tasks(self, tasks): - """Set assets to track by their database id - - Arguments: - asset_ids (list): List of asset ids. - - """ - - self.clear() - - # let cleared task view if no tasks are available - if len(tasks) == 0: - return - - self.beginResetModel() - - icon = self._icons["__default__"] - for task in tasks: - item = Item({ - "Tasks": task, - "icon": icon - }) - - self.add_child(item) - - self.endResetModel() - - def flags(self, index): - return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable - - def headerData(self, section, orientation, role): - - # Override header for count column to show amount of assets - # it is listing the tasks for - if role == QtCore.Qt.DisplayRole: - if orientation == QtCore.Qt.Horizontal: - if section == 1: # count column - return "count ({0})".format(self._num_assets) - - return super(TasksModel, self).headerData(section, orientation, role) - - def data(self, index, role): - - if not index.isValid(): - return - - # Add icon to the first column - if role == QtCore.Qt.DecorationRole: - if index.column() == 0: - return index.internalPointer()["icon"] - - return super(TasksModel, self).data(index, role) - - -class DeselectableTreeView(QtWidgets.QTreeView): - """A tree view that deselects on clicking on an empty area in the view""" - - def mousePressEvent(self, event): - - index = self.indexAt(event.pos()) - if not index.isValid(): - # clear the selection - self.clearSelection() - # clear the current index - self.setCurrentIndex(QtCore.QModelIndex()) - - QtWidgets.QTreeView.mousePressEvent(self, event) - - -class RecursiveSortFilterProxyModel(QtCore.QSortFilterProxyModel): - """Filters to the regex if any of the children matches allow parent""" - def filterAcceptsRow(self, row, parent): - - regex = self.filterRegExp() - if not regex.isEmpty(): - pattern = regex.pattern() - model = self.sourceModel() - source_index = model.index(row, self.filterKeyColumn(), parent) - if source_index.isValid(): - - # Check current index itself - key = model.data(source_index, self.filterRole()) - if re.search(pattern, key, re.IGNORECASE): - return True - - # Check children - rows = model.rowCount(source_index) - for i in range(rows): - if self.filterAcceptsRow(i, source_index): - return True - - # Otherwise filter it - return False - - return super(RecursiveSortFilterProxyModel, - self).filterAcceptsRow(row, parent) diff --git a/openpype/tools/assetcreator/widget.py b/openpype/tools/assetcreator/widget.py deleted file mode 100644 index fd0f438e68..0000000000 --- a/openpype/tools/assetcreator/widget.py +++ /dev/null @@ -1,448 +0,0 @@ -import logging -import contextlib -import collections - -from avalon.vendor import 
qtawesome -from Qt import QtWidgets, QtCore, QtGui -from avalon import style, io - -from .model import ( - TreeModel, - Item, - RecursiveSortFilterProxyModel, - DeselectableTreeView -) - -log = logging.getLogger(__name__) - - -def _iter_model_rows(model, - column, - include_root=False): - """Iterate over all row indices in a model""" - indices = [QtCore.QModelIndex()] # start iteration at root - - for index in indices: - - # Add children to the iterations - child_rows = model.rowCount(index) - for child_row in range(child_rows): - child_index = model.index(child_row, column, index) - indices.append(child_index) - - if not include_root and not index.isValid(): - continue - - yield index - - -@contextlib.contextmanager -def preserve_expanded_rows(tree_view, - column=0, - role=QtCore.Qt.DisplayRole): - """Preserves expanded row in QTreeView by column's data role. - - This function is created to maintain the expand vs collapse status of - the model items. When refresh is triggered the items which are expanded - will stay expanded and vice versa. - - Arguments: - tree_view (QWidgets.QTreeView): the tree view which is - nested in the application - column (int): the column to retrieve the data from - role (int): the role which dictates what will be returned - - Returns: - None - - """ - - model = tree_view.model() - - expanded = set() - - for index in _iter_model_rows(model, - column=column, - include_root=False): - if tree_view.isExpanded(index): - value = index.data(role) - expanded.add(value) - - try: - yield - finally: - if not expanded: - return - - for index in _iter_model_rows(model, - column=column, - include_root=False): - value = index.data(role) - state = value in expanded - if state: - tree_view.expand(index) - else: - tree_view.collapse(index) - - -@contextlib.contextmanager -def preserve_selection(tree_view, - column=0, - role=QtCore.Qt.DisplayRole, - current_index=True): - """Preserves row selection in QTreeView by column's data role. - - This function is created to maintain the selection status of - the model items. When refresh is triggered the items which are expanded - will stay expanded and vice versa. - - tree_view (QWidgets.QTreeView): the tree view nested in the application - column (int): the column to retrieve the data from - role (int): the role which dictates what will be returned - - Returns: - None - - """ - - model = tree_view.model() - selection_model = tree_view.selectionModel() - flags = selection_model.Select | selection_model.Rows - - if current_index: - current_index_value = tree_view.currentIndex().data(role) - else: - current_index_value = None - - selected_rows = selection_model.selectedRows() - if not selected_rows: - yield - return - - selected = set(row.data(role) for row in selected_rows) - try: - yield - finally: - if not selected: - return - - # Go through all indices, select the ones with similar data - for index in _iter_model_rows(model, - column=column, - include_root=False): - - value = index.data(role) - state = value in selected - if state: - tree_view.scrollTo(index) # Ensure item is visible - selection_model.select(index, flags) - - if current_index_value and value == current_index_value: - tree_view.setCurrentIndex(index) - - -class AssetModel(TreeModel): - """A model listing assets in the silo in the active project. - - The assets are displayed in a treeview, they are visually parented by - a `visualParent` field in the database containing an `_id` to a parent - asset. 
- - """ - - Columns = ["label"] - Name = 0 - Deprecated = 2 - ObjectId = 3 - - DocumentRole = QtCore.Qt.UserRole + 2 - ObjectIdRole = QtCore.Qt.UserRole + 3 - - def __init__(self, parent=None): - super(AssetModel, self).__init__(parent=parent) - self.refresh() - - def _add_hierarchy(self, assets, parent=None, silos=None): - """Add the assets that are related to the parent as children items. - - This method does *not* query the database. These instead are queried - in a single batch upfront as an optimization to reduce database - queries. Resulting in up to 10x speed increase. - - Args: - assets (dict): All assets in the currently active silo stored - by key/value - - Returns: - None - - """ - if silos: - # WARNING: Silo item "_id" is set to silo value - # mainly because GUI issue with preserve selection and expanded row - # and because of easier hierarchy parenting (in "assets") - for silo in silos: - item = Item({ - "_id": silo, - "name": silo, - "label": silo, - "type": "silo" - }) - self.add_child(item, parent=parent) - self._add_hierarchy(assets, parent=item) - - parent_id = parent["_id"] if parent else None - current_assets = assets.get(parent_id, list()) - - for asset in current_assets: - # get label from data, otherwise use name - data = asset.get("data", {}) - label = data.get("label", asset["name"]) - tags = data.get("tags", []) - - # store for the asset for optimization - deprecated = "deprecated" in tags - - item = Item({ - "_id": asset["_id"], - "name": asset["name"], - "label": label, - "type": asset["type"], - "tags": ", ".join(tags), - "deprecated": deprecated, - "_document": asset - }) - self.add_child(item, parent=parent) - - # Add asset's children recursively if it has children - if asset["_id"] in assets: - self._add_hierarchy(assets, parent=item) - - def refresh(self): - """Refresh the data for the model.""" - - self.clear() - self.beginResetModel() - - # Get all assets in current silo sorted by name - db_assets = io.find({"type": "asset"}).sort("name", 1) - silos = db_assets.distinct("silo") or None - # if any silo is set to None then it's expected it should not be used - if silos and None in silos: - silos = None - - # Group the assets by their visual parent's id - assets_by_parent = collections.defaultdict(list) - for asset in db_assets: - parent_id = ( - asset.get("data", {}).get("visualParent") or - asset.get("silo") - ) - assets_by_parent[parent_id].append(asset) - - # Build the hierarchical tree items recursively - self._add_hierarchy( - assets_by_parent, - parent=None, - silos=silos - ) - - self.endResetModel() - - def flags(self, index): - return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable - - def data(self, index, role): - - if not index.isValid(): - return - - item = index.internalPointer() - if role == QtCore.Qt.DecorationRole: # icon - - column = index.column() - if column == self.Name: - - # Allow a custom icon and custom icon color to be defined - data = item.get("_document", {}).get("data", {}) - icon = data.get("icon", None) - if icon is None and item.get("type") == "silo": - icon = "database" - color = data.get("color", style.colors.default) - - if icon is None: - # Use default icons if no custom one is specified. 
- # If it has children show a full folder, otherwise - # show an open folder - has_children = self.rowCount(index) > 0 - icon = "folder" if has_children else "folder-o" - - # Make the color darker when the asset is deprecated - if item.get("deprecated", False): - color = QtGui.QColor(color).darker(250) - - try: - key = "fa.{0}".format(icon) # font-awesome key - icon = qtawesome.icon(key, color=color) - return icon - except Exception as exception: - # Log an error message instead of erroring out completely - # when the icon couldn't be created (e.g. invalid name) - log.error(exception) - - return - - if role == QtCore.Qt.ForegroundRole: # font color - if "deprecated" in item.get("tags", []): - return QtGui.QColor(style.colors.light).darker(250) - - if role == self.ObjectIdRole: - return item.get("_id", None) - - if role == self.DocumentRole: - return item.get("_document", None) - - return super(AssetModel, self).data(index, role) - - -class AssetWidget(QtWidgets.QWidget): - """A Widget to display a tree of assets with filter - - To list the assets of the active project: - >>> # widget = AssetWidget() - >>> # widget.refresh() - >>> # widget.show() - - """ - - assets_refreshed = QtCore.Signal() # on model refresh - selection_changed = QtCore.Signal() # on view selection change - current_changed = QtCore.Signal() # on view current index change - - def __init__(self, parent=None): - super(AssetWidget, self).__init__(parent=parent) - self.setContentsMargins(0, 0, 0, 0) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.setSpacing(4) - - # Tree View - model = AssetModel(self) - proxy = RecursiveSortFilterProxyModel() - proxy.setSourceModel(model) - proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) - - view = DeselectableTreeView() - view.setIndentation(15) - view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - view.setHeaderHidden(True) - view.setModel(proxy) - - # Header - header = QtWidgets.QHBoxLayout() - - icon = qtawesome.icon("fa.refresh", color=style.colors.light) - refresh = QtWidgets.QPushButton(icon, "") - refresh.setToolTip("Refresh items") - - filter = QtWidgets.QLineEdit() - filter.textChanged.connect(proxy.setFilterFixedString) - filter.setPlaceholderText("Filter assets..") - - header.addWidget(filter) - header.addWidget(refresh) - - # Layout - layout.addLayout(header) - layout.addWidget(view) - - # Signals/Slots - selection = view.selectionModel() - selection.selectionChanged.connect(self.selection_changed) - selection.currentChanged.connect(self.current_changed) - refresh.clicked.connect(self.refresh) - - self.refreshButton = refresh - self.model = model - self.proxy = proxy - self.view = view - - def _refresh_model(self): - with preserve_expanded_rows( - self.view, column=0, role=self.model.ObjectIdRole - ): - with preserve_selection( - self.view, column=0, role=self.model.ObjectIdRole - ): - self.model.refresh() - - self.assets_refreshed.emit() - - def refresh(self): - self._refresh_model() - - def get_active_asset(self): - """Return the asset id the current asset.""" - current = self.view.currentIndex() - return current.data(self.model.ItemRole) - - def get_active_index(self): - return self.view.currentIndex() - - def get_selected_assets(self): - """Return the assets' ids that are selected.""" - selection = self.view.selectionModel() - rows = selection.selectedRows() - return [row.data(self.model.ObjectIdRole) for row in rows] - - def select_assets(self, assets, expand=True, key="name"): - """Select assets by name. 
- - Args: - assets (list): List of asset names - expand (bool): Whether to also expand to the asset in the view - - Returns: - None - - """ - # TODO: Instead of individual selection optimize for many assets - - if not isinstance(assets, (tuple, list)): - assets = [assets] - assert isinstance( - assets, (tuple, list) - ), "Assets must be list or tuple" - - # convert to list - tuple cant be modified - assets = list(assets) - - # Clear selection - selection_model = self.view.selectionModel() - selection_model.clearSelection() - - # Select - mode = selection_model.Select | selection_model.Rows - for index in iter_model_rows( - self.proxy, column=0, include_root=False - ): - # stop iteration if there are no assets to process - if not assets: - break - - value = index.data(self.model.ItemRole).get(key) - if value not in assets: - continue - - # Remove processed asset - assets.pop(assets.index(value)) - - selection_model.select(index, mode) - - if expand: - # Expand parent index - self.view.expand(self.proxy.parent(index)) - - # Set the currently active index - self.view.setCurrentIndex(index) diff --git a/openpype/tools/creator/model.py b/openpype/tools/creator/model.py index 6907e8f0aa..ef61c6e0f0 100644 --- a/openpype/tools/creator/model.py +++ b/openpype/tools/creator/model.py @@ -2,6 +2,7 @@ import uuid from Qt import QtGui, QtCore from avalon import api +from openpype.pipeline import LegacyCreator from . constants import ( FAMILY_ROLE, @@ -21,7 +22,7 @@ class CreatorsModel(QtGui.QStandardItemModel): self._creators_by_id = {} items = [] - creators = api.discover(api.Creator) + creators = api.discover(LegacyCreator) for creator in creators: item_id = str(uuid.uuid4()) self._creators_by_id[item_id] = creator diff --git a/openpype/tools/creator/widgets.py b/openpype/tools/creator/widgets.py index 9dd435c1cc..43df08496b 100644 --- a/openpype/tools/creator/widgets.py +++ b/openpype/tools/creator/widgets.py @@ -3,9 +3,8 @@ import inspect from Qt import QtWidgets, QtCore, QtGui -from avalon.vendor import qtawesome +import qtawesome -from openpype import style from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS from openpype.tools.utils import ErrorMessageBox diff --git a/openpype/tools/creator/window.py b/openpype/tools/creator/window.py index f1d0849dfe..51cc66e715 100644 --- a/openpype/tools/creator/window.py +++ b/openpype/tools/creator/window.py @@ -9,7 +9,12 @@ from avalon import api, io from openpype import style from openpype.api import get_current_project_settings from openpype.tools.utils.lib import qt_app_context -from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS +from openpype.pipeline.create import ( + SUBSET_NAME_ALLOWED_SYMBOLS, + legacy_create, + CreatorError, + LegacyCreator, +) from .model import CreatorsModel from .widgets import ( @@ -422,7 +427,7 @@ class CreatorWindow(QtWidgets.QDialog): error_info = None try: - api.create( + legacy_create( creator_plugin, subset_name, asset_name, @@ -430,7 +435,7 @@ class CreatorWindow(QtWidgets.QDialog): data={"variant": variant} ) - except api.CreatorError as exc: + except CreatorError as exc: self.echo("Creator error: {}".format(str(exc))) error_info = (str(exc), None) @@ -486,7 +491,7 @@ def show(debug=False, parent=None): if debug: from avalon import mock for creator in mock.creators: - api.register_plugin(api.Creator, creator) + api.register_plugin(LegacyCreator, creator) import traceback sys.excepthook = lambda typ, val, tb: traceback.print_last() diff --git a/openpype/tools/launcher/lib.py 
b/openpype/tools/launcher/lib.py index b4e6a0c3e9..68c759f295 100644 --- a/openpype/tools/launcher/lib.py +++ b/openpype/tools/launcher/lib.py @@ -16,7 +16,7 @@ provides a bridge between the file-based project inventory and configuration. import os from Qt import QtGui -from avalon.vendor import qtawesome +import qtawesome from openpype.api import resources ICON_CACHE = {} diff --git a/openpype/tools/launcher/models.py b/openpype/tools/launcher/models.py index effa283318..85d553fca4 100644 --- a/openpype/tools/launcher/models.py +++ b/openpype/tools/launcher/models.py @@ -7,14 +7,17 @@ import time import appdirs from Qt import QtCore, QtGui -from avalon.vendor import qtawesome +import qtawesome from avalon import api from openpype.lib import JSONSettingRegistry from openpype.lib.applications import ( CUSTOM_LAUNCH_APP_GROUPS, ApplicationManager ) -from openpype.tools.utils.lib import DynamicQThread +from openpype.tools.utils.lib import ( + DynamicQThread, + get_project_icon, +) from openpype.tools.utils.assets_widget import ( AssetModel, ASSET_NAME_ROLE @@ -400,6 +403,7 @@ class LauncherModel(QtCore.QObject): self._dbcon = dbcon # Available project names self._project_names = set() + self._project_docs_by_name = {} # Context data self._asset_docs = [] @@ -460,6 +464,9 @@ class LauncherModel(QtCore.QObject): """Available project names.""" return self._project_names + def get_project_doc(self, project_name): + return self._project_docs_by_name.get(project_name) + @property def asset_filter_data_by_id(self): """Prepared filter data by asset id.""" @@ -516,9 +523,13 @@ class LauncherModel(QtCore.QObject): """Refresh projects.""" current_project = self.project_name project_names = set() + project_docs_by_name = {} for project_doc in self._dbcon.projects(only_active=True): - project_names.add(project_doc["name"]) + project_name = project_doc["name"] + project_names.add(project_name) + project_docs_by_name[project_name] = project_doc + self._project_docs_by_name = project_docs_by_name self._project_names = project_names self.projects_refreshed.emit() if ( @@ -694,6 +705,11 @@ class LauncherTaskModel(TasksModel): self._launcher_model = launcher_model super(LauncherTaskModel, self).__init__(*args, **kwargs) + def _refresh_project_doc(self): + self._project_doc = self._launcher_model.get_project_doc( + self._launcher_model.project_name + ) + def set_asset_id(self, asset_id): asset_doc = None if self._context_is_valid(): @@ -718,7 +734,6 @@ class AssetRecursiveSortFilterModel(QtCore.QSortFilterProxyModel): self._assignee_filter = self._launcher_model.assignee_filters self.invalidateFilter() - """Filters to the regex if any of the children matches allow parent""" def filterAcceptsRow(self, row, parent): if ( not self._name_filter @@ -818,7 +833,6 @@ class ProjectModel(QtGui.QStandardItemModel): super(ProjectModel, self).__init__(parent=parent) self._launcher_model = launcher_model - self.project_icon = qtawesome.icon("fa.map", color="white") self._project_names = set() launcher_model.projects_refreshed.connect(self._on_refresh) @@ -863,7 +877,11 @@ class ProjectModel(QtGui.QStandardItemModel): for row in reversed(sorted(row_counts.keys())): items = [] for project_name in row_counts[row]: - item = QtGui.QStandardItem(self.project_icon, project_name) + project_doc = self._launcher_model.get_project_doc( + project_name + ) + icon = get_project_icon(project_doc) + item = QtGui.QStandardItem(icon, project_name) items.append(item) self.invisibleRootItem().insertRows(row, items) diff --git 
a/openpype/tools/launcher/widgets.py b/openpype/tools/launcher/widgets.py index 30e6531843..62599664fe 100644 --- a/openpype/tools/launcher/widgets.py +++ b/openpype/tools/launcher/widgets.py @@ -2,7 +2,7 @@ import copy import time import collections from Qt import QtWidgets, QtCore, QtGui -from avalon.vendor import qtawesome +import qtawesome from openpype.tools.flickcharm import FlickCharm from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget diff --git a/openpype/tools/launcher/window.py b/openpype/tools/launcher/window.py index b5b6368865..d80b3eabf0 100644 --- a/openpype/tools/launcher/window.py +++ b/openpype/tools/launcher/window.py @@ -8,7 +8,7 @@ from avalon.api import AvalonMongoDB from openpype import style from openpype.api import resources -from avalon.vendor import qtawesome +import qtawesome from .models import ( LauncherModel, ProjectModel diff --git a/openpype/tools/loader/app.py b/openpype/tools/loader/app.py index aa743b05fe..d73a977ac6 100644 --- a/openpype/tools/loader/app.py +++ b/openpype/tools/loader/app.py @@ -1,9 +1,10 @@ import sys from Qt import QtWidgets, QtCore -from avalon import api, io, pipeline +from avalon import api, io from openpype import style +from openpype.lib import register_event_callback from openpype.tools.utils import ( lib, PlaceholderLineEdit @@ -25,17 +26,6 @@ module = sys.modules[__name__] module.window = None -# Register callback on task change -# - callback can't be defined in Window as it is weak reference callback -# so `WeakSet` will remove it immediately -def on_context_task_change(*args, **kwargs): - if module.window: - module.window.on_context_task_change(*args, **kwargs) - - -pipeline.on("taskChanged", on_context_task_change) - - class LoaderWindow(QtWidgets.QDialog): """Asset loader interface""" @@ -194,6 +184,8 @@ class LoaderWindow(QtWidgets.QDialog): self._first_show = True + register_event_callback("taskChanged", self.on_context_task_change) + def resizeEvent(self, event): super(LoaderWindow, self).resizeEvent(event) self._overlay_frame.resize(self.size()) diff --git a/openpype/tools/loader/lib.py b/openpype/tools/loader/lib.py index 180dee3eb5..28e94237ec 100644 --- a/openpype/tools/loader/lib.py +++ b/openpype/tools/loader/lib.py @@ -1,7 +1,7 @@ import inspect from Qt import QtGui +import qtawesome -from avalon.vendor import qtawesome from openpype.tools.utils.widgets import ( OptionalAction, OptionDialog diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py index 10b22d0e17..6cc6fae1fb 100644 --- a/openpype/tools/loader/model.py +++ b/openpype/tools/loader/model.py @@ -3,15 +3,13 @@ import re import math from uuid import uuid4 -from avalon import ( - style, - schema -) from Qt import QtCore, QtGui +import qtawesome -from avalon.vendor import qtawesome -from avalon.lib import HeroVersionType +from avalon import schema +from openpype.pipeline import HeroVersionType +from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, Item from openpype.tools.utils import lib @@ -180,7 +178,10 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): self._sorter = None self._grouping = grouping self._icons = { - "subset": qtawesome.icon("fa.file-o", color=style.colors.default) + "subset": qtawesome.icon( + "fa.file-o", + color=get_default_entity_icon_color() + ) } self._items_by_id = {} @@ -1066,8 +1067,10 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): self._docs = {} self._icons = lib.get_repre_icons() - self._icons["repre"] = 
qtawesome.icon("fa.file-o", - color=style.colors.default) + self._icons["repre"] = qtawesome.icon( + "fa.file-o", + color=get_default_entity_icon_color() + ) self._items_by_id = {} def set_version_ids(self, version_ids): @@ -1165,7 +1168,7 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): "remote_site_name": self.remote_site, "icon": qtawesome.icon( "fa.folder", - color=style.colors.default + color=get_default_entity_icon_color() ) }) self._items_by_id[item_id] = group_item diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py index f145756cc5..b14bdd0e93 100644 --- a/openpype/tools/loader/widgets.py +++ b/openpype/tools/loader/widgets.py @@ -1,6 +1,5 @@ import os import sys -import inspect import datetime import pprint import traceback @@ -9,8 +8,19 @@ import collections from Qt import QtWidgets, QtCore, QtGui from avalon import api, pipeline -from avalon.lib import HeroVersionType +from openpype.pipeline import HeroVersionType +from openpype.pipeline.load import ( + discover_loader_plugins, + SubsetLoaderPlugin, + loaders_from_repre_context, + get_repres_contexts, + get_subset_contexts, + load_with_repre_context, + load_with_subset_context, + load_with_subset_contexts, + IncompatibleLoaderError, +) from openpype.tools.utils import ( ErrorMessageBox, lib as tools_lib @@ -425,7 +435,7 @@ class SubsetWidget(QtWidgets.QWidget): # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. - available_loaders = api.discover(api.Loader) + available_loaders = discover_loader_plugins() if self.tool_name: available_loaders = lib.remove_tool_name_from_loaders( available_loaders, self.tool_name @@ -435,7 +445,7 @@ class SubsetWidget(QtWidgets.QWidget): subset_loaders = [] for loader in available_loaders: # Skip if its a SubsetLoader. 
- if api.SubsetLoader in inspect.getmro(loader): + if issubclass(loader, SubsetLoaderPlugin): subset_loaders.append(loader) else: repre_loaders.append(loader) @@ -459,7 +469,7 @@ class SubsetWidget(QtWidgets.QWidget): repre_docs = repre_docs_by_version_id[version_id] for repre_doc in repre_docs: repre_context = repre_context_by_id[repre_doc["_id"]] - for loader in pipeline.loaders_from_repre_context( + for loader in loaders_from_repre_context( repre_loaders, repre_context ): @@ -515,7 +525,7 @@ class SubsetWidget(QtWidgets.QWidget): action = lib.get_no_loader_action(menu, one_item_selected) menu.addAction(action) else: - repre_contexts = pipeline.get_repres_contexts( + repre_contexts = get_repres_contexts( repre_context_by_id.keys(), self.dbcon) menu = lib.add_representation_loaders_to_menu( @@ -532,7 +542,7 @@ class SubsetWidget(QtWidgets.QWidget): self.load_started.emit() - if api.SubsetLoader in inspect.getmro(loader): + if issubclass(loader, SubsetLoaderPlugin): subset_ids = [] subset_version_docs = {} for item in items: @@ -541,8 +551,7 @@ class SubsetWidget(QtWidgets.QWidget): subset_version_docs[subset_id] = item["version_document"] # get contexts only for selected menu option - subset_contexts_by_id = pipeline.get_subset_contexts(subset_ids, - self.dbcon) + subset_contexts_by_id = get_subset_contexts(subset_ids, self.dbcon) subset_contexts = list(subset_contexts_by_id.values()) options = lib.get_options(action, loader, self, subset_contexts) @@ -575,8 +584,7 @@ class SubsetWidget(QtWidgets.QWidget): repre_ids.append(representation["_id"]) # get contexts only for selected menu option - repre_contexts = pipeline.get_repres_contexts(repre_ids, - self.dbcon) + repre_contexts = get_repres_contexts(repre_ids, self.dbcon) options = lib.get_options(action, loader, self, list(repre_contexts.values())) @@ -1339,12 +1347,12 @@ class RepresentationWidget(QtWidgets.QWidget): selected_side = self._get_selected_side(point_index, rows) # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. 
- available_loaders = api.discover(api.Loader) + available_loaders = discover_loader_plugins() filtered_loaders = [] for loader in available_loaders: # Skip subset loaders - if api.SubsetLoader in inspect.getmro(loader): + if issubclass(loader, SubsetLoaderPlugin): continue if ( @@ -1370,7 +1378,7 @@ class RepresentationWidget(QtWidgets.QWidget): for item in items: repre_context = repre_context_by_id[item["_id"]] - for loader in pipeline.loaders_from_repre_context( + for loader in loaders_from_repre_context( filtered_loaders, repre_context ): @@ -1426,7 +1434,7 @@ class RepresentationWidget(QtWidgets.QWidget): action = lib.get_no_loader_action(menu) menu.addAction(action) else: - repre_contexts = pipeline.get_repres_contexts( + repre_contexts = get_repres_contexts( repre_context_by_id.keys(), self.dbcon) menu = lib.add_representation_loaders_to_menu(loaders, menu, repre_contexts) @@ -1472,8 +1480,7 @@ class RepresentationWidget(QtWidgets.QWidget): repre_ids.append(item.get("_id")) - repre_contexts = pipeline.get_repres_contexts(repre_ids, - self.dbcon) + repre_contexts = get_repres_contexts(repre_ids, self.dbcon) options = lib.get_options(action, loader, self, list(repre_contexts.values())) @@ -1540,7 +1547,7 @@ def _load_representations_by_loader(loader, repre_contexts, """Loops through list of repre_contexts and loads them with one loader Args: - loader (cls of api.Loader) - not initialized yet + loader (cls of LoaderPlugin) - not initialized yet repre_contexts (dicts) - full info about selected representations (containing repre_doc, version_doc, subset_doc, project info) options (dict) - qargparse arguments to fill OptionDialog @@ -1558,12 +1565,12 @@ def _load_representations_by_loader(loader, repre_contexts, _id = repre_context["representation"]["_id"] data = data_by_repre_id.get(_id) options.update(data) - pipeline.load_with_repre_context( + load_with_repre_context( loader, repre_context, options=options ) - except pipeline.IncompatibleLoaderError as exc: + except IncompatibleLoaderError as exc: print(exc) error_info.append(( "Incompatible Loader", @@ -1612,7 +1619,7 @@ def _load_subsets_by_loader(loader, subset_contexts, options, context["version"] = subset_version_docs[context["subset"]["_id"]] try: - pipeline.load_with_subset_contexts( + load_with_subset_contexts( loader, subset_contexts, options=options @@ -1638,7 +1645,7 @@ def _load_subsets_by_loader(loader, subset_contexts, options, version_doc = subset_version_docs[subset_context["subset"]["_id"]] subset_context["version"] = version_doc try: - pipeline.load_with_subset_context( + load_with_subset_context( loader, subset_context, options=options diff --git a/openpype/tools/mayalookassigner/__init__.py b/openpype/tools/mayalookassigner/__init__.py index 616a3e94d0..5e40777741 100644 --- a/openpype/tools/mayalookassigner/__init__.py +++ b/openpype/tools/mayalookassigner/__init__.py @@ -1,9 +1,9 @@ from .app import ( - App, + MayaLookAssignerWindow, show ) __all__ = [ - "App", + "MayaLookAssignerWindow", "show"] diff --git a/openpype/tools/mayalookassigner/app.py b/openpype/tools/mayalookassigner/app.py index 31bb455f95..0e633a21e3 100644 --- a/openpype/tools/mayalookassigner/app.py +++ b/openpype/tools/mayalookassigner/app.py @@ -4,10 +4,10 @@ import logging from Qt import QtWidgets, QtCore -from openpype.hosts.maya.api.lib import assign_look_by_version - -from avalon import style, io +from avalon import io +from openpype import style from openpype.tools.utils.lib import qt_app_context +from openpype.hosts.maya.api.lib import 
assign_look_by_version from maya import cmds # old api for MFileIO @@ -28,10 +28,10 @@ module = sys.modules[__name__] module.window = None -class App(QtWidgets.QWidget): +class MayaLookAssignerWindow(QtWidgets.QWidget): def __init__(self, parent=None): - QtWidgets.QWidget.__init__(self, parent=parent) + super(MayaLookAssignerWindow, self).__init__(parent=parent) self.log = logging.getLogger(__name__) @@ -56,30 +56,41 @@ class App(QtWidgets.QWidget): def setup_ui(self): """Build the UI""" + main_splitter = QtWidgets.QSplitter(self) + # Assets (left) - asset_outliner = AssetOutliner() + asset_outliner = AssetOutliner(main_splitter) # Looks (right) - looks_widget = QtWidgets.QWidget() - looks_layout = QtWidgets.QVBoxLayout(looks_widget) + looks_widget = QtWidgets.QWidget(main_splitter) - look_outliner = LookOutliner() # Database look overview + look_outliner = LookOutliner(looks_widget) # Database look overview - assign_selected = QtWidgets.QCheckBox("Assign to selected only") + assign_selected = QtWidgets.QCheckBox( + "Assign to selected only", looks_widget + ) assign_selected.setToolTip("Whether to assign only to selected nodes " "or to the full asset") - remove_unused_btn = QtWidgets.QPushButton("Remove Unused Looks") + remove_unused_btn = QtWidgets.QPushButton( + "Remove Unused Looks", looks_widget + ) + looks_layout = QtWidgets.QVBoxLayout(looks_widget) looks_layout.addWidget(look_outliner) looks_layout.addWidget(assign_selected) looks_layout.addWidget(remove_unused_btn) + main_splitter.addWidget(asset_outliner) + main_splitter.addWidget(looks_widget) + main_splitter.setSizes([350, 200]) + # Footer - status = QtWidgets.QStatusBar() + status = QtWidgets.QStatusBar(self) status.setSizeGripEnabled(False) status.setFixedHeight(25) - warn_layer = QtWidgets.QLabel("Current Layer is not " - "defaultRenderLayer") + warn_layer = QtWidgets.QLabel( + "Current Layer is not defaultRenderLayer", self + ) warn_layer.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) warn_layer.setStyleSheet("color: #DD5555; font-weight: bold;") warn_layer.setFixedHeight(25) @@ -92,11 +103,6 @@ class App(QtWidgets.QWidget): # Build up widgets main_layout = QtWidgets.QVBoxLayout(self) main_layout.setSpacing(0) - main_splitter = QtWidgets.QSplitter() - main_splitter.setStyleSheet("QSplitter{ border: 0px; }") - main_splitter.addWidget(asset_outliner) - main_splitter.addWidget(looks_widget) - main_splitter.setSizes([350, 200]) main_layout.addWidget(main_splitter) main_layout.addLayout(footer) @@ -124,6 +130,8 @@ class App(QtWidgets.QWidget): self.remove_unused = remove_unused_btn self.assign_selected = assign_selected + self._first_show = True + def setup_connections(self): """Connect interactive widgets with actions""" if self._connections_set_up: @@ -147,11 +155,14 @@ class App(QtWidgets.QWidget): def showEvent(self, event): self.setup_connections() - super(App, self).showEvent(event) + super(MayaLookAssignerWindow, self).showEvent(event) + if self._first_show: + self._first_show = False + self.setStyleSheet(style.load_stylesheet()) def closeEvent(self, event): self.remove_connection() - super(App, self).closeEvent(event) + super(MayaLookAssignerWindow, self).closeEvent(event) def _on_renderlayer_switch(self, *args): """Callback that updates on Maya renderlayer switch""" @@ -267,8 +278,7 @@ def show(): if widget.objectName() == "MayaWindow") with qt_app_context(): - window = App(parent=mainwindow) - window.setStyleSheet(style.load_stylesheet()) + window = MayaLookAssignerWindow(parent=mainwindow) window.show() 
module.window = window diff --git a/openpype/tools/mayalookassigner/commands.py b/openpype/tools/mayalookassigner/commands.py index 96fc28243b..df72e41354 100644 --- a/openpype/tools/mayalookassigner/commands.py +++ b/openpype/tools/mayalookassigner/commands.py @@ -4,10 +4,11 @@ import os import maya.cmds as cmds -from openpype.hosts.maya.api import lib - from avalon import io, api +from openpype.pipeline import remove_container +from openpype.hosts.maya.api import lib + from .vray_proxies import get_alembic_ids_cache log = logging.getLogger(__name__) @@ -206,6 +207,6 @@ def remove_unused_looks(): for container in unused: log.info("Removing unused look container: %s", container['objectName']) - api.remove(container) + remove_container(container) log.info("Finished removing unused looks. (see log for details)") diff --git a/openpype/tools/mayalookassigner/models.py b/openpype/tools/mayalookassigner/models.py index 39cab83c61..77a3c8a590 100644 --- a/openpype/tools/mayalookassigner/models.py +++ b/openpype/tools/mayalookassigner/models.py @@ -1,16 +1,21 @@ from collections import defaultdict from Qt import QtCore +import qtawesome -from avalon.vendor import qtawesome -from avalon.style import colors from openpype.tools.utils import models +from openpype.style import get_default_entity_icon_color class AssetModel(models.TreeModel): Columns = ["label"] + def __init__(self, *args, **kwargs): + super(AssetModel, self).__init__(*args, **kwargs) + + self._icon_color = get_default_entity_icon_color() + def add_items(self, items): """ Add items to model with needed data @@ -65,8 +70,10 @@ class AssetModel(models.TreeModel): node = index.internalPointer() icon = node.get("icon") if icon: - return qtawesome.icon("fa.{0}".format(icon), - color=colors.default) + return qtawesome.icon( + "fa.{0}".format(icon), + color=self._icon_color + ) return super(AssetModel, self).data(index, role) diff --git a/openpype/tools/mayalookassigner/views.py b/openpype/tools/mayalookassigner/views.py index 993023bb45..8e676ebc7f 100644 --- a/openpype/tools/mayalookassigner/views.py +++ b/openpype/tools/mayalookassigner/views.py @@ -1,9 +1,6 @@ from Qt import QtWidgets, QtCore -DEFAULT_COLOR = "#fb9c15" - - class View(QtWidgets.QTreeView): data_changed = QtCore.Signal() diff --git a/openpype/tools/mayalookassigner/vray_proxies.py b/openpype/tools/mayalookassigner/vray_proxies.py index b22ec95a4d..6a9347449a 100644 --- a/openpype/tools/mayalookassigner/vray_proxies.py +++ b/openpype/tools/mayalookassigner/vray_proxies.py @@ -12,6 +12,12 @@ from maya import cmds from avalon import io, api +from openpype.pipeline import ( + load_container, + loaders_from_representation, + discover_loader_plugins, + get_representation_path, +) from openpype.hosts.maya.api import lib @@ -155,7 +161,7 @@ def get_look_relationships(version_id): "name": "json"}) # Load relationships - shader_relation = api.get_representation_path(json_representation) + shader_relation = get_representation_path(json_representation) with open(shader_relation, "r") as f: relationships = json.load(f) @@ -193,8 +199,8 @@ def load_look(version_id): log.info("Using look for the first time ...") # Load file - loaders = api.loaders_from_representation(api.discover(api.Loader), - representation_id) + all_loaders = discover_loader_plugins() + loaders = loaders_from_representation(all_loaders, representation_id) loader = next( (i for i in loaders if i.__name__ == "LookLoader"), None) if loader is None: @@ -202,7 +208,7 @@ def load_look(version_id): # Reference the look file 
with lib.maintained_selection(): - container_node = api.load(loader, look_representation) + container_node = load_container(loader, look_representation) # Get container members shader_nodes = lib.get_container_members(container_node) diff --git a/openpype/tools/mayalookassigner/widgets.py b/openpype/tools/mayalookassigner/widgets.py index e546ee705d..10e573342a 100644 --- a/openpype/tools/mayalookassigner/widgets.py +++ b/openpype/tools/mayalookassigner/widgets.py @@ -14,7 +14,7 @@ from .models import ( LookModel ) from . import commands -from . import views +from .views import View from maya import cmds @@ -24,25 +24,28 @@ class AssetOutliner(QtWidgets.QWidget): selection_changed = QtCore.Signal() def __init__(self, parent=None): - QtWidgets.QWidget.__init__(self, parent) + super(AssetOutliner, self).__init__(parent) - layout = QtWidgets.QVBoxLayout() - - title = QtWidgets.QLabel("Assets") + title = QtWidgets.QLabel("Assets", self) title.setAlignment(QtCore.Qt.AlignCenter) title.setStyleSheet("font-weight: bold; font-size: 12px") model = AssetModel() - view = views.View() + view = View(self) view.setModel(model) view.customContextMenuRequested.connect(self.right_mouse_menu) view.setSortingEnabled(False) view.setHeaderHidden(True) view.setIndentation(10) - from_all_asset_btn = QtWidgets.QPushButton("Get All Assets") - from_selection_btn = QtWidgets.QPushButton("Get Assets From Selection") + from_all_asset_btn = QtWidgets.QPushButton( + "Get All Assets", self + ) + from_selection_btn = QtWidgets.QPushButton( + "Get Assets From Selection", self + ) + layout = QtWidgets.QVBoxLayout(self) layout.addWidget(title) layout.addWidget(from_all_asset_btn) layout.addWidget(from_selection_btn) @@ -58,8 +61,6 @@ class AssetOutliner(QtWidgets.QWidget): self.view = view self.model = model - self.setLayout(layout) - self.log = logging.getLogger(__name__) def clear(self): @@ -188,15 +189,10 @@ class LookOutliner(QtWidgets.QWidget): menu_apply_action = QtCore.Signal() def __init__(self, parent=None): - QtWidgets.QWidget.__init__(self, parent) - - # look manager layout - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.setSpacing(10) + super(LookOutliner, self).__init__(parent) # Looks from database - title = QtWidgets.QLabel("Looks") + title = QtWidgets.QLabel("Looks", self) title.setAlignment(QtCore.Qt.AlignCenter) title.setStyleSheet("font-weight: bold; font-size: 12px") title.setAlignment(QtCore.Qt.AlignCenter) @@ -207,13 +203,17 @@ class LookOutliner(QtWidgets.QWidget): proxy = QtCore.QSortFilterProxyModel() proxy.setSourceModel(model) - view = views.View() + view = View(self) view.setModel(proxy) view.setMinimumHeight(180) view.setToolTip("Use right mouse button menu for direct actions") view.customContextMenuRequested.connect(self.right_mouse_menu) view.sortByColumn(0, QtCore.Qt.AscendingOrder) + # look manager layout + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(10) layout.addWidget(title) layout.addWidget(view) diff --git a/openpype/tools/project_manager/project_manager/multiselection_combobox.py b/openpype/tools/project_manager/project_manager/multiselection_combobox.py index 890567de6d..f776831298 100644 --- a/openpype/tools/project_manager/project_manager/multiselection_combobox.py +++ b/openpype/tools/project_manager/project_manager/multiselection_combobox.py @@ -1,4 +1,4 @@ -from Qt import QtCore, QtGui, QtWidgets +from Qt import QtCore, QtWidgets class ComboItemDelegate(QtWidgets.QStyledItemDelegate): diff 
--git a/openpype/tools/project_manager/project_manager/style.py b/openpype/tools/project_manager/project_manager/style.py index d24fc7102f..4405d05960 100644 --- a/openpype/tools/project_manager/project_manager/style.py +++ b/openpype/tools/project_manager/project_manager/style.py @@ -1,7 +1,7 @@ import os -from Qt import QtCore, QtGui +from Qt import QtGui -from avalon.vendor import qtawesome +import qtawesome from openpype.tools.utils import paint_image_with_color diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 5a84b1d8ca..6707feac9c 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -873,8 +873,6 @@ class PublisherController: """ for idx, plugin in enumerate(self.publish_plugins): self._publish_progress = idx - # Add plugin to publish report - self._publish_report.add_plugin_iter(plugin, self._publish_context) # Reset current plugin validations error self._publish_current_plugin_validation_errors = None @@ -902,6 +900,9 @@ class PublisherController: ): yield MainThreadItem(self.stop_publish) + # Add plugin to publish report + self._publish_report.add_plugin_iter(plugin, self._publish_context) + # Trigger callback that new plugin is going to be processed self._trigger_callbacks( self._publish_plugin_changed_callback_refs, plugin diff --git a/openpype/tools/publisher/widgets/assets_widget.py b/openpype/tools/publisher/widgets/assets_widget.py index b8696a2665..984da59c77 100644 --- a/openpype/tools/publisher/widgets/assets_widget.py +++ b/openpype/tools/publisher/widgets/assets_widget.py @@ -3,7 +3,8 @@ import collections from Qt import QtWidgets, QtCore, QtGui from openpype.tools.utils import ( PlaceholderLineEdit, - RecursiveSortFilterProxyModel + RecursiveSortFilterProxyModel, + get_asset_icon, ) from openpype.tools.utils.assets_widget import ( SingleSelectAssetsWidget, @@ -102,11 +103,15 @@ class AssetsHierarchyModel(QtGui.QStandardItemModel): for name in sorted(children_by_name.keys()): child = children_by_name[name] child_id = child["_id"] + has_children = bool(assets_by_parent_id.get(child_id)) + icon = get_asset_icon(child, has_children) + item = QtGui.QStandardItem(name) item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable ) + item.setData(icon, QtCore.Qt.DecorationRole) item.setData(child_id, ASSET_ID_ROLE) item.setData(name, ASSET_NAME_ROLE) diff --git a/openpype/tools/publisher/widgets/create_dialog.py b/openpype/tools/publisher/widgets/create_dialog.py index c5b77eca8b..27ce97955a 100644 --- a/openpype/tools/publisher/widgets/create_dialog.py +++ b/openpype/tools/publisher/widgets/create_dialog.py @@ -8,12 +8,14 @@ try: except Exception: commonmark = None from Qt import QtWidgets, QtCore, QtGui - +from openpype.lib import TaskNotSetError from openpype.pipeline.create import ( CreatorError, SUBSET_NAME_ALLOWED_SYMBOLS ) +from openpype.tools.utils import ErrorMessageBox + from .widgets import IconValuePixmapLabel from .assets_widget import CreateDialogAssetsWidget from .tasks_widget import CreateDialogTasksWidget @@ -27,7 +29,7 @@ from ..constants import ( SEPARATORS = ("---separator---", "---") -class CreateErrorMessageBox(QtWidgets.QDialog): +class CreateErrorMessageBox(ErrorMessageBox): def __init__( self, creator_label, @@ -35,24 +37,38 @@ class CreateErrorMessageBox(QtWidgets.QDialog): asset_name, exc_msg, formatted_traceback, - parent=None + parent ): - super(CreateErrorMessageBox, self).__init__(parent) - self.setWindowTitle("Creation failed") - 
self.setFocusPolicy(QtCore.Qt.StrongFocus) - if not parent: - self.setWindowFlags( - self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint - ) + self._creator_label = creator_label + self._subset_name = subset_name + self._asset_name = asset_name + self._exc_msg = exc_msg + self._formatted_traceback = formatted_traceback + super(CreateErrorMessageBox, self).__init__("Creation failed", parent) - body_layout = QtWidgets.QVBoxLayout(self) - - main_label = ( + def _create_top_widget(self, parent_widget): + label_widget = QtWidgets.QLabel(parent_widget) + label_widget.setText( "Failed to create" ) - main_label_widget = QtWidgets.QLabel(main_label, self) - body_layout.addWidget(main_label_widget) + return label_widget + def _get_report_data(self): + report_message = ( + "{creator}: Failed to create Subset: \"{subset}\"" + " in Asset: \"{asset}\"" + "\n\nError: {message}" + ).format( + creator=self._creator_label, + subset=self._subset_name, + asset=self._asset_name, + message=self._exc_msg, + ) + if self._formatted_traceback: + report_message += "\n\n{}".format(self._formatted_traceback) + return [report_message] + + def _create_content(self, content_layout): item_name_template = ( "Creator: {}
" "Subset: {}
" @@ -61,116 +77,124 @@ class CreateErrorMessageBox(QtWidgets.QDialog): exc_msg_template = "{}" line = self._create_line() - body_layout.addWidget(line) + content_layout.addWidget(line) - item_name = item_name_template.format( - creator_label, subset_name, asset_name - ) - item_name_widget = QtWidgets.QLabel( - item_name.replace("\n", "
"), self - ) - body_layout.addWidget(item_name_widget) - - exc_msg = exc_msg_template.format(exc_msg.replace("\n", "
")) - message_label_widget = QtWidgets.QLabel(exc_msg, self) - body_layout.addWidget(message_label_widget) - - if formatted_traceback: - tb_widget = QtWidgets.QLabel( - formatted_traceback.replace("\n", "
"), self + item_name_widget = QtWidgets.QLabel(self) + item_name_widget.setText( + item_name_template.format( + self._creator_label, self._subset_name, self._asset_name ) - tb_widget.setTextInteractionFlags( - QtCore.Qt.TextBrowserInteraction - ) - body_layout.addWidget(tb_widget) - - footer_widget = QtWidgets.QWidget(self) - footer_layout = QtWidgets.QHBoxLayout(footer_widget) - button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Vertical) - button_box.setStandardButtons( - QtWidgets.QDialogButtonBox.StandardButton.Ok ) - button_box.accepted.connect(self._on_accept) - footer_layout.addWidget(button_box, alignment=QtCore.Qt.AlignRight) - body_layout.addWidget(footer_widget) + content_layout.addWidget(item_name_widget) - def _on_accept(self): - self.close() + message_label_widget = QtWidgets.QLabel(self) + message_label_widget.setText( + exc_msg_template.format(self.convert_text_for_html(self._exc_msg)) + ) + content_layout.addWidget(message_label_widget) - def _create_line(self): - line = QtWidgets.QFrame(self) - line.setFixedHeight(2) - line.setFrameShape(QtWidgets.QFrame.HLine) - line.setFrameShadow(QtWidgets.QFrame.Sunken) - return line + if self._formatted_traceback: + line_widget = self._create_line() + tb_widget = self._create_traceback_widget( + self._formatted_traceback + ) + content_layout.addWidget(line_widget) + content_layout.addWidget(tb_widget) # TODO add creator identifier/label to details -class CreatorDescriptionWidget(QtWidgets.QWidget): +class CreatorShortDescWidget(QtWidgets.QWidget): def __init__(self, parent=None): - super(CreatorDescriptionWidget, self).__init__(parent=parent) + super(CreatorShortDescWidget, self).__init__(parent=parent) + # --- Short description widget --- icon_widget = IconValuePixmapLabel(None, self) icon_widget.setObjectName("FamilyIconLabel") - family_label = QtWidgets.QLabel("family") + # --- Short description inputs --- + short_desc_input_widget = QtWidgets.QWidget(self) + + family_label = QtWidgets.QLabel(short_desc_input_widget) family_label.setAlignment( QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft ) - description_label = QtWidgets.QLabel("description") + description_label = QtWidgets.QLabel(short_desc_input_widget) description_label.setAlignment( QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft ) - detail_description_widget = QtWidgets.QTextEdit(self) - detail_description_widget.setObjectName("InfoText") - detail_description_widget.setTextInteractionFlags( - QtCore.Qt.TextBrowserInteraction + short_desc_input_layout = QtWidgets.QVBoxLayout( + short_desc_input_widget ) + short_desc_input_layout.setSpacing(0) + short_desc_input_layout.addWidget(family_label) + short_desc_input_layout.addWidget(description_label) + # -------------------------------- - label_layout = QtWidgets.QVBoxLayout() - label_layout.setSpacing(0) - label_layout.addWidget(family_label) - label_layout.addWidget(description_label) - - top_layout = QtWidgets.QHBoxLayout() - top_layout.setContentsMargins(0, 0, 0, 0) - top_layout.addWidget(icon_widget, 0) - top_layout.addLayout(label_layout, 1) - - layout = QtWidgets.QVBoxLayout(self) + layout = QtWidgets.QHBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) - layout.addLayout(top_layout, 0) - layout.addWidget(detail_description_widget, 1) + layout.addWidget(icon_widget, 0) + layout.addWidget(short_desc_input_widget, 1) + # -------------------------------- - self.icon_widget = icon_widget - self.family_label = family_label - self.description_label = description_label - self.detail_description_widget = detail_description_widget + 
self._icon_widget = icon_widget + self._family_label = family_label + self._description_label = description_label def set_plugin(self, plugin=None): if not plugin: - self.icon_widget.set_icon_def(None) - self.family_label.setText("") - self.description_label.setText("") - self.detail_description_widget.setPlainText("") + self._icon_widget.set_icon_def(None) + self._family_label.setText("") + self._description_label.setText("") return plugin_icon = plugin.get_icon() description = plugin.get_description() or "" - detailed_description = plugin.get_detail_description() or "" - self.icon_widget.set_icon_def(plugin_icon) - self.family_label.setText("{}".format(plugin.family)) - self.family_label.setTextInteractionFlags(QtCore.Qt.NoTextInteraction) - self.description_label.setText(description) + self._icon_widget.set_icon_def(plugin_icon) + self._family_label.setText("{}".format(plugin.family)) + self._family_label.setTextInteractionFlags(QtCore.Qt.NoTextInteraction) + self._description_label.setText(description) - if commonmark: - html = commonmark.commonmark(detailed_description) - self.detail_description_widget.setHtml(html) + +class HelpButton(QtWidgets.QPushButton): + resized = QtCore.Signal() + + def __init__(self, *args, **kwargs): + super(HelpButton, self).__init__(*args, **kwargs) + self.setObjectName("CreateDialogHelpButton") + + self._expanded = None + self.set_expanded() + + def set_expanded(self, expanded=None): + if self._expanded is expanded: + if expanded is not None: + return + expanded = False + self._expanded = expanded + if expanded: + text = "<" else: - self.detail_description_widget.setMarkdown(detailed_description) + text = "?" + self.setText(text) + + self._update_size() + + def _update_size(self): + new_size = self.minimumSizeHint() + if self.size() != new_size: + self.resize(new_size) + self.resized.emit() + + def showEvent(self, event): + super(HelpButton, self).showEvent(event) + self._update_size() + + def resizeEvent(self, event): + super(HelpButton, self).resizeEvent(event) + self._update_size() class CreateDialog(QtWidgets.QDialog): @@ -201,7 +225,7 @@ class CreateDialog(QtWidgets.QDialog): self._prereq_available = False - self.message_dialog = None + self._message_dialog = None name_pattern = "^[{}]*$".format(SUBSET_NAME_ALLOWED_SYMBOLS) self._name_pattern = name_pattern @@ -218,13 +242,7 @@ class CreateDialog(QtWidgets.QDialog): context_layout.addWidget(assets_widget, 2) context_layout.addWidget(tasks_widget, 1) - # Precreate attributes widgets - pre_create_widget = PreCreateWidget(self) - - # TODO add HELP button - creator_description_widget = CreatorDescriptionWidget(self) - creator_description_widget.setVisible(False) - + # --- Creators view --- creators_view = QtWidgets.QListView(self) creators_model = QtGui.QStandardItemModel() creators_view.setModel(creators_model) @@ -263,24 +281,65 @@ class CreateDialog(QtWidgets.QDialog): mid_layout.addWidget(creators_view, 1) mid_layout.addLayout(form_layout, 0) mid_layout.addWidget(create_btn, 0) + # ------------ + + # --- Creator short info and attr defs --- + creator_attrs_widget = QtWidgets.QWidget(self) + + creator_short_desc_widget = CreatorShortDescWidget( + creator_attrs_widget + ) + + separator_widget = QtWidgets.QWidget(self) + separator_widget.setObjectName("Separator") + separator_widget.setMinimumHeight(2) + separator_widget.setMaximumHeight(2) + + # Precreate attributes widget + pre_create_widget = PreCreateWidget(creator_attrs_widget) + + creator_attrs_layout = QtWidgets.QVBoxLayout(creator_attrs_widget) 
+ creator_attrs_layout.setContentsMargins(0, 0, 0, 0) + creator_attrs_layout.addWidget(creator_short_desc_widget, 0) + creator_attrs_layout.addWidget(separator_widget, 0) + creator_attrs_layout.addWidget(pre_create_widget, 1) + # ------------------------------------- + + # --- Detailed information about creator --- + # Detailed description of creator + detail_description_widget = QtWidgets.QTextEdit(self) + detail_description_widget.setObjectName("InfoText") + detail_description_widget.setTextInteractionFlags( + QtCore.Qt.TextBrowserInteraction + ) + detail_description_widget.setVisible(False) + # ------------------------------------------- splitter_widget = QtWidgets.QSplitter(self) splitter_widget.addWidget(context_widget) splitter_widget.addWidget(mid_widget) - splitter_widget.addWidget(pre_create_widget) + splitter_widget.addWidget(creator_attrs_widget) + splitter_widget.addWidget(detail_description_widget) splitter_widget.setStretchFactor(0, 1) splitter_widget.setStretchFactor(1, 1) splitter_widget.setStretchFactor(2, 1) + splitter_widget.setStretchFactor(3, 1) layout = QtWidgets.QHBoxLayout(self) layout.addWidget(splitter_widget, 1) + # Floating help button + help_btn = HelpButton(self) + prereq_timer = QtCore.QTimer() prereq_timer.setInterval(50) prereq_timer.setSingleShot(True) prereq_timer.timeout.connect(self._on_prereq_timer) + help_btn.clicked.connect(self._on_help_btn) + help_btn.resized.connect(self._on_help_btn_resize) + create_btn.clicked.connect(self._on_create) variant_input.returnPressed.connect(self._on_create) variant_input.textChanged.connect(self._on_variant_change) @@ -298,12 +357,9 @@ class CreateDialog(QtWidgets.QDialog): self._splitter_widget = splitter_widget - self._pre_create_widget = pre_create_widget - self._context_widget = context_widget self._assets_widget = assets_widget self._tasks_widget = tasks_widget - self.creator_description_widget = creator_description_widget self.subset_name_input = subset_name_input @@ -316,6 +372,11 @@ class CreateDialog(QtWidgets.QDialog): self.creators_view = creators_view self.create_btn = create_btn + self._creator_short_desc_widget = creator_short_desc_widget + self._pre_create_widget = pre_create_widget + self._detail_description_widget = detail_description_widget + self._help_btn = help_btn + self._prereq_timer = prereq_timer self._first_show = True @@ -509,10 +570,61 @@ class CreateDialog(QtWidgets.QDialog): identifier = new_index.data(CREATOR_IDENTIFIER_ROLE) self._set_creator(identifier) + def _update_help_btn(self): + pos_x = self.width() - self._help_btn.width() + point = self._creator_short_desc_widget.rect().topRight() + mapped_point = self._creator_short_desc_widget.mapTo(self, point) + pos_y = mapped_point.y() + self._help_btn.move(max(0, pos_x), max(0, pos_y)) + + def _on_help_btn_resize(self): + self._update_help_btn() + + def _on_help_btn(self): + final_size = self.size() + cur_sizes = self._splitter_widget.sizes() + spacing = self._splitter_widget.handleWidth() + + sizes = [] + for idx, value in enumerate(cur_sizes): + if idx < 3: + sizes.append(value) + + now_visible = self._detail_description_widget.isVisible() + if now_visible: + width = final_size.width() - ( + spacing + self._detail_description_widget.width() + ) + + else: + last_size = self._detail_description_widget.sizeHint().width() + width = final_size.width() + spacing + last_size + sizes.append(last_size) + + final_size.setWidth(width) + + self._detail_description_widget.setVisible(not now_visible) + self._splitter_widget.setSizes(sizes) + 
self.resize(final_size) + + self._help_btn.set_expanded(not now_visible) + + def _set_creator_detailed_text(self, creator): + if not creator: + self._detail_description_widget.setPlainText("") + return + detailed_description = creator.get_detail_description() or "" + if commonmark: + html = commonmark.commonmark(detailed_description) + self._detail_description_widget.setHtml(html) + else: + self._detail_description_widget.setMarkdown(detailed_description) + def _set_creator(self, identifier): creator = self.controller.manual_creators.get(identifier) - self.creator_description_widget.set_plugin(creator) + self._creator_short_desc_widget.set_plugin(creator) + self._set_creator_detailed_text(creator) self._pre_create_widget.set_plugin(creator) self._selected_creator = creator @@ -566,10 +678,9 @@ class CreateDialog(QtWidgets.QDialog): if variant_value is None: variant_value = self.variant_input.text() - match = self._compiled_name_pattern.match(variant_value) - valid = bool(match) - self.create_btn.setEnabled(valid) - if not valid: + self.create_btn.setEnabled(True) + if not self._compiled_name_pattern.match(variant_value): + self.create_btn.setEnabled(False) self._set_variant_state_property("invalid") self.subset_name_input.setText("< Invalid variant >") return @@ -579,9 +690,16 @@ class CreateDialog(QtWidgets.QDialog): asset_doc = copy.deepcopy(self._asset_doc) # Calculate subset name with Creator plugin - subset_name = self._selected_creator.get_subset_name( - variant_value, task_name, asset_doc, project_name - ) + try: + subset_name = self._selected_creator.get_subset_name( + variant_value, task_name, asset_doc, project_name + ) + except TaskNotSetError: + self.create_btn.setEnabled(False) + self._set_variant_state_property("invalid") + self.subset_name_input.setText("< Missing task >") + return + self.subset_name_input.setText(subset_name) self._validate_subset_name(subset_name, variant_value) @@ -666,8 +784,14 @@ class CreateDialog(QtWidgets.QDialog): if self._last_pos is not None: self.move(self._last_pos) + self._update_help_btn() + self.refresh() + def resizeEvent(self, event): + super(CreateDialog, self).resizeEvent(event) + self._update_help_btn() + def _on_create(self): indexes = self.creators_view.selectedIndexes() if not indexes or len(indexes) > 1: @@ -694,14 +818,18 @@ class CreateDialog(QtWidgets.QDialog): "family": family } - error_info = None + error_msg = None + formatted_traceback = None try: self.controller.create( - creator_identifier, subset_name, instance_data, pre_create_data + creator_identifier, + subset_name, + instance_data, + pre_create_data ) except CreatorError as exc: - error_info = (str(exc), None) + error_msg = str(exc) # Use bare except because some hosts raise their exceptions that # do not inherit from python's `BaseException` @@ -710,12 +838,17 @@ class CreateDialog(QtWidgets.QDialog): formatted_traceback = "".join(traceback.format_exception( exc_type, exc_value, exc_traceback )) - error_info = (str(exc_value), formatted_traceback) + error_msg = str(exc_value) - if error_info: + if error_msg is not None: box = CreateErrorMessageBox( - creator_label, subset_name, asset_name, *error_info + creator_label, + subset_name, + asset_name, + error_msg, + formatted_traceback, + parent=self ) box.show() # Store dialog so is not garbage collected before is shown - self.message_dialog = box + self._message_dialog = box diff --git a/openpype/tools/publisher/widgets/list_view_widgets.py b/openpype/tools/publisher/widgets/list_view_widgets.py index 
23a86cd070..6bddaf66c8 100644 --- a/openpype/tools/publisher/widgets/list_view_widgets.py +++ b/openpype/tools/publisher/widgets/list_view_widgets.py @@ -467,12 +467,22 @@ class InstanceListView(AbstractInstanceView): else: active = False + group_names = set() for instance_id in selected_instance_ids: widget = self._widgets_by_id.get(instance_id) - if widget is not None: - widget.set_active(active) + if widget is None: + continue + + widget.set_active(active) + group_name = self._group_by_instance_id.get(instance_id) + if group_name is not None: + group_names.add(group_name) + + for group_name in group_names: + self._update_group_checkstate(group_name) def _update_group_checkstate(self, group_name): + """Update checkstate of one group.""" widget = self._group_widgets.get(group_name) if widget is None: return diff --git a/openpype/tools/publisher/widgets/tasks_widget.py b/openpype/tools/publisher/widgets/tasks_widget.py index a0b3a340ae..aa239f6334 100644 --- a/openpype/tools/publisher/widgets/tasks_widget.py +++ b/openpype/tools/publisher/widgets/tasks_widget.py @@ -1,6 +1,7 @@ from Qt import QtCore, QtGui from openpype.tools.utils.tasks_widget import TasksWidget, TASK_NAME_ROLE +from openpype.tools.utils.lib import get_default_task_icon class TasksModel(QtGui.QStandardItemModel): @@ -17,9 +18,10 @@ class TasksModel(QtGui.QStandardItemModel): controller (PublisherController): Controller which handles creation and publishing. """ - def __init__(self, controller): + def __init__(self, controller, allow_empty_task=False): super(TasksModel, self).__init__() + self._allow_empty_task = allow_empty_task self._controller = controller self._items_by_name = {} self._asset_names = [] @@ -70,8 +72,14 @@ class TasksModel(QtGui.QStandardItemModel): task_name (str): Name of task which should be available in asset's tasks. 
""" - task_names = self._task_names_by_asset_name.get(asset_name) - if task_names and task_name in task_names: + if asset_name not in self._task_names_by_asset_name: + return False + + if self._allow_empty_task and not task_name: + return True + + task_names = self._task_names_by_asset_name[asset_name] + if task_name in task_names: return True return False @@ -92,6 +100,8 @@ class TasksModel(QtGui.QStandardItemModel): new_task_names = self.get_intersection_of_tasks( task_names_by_asset_name ) + if self._allow_empty_task: + new_task_names.add("") old_task_names = set(self._items_by_name.keys()) if new_task_names == old_task_names: return @@ -109,9 +119,13 @@ class TasksModel(QtGui.QStandardItemModel): item = QtGui.QStandardItem(task_name) item.setData(task_name, TASK_NAME_ROLE) + if task_name: + item.setData(get_default_task_icon(), QtCore.Qt.DecorationRole) self._items_by_name[task_name] = item new_items.append(item) - root_item.appendRows(new_items) + + if new_items: + root_item.appendRows(new_items) def headerData(self, section, orientation, role=None): if role is None: diff --git a/openpype/tools/publisher/widgets/widgets.py b/openpype/tools/publisher/widgets/widgets.py index fb1f0e54aa..5ced469b59 100644 --- a/openpype/tools/publisher/widgets/widgets.py +++ b/openpype/tools/publisher/widgets/widgets.py @@ -4,9 +4,9 @@ import re import copy import collections from Qt import QtWidgets, QtCore, QtGui +import qtawesome -from avalon.vendor import qtawesome - +from openpype.lib import TaskNotSetError from openpype.widgets.attribute_defs import create_widget_for_attr_def from openpype.tools import resources from openpype.tools.flickcharm import FlickCharm @@ -471,6 +471,28 @@ class AssetsField(BaseClickableFrame): self.set_selected_items(self._origin_value) +class TasksComboboxProxy(QtCore.QSortFilterProxyModel): + def __init__(self, *args, **kwargs): + super(TasksComboboxProxy, self).__init__(*args, **kwargs) + self._filter_empty = False + + def set_filter_empty(self, filter_empty): + if self._filter_empty is filter_empty: + return + self._filter_empty = filter_empty + self.invalidate() + + def filterAcceptsRow(self, source_row, parent_index): + if self._filter_empty: + model = self.sourceModel() + source_index = model.index( + source_row, self.filterKeyColumn(), parent_index + ) + if not source_index.data(QtCore.Qt.DisplayRole): + return False + return True + + class TasksCombobox(QtWidgets.QComboBox): """Combobox to show tasks for selected instances. 
@@ -490,13 +512,16 @@ class TasksCombobox(QtWidgets.QComboBox): delegate = QtWidgets.QStyledItemDelegate() self.setItemDelegate(delegate) - model = TasksModel(controller) - self.setModel(model) + model = TasksModel(controller, True) + proxy_model = TasksComboboxProxy() + proxy_model.setSourceModel(model) + self.setModel(proxy_model) self.currentIndexChanged.connect(self._on_index_change) self._delegate = delegate self._model = model + self._proxy_model = proxy_model self._origin_value = [] self._origin_selection = [] self._selected_items = [] @@ -507,6 +532,14 @@ class TasksCombobox(QtWidgets.QComboBox): self._text = None + def set_invalid_empty_task(self, invalid=True): + self._proxy_model.set_filter_empty(invalid) + if invalid: + self._set_is_valid(False) + self.set_text("< One or more subsets require Task selected >") + else: + self.set_text(None) + def set_multiselection_text(self, text): """Change text shown when multiple different tasks are in context.""" self._multiselection_text = text @@ -596,6 +629,8 @@ class TasksCombobox(QtWidgets.QComboBox): self._ignore_index_change = True self._model.set_asset_names(asset_names) + self._proxy_model.set_filter_empty(False) + self._proxy_model.sort(0) self._ignore_index_change = False @@ -641,6 +676,9 @@ class TasksCombobox(QtWidgets.QComboBox): asset_task_combinations (list): List of tuples. Each item in the list contain asset name and task name. """ + self._proxy_model.set_filter_empty(False) + self._proxy_model.sort(0) + if asset_task_combinations is None: asset_task_combinations = [] @@ -932,7 +970,7 @@ class GlobalAttrsWidget(QtWidgets.QWidget): family_value_widget.set_value() subset_value_widget.set_value() - submit_btn = QtWidgets.QPushButton("Submit", self) + submit_btn = QtWidgets.QPushButton("Confirm", self) cancel_btn = QtWidgets.QPushButton("Cancel", self) submit_btn.setEnabled(False) cancel_btn.setEnabled(False) @@ -944,7 +982,7 @@ class GlobalAttrsWidget(QtWidgets.QWidget): btns_layout.addWidget(cancel_btn) main_layout = QtWidgets.QFormLayout(self) - main_layout.addRow("Name", variant_input) + main_layout.addRow("Variant", variant_input) main_layout.addRow("Asset", asset_value_widget) main_layout.addRow("Task", task_value_widget) main_layout.addRow("Family", family_value_widget) @@ -998,7 +1036,33 @@ class GlobalAttrsWidget(QtWidgets.QWidget): project_name = self.controller.project_name subset_names = set() + invalid_tasks = False for instance in self._current_instances: + new_variant_value = instance.get("variant") + new_asset_name = instance.get("asset") + new_task_name = instance.get("task") + if variant_value is not None: + new_variant_value = variant_value + + if asset_name is not None: + new_asset_name = asset_name + + if task_name is not None: + new_task_name = task_name + + asset_doc = asset_docs_by_name[new_asset_name] + + try: + new_subset_name = instance.creator.get_subset_name( + new_variant_value, new_task_name, asset_doc, project_name + ) + except TaskNotSetError: + invalid_tasks = True + instance.set_task_invalid(True) + subset_names.add(instance["subset"]) + continue + + subset_names.add(new_subset_name) if variant_value is not None: instance["variant"] = variant_value @@ -1007,25 +1071,18 @@ class GlobalAttrsWidget(QtWidgets.QWidget): instance.set_asset_invalid(False) if task_name is not None: - instance["task"] = task_name + instance["task"] = task_name or None instance.set_task_invalid(False) - new_variant_value = instance.get("variant") - new_asset_name = instance.get("asset") - new_task_name = 
instance.get("task") - - asset_doc = asset_docs_by_name[new_asset_name] - - new_subset_name = instance.creator.get_subset_name( - new_variant_value, new_task_name, asset_doc, project_name - ) - subset_names.add(new_subset_name) instance["subset"] = new_subset_name + if invalid_tasks: + self.task_value_widget.set_invalid_empty_task() + self.subset_value_widget.set_value(subset_names) self._set_btns_enabled(False) - self._set_btns_visible(False) + self._set_btns_visible(invalid_tasks) self.instance_context_changed.emit() @@ -1098,7 +1155,7 @@ class GlobalAttrsWidget(QtWidgets.QWidget): variants.add(instance.get("variant") or self.unknown_value) families.add(instance.get("family") or self.unknown_value) asset_name = instance.get("asset") or self.unknown_value - task_name = instance.get("task") or self.unknown_value + task_name = instance.get("task") or "" asset_names.add(asset_name) asset_task_combinations.append((asset_name, task_name)) subset_names.add(instance.get("subset") or self.unknown_value) diff --git a/openpype/tools/pyblish_pype/control.py b/openpype/tools/pyblish_pype/control.py index 6f89952c22..f657936b79 100644 --- a/openpype/tools/pyblish_pype/control.py +++ b/openpype/tools/pyblish_pype/control.py @@ -389,6 +389,9 @@ class Controller(QtCore.QObject): new_current_group_order ) + # Force update to the current state + self._set_state_by_order() + if self.collect_state == 0: self.collect_state = 1 self._current_state = ( diff --git a/openpype/tools/pyblish_pype/model.py b/openpype/tools/pyblish_pype/model.py index 0faadb5940..2931a379b3 100644 --- a/openpype/tools/pyblish_pype/model.py +++ b/openpype/tools/pyblish_pype/model.py @@ -29,10 +29,9 @@ import pyblish from . import settings, util from .awesome import tags as awesome -import Qt from Qt import QtCore, QtGui +import qtawesome from six import text_type -from .vendor import qtawesome from .constants import PluginStates, InstanceStates, GroupStates, Roles from openpype.api import get_system_settings diff --git a/openpype/tools/sceneinventory/model.py b/openpype/tools/sceneinventory/model.py index 6435e5c488..7173ae751e 100644 --- a/openpype/tools/sceneinventory/model.py +++ b/openpype/tools/sceneinventory/model.py @@ -4,11 +4,13 @@ import logging from collections import defaultdict from Qt import QtCore, QtGui -from avalon import api, io, style, schema -from avalon.vendor import qtawesome +import qtawesome -from avalon.lib import HeroVersionType +from avalon import api, io, schema +from openpype.pipeline import HeroVersionType +from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, Item +from openpype.modules import ModulesManager from .lib import ( get_site_icons, @@ -16,8 +18,6 @@ from .lib import ( get_progress_for_repre ) -from openpype.modules import ModulesManager - class InventoryModel(TreeModel): """The model for the inventory""" @@ -38,6 +38,8 @@ class InventoryModel(TreeModel): self._hierarchy_view = False + self._default_icon_color = get_default_entity_icon_color() + manager = ModulesManager() sync_server = manager.modules_by_name["sync_server"] self.sync_enabled = sync_server.enabled @@ -131,7 +133,7 @@ class InventoryModel(TreeModel): if role == QtCore.Qt.DecorationRole: if index.column() == 0: # Override color - color = item.get("color", style.colors.default) + color = item.get("color", self._default_icon_color) if item.get("isGroupNode"): # group-item return qtawesome.icon("fa.folder", color=color) if item.get("isNotSet"): diff --git 
a/openpype/tools/sceneinventory/switch_dialog.py b/openpype/tools/sceneinventory/switch_dialog.py index 4946c073d4..0e7b1b759a 100644 --- a/openpype/tools/sceneinventory/switch_dialog.py +++ b/openpype/tools/sceneinventory/switch_dialog.py @@ -1,9 +1,14 @@ import collections import logging from Qt import QtWidgets, QtCore +import qtawesome -from avalon import io, api, pipeline -from avalon.vendor import qtawesome +from avalon import io, pipeline +from openpype.pipeline import ( + discover_loader_plugins, + switch_container, + get_repres_contexts, +) from .widgets import ( ButtonWithMenu, @@ -343,13 +348,13 @@ class SwitchAssetDialog(QtWidgets.QDialog): def _get_loaders(self, repre_ids): repre_contexts = None if repre_ids: - repre_contexts = pipeline.get_repres_contexts(repre_ids) + repre_contexts = get_repres_contexts(repre_ids) if not repre_contexts: return list() available_loaders = [] - for loader_plugin in api.discover(api.Loader): + for loader_plugin in discover_loader_plugins(): # Skip loaders without switch method if not hasattr(loader_plugin, "switch"): continue @@ -1352,7 +1357,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): repre_doc = repres_by_name[container_repre_name] try: - api.switch(container, repre_doc, loader) + switch_container(container, repre_doc, loader) except Exception: msg = ( "Couldn't switch asset." diff --git a/openpype/tools/sceneinventory/view.py b/openpype/tools/sceneinventory/view.py index f55a68df95..c38390c614 100644 --- a/openpype/tools/sceneinventory/view.py +++ b/openpype/tools/sceneinventory/view.py @@ -3,11 +3,16 @@ import logging from functools import partial from Qt import QtWidgets, QtCore +import qtawesome -from avalon import io, api, style -from avalon.vendor import qtawesome -from avalon.lib import HeroVersionType +from avalon import io, api +from openpype import style +from openpype.pipeline import ( + HeroVersionType, + update_container, + remove_container, +) from openpype.modules import ModulesManager from openpype.tools.utils.lib import ( get_progress_for_repre, @@ -24,12 +29,12 @@ DEFAULT_COLOR = "#fb9c15" log = logging.getLogger("SceneInventory") -class SceneInvetoryView(QtWidgets.QTreeView): +class SceneInventoryView(QtWidgets.QTreeView): data_changed = QtCore.Signal() hierarchy_view_changed = QtCore.Signal(bool) def __init__(self, parent=None): - super(SceneInvetoryView, self).__init__(parent=parent) + super(SceneInventoryView, self).__init__(parent=parent) # view settings self.setIndentation(12) @@ -195,7 +200,7 @@ class SceneInvetoryView(QtWidgets.QTreeView): version_name = version_name_by_id.get(version_id) if version_name is not None: try: - api.update(item, version_name) + update_container(item, version_name) except AssertionError: self._show_version_error_dialog( version_name, [item] @@ -223,7 +228,7 @@ class SceneInvetoryView(QtWidgets.QTreeView): def _on_update_to_latest(items): for item in items: try: - api.update(item, -1) + update_container(item, -1) except AssertionError: self._show_version_error_dialog(None, [item]) log.warning("Update failed", exc_info=True) @@ -248,7 +253,7 @@ class SceneInvetoryView(QtWidgets.QTreeView): def _on_update_to_hero(items): for item in items: try: - api.update(item, HeroVersionType(-1)) + update_container(item, HeroVersionType(-1)) except AssertionError: self._show_version_error_dialog('hero', [item]) log.warning("Update failed", exc_info=True) @@ -727,7 +732,7 @@ class SceneInvetoryView(QtWidgets.QTreeView): version = versions_by_label[label] for item in items: try: - api.update(item, 
version) + update_container(item, version) except AssertionError: self._show_version_error_dialog(version, [item]) log.warning("Update failed", exc_info=True) @@ -758,7 +763,7 @@ class SceneInvetoryView(QtWidgets.QTreeView): return for item in items: - api.remove(item) + remove_container(item) self.data_changed.emit() def _show_version_error_dialog(self, version, items): @@ -796,3 +801,40 @@ class SceneInvetoryView(QtWidgets.QTreeView): ).format(version_str) dialog.setText(msg) dialog.exec_() + + def update_all(self): + """Update all items that are currently 'outdated' in the view""" + # Get the source model through the proxy model + model = self.model().sourceModel() + + # Get all items from outdated groups + outdated_items = [] + for index in iter_model_rows(model, + column=0, + include_root=False): + item = index.data(model.ItemRole) + + if not item.get("isGroupNode"): + continue + + # Only the group nodes contain the "highest_version" data and as + # such we find only the groups and take its children. + if not model.outdated(item): + continue + + # Collect all children which we want to update + children = item.children() + outdated_items.extend(children) + + if not outdated_items: + log.info("Nothing to update.") + return + + # Trigger update to latest + for item in outdated_items: + try: + update_container(item, -1) + except AssertionError: + self._show_version_error_dialog(None, [item]) + log.warning("Update failed", exc_info=True) + self.data_changed.emit() diff --git a/openpype/tools/sceneinventory/window.py b/openpype/tools/sceneinventory/window.py index e363a99d07..b40fbb69e4 100644 --- a/openpype/tools/sceneinventory/window.py +++ b/openpype/tools/sceneinventory/window.py @@ -2,7 +2,7 @@ import os import sys from Qt import QtWidgets, QtCore -from avalon.vendor import qtawesome +import qtawesome from avalon import io, api from openpype import style @@ -18,7 +18,7 @@ from .model import ( InventoryModel, FilterProxyModel ) -from .view import SceneInvetoryView +from .view import SceneInventoryView module = sys.modules[__name__] @@ -54,14 +54,21 @@ class SceneInventoryWindow(QtWidgets.QDialog): outdated_only_checkbox.setToolTip("Show outdated files only") outdated_only_checkbox.setChecked(False) + icon = qtawesome.icon("fa.arrow-up", color="white") + update_all_button = QtWidgets.QPushButton(self) + update_all_button.setToolTip("Update all outdated to latest version") + update_all_button.setIcon(icon) + icon = qtawesome.icon("fa.refresh", color="white") refresh_button = QtWidgets.QPushButton(self) + refresh_button.setToolTip("Refresh") refresh_button.setIcon(icon) control_layout = QtWidgets.QHBoxLayout() control_layout.addWidget(filter_label) control_layout.addWidget(text_filter) control_layout.addWidget(outdated_only_checkbox) + control_layout.addWidget(update_all_button) control_layout.addWidget(refresh_button) # endregion control @@ -73,7 +80,7 @@ class SceneInventoryWindow(QtWidgets.QDialog): proxy.setDynamicSortFilter(True) proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) - view = SceneInvetoryView(self) + view = SceneInventoryView(self) view.setModel(proxy) # set some nice default widths for the view @@ -98,11 +105,13 @@ class SceneInventoryWindow(QtWidgets.QDialog): self._on_outdated_state_change ) view.hierarchy_view_changed.connect( - self._on_hiearchy_view_change + self._on_hierarchy_view_change ) view.data_changed.connect(self.refresh) refresh_button.clicked.connect(self.refresh) + update_all_button.clicked.connect(self._on_update_all) + self._update_all_button = 
update_all_button self._outdated_only_checkbox = outdated_only_checkbox self._view = view self._model = model @@ -146,7 +155,7 @@ class SceneInventoryWindow(QtWidgets.QDialog): kwargs["selected"] = self._view._selected self._model.refresh(**kwargs) - def _on_hiearchy_view_change(self, enabled): + def _on_hierarchy_view_change(self, enabled): self._proxy.set_hierarchy_view(enabled) self._model.set_hierarchy_view(enabled) @@ -158,6 +167,9 @@ class SceneInventoryWindow(QtWidgets.QDialog): self._outdated_only_checkbox.isChecked() ) + def _on_update_all(self): + self._view.update_all() + def show(root=None, debug=False, parent=None, items=None): """Display Scene Inventory GUI diff --git a/openpype/tools/settings/settings/base.py b/openpype/tools/settings/settings/base.py index 706e2fdcf0..bd48b3a966 100644 --- a/openpype/tools/settings/settings/base.py +++ b/openpype/tools/settings/settings/base.py @@ -30,6 +30,9 @@ class BaseWidget(QtWidgets.QWidget): if not self.entity.gui_type: self.entity.on_change_callbacks.append(self._on_entity_change) + if self.entity.tooltip: + self.setToolTip(self.entity.tooltip) + self.label_widget = None self.create_ui() diff --git a/openpype/tools/settings/settings/categories.py b/openpype/tools/settings/settings/categories.py index 663d497c36..a5b5cd40f0 100644 --- a/openpype/tools/settings/settings/categories.py +++ b/openpype/tools/settings/settings/categories.py @@ -1,9 +1,9 @@ -import os import sys import traceback import contextlib from enum import Enum from Qt import QtWidgets, QtCore +import qtawesome from openpype.lib import get_openpype_version from openpype.tools.utils import set_style_property @@ -63,7 +63,6 @@ from .item_widgets import ( PathInputWidget ) from .color_widget import ColorWidget -from avalon.vendor import qtawesome class CategoryState(Enum): diff --git a/openpype/tools/settings/settings/widgets.py b/openpype/tools/settings/settings/widgets.py index f793aab057..577c2630ab 100644 --- a/openpype/tools/settings/settings/widgets.py +++ b/openpype/tools/settings/settings/widgets.py @@ -2,7 +2,7 @@ import os import copy import uuid from Qt import QtWidgets, QtCore, QtGui -from avalon.vendor import qtawesome +import qtawesome from avalon.mongodb import ( AvalonMongoConnection, AvalonMongoDB diff --git a/openpype/tools/standalonepublish/widgets/model_asset.py b/openpype/tools/standalonepublish/widgets/model_asset.py index 60afe8f96c..a7316a2aa7 100644 --- a/openpype/tools/standalonepublish/widgets/model_asset.py +++ b/openpype/tools/standalonepublish/widgets/model_asset.py @@ -1,10 +1,15 @@ import logging import collections -from Qt import QtCore, QtGui -from . import TreeModel, Node -from avalon.vendor import qtawesome -from avalon import style +from Qt import QtCore, QtGui +import qtawesome + +from openpype.style import ( + get_default_entity_icon_color, + get_deprecated_entity_font_color, +) + +from . 
import TreeModel, Node log = logging.getLogger(__name__) @@ -49,6 +54,14 @@ class AssetModel(TreeModel): def __init__(self, dbcon, parent=None): super(AssetModel, self).__init__(parent=parent) self.dbcon = dbcon + + self._default_asset_icon_color = QtGui.QColor( + get_default_entity_icon_color() + ) + self._deprecated_asset_font_color = QtGui.QColor( + get_deprecated_entity_font_color() + ) + self.refresh() def _add_hierarchy(self, assets, parent=None, silos=None): @@ -163,7 +176,7 @@ class AssetModel(TreeModel): icon = data.get("icon", None) if icon is None and node.get("type") == "silo": icon = "database" - color = data.get("color", style.colors.default) + color = data.get("color", self._default_asset_icon_color) if icon is None: # Use default icons if no custom one is specified. @@ -189,7 +202,7 @@ class AssetModel(TreeModel): if role == QtCore.Qt.ForegroundRole: # font color if "deprecated" in node.get("tags", []): - return QtGui.QColor(style.colors.light).darker(250) + return QtGui.QColor(self._deprecated_asset_font_color) if role == self.ObjectIdRole: return node.get("_id", None) diff --git a/openpype/tools/standalonepublish/widgets/model_tasks_template.py b/openpype/tools/standalonepublish/widgets/model_tasks_template.py index 476f45391d..648f7ed479 100644 --- a/openpype/tools/standalonepublish/widgets/model_tasks_template.py +++ b/openpype/tools/standalonepublish/widgets/model_tasks_template.py @@ -1,7 +1,9 @@ from Qt import QtCore +import qtawesome + +from openpype.style import get_default_entity_icon_color + from . import Node, TreeModel -from avalon.vendor import qtawesome -from avalon import style class TasksTemplateModel(TreeModel): @@ -14,7 +16,7 @@ class TasksTemplateModel(TreeModel): self.selectable = selectable self.icon = qtawesome.icon( 'fa.calendar-check-o', - color=style.colors.default + color=get_default_entity_icon_color() ) def set_tasks(self, tasks): diff --git a/openpype/tools/standalonepublish/widgets/widget_asset.py b/openpype/tools/standalonepublish/widgets/widget_asset.py index 2886d600bf..e6b74f8f82 100644 --- a/openpype/tools/standalonepublish/widgets/widget_asset.py +++ b/openpype/tools/standalonepublish/widgets/widget_asset.py @@ -1,10 +1,10 @@ import contextlib from Qt import QtWidgets, QtCore +import qtawesome from openpype.tools.utils import PlaceholderLineEdit -from avalon.vendor import qtawesome -from avalon import style +from openpype.style import get_default_tools_icon_color from . import RecursiveSortFilterProxyModel, AssetModel from . import TasksTemplateModel, DeselectableTreeView @@ -165,7 +165,9 @@ class AssetWidget(QtWidgets.QWidget): # Header header = QtWidgets.QHBoxLayout() - icon = qtawesome.icon("fa.refresh", color=style.colors.light) + icon = qtawesome.icon( + "fa.refresh", color=get_default_tools_icon_color() + ) refresh = QtWidgets.QPushButton(icon, "") refresh.setToolTip("Refresh items") diff --git a/openpype/tools/standalonepublish/widgets/widget_family.py b/openpype/tools/standalonepublish/widgets/widget_family.py index ae44899a89..08cd45bbf2 100644 --- a/openpype/tools/standalonepublish/widgets/widget_family.py +++ b/openpype/tools/standalonepublish/widgets/widget_family.py @@ -1,14 +1,11 @@ -import os import re from Qt import QtWidgets, QtCore from . import HelpRole, FamilyRole, ExistsRole, PluginRole, PluginKeyRole from . 
import FamilyDescriptionWidget -from openpype.api import ( - get_project_settings, - Creator -) +from openpype.api import get_project_settings +from openpype.pipeline import LegacyCreator from openpype.lib import TaskNotSetError from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS @@ -390,7 +387,7 @@ class FamilyWidget(QtWidgets.QWidget): sp_settings = settings.get('standalonepublisher', {}) for key, creator_data in sp_settings.get("create", {}).items(): - creator = type(key, (Creator, ), creator_data) + creator = type(key, (LegacyCreator, ), creator_data) label = creator.label or creator.family item = QtWidgets.QListWidgetItem(label) diff --git a/openpype/tools/standalonepublish/widgets/widget_family_desc.py b/openpype/tools/standalonepublish/widgets/widget_family_desc.py index 8c95ddf2e4..79681615b9 100644 --- a/openpype/tools/standalonepublish/widgets/widget_family_desc.py +++ b/openpype/tools/standalonepublish/widgets/widget_family_desc.py @@ -1,7 +1,7 @@ -from Qt import QtWidgets, QtCore, QtGui -from . import FamilyRole, PluginRole -from avalon.vendor import qtawesome import six +from Qt import QtWidgets, QtCore, QtGui +import qtawesome +from . import FamilyRole, PluginRole class FamilyDescriptionWidget(QtWidgets.QWidget): diff --git a/openpype/tools/subsetmanager/window.py b/openpype/tools/subsetmanager/window.py index b7430d0626..a53af52174 100644 --- a/openpype/tools/subsetmanager/window.py +++ b/openpype/tools/subsetmanager/window.py @@ -2,9 +2,9 @@ import os import sys from Qt import QtWidgets, QtCore +import qtawesome from avalon import api -from avalon.vendor import qtawesome from openpype import style from openpype.tools.utils import PlaceholderLineEdit diff --git a/openpype/tools/traypublisher/window.py b/openpype/tools/traypublisher/window.py index 53f8ca450a..d0453c4f23 100644 --- a/openpype/tools/traypublisher/window.py +++ b/openpype/tools/traypublisher/window.py @@ -28,38 +28,50 @@ class StandaloneOverlayWidget(QtWidgets.QFrame): super(StandaloneOverlayWidget, self).__init__(publisher_window) self.setObjectName("OverlayFrame") + middle_frame = QtWidgets.QFrame(self) + middle_frame.setObjectName("ChooseProjectFrame") + + content_widget = QtWidgets.QWidget(middle_frame) + # Create db connection for projects model dbcon = AvalonMongoDB() dbcon.install() - header_label = QtWidgets.QLabel("Choose project", self) + header_label = QtWidgets.QLabel("Choose project", content_widget) header_label.setObjectName("ChooseProjectLabel") # Create project models and view projects_model = ProjectModel(dbcon) projects_proxy = ProjectSortFilterProxy() projects_proxy.setSourceModel(projects_model) - projects_view = QtWidgets.QListView(self) + projects_view = QtWidgets.QListView(content_widget) + projects_view.setObjectName("ChooseProjectView") projects_view.setModel(projects_proxy) projects_view.setEditTriggers( QtWidgets.QAbstractItemView.NoEditTriggers ) - confirm_btn = QtWidgets.QPushButton("Choose", self) + confirm_btn = QtWidgets.QPushButton("Confirm", content_widget) btns_layout = QtWidgets.QHBoxLayout() btns_layout.addStretch(1) btns_layout.addWidget(confirm_btn, 0) - layout = QtWidgets.QGridLayout(self) - layout.addWidget(header_label, 0, 1, alignment=QtCore.Qt.AlignCenter) - layout.addWidget(projects_view, 1, 1) - layout.addLayout(btns_layout, 2, 1) - layout.setColumnStretch(0, 1) - layout.setColumnStretch(1, 0) - layout.setColumnStretch(2, 1) - layout.setRowStretch(0, 0) - layout.setRowStretch(1, 1) - layout.setRowStretch(2, 0) + content_layout = 
QtWidgets.QVBoxLayout(content_widget) + content_layout.setContentsMargins(0, 0, 0, 0) + content_layout.setSpacing(20) + content_layout.addWidget(header_label, 0) + content_layout.addWidget(projects_view, 1) + content_layout.addLayout(btns_layout, 0) + + middle_layout = QtWidgets.QHBoxLayout(middle_frame) + middle_layout.setContentsMargins(30, 30, 10, 10) + middle_layout.addWidget(content_widget) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(10, 10, 10, 10) + main_layout.addStretch(1) + main_layout.addWidget(middle_frame, 2) + main_layout.addStretch(1) projects_view.doubleClicked.connect(self._on_double_click) confirm_btn.clicked.connect(self._on_confirm_click) diff --git a/openpype/tools/utils/__init__.py b/openpype/tools/utils/__init__.py index c15e9f8139..ea1133c442 100644 --- a/openpype/tools/utils/__init__.py +++ b/openpype/tools/utils/__init__.py @@ -16,6 +16,7 @@ from .lib import ( set_style_property, DynamicQThread, qt_app_context, + get_asset_icon, ) from .models import ( @@ -41,6 +42,7 @@ __all__ = ( "set_style_property", "DynamicQThread", "qt_app_context", + "get_asset_icon", "RecursiveSortFilterProxyModel", ) diff --git a/openpype/tools/utils/assets_widget.py b/openpype/tools/utils/assets_widget.py index 17164d9e0f..3d4efcdd4d 100644 --- a/openpype/tools/utils/assets_widget.py +++ b/openpype/tools/utils/assets_widget.py @@ -3,11 +3,12 @@ import collections import Qt from Qt import QtWidgets, QtCore, QtGui +import qtawesome -from avalon import style -from avalon.vendor import qtawesome - -from openpype.style import get_objected_colors +from openpype.style import ( + get_objected_colors, + get_default_tools_icon_color, +) from openpype.tools.flickcharm import FlickCharm from .views import ( @@ -16,7 +17,10 @@ from .views import ( ) from .widgets import PlaceholderLineEdit from .models import RecursiveSortFilterProxyModel -from .lib import DynamicQThread +from .lib import ( + DynamicQThread, + get_asset_icon +) if Qt.__binding__ == "PySide": from PySide.QtGui import QStyleOptionViewItemV4 @@ -508,25 +512,9 @@ class AssetModel(QtGui.QStandardItemModel): item.setData(asset_label, QtCore.Qt.DisplayRole) item.setData(asset_label, ASSET_LABEL_ROLE) - icon_color = asset_data.get("color") or style.colors.default - icon_name = asset_data.get("icon") - if not icon_name: - # Use default icons if no custom one is specified. 
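# Sketch, not patch content: the icon fallback that get_asset_icon() now
# centralizes, written against public qtawesome calls only. The function name
# and the default_color argument below are illustrative assumptions, not
# OpenPype API.
import qtawesome

def asset_icon_sketch(asset_doc, has_children, default_color="#fb9c15"):
    data = asset_doc.get("data", {})
    # An explicit icon/color stored on the asset document wins; otherwise use
    # a closed folder for assets with children and an open folder for leaves.
    icon_name = data.get("icon") or ("folder" if has_children else "folder-o")
    icon_color = data.get("color") or default_color
    return qtawesome.icon("fa.{0}".format(icon_name), color=icon_color)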
- # If it has children show a full folder, otherwise - # show an open folder - if item.rowCount() > 0: - icon_name = "folder" - else: - icon_name = "folder-o" - - try: - # font-awesome key - full_icon_name = "fa.{0}".format(icon_name) - icon = qtawesome.icon(full_icon_name, color=icon_color) - item.setData(icon, QtCore.Qt.DecorationRole) - - except Exception: - pass + has_children = item.rowCount() > 0 + icon = get_asset_icon(asset_doc, has_children) + item.setData(icon, QtCore.Qt.DecorationRole) def _threaded_fetch(self): asset_docs = self._fetch_asset_docs() @@ -602,7 +590,7 @@ class AssetsWidget(QtWidgets.QWidget): view.setModel(proxy) current_asset_icon = qtawesome.icon( - "fa.arrow-down", color=style.colors.light + "fa.arrow-down", color=get_default_tools_icon_color() ) current_asset_btn = QtWidgets.QPushButton(self) current_asset_btn.setIcon(current_asset_icon) @@ -610,7 +598,9 @@ class AssetsWidget(QtWidgets.QWidget): # Hide by default current_asset_btn.setVisible(False) - refresh_icon = qtawesome.icon("fa.refresh", color=style.colors.light) + refresh_icon = qtawesome.icon( + "fa.refresh", color=get_default_tools_icon_color() + ) refresh_btn = QtWidgets.QPushButton(self) refresh_btn.setIcon(refresh_icon) refresh_btn.setToolTip("Refresh items") diff --git a/openpype/tools/utils/delegates.py b/openpype/tools/utils/delegates.py index 4ec6079bb7..d3718b1734 100644 --- a/openpype/tools/utils/delegates.py +++ b/openpype/tools/utils/delegates.py @@ -6,7 +6,7 @@ import numbers import Qt from Qt import QtWidgets, QtGui, QtCore -from avalon.lib import HeroVersionType +from openpype.pipeline import HeroVersionType from .models import TreeModel from . import lib diff --git a/openpype/tools/utils/host_tools.py b/openpype/tools/utils/host_tools.py index 6ce9e818d9..2d9733ec94 100644 --- a/openpype/tools/utils/host_tools.py +++ b/openpype/tools/utils/host_tools.py @@ -224,20 +224,18 @@ class HostToolsHelper: def get_look_assigner_tool(self, parent): """Create, cache and return look assigner tool window.""" if self._look_assigner_tool is None: - import mayalookassigner + from openpype.tools.mayalookassigner import MayaLookAssignerWindow - mayalookassigner_window = mayalookassigner.App(parent) + mayalookassigner_window = MayaLookAssignerWindow(parent) self._look_assigner_tool = mayalookassigner_window return self._look_assigner_tool def show_look_assigner(self, parent=None): """Look manager is Maya specific tool for look management.""" - from avalon import style with qt_app_context(): look_assigner_tool = self.get_look_assigner_tool(parent) look_assigner_tool.show() - look_assigner_tool.setStyleSheet(style.load_stylesheet()) def get_experimental_tools_dialog(self, parent=None): """Dialog of experimental tools. 
diff --git a/openpype/tools/utils/lib.py b/openpype/tools/utils/lib.py index 01b9e25ef3..93b156bef8 100644 --- a/openpype/tools/utils/lib.py +++ b/openpype/tools/utils/lib.py @@ -4,11 +4,11 @@ import contextlib import collections from Qt import QtWidgets, QtCore, QtGui +import qtawesome import avalon.api -from avalon import style -from avalon.vendor import qtawesome +from openpype.style import get_default_entity_icon_color from openpype.api import ( get_project_settings, Logger @@ -92,12 +92,104 @@ def qt_app_context(): yield app -# Backwards compatibility -application = qt_app_context - - class SharedObjects: jobs = {} + icons = {} + + +def get_qta_icon_by_name_and_color(icon_name, icon_color): + if not icon_name or not icon_color: + return None + + full_icon_name = "{0}-{1}".format(icon_name, icon_color) + if full_icon_name in SharedObjects.icons: + return SharedObjects.icons[full_icon_name] + + variants = [icon_name] + qta_instance = qtawesome._instance() + for key in qta_instance.charmap.keys(): + variants.append("{0}.{1}".format(key, icon_name)) + + icon = None + for variant in variants: + try: + icon = qtawesome.icon(variant, color=icon_color) + break + except Exception: + pass + + SharedObjects.icons[full_icon_name] = icon + return icon + + +def get_project_icon(project_doc): + if project_doc: + icon_name = project_doc.get("data", {}).get("icon") + icon = get_qta_icon_by_name_and_color(icon_name, "white") + if icon: + return icon + + return get_qta_icon_by_name_and_color( + "fa.map", get_default_entity_icon_color() + ) + + +def get_asset_icon_name(asset_doc, has_children=True): + icon_name = asset_doc["data"].get("icon") + if icon_name: + return icon_name + + if has_children: + return "folder" + return "folder-o" + + +def get_asset_icon_color(asset_doc): + icon_color = asset_doc["data"].get("color") + if icon_color: + return icon_color + return get_default_entity_icon_color() + + +def get_asset_icon(asset_doc, has_children=False): + icon_name = get_asset_icon_name(asset_doc, has_children) + icon_color = get_asset_icon_color(asset_doc) + + return get_qta_icon_by_name_and_color(icon_name, icon_color) + + +def get_default_task_icon(color=None): + if color is None: + color = get_default_entity_icon_color() + return get_qta_icon_by_name_and_color("fa.male", color) + + +def get_task_icon(project_doc, asset_doc, task_name): + """Get icon for a task. + + Icon should be defined by task type which is stored on project. 
+ """ + + color = get_default_entity_icon_color() + + tasks_info = asset_doc.get("data", {}).get("tasks") or {} + task_info = tasks_info.get(task_name) or {} + task_icon = task_info.get("icon") + if task_icon: + icon = get_qta_icon_by_name_and_color(task_icon, color) + if icon is not None: + return icon + + task_type = task_info.get("type") + task_types = project_doc["config"]["tasks"] + + task_type_info = task_types.get(task_type) or {} + task_type_icon = task_type_info.get("icon") + if task_type_icon: + icon = get_qta_icon_by_name_and_color(task_icon, color) + if icon is not None: + return icon + return get_default_task_icon(color) def schedule(func, time, channel="default"): @@ -360,6 +452,7 @@ class GroupsConfig: def __init__(self, dbcon): self.dbcon = dbcon self.groups = {} + self._default_group_color = get_default_entity_icon_color() @classmethod def default_group_config(cls): @@ -367,7 +460,7 @@ class GroupsConfig: cls._default_group_config = { "icon": qtawesome.icon( "fa.object-group", - color=style.colors.default + color=get_default_entity_icon_color() ), "order": 0 } @@ -401,7 +494,7 @@ class GroupsConfig: for config in group_configs: name = config["name"] icon = "fa." + config.get("icon", "object-group") - color = config.get("color", style.colors.default) + color = config.get("color", self._default_group_color) order = float(config.get("order", 0)) self.groups[name] = { diff --git a/openpype/tools/utils/tasks_widget.py b/openpype/tools/utils/tasks_widget.py index 2a8a45626c..eab183d5f3 100644 --- a/openpype/tools/utils/tasks_widget.py +++ b/openpype/tools/utils/tasks_widget.py @@ -1,7 +1,8 @@ from Qt import QtWidgets, QtCore, QtGui +import qtawesome -from avalon import style -from avalon.vendor import qtawesome +from openpype.style import get_disabled_entity_icon_color +from openpype.tools.utils.lib import get_task_icon from .views import DeselectableTreeView @@ -14,59 +15,42 @@ TASK_ASSIGNEE_ROLE = QtCore.Qt.UserRole + 4 class TasksModel(QtGui.QStandardItemModel): """A model listing the tasks combined for a list of assets""" + def __init__(self, dbcon, parent=None): super(TasksModel, self).__init__(parent=parent) self.dbcon = dbcon self.setHeaderData( 0, QtCore.Qt.Horizontal, "Tasks", QtCore.Qt.DisplayRole ) - self._default_icon = qtawesome.icon( - "fa.male", - color=style.colors.default - ) + self._no_tasks_icon = qtawesome.icon( "fa.exclamation-circle", - color=style.colors.mid + color=get_disabled_entity_icon_color() ) self._cached_icons = {} - self._project_task_types = {} + self._project_doc = {} self._empty_tasks_item = None self._last_asset_id = None self._loaded_project_name = None def _context_is_valid(self): - if self.dbcon.Session.get("AVALON_PROJECT"): + if self._get_current_project(): return True return False def refresh(self): - self._refresh_task_types() + self._refresh_project_doc() self.set_asset_id(self._last_asset_id) - def _refresh_task_types(self): + def _refresh_project_doc(self): # Get the project configured icons from database - task_types = {} + project_doc = {} if self._context_is_valid(): - project = self.dbcon.find_one( - {"type": "project"}, - {"config.tasks"} - ) - task_types = project["config"].get("tasks") or task_types - self._project_task_types = task_types + project_doc = self.dbcon.find_one({"type": "project"}) - def _try_get_awesome_icon(self, icon_name): - icon = None - if icon_name: - try: - icon = qtawesome.icon( - "fa.{}".format(icon_name), - color=style.colors.default - ) - - except Exception: - pass - return icon + 
self._loaded_project_name = self._get_current_project() + self._project_doc = project_doc def headerData(self, section, orientation, role=None): if role is None: @@ -81,28 +65,8 @@ class TasksModel(QtGui.QStandardItemModel): return super(TasksModel, self).headerData(section, orientation, role) - def _get_icon(self, task_icon, task_type_icon): - if task_icon in self._cached_icons: - return self._cached_icons[task_icon] - - icon = self._try_get_awesome_icon(task_icon) - if icon is not None: - self._cached_icons[task_icon] = icon - return icon - - if task_type_icon in self._cached_icons: - icon = self._cached_icons[task_type_icon] - self._cached_icons[task_icon] = icon - return icon - - icon = self._try_get_awesome_icon(task_type_icon) - if icon is None: - icon = self._default_icon - - self._cached_icons[task_icon] = icon - self._cached_icons[task_type_icon] = icon - - return icon + def _get_current_project(self): + return self.dbcon.Session.get("AVALON_PROJECT") def set_asset_id(self, asset_id): asset_doc = None @@ -127,6 +91,9 @@ class TasksModel(QtGui.QStandardItemModel): Arguments: asset_doc (dict): Asset document from MongoDB. """ + if self._loaded_project_name != self._get_current_project(): + self._refresh_project_doc() + asset_tasks = {} self._last_asset_id = None if asset_doc: @@ -137,13 +104,11 @@ class TasksModel(QtGui.QStandardItemModel): root_item.removeRows(0, root_item.rowCount()) items = [] + for task_name, task_info in asset_tasks.items(): - task_icon = task_info.get("icon") task_type = task_info.get("type") task_order = task_info.get("order") - task_type_info = self._project_task_types.get(task_type) or {} - task_type_icon = task_type_info.get("icon") - icon = self._get_icon(task_icon, task_type_icon) + icon = get_task_icon(self._project_doc, asset_doc, task_name) task_assignees = set() assignees_data = task_info.get("assignees") or [] diff --git a/openpype/tools/utils/views.py b/openpype/tools/utils/views.py index 97aaf622a4..a2f1f15b95 100644 --- a/openpype/tools/utils/views.py +++ b/openpype/tools/utils/views.py @@ -1,5 +1,5 @@ import os -from avalon import style +from openpype.resources import get_image_path from Qt import QtWidgets, QtCore, QtGui, QtSvg @@ -24,11 +24,8 @@ class TreeViewSpinner(QtWidgets.QTreeView): def __init__(self, parent=None): super(TreeViewSpinner, self).__init__(parent=parent) - loading_image_path = os.path.join( - os.path.dirname(os.path.abspath(style.__file__)), - "svg", - "spinner-200.svg" - ) + loading_image_path = get_image_path("spinner-200.svg") + self.spinner = QtSvg.QSvgRenderer(loading_image_path) self.is_loading = False diff --git a/openpype/tools/utils/widgets.py b/openpype/tools/utils/widgets.py index a4e172ea5c..d5ae909be8 100644 --- a/openpype/tools/utils/widgets.py +++ b/openpype/tools/utils/widgets.py @@ -1,8 +1,8 @@ import logging from Qt import QtWidgets, QtCore, QtGui - -from avalon.vendor import qtawesome, qargparse +import qargparse +import qtawesome from openpype.style import ( get_objected_colors, get_style_image_path diff --git a/openpype/tools/workfiles/app.py b/openpype/tools/workfiles/app.py index 3a772a038c..713992bc4b 100644 --- a/openpype/tools/workfiles/app.py +++ b/openpype/tools/workfiles/app.py @@ -2,17 +2,15 @@ import sys import os import re import copy -import getpass import shutil import logging import datetime import Qt from Qt import QtWidgets, QtCore -from avalon import io, api, pipeline +from avalon import io, api from openpype import style -from openpype.pipeline.lib import BeforeWorkfileSave from 
openpype.tools.utils.lib import ( qt_app_context ) @@ -21,13 +19,19 @@ from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from openpype.tools.utils.tasks_widget import TasksWidget from openpype.tools.utils.delegates import PrettyTimeDelegate from openpype.lib import ( + emit_event, Anatomy, get_workfile_doc, create_workfile_doc, save_workfile_data_to_doc, get_workfile_template_key, create_workdir_extra_folders, - get_system_general_anatomy_data + get_workdir_data, + get_last_workfile_with_version +) +from openpype.lib.avalon_context import ( + update_current_task, + compute_session_changes ) from .model import FilesModel from .view import FilesView @@ -44,6 +48,7 @@ def build_workfile_data(session): # Set work file data for template formatting asset_name = session["AVALON_ASSET"] task_name = session["AVALON_TASK"] + host_name = session["AVALON_APP"] project_doc = io.find_one( {"type": "project"}, { @@ -59,42 +64,17 @@ def build_workfile_data(session): "name": asset_name }, { + "name": True, "data.tasks": True, "data.parents": True } ) - - task_type = asset_doc["data"]["tasks"].get(task_name, {}).get("type") - - project_task_types = project_doc["config"]["tasks"] - task_short = project_task_types.get(task_type, {}).get("short_name") - - asset_parents = asset_doc["data"]["parents"] - parent_name = project_doc["name"] - if asset_parents: - parent_name = asset_parents[-1] - - data = { - "project": { - "name": project_doc["name"], - "code": project_doc["data"].get("code") - }, - "asset": asset_name, - "task": { - "name": task_name, - "type": task_type, - "short": task_short, - }, - "parent": parent_name, + data = get_workdir_data(project_doc, asset_doc, task_name, host_name) + data.update({ "version": 1, - "user": getpass.getuser(), "comment": "", "ext": None - } - - # add system general settings anatomy data - system_general_data = get_system_general_anatomy_data() - data.update(system_general_data) + }) return data @@ -461,7 +441,7 @@ class NameWindow(QtWidgets.QDialog): data["ext"] = data["ext"][1:] - version = api.last_workfile_with_version( + version = get_last_workfile_with_version( self.root, template, data, extensions )[1] @@ -489,7 +469,7 @@ class NameWindow(QtWidgets.QDialog): # Log warning if idx == 0: log.warning(( - "BUG: Function `last_workfile_with_version` " + "BUG: Function `get_last_workfile_with_version` " "didn't return last version." )) # Raise exception if even 100 version fallback didn't help @@ -667,7 +647,7 @@ class FilesWidget(QtWidgets.QWidget): session["AVALON_APP"], project_name=session["AVALON_PROJECT"] ) - changes = pipeline.compute_session_changes( + changes = compute_session_changes( session, asset=self._get_asset_doc(), task=self._task_name, @@ -681,7 +661,7 @@ class FilesWidget(QtWidgets.QWidget): """Enter the asset and task session currently selected""" session = api.Session.copy() - changes = pipeline.compute_session_changes( + changes = compute_session_changes( session, asset=self._get_asset_doc(), task=self._task_name, @@ -692,7 +672,7 @@ class FilesWidget(QtWidgets.QWidget): # to avoid any unwanted Task Changed callbacks to be triggered. 
return - api.update_current_task( + update_current_task( asset=self._get_asset_doc(), task=self._task_name, template_key=self.template_key @@ -819,7 +799,11 @@ class FilesWidget(QtWidgets.QWidget): return # Trigger before save event - BeforeWorkfileSave.emit(work_filename, self._workdir_path) + emit_event( + "workfile.save.before", + {"filename": work_filename, "workdir_path": self._workdir_path}, + source="workfiles.tool" + ) # Make sure workfiles root is updated # - this triggers 'workio.work_root(...)' which may change value of @@ -849,7 +833,11 @@ class FilesWidget(QtWidgets.QWidget): api.Session["AVALON_PROJECT"] ) # Trigger after save events - pipeline.emit("after.workfile.save", [filepath]) + emit_event( + "workfile.save.after", + {"filename": work_filename, "workdir_path": self._workdir_path}, + source="workfiles.tool" + ) self.workfile_created.emit(filepath) # Refresh files model diff --git a/openpype/tools/workfiles/model.py b/openpype/tools/workfiles/model.py index 3425cc3df0..e9184842fc 100644 --- a/openpype/tools/workfiles/model.py +++ b/openpype/tools/workfiles/model.py @@ -2,9 +2,13 @@ import os import logging from Qt import QtCore +import qtawesome + +from openpype.style import ( + get_default_entity_icon_color, + get_disabled_entity_icon_color, +) -from avalon import style -from avalon.vendor import qtawesome from openpype.tools.utils.models import TreeModel, Item log = logging.getLogger(__name__) @@ -25,7 +29,10 @@ class FilesModel(TreeModel): self._root = None self._file_extensions = file_extensions self._icons = { - "file": qtawesome.icon("fa.file-o", color=style.colors.default) + "file": qtawesome.icon( + "fa.file-o", + color=get_default_entity_icon_color() + ) } def set_root(self, root): @@ -64,7 +71,10 @@ class FilesModel(TreeModel): "date": None, "filepath": None, "enabled": False, - "icon": qtawesome.icon("fa.times", color=style.colors.mid) + "icon": qtawesome.icon( + "fa.times", + color=get_disabled_entity_icon_color() + ) }) self.add_child(item) self.endResetModel() diff --git a/openpype/vendor/python/common/qargparse.py b/openpype/vendor/python/common/qargparse.py new file mode 100644 index 0000000000..ebde9ae76d --- /dev/null +++ b/openpype/vendor/python/common/qargparse.py @@ -0,0 +1,817 @@ +""" +NOTE: The required `Qt` module has changed to use the one that vendorized. + Remember to change to relative import when updating this. 
+""" + +import re +import logging + +from collections import OrderedDict as odict +from Qt import QtCore, QtWidgets, QtGui +import qtawesome + +__version__ = "0.5.2" +_log = logging.getLogger(__name__) +_type = type # used as argument + +try: + # Python 2 + _basestring = basestring +except NameError: + _basestring = str + + +class QArgumentParser(QtWidgets.QWidget): + """User interface arguments + + Arguments: + arguments (list, optional): Instances of QArgument + description (str, optional): Long-form text of what this parser is for + storage (QSettings, optional): Persistence to disk, providing + value() and setValue() methods + + """ + + changed = QtCore.Signal(QtCore.QObject) # A QArgument + + def __init__(self, + arguments=None, + description=None, + storage=None, + parent=None): + super(QArgumentParser, self).__init__(parent) + self.setAttribute(QtCore.Qt.WA_StyledBackground) + + # Create internal settings + if storage is True: + storage = QtCore.QSettings( + QtCore.QSettings.IniFormat, + QtCore.QSettings.UserScope, + __name__, "QArgparse", + ) + + if storage is not None: + _log.info("Storing settings @ %s" % storage.fileName()) + + arguments = arguments or [] + + assert hasattr(arguments, "__iter__"), "arguments must be iterable" + assert isinstance(storage, (type(None), QtCore.QSettings)), ( + "storage must be of type QSettings" + ) + + layout = QtWidgets.QGridLayout(self) + layout.setRowStretch(999, 1) + + if description: + layout.addWidget(QtWidgets.QLabel(description), 0, 0, 1, 2) + + self._row = 1 + self._storage = storage + self._arguments = odict() + self._desciption = description + + for arg in arguments or []: + self._addArgument(arg) + + self.setStyleSheet(style) + + def setDescription(self, text): + self._desciption.setText(text) + + def addArgument(self, name, type=None, default=None, **kwargs): + # Infer type from default + if type is None and default is not None: + type = _type(default) + + # Default to string + type = type or str + + Argument = { + None: String, + int: Integer, + float: Float, + bool: Boolean, + str: String, + list: Enum, + tuple: Enum, + }.get(type, type) + + arg = Argument(name, default=default, **kwargs) + self._addArgument(arg) + return arg + + def _addArgument(self, arg): + if arg["name"] in self._arguments: + raise ValueError("Duplicate argument '%s'" % arg["name"]) + + if self._storage is not None: + default = self._storage.value(arg["name"]) + + if default: + if isinstance(arg, Boolean): + default = bool({ + None: QtCore.Qt.Unchecked, + + 0: QtCore.Qt.Unchecked, + 1: QtCore.Qt.Checked, + 2: QtCore.Qt.Checked, + + "0": QtCore.Qt.Unchecked, + "1": QtCore.Qt.Checked, + "2": QtCore.Qt.Checked, + + # May be stored as string, if used with IniFormat + "false": QtCore.Qt.Unchecked, + "true": QtCore.Qt.Checked, + }.get(default)) + + arg["default"] = default + + arg.changed.connect(lambda: self.on_changed(arg)) + + label = ( + QtWidgets.QLabel(arg["label"]) + if arg.label + else QtWidgets.QLabel() + ) + widget = arg.create() + icon = qtawesome.icon("fa.refresh", color="white") + reset = QtWidgets.QPushButton(icon, "") # default + reset.setToolTip("Reset") + reset.setProperty("type", "reset") + reset.clicked.connect(lambda: self.on_reset(arg)) + + # Shown on edit + reset.hide() + + for widget in (label, widget): + widget.setToolTip(arg["help"]) + widget.setObjectName(arg["name"]) # useful in CSS + widget.setProperty("type", type(arg).__name__) + widget.setAttribute(QtCore.Qt.WA_StyledBackground) + widget.setEnabled(arg["enabled"]) + + # Align label on 
top of row if widget is over two times heiger + height = (lambda w: w.sizeHint().height()) + label_on_top = height(label) * 2 < height(widget) + alignment = (QtCore.Qt.AlignTop,) if label_on_top else () + + layout = self.layout() + layout.addWidget(label, self._row, 0, *alignment) + layout.addWidget(widget, self._row, 1) + layout.addWidget(reset, self._row, 2, *alignment) + layout.setColumnStretch(1, 1) + + def on_changed(*_): + reset.setVisible(arg["edited"]) + + arg.changed.connect(on_changed) + + self._row += 1 + self._arguments[arg["name"]] = arg + + def clear(self): + assert self._storage, "Cannot clear without persistent storage" + self._storage.clear() + _log.info("Clearing settings @ %s" % self._storage.fileName()) + + def find(self, name): + return self._arguments[name] + + def on_reset(self, arg): + arg.write(arg["default"]) + + def on_changed(self, arg): + arg["edited"] = arg.read() != arg["default"] + self.changed.emit(arg) + + # Optional PEP08 syntax + add_argument = addArgument + + +class QArgument(QtCore.QObject): + """Base class of argument user interface + """ + changed = QtCore.Signal() + + # Provide a left-hand side label for this argument + label = True + # For defining default value for each argument type + default = None + + def __init__(self, name, default=None, **kwargs): + super(QArgument, self).__init__(kwargs.pop("parent", None)) + + kwargs["name"] = name + kwargs["label"] = kwargs.get("label", camel_to_title(name)) + kwargs["default"] = self.default if default is None else default + kwargs["help"] = kwargs.get("help", "") + kwargs["read"] = kwargs.get("read") + kwargs["write"] = kwargs.get("write") + kwargs["enabled"] = bool(kwargs.get("enabled", True)) + kwargs["edited"] = False + + self._data = kwargs + + def __str__(self): + return self["name"] + + def __repr__(self): + return "%s(\"%s\")" % (type(self).__name__, self["name"]) + + def __getitem__(self, key): + return self._data[key] + + def __setitem__(self, key, value): + self._data[key] = value + + def __eq__(self, other): + if isinstance(other, _basestring): + return self["name"] == other + return super(QArgument, self).__eq__(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def create(self): + return QtWidgets.QWidget() + + def read(self): + return self._read() + + def write(self, value): + self._write(value) + self.changed.emit() + + +class Boolean(QArgument): + """Boolean type user interface + + Presented by `QtWidgets.QCheckBox`. 
+ + Arguments: + name (str): The name of argument + label (str, optional): Display name, convert from `name` if not given + help (str, optional): Tool tip message of this argument + default (bool, optional): Argument's default value, default None + enabled (bool, optional): Whether to enable this widget, default True + + """ + def create(self): + widget = QtWidgets.QCheckBox() + widget.clicked.connect(self.changed.emit) + + if isinstance(self, Tristate): + self._read = lambda: widget.checkState() + state = { + 0: QtCore.Qt.Unchecked, + 1: QtCore.Qt.PartiallyChecked, + 2: QtCore.Qt.Checked, + "1": QtCore.Qt.PartiallyChecked, + "0": QtCore.Qt.Unchecked, + "2": QtCore.Qt.Checked, + } + else: + self._read = lambda: bool(widget.checkState()) + state = { + None: QtCore.Qt.Unchecked, + + 0: QtCore.Qt.Unchecked, + 1: QtCore.Qt.Checked, + 2: QtCore.Qt.Checked, + + "0": QtCore.Qt.Unchecked, + "1": QtCore.Qt.Checked, + "2": QtCore.Qt.Checked, + + # May be stored as string, if used with QSettings(..IniFormat) + "false": QtCore.Qt.Unchecked, + "true": QtCore.Qt.Checked, + } + + self._write = lambda value: widget.setCheckState(state[value]) + widget.clicked.connect(self.changed.emit) + + if self["default"] is not None: + self._write(self["default"]) + + return widget + + def read(self): + return self._read() + + +class Tristate(QArgument): + """Not implemented""" + + +class Number(QArgument): + """Base class of numeric type user interface""" + default = 0 + + def create(self): + if isinstance(self, Float): + widget = QtWidgets.QDoubleSpinBox() + widget.setMinimum(self._data.get("min", 0.0)) + widget.setMaximum(self._data.get("max", 99.99)) + else: + widget = QtWidgets.QSpinBox() + widget.setMinimum(self._data.get("min", 0)) + widget.setMaximum(self._data.get("max", 99)) + + widget.editingFinished.connect(self.changed.emit) + self._read = lambda: widget.value() + self._write = lambda value: widget.setValue(value) + + if self["default"] != self.default: + self._write(self["default"]) + + return widget + + +class Integer(Number): + """Integer type user interface + + A subclass of `qargparse.Number`, presented by `QtWidgets.QSpinBox`. + + Arguments: + name (str): The name of argument + label (str, optional): Display name, convert from `name` if not given + help (str, optional): Tool tip message of this argument + default (int, optional): Argument's default value, default 0 + min (int, optional): Argument's minimum value, default 0 + max (int, optional): Argument's maximum value, default 99 + enabled (bool, optional): Whether to enable this widget, default True + + """ + + +class Float(Number): + """Float type user interface + + A subclass of `qargparse.Number`, presented by `QtWidgets.QDoubleSpinBox`. + + Arguments: + name (str): The name of argument + label (str, optional): Display name, convert from `name` if not given + help (str, optional): Tool tip message of this argument + default (float, optional): Argument's default value, default 0.0 + min (float, optional): Argument's minimum value, default 0.0 + max (float, optional): Argument's maximum value, default 99.99 + enabled (bool, optional): Whether to enable this widget, default True + + """ + + +class Range(Number): + """Range type user interface + + A subclass of `qargparse.Number`, not production ready. + + """ + + +class Double3(QArgument): + """Double3 type user interface + + Presented by three `QtWidgets.QLineEdit` widget with `QDoubleValidator` + installed. 
+ + Arguments: + name (str): The name of argument + label (str, optional): Display name, convert from `name` if not given + help (str, optional): Tool tip message of this argument + default (tuple or list, optional): Default (0, 0, 0). + enabled (bool, optional): Whether to enable this widget, default True + + """ + default = (0, 0, 0) + + def create(self): + widget = QtWidgets.QWidget() + layout = QtWidgets.QHBoxLayout(widget) + layout.setContentsMargins(0, 0, 0, 0) + x, y, z = (self.child_arg(layout, i) for i in range(3)) + + self._read = lambda: ( + float(x.text()), float(y.text()), float(z.text())) + self._write = lambda value: [ + w.setText(str(float(v))) for w, v in zip([x, y, z], value)] + + if self["default"] != self.default: + self._write(self["default"]) + + return widget + + def child_arg(self, layout, index): + widget = QtWidgets.QLineEdit() + widget.setValidator(QtGui.QDoubleValidator()) + + default = str(float(self["default"][index])) + widget.setText(default) + + def focusOutEvent(event): + if not widget.text(): + widget.setText(default) # Ensure value exists for `_read` + QtWidgets.QLineEdit.focusOutEvent(widget, event) + widget.focusOutEvent = focusOutEvent + + widget.editingFinished.connect(self.changed.emit) + widget.returnPressed.connect(widget.editingFinished.emit) + + layout.addWidget(widget) + + return widget + + +class String(QArgument): + """String type user interface + + Presented by `QtWidgets.QLineEdit`. + + Arguments: + name (str): The name of argument + label (str, optional): Display name, convert from `name` if not given + help (str, optional): Tool tip message of this argument + default (str, optional): Argument's default value, default None + placeholder (str, optional): Placeholder message for the widget + enabled (bool, optional): Whether to enable this widget, default True + + """ + def __init__(self, *args, **kwargs): + super(String, self).__init__(*args, **kwargs) + self._previous = None + + def create(self): + widget = QtWidgets.QLineEdit() + widget.editingFinished.connect(self.onEditingFinished) + widget.returnPressed.connect(widget.editingFinished.emit) + self._read = lambda: widget.text() + self._write = lambda value: widget.setText(value) + + if isinstance(self, Info): + widget.setReadOnly(True) + widget.setPlaceholderText(self._data.get("placeholder", "")) + + if self["default"] is not None: + self._write(self["default"]) + self._previous = self["default"] + + return widget + + def onEditingFinished(self): + current = self._read() + + if current != self._previous: + self.changed.emit() + self._previous = current + + +class Info(String): + """String type user interface but read-only + + A subclass of `qargparse.String`, presented by `QtWidgets.QLineEdit`. + + Arguments: + name (str): The name of argument + label (str, optional): Display name, convert from `name` if not given + help (str, optional): Tool tip message of this argument + default (str, optional): Argument's default value, default None + enabled (bool, optional): Whether to enable this widget, default True + + """ + + +class Color(String): + """Color type user interface + + A subclass of `qargparse.String`, not production ready. + + """ + + +class Button(QArgument): + """Button type user interface + + Presented by `QtWidgets.QPushButton`. 
+ + Arguments: + name (str): The name of argument + label (str, optional): Display name, convert from `name` if not given + help (str, optional): Tool tip message of this argument + default (bool, optional): Argument's default value, default None + enabled (bool, optional): Whether to enable this widget, default True + + """ + label = False + + def create(self): + widget = QtWidgets.QPushButton(self["label"]) + widget.clicked.connect(self.changed.emit) + + state = [ + QtCore.Qt.Unchecked, + QtCore.Qt.Checked, + ] + + if isinstance(self, Toggle): + widget.setCheckable(True) + if hasattr(widget, "isChecked"): + self._read = lambda: state[int(widget.isChecked())] + self._write = ( + lambda value: widget.setChecked(value) + ) + else: + self._read = lambda: widget.checkState() + self._write = ( + lambda value: widget.setCheckState(state[int(value)]) + ) + else: + self._read = lambda: "clicked" + self._write = lambda value: None + + if self["default"] is not None: + self._write(self["default"]) + + return widget + + +class Toggle(Button): + """Checkable `Button` type user interface + + Presented by `QtWidgets.QPushButton`. + + Arguments: + name (str): The name of argument + label (str, optional): Display name, convert from `name` if not given + help (str, optional): Tool tip message of this argument + default (bool, optional): Argument's default value, default None + enabled (bool, optional): Whether to enable this widget, default True + + """ + + +class InfoList(QArgument): + """String list type user interface + + Presented by `QtWidgets.QListView`, not production ready. + + """ + def __init__(self, name, **kwargs): + kwargs["default"] = kwargs.pop("default", ["Empty"]) + super(InfoList, self).__init__(name, **kwargs) + + def create(self): + class Model(QtCore.QStringListModel): + def data(self, index, role): + return super(Model, self).data(index, role) + + model = QtCore.QStringListModel(self["default"]) + widget = QtWidgets.QListView() + widget.setModel(model) + widget.setEditTriggers(widget.NoEditTriggers) + + self._read = lambda: model.stringList() + self._write = lambda value: model.setStringList(value) + + return widget + + +class Choice(QArgument): + """Argument user interface for selecting one from list + + Presented by `QtWidgets.QListView`. + + Arguments: + name (str): The name of argument + label (str, optional): Display name, convert from `name` if not given + help (str, optional): Tool tip message of this argument + items (list, optional): List of strings for select, default `["Empty"]` + default (str, optional): Default item in `items`, use first of `items` + if not given. 
+ enabled (bool, optional): Whether to enable this widget, default True + + """ + def __init__(self, name, **kwargs): + kwargs["items"] = kwargs.get("items", ["Empty"]) + kwargs["default"] = kwargs.pop("default", kwargs["items"][0]) + super(Choice, self).__init__(name, **kwargs) + + def index(self, value): + """Return numerical equivalent to self.read()""" + return self["items"].index(value) + + def create(self): + def on_changed(selected, deselected): + try: + selected = selected.indexes()[0] + except IndexError: + # At least one item must be selected at all times + selected = deselected.indexes()[0] + + value = selected.data(QtCore.Qt.DisplayRole) + set_current(value) + self.changed.emit() + + def set_current(current): + options = model.stringList() + + if current == "Empty": + index = 0 + else: + for index, member in enumerate(options): + if member == current: + break + else: + raise ValueError( + "%s not a member of %s" % (current, options) + ) + + qindex = model.index(index, 0, QtCore.QModelIndex()) + smodel.setCurrentIndex(qindex, smodel.ClearAndSelect) + self["current"] = options[index] + + def reset(items, default=None): + items = items or ["Empty"] + model.setStringList(items) + set_current(default or items[0]) + + model = QtCore.QStringListModel() + widget = QtWidgets.QListView() + widget.setModel(model) + widget.setEditTriggers(widget.NoEditTriggers) + widget.setSelectionMode(widget.SingleSelection) + smodel = widget.selectionModel() + smodel.selectionChanged.connect(on_changed) + + self._read = lambda: self["current"] + self._write = lambda value: set_current(value) + self.reset = reset + + reset(self["items"], self["default"]) + + return widget + + +class Separator(QArgument): + """Visual separator + + Example: + + item1 + item2 + ------------ + item3 + item4 + + """ + + def create(self): + widget = QtWidgets.QWidget() + + self._read = lambda: None + self._write = lambda value: None + + return widget + + +class Enum(QArgument): + """Argument user interface for selecting one from dropdown list + + Presented by `QtWidgets.QComboBox`. + + Arguments: + name (str): The name of argument + label (str, optional): Display name, convert from `name` if not given + help (str, optional): Tool tip message of this argument + items (list, optional): List of strings for select, default `[]` + default (int, optional): Index of default item, use first of `items` + if not given. 
+ enabled (bool, optional): Whether to enable this widget, default True + + """ + def __init__(self, name, **kwargs): + kwargs["default"] = kwargs.pop("default", 0) + kwargs["items"] = kwargs.get("items", []) + + assert isinstance(kwargs["items"], (tuple, list)), ( + "items must be list" + ) + + super(Enum, self).__init__(name, **kwargs) + + def create(self): + widget = QtWidgets.QComboBox() + widget.addItems(self["items"]) + widget.currentIndexChanged.connect( + lambda index: self.changed.emit()) + + self._read = lambda: widget.currentText() + self._write = lambda value: widget.setCurrentIndex(value) + + if self["default"] is not None: + self._write(self["default"]) + + return widget + + +style = """\ +QWidget { + /* Explicitly specify a size, to account for automatic HDPi */ + font-size: 11px; +} + +*[type="Button"] { + text-align:left; +} + +*[type="Info"] { + background: transparent; + border: none; +} + +QLabel[type="Separator"] { + min-height: 20px; + text-decoration: underline; +} + +QPushButton[type="reset"] { + max-width: 11px; + max-height: 11px; +} + +""" + + +def camelToTitle(text): + """Convert camelCase `text` to Title Case + + Example: + >>> camelToTitle("mixedCase") + "Mixed Case" + >>> camelToTitle("myName") + "My Name" + >>> camelToTitle("you") + "You" + >>> camelToTitle("You") + "You" + >>> camelToTitle("This is That") + "This Is That" + + """ + + return re.sub( + r"((?<=[a-z])[A-Z]|(?= 3 + except: + import thread as _thread # Python < 3 + except ImportError: + import _dummy_thread as _thread + +""" +# Exports only things specified by thread documentation; +# skipping obsolete synonyms allocate(), start_new(), exit_thread(). +__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock', + 'interrupt_main', 'LockType'] + +# A dummy value +TIMEOUT_MAX = 2**31 + +# NOTE: this module can be imported early in the extension building process, +# and so top level imports of other modules should be avoided. Instead, all +# imports are done when needed on a function-by-function basis. Since threads +# are disabled, the import lock should not be an issue anyway (??). + +class error(Exception): + """Dummy implementation of _thread.error.""" + + def __init__(self, *args): + self.args = args + +def start_new_thread(function, args, kwargs={}): + """Dummy implementation of _thread.start_new_thread(). + + Compatibility is maintained by making sure that ``args`` is a + tuple and ``kwargs`` is a dictionary. If an exception is raised + and it is SystemExit (which can be done by _thread.exit()) it is + caught and nothing is done; all other exceptions are printed out + by using traceback.print_exc(). + + If the executed function calls interrupt_main the KeyboardInterrupt will be + raised when the function returns. + + """ + if type(args) != type(tuple()): + raise TypeError("2nd arg must be a tuple") + if type(kwargs) != type(dict()): + raise TypeError("3rd arg must be a dict") + global _main + _main = False + try: + function(*args, **kwargs) + except SystemExit: + pass + except: + import traceback + traceback.print_exc() + _main = True + global _interrupt + if _interrupt: + _interrupt = False + raise KeyboardInterrupt + +def exit(): + """Dummy implementation of _thread.exit().""" + raise SystemExit + +def get_ident(): + """Dummy implementation of _thread.get_ident(). + + Since this module should only be used when _threadmodule is not + available, it is safe to assume that the current process is the + only thread. Thus a constant can be safely returned. 
+ """ + return -1 + +def allocate_lock(): + """Dummy implementation of _thread.allocate_lock().""" + return LockType() + +def stack_size(size=None): + """Dummy implementation of _thread.stack_size().""" + if size is not None: + raise error("setting thread stack size not supported") + return 0 + +class LockType(object): + """Class implementing dummy implementation of _thread.LockType. + + Compatibility is maintained by maintaining self.locked_status + which is a boolean that stores the state of the lock. Pickling of + the lock, though, should not be done since if the _thread module is + then used with an unpickled ``lock()`` from here problems could + occur from this class not having atomic methods. + + """ + + def __init__(self): + self.locked_status = False + + def acquire(self, waitflag=None, timeout=-1): + """Dummy implementation of acquire(). + + For blocking calls, self.locked_status is automatically set to + True and returned appropriately based on value of + ``waitflag``. If it is non-blocking, then the value is + actually checked and not set if it is already acquired. This + is all done so that threading.Condition's assert statements + aren't triggered and throw a little fit. + + """ + if waitflag is None or waitflag: + self.locked_status = True + return True + else: + if not self.locked_status: + self.locked_status = True + return True + else: + if timeout > 0: + import time + time.sleep(timeout) + return False + + __enter__ = acquire + + def __exit__(self, typ, val, tb): + self.release() + + def release(self): + """Release the dummy lock.""" + # XXX Perhaps shouldn't actually bother to test? Could lead + # to problems for complex, threaded code. + if not self.locked_status: + raise error + self.locked_status = False + return True + + def locked(self): + return self.locked_status + +# Used to signal that interrupt_main was called in a "thread" +_interrupt = False +# True when not executing in a "thread" +_main = True + +def interrupt_main(): + """Set _interrupt flag to True to have start_new_thread raise + KeyboardInterrupt upon exiting.""" + if _main: + raise KeyboardInterrupt + else: + global _interrupt + _interrupt = True diff --git a/openpype/vendor/python/python_2/functools32/functools32.py b/openpype/vendor/python/python_2/functools32/functools32.py new file mode 100644 index 0000000000..c44551fac0 --- /dev/null +++ b/openpype/vendor/python/python_2/functools32/functools32.py @@ -0,0 +1,423 @@ +"""functools.py - Tools for working with functions and callable objects +""" +# Python module wrapper for _functools C module +# to allow utilities written in Python to be added +# to the functools module. +# Written by Nick Coghlan +# and Raymond Hettinger +# Copyright (C) 2006-2010 Python Software Foundation. 
+# See C source code for _functools credits/copyright + +__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES', + 'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial'] + +from _functools import partial, reduce +from collections import MutableMapping, namedtuple +from .reprlib32 import recursive_repr as _recursive_repr +from weakref import proxy as _proxy +import sys as _sys +try: + from thread import allocate_lock as Lock +except ImportError: + from ._dummy_thread32 import allocate_lock as Lock + +################################################################################ +### OrderedDict +################################################################################ + +class _Link(object): + __slots__ = 'prev', 'next', 'key', '__weakref__' + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # The sentinel is in self.__hardroot with a weakref proxy in self.__root. + # The prev links are weakref proxies (to prevent circular references). + # Individual links are kept alive by the hard reference in self.__map. + # Those hard references disappear when a key is deleted from an OrderedDict. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__hardroot = _Link() + self.__root = root = _proxy(self.__hardroot) + root.prev = root.next = root + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, + dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + self.__map[key] = link = Link() + root = self.__root + last = root.prev + link.prev, link.next, link.key = last, root, key + last.next = link + root.prev = proxy(link) + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link = self.__map.pop(key) + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root.next + while curr is not root: + yield curr.key + curr = curr.next + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root.prev + while curr is not root: + yield curr.key + curr = curr.prev + + def clear(self): + 'od.clear() -> None. 
Remove all items from od.' + root = self.__root + root.prev = root.next = root + self.__map.clear() + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root.prev + link_prev = link.prev + link_prev.next = root + root.prev = link_prev + else: + link = root.next + link_next = link.next + root.next = link_next + link_next.prev = root + key = link.key + del self.__map[key] + value = dict.pop(self, key) + return key, value + + def move_to_end(self, key, last=True): + '''Move an existing element to the end (or beginning if last==False). + + Raises KeyError if the element does not exist. + When last=True, acts like a fast version of self[key]=self.pop(key). + + ''' + link = self.__map[key] + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + root = self.__root + if last: + last = root.prev + link.prev = last + link.next = root + last.next = root.prev = link + else: + first = root.next + link.prev = root + link.next = first + root.next = first.prev = link + + def __sizeof__(self): + sizeof = _sys.getsizeof + n = len(self) + 1 # number of links including root + size = sizeof(self.__dict__) # instance dictionary + size += sizeof(self.__map) * 2 # internal dict and inherited dict + size += sizeof(self.__hardroot) * n # link objects + size += sizeof(self.__root) * n # proxy objects + return size + + update = __update = MutableMapping.update + keys = MutableMapping.keys + values = MutableMapping.values + items = MutableMapping.items + __ne__ = MutableMapping.__ne__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + @_recursive_repr() + def __repr__(self): + 'od.__repr__() <==> repr(od)' + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self.items())) + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. 
+ + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and \ + all(p==q for p, q in zip(self.items(), other.items())) + return dict.__eq__(self, other) + +# update_wrapper() and wraps() are tools to help write +# wrapper functions that can handle naive introspection + +WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__') +WRAPPER_UPDATES = ('__dict__',) +def update_wrapper(wrapper, + wrapped, + assigned = WRAPPER_ASSIGNMENTS, + updated = WRAPPER_UPDATES): + """Update a wrapper function to look like the wrapped function + + wrapper is the function to be updated + wrapped is the original function + assigned is a tuple naming the attributes assigned directly + from the wrapped function to the wrapper function (defaults to + functools.WRAPPER_ASSIGNMENTS) + updated is a tuple naming the attributes of the wrapper that + are updated with the corresponding attribute from the wrapped + function (defaults to functools.WRAPPER_UPDATES) + """ + wrapper.__wrapped__ = wrapped + for attr in assigned: + try: + value = getattr(wrapped, attr) + except AttributeError: + pass + else: + setattr(wrapper, attr, value) + for attr in updated: + getattr(wrapper, attr).update(getattr(wrapped, attr, {})) + # Return the wrapper so this can be used as a decorator via partial() + return wrapper + +def wraps(wrapped, + assigned = WRAPPER_ASSIGNMENTS, + updated = WRAPPER_UPDATES): + """Decorator factory to apply update_wrapper() to a wrapper function + + Returns a decorator that invokes update_wrapper() with the decorated + function as the wrapper argument and the arguments to wraps() as the + remaining arguments. Default arguments are as for update_wrapper(). + This is a convenience function to simplify applying partial() to + update_wrapper(). + """ + return partial(update_wrapper, wrapped=wrapped, + assigned=assigned, updated=updated) + +def total_ordering(cls): + """Class decorator that fills in missing ordering methods""" + convert = { + '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), + ('__le__', lambda self, other: self < other or self == other), + ('__ge__', lambda self, other: not self < other)], + '__le__': [('__ge__', lambda self, other: not self <= other or self == other), + ('__lt__', lambda self, other: self <= other and not self == other), + ('__gt__', lambda self, other: not self <= other)], + '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), + ('__ge__', lambda self, other: self > other or self == other), + ('__le__', lambda self, other: not self > other)], + '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other), + ('__gt__', lambda self, other: self >= other and not self == other), + ('__lt__', lambda self, other: not self >= other)] + } + roots = set(dir(cls)) & set(convert) + if not roots: + raise ValueError('must define at least one ordering operation: < > <= >=') + root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ + for opname, opfunc in convert[root]: + if opname not in roots: + opfunc.__name__ = opname + opfunc.__doc__ = getattr(int, opname).__doc__ + setattr(cls, opname, opfunc) + return cls + +def cmp_to_key(mycmp): + """Convert a cmp= function into a key= function""" + class K(object): + __slots__ = ['obj'] + def __init__(self, obj): + self.obj = obj + def __lt__(self, other): + return mycmp(self.obj, other.obj) < 0 + def __gt__(self, other): + return mycmp(self.obj, other.obj) > 0 + def __eq__(self, other): + return mycmp(self.obj, other.obj) == 0 + def __le__(self, 
other): + return mycmp(self.obj, other.obj) <= 0 + def __ge__(self, other): + return mycmp(self.obj, other.obj) >= 0 + def __ne__(self, other): + return mycmp(self.obj, other.obj) != 0 + __hash__ = None + return K + +_CacheInfo = namedtuple("CacheInfo", "hits misses maxsize currsize") + +def lru_cache(maxsize=100): + """Least-recently-used cache decorator. + + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) with + f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used + + """ + # Users should only access the lru_cache through its public API: + # cache_info, cache_clear, and f.__wrapped__ + # The internals of the lru_cache are encapsulated for thread safety and + # to allow the implementation to change (including a possible C version). + + def decorating_function(user_function, + tuple=tuple, sorted=sorted, len=len, KeyError=KeyError): + + hits, misses = [0], [0] + kwd_mark = (object(),) # separates positional and keyword args + lock = Lock() # needed because OrderedDict isn't threadsafe + + if maxsize is None: + cache = dict() # simple cache without ordering or size limit + + @wraps(user_function) + def wrapper(*args, **kwds): + key = args + if kwds: + key += kwd_mark + tuple(sorted(kwds.items())) + try: + result = cache[key] + hits[0] += 1 + return result + except KeyError: + pass + result = user_function(*args, **kwds) + cache[key] = result + misses[0] += 1 + return result + else: + cache = OrderedDict() # ordered least recent to most recent + cache_popitem = cache.popitem + cache_renew = cache.move_to_end + + @wraps(user_function) + def wrapper(*args, **kwds): + key = args + if kwds: + key += kwd_mark + tuple(sorted(kwds.items())) + with lock: + try: + result = cache[key] + cache_renew(key) # record recent use of this key + hits[0] += 1 + return result + except KeyError: + pass + result = user_function(*args, **kwds) + with lock: + cache[key] = result # record recent use of this key + misses[0] += 1 + if len(cache) > maxsize: + cache_popitem(0) # purge least recently used cache entry + return result + + def cache_info(): + """Report cache statistics""" + with lock: + return _CacheInfo(hits[0], misses[0], maxsize, len(cache)) + + def cache_clear(): + """Clear the cache and cache statistics""" + with lock: + cache.clear() + hits[0] = misses[0] = 0 + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return wrapper + + return decorating_function diff --git a/openpype/vendor/python/python_2/functools32/reprlib32.py b/openpype/vendor/python/python_2/functools32/reprlib32.py new file mode 100644 index 0000000000..af919758ca --- /dev/null +++ b/openpype/vendor/python/python_2/functools32/reprlib32.py @@ -0,0 +1,157 @@ +"""Redo the builtin repr() (representation) but with limits on most sizes.""" + +__all__ = ["Repr", "repr", "recursive_repr"] + +import __builtin__ as builtins +from itertools import islice +try: + from thread import get_ident +except ImportError: + from _dummy_thread32 import get_ident + +def recursive_repr(fillvalue='...'): + 'Decorator to make a repr function return fillvalue for a recursive call' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in 
repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) + return wrapper + + return decorating_function + +class Repr: + + def __init__(self): + self.maxlevel = 6 + self.maxtuple = 6 + self.maxlist = 6 + self.maxarray = 5 + self.maxdict = 4 + self.maxset = 6 + self.maxfrozenset = 6 + self.maxdeque = 6 + self.maxstring = 30 + self.maxlong = 40 + self.maxother = 30 + + def repr(self, x): + return self.repr1(x, self.maxlevel) + + def repr1(self, x, level): + typename = type(x).__name__ + if ' ' in typename: + parts = typename.split() + typename = '_'.join(parts) + if hasattr(self, 'repr_' + typename): + return getattr(self, 'repr_' + typename)(x, level) + else: + return self.repr_instance(x, level) + + def _repr_iterable(self, x, level, left, right, maxiter, trail=''): + n = len(x) + if level <= 0 and n: + s = '...' + else: + newlevel = level - 1 + repr1 = self.repr1 + pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)] + if n > maxiter: pieces.append('...') + s = ', '.join(pieces) + if n == 1 and trail: right = trail + right + return '%s%s%s' % (left, s, right) + + def repr_tuple(self, x, level): + return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',') + + def repr_list(self, x, level): + return self._repr_iterable(x, level, '[', ']', self.maxlist) + + def repr_array(self, x, level): + header = "array('%s', [" % x.typecode + return self._repr_iterable(x, level, header, '])', self.maxarray) + + def repr_set(self, x, level): + x = _possibly_sorted(x) + return self._repr_iterable(x, level, 'set([', '])', self.maxset) + + def repr_frozenset(self, x, level): + x = _possibly_sorted(x) + return self._repr_iterable(x, level, 'frozenset([', '])', + self.maxfrozenset) + + def repr_deque(self, x, level): + return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque) + + def repr_dict(self, x, level): + n = len(x) + if n == 0: return '{}' + if level <= 0: return '{...}' + newlevel = level - 1 + repr1 = self.repr1 + pieces = [] + for key in islice(_possibly_sorted(x), self.maxdict): + keyrepr = repr1(key, newlevel) + valrepr = repr1(x[key], newlevel) + pieces.append('%s: %s' % (keyrepr, valrepr)) + if n > self.maxdict: pieces.append('...') + s = ', '.join(pieces) + return '{%s}' % (s,) + + def repr_str(self, x, level): + s = builtins.repr(x[:self.maxstring]) + if len(s) > self.maxstring: + i = max(0, (self.maxstring-3)//2) + j = max(0, self.maxstring-3-i) + s = builtins.repr(x[:i] + x[len(x)-j:]) + s = s[:i] + '...' + s[len(s)-j:] + return s + + def repr_int(self, x, level): + s = builtins.repr(x) # XXX Hope this isn't too slow... + if len(s) > self.maxlong: + i = max(0, (self.maxlong-3)//2) + j = max(0, self.maxlong-3-i) + s = s[:i] + '...' + s[len(s)-j:] + return s + + def repr_instance(self, x, level): + try: + s = builtins.repr(x) + # Bugs in x.__repr__() can cause arbitrary + # exceptions -- then make up something + except Exception: + return '<%s instance at %x>' % (x.__class__.__name__, id(x)) + if len(s) > self.maxother: + i = max(0, (self.maxother-3)//2) + j = max(0, self.maxother-3-i) + s = s[:i] + '...' 
+ s[len(s)-j:] + return s + + +def _possibly_sorted(x): + # Since not all sequences of items can be sorted and comparison + # functions may raise arbitrary exceptions, return an unsorted + # sequence in that case. + try: + return sorted(x) + except Exception: + return list(x) + +aRepr = Repr() +repr = aRepr.repr diff --git a/openpype/version.py b/openpype/version.py index b41951a34c..1ef25e3f48 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.9.0-nightly.5" +__version__ = "3.9.1" diff --git a/poetry.lock b/poetry.lock index b6eba33e0a..ee7b839b8d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -674,7 +674,7 @@ ansicon = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "jsonschema" -version = "3.2.0" +version = "2.6.0" description = "An implementation of JSON Schema validation for Python" category = "main" optional = false @@ -1219,6 +1219,26 @@ category = "main" optional = false python-versions = "*" +[[package]] +name = "qtawesome" +version = "0.7.3" +description = "FontAwesome icons in PyQt and PySide applications" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +qtpy = "*" +six = "*" + +[[package]] +name = "qtpy" +version = "1.11.3" +description = "Provides an abstraction layer on top of the various Qt bindings (PyQt5, PyQt4 and PySide) and additional custom QWidgets." +category = "main" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*" + [[package]] name = "recommonmark" version = "0.7.1" @@ -2101,8 +2121,8 @@ jinxed = [ {file = "jinxed-1.1.0.tar.gz", hash = "sha256:d8f1731f134e9e6b04d95095845ae6c10eb15cb223a5f0cabdea87d4a279c305"}, ] jsonschema = [ - {file = "jsonschema-3.2.0-py2.py3-none-any.whl", hash = "sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163"}, - {file = "jsonschema-3.2.0.tar.gz", hash = "sha256:c8a85b28d377cc7737e46e2d9f2b4f44ee3c0e1deac6bf46ddefc7187d30797a"}, + {file = "jsonschema-2.6.0-py2.py3-none-any.whl", hash = "sha256:000e68abd33c972a5248544925a0cae7d1125f9bf6c58280d37546b946769a08"}, + {file = "jsonschema-2.6.0.tar.gz", hash = "sha256:6ff5f3180870836cae40f06fa10419f557208175f13ad7bc26caa77beb1f6e02"}, ] keyring = [ {file = "keyring-22.4.0-py3-none-any.whl", hash = "sha256:d6c531f6d12f3304db6029af1d19894bd446ecbbadd22465fa0f096b3e12d258"}, @@ -2651,6 +2671,14 @@ pywin32-ctypes = [ {file = "Qt.py-1.3.6-py2.py3-none-any.whl", hash = "sha256:7edf6048d07a6924707506b5ba34a6e05d66dde9a3f4e3a62f9996ccab0b91c7"}, {file = "Qt.py-1.3.6.tar.gz", hash = "sha256:0d78656a2f814602eee304521c7bf5da0cec414818b3833712c77524294c404a"}, ] +qtawesome = [ + {file = "QtAwesome-0.7.3-py2.py3-none-any.whl", hash = "sha256:ddf4530b4af71cec13b24b88a4cdb56ec85b1e44c43c42d0698804c7137b09b0"}, + {file = "QtAwesome-0.7.3.tar.gz", hash = "sha256:b98b9038d19190e83ab26d91c4d8fc3a36591ee2bc7f5016d4438b8240d097bd"}, +] +qtpy = [ + {file = "QtPy-1.11.3-py2.py3-none-any.whl", hash = "sha256:e121fbee8e95645af29c5a4aceba8d657991551fc1aa3b6b6012faf4725a1d20"}, + {file = "QtPy-1.11.3.tar.gz", hash = "sha256:d427addd37386a8d786db81864a5536700861d95bf085cb31d1bea855d699557"}, +] recommonmark = [ {file = "recommonmark-0.7.1-py2.py3-none-any.whl", hash = "sha256:1b1db69af0231efce3fa21b94ff627ea33dee7079a01dd0a7f8482c3da148b3f"}, {file = "recommonmark-0.7.1.tar.gz", hash = "sha256:bdb4db649f2222dcd8d2d844f0006b958d627f732415d399791ee436a3686d67"}, diff --git a/pyproject.toml 
b/pyproject.toml index 851bf3f735..7c09495a99 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.9.0-nightly.5" # OpenPype +version = "3.9.1" # OpenPype description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" @@ -41,7 +41,7 @@ Click = "^7" dnspython = "^2.1.0" ftrack-python-api = "2.0.*" google-api-python-client = "^1.12.8" # sync server google support (should be separate?) -jsonschema = "^3.2.0" +jsonschema = "^2.6.0" keyring = "^22.0.1" log4mongo = "^1.7" pathlib2= "^2.3.5" # deadline submit publish job only (single place, maybe not needed?) @@ -50,6 +50,8 @@ pyblish-base = "^1.8.8" pynput = "^1.7.2" # idle manager in tray pymongo = "^3.11.2" "Qt.py" = "^1.3.3" +qtpy = "^1.11.3" +qtawesome = "0.7.3" speedcopy = "^2.1" six = "^1.15" semver = "^2.13.0" # for version resolution diff --git a/tests/README.md b/tests/README.md index bb1cdbdef8..69828cdbc2 100644 --- a/tests/README.md +++ b/tests/README.md @@ -21,3 +21,27 @@ Specific location could be provided to this command as an argument, either as ab (eg. `python ${OPENPYPE_ROOT}/start.py start.py runtests ../tests/integration`) will trigger only tests in `integration` folder. See `${OPENPYPE_ROOT}/cli.py:runtests` for other arguments. + +Run in IDE: +----------- +If you prefer to run/debug a single file directly in the IDE of your choice, you might encounter import issues. +These typically manifest as `KeyError: 'OPENPYPE_DATABASE_NAME'`, which means you are importing a module that expects OpenPype to be running, e.g. with all expected environment variables set. + +In some cases your tests might be so localized that you don't care about all env vars being set properly. +In that case you can add this dummy configuration BEFORE any imports in your test file: +``` +import os +os.environ["AVALON_MONGO"] = "mongodb://localhost:27017" +os.environ["OPENPYPE_MONGO"] = "mongodb://localhost:27017" +os.environ["AVALON_DB"] = "avalon" +os.environ["OPENPYPE_DATABASE_NAME"] = "openpype" +os.environ["AVALON_TIMEOUT"] = '3000' +os.environ["OPENPYPE_DEBUG"] = "3" +os.environ["AVALON_CONFIG"] = "pype" +os.environ["AVALON_ASSET"] = "Asset" +os.environ["AVALON_PROJECT"] = "test_project" +``` +(the AVALON_ASSET and AVALON_PROJECT values should exist in your environment) + +This might be enough to run your test file separately. Do not commit this skeleton though. +Use it only when you know what you are doing!
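If your IDE has no convenient pytest runner, one further option (this is not part of the OpenPype tooling and assumes `pytest` is importable in the interpreter the IDE uses) is to append a small `__main__` block to the same test file so it can be launched as a plain script:
```
import sys

import pytest

if __name__ == "__main__":
    # Runs only this file, stops on the first failure, prints verbose output.
    sys.exit(pytest.main(["-x", "-v", __file__]))
```
Remove this block, together with the environment skeleton above, before committing.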
\ No newline at end of file diff --git a/tests/unit/openpype/lib/test_delivery.py b/tests/unit/openpype/lib/test_delivery.py new file mode 100644 index 0000000000..04a71655e3 --- /dev/null +++ b/tests/unit/openpype/lib/test_delivery.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +"""Test suite for delivery functions.""" +from openpype.lib.delivery import collect_frames + + +def test_collect_frames_multi_sequence(): + files = ["Asset_renderCompositingMain_v001.0000.png", + "Asset_renderCompositingMain_v001.0001.png", + "Asset_renderCompositingMain_v001.0002.png"] + ret = collect_frames(files) + + expected = { + "Asset_renderCompositingMain_v001.0000.png": "0000", + "Asset_renderCompositingMain_v001.0001.png": "0001", + "Asset_renderCompositingMain_v001.0002.png": "0002" + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_multi_sequence_different_format(): + files = ["Asset.v001.renderCompositingMain.0000.png", + "Asset.v001.renderCompositingMain.0001.png", + "Asset.v001.renderCompositingMain.0002.png"] + ret = collect_frames(files) + + expected = { + "Asset.v001.renderCompositingMain.0000.png": "0000", + "Asset.v001.renderCompositingMain.0001.png": "0001", + "Asset.v001.renderCompositingMain.0002.png": "0002" + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence(): + files = ["Asset_renderCompositingMain_v001.0000.png"] + ret = collect_frames(files) + + expected = { + "Asset_renderCompositingMain_v001.0000.png": "0000" + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_negative(): + files = ["Asset_renderCompositingMain_v001.-0000.png"] + ret = collect_frames(files) + + expected = { + "Asset_renderCompositingMain_v001.-0000.png": None + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_shot(): + files = ["testing_sh010_workfileCompositing_v001.aep"] + ret = collect_frames(files) + + expected = { + "testing_sh010_workfileCompositing_v001.aep": None + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_numbers(): + files = ["PRJ_204_430_0005_renderLayoutMain_v001.0001.exr"] + ret = collect_frames(files) + + expected = { + "PRJ_204_430_0005_renderLayoutMain_v001.0001.exr": "0001" + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_shot_with_frame(): + files = ["testing_sh010_workfileCompositing_000_v001.aep"] + ret = collect_frames(files) + + expected = { + "testing_sh010_workfileCompositing_000_v001.aep": None + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_full_path(): + files = ['C:/test_project/assets/locations/Town/work/compositing\\renders\\aftereffects\\test_project_TestAsset_compositing_v001\\TestAsset_renderCompositingMain_v001.mov'] # noqa: E501 + ret = collect_frames(files) + + expected = { + 'C:/test_project/assets/locations/Town/work/compositing\\renders\\aftereffects\\test_project_TestAsset_compositing_v001\\TestAsset_renderCompositingMain_v001.mov': None # noqa: E501 + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_different_format(): + files = ["Asset.v001.renderCompositingMain_0000.png"] + ret = collect_frames(files) + + expected = { + "Asset.v001.renderCompositingMain_0000.png": None + } + + print(ret) + assert ret == expected, "Not matching" + + +def 
diff --git a/tools/ci_tools.py b/tools/ci_tools.py
index aeb367af38..4c59cd6af6 100644
--- a/tools/ci_tools.py
+++ b/tools/ci_tools.py
@@ -8,8 +8,12 @@ import os
 
 def get_release_type_github(Log, github_token):
     # print(Log)
-    minor_labels = ["type: feature", "type: deprecated"]
-    patch_labels = ["type: enhancement", "type: bug"]
+    minor_labels = ["Bump Minor"]
+    # patch_labels = [
+    #     "type: enhancement",
+    #     "type: bug",
+    #     "type: deprecated",
+    #     "type: Feature"]
 
     g = Github(github_token)
     repo = g.get_repo("pypeclub/OpenPype")
@@ -28,9 +32,12 @@ def get_release_type_github(Log, github_token):
 
         if any(label in labels for label in minor_labels):
             return "minor"
-
-        if any(label in labels for label in patch_labels):
+        else:
             return "patch"
+
+    # TODO: if all is working fine, this part can be cleaned up eventually
+    # if any(label in labels for label in patch_labels):
+    #     return "patch"
 
     return None
diff --git a/website/docs/artist_hosts_aftereffects.md b/website/docs/artist_hosts_aftereffects.md
index f9ef40fe1a..a9660bd13c 100644
--- a/website/docs/artist_hosts_aftereffects.md
+++ b/website/docs/artist_hosts_aftereffects.md
@@ -15,7 +15,7 @@ sidebar_label: AfterEffects
 
 ## Setup
 
-To install the extension, download, install [Anastasyi's Extension Manager](https://install.anastasiy.com/). Open Anastasyi's Extension Manager and select AfterEffects in menu. Then go to `{path to pype}/repos/avalon-core/avalon/aftereffects/extension.zxp`.
+To install the extension, download and install [Anastasiy's Extension Manager](https://install.anastasiy.com/). Open Anastasiy's Extension Manager and select AfterEffects in the menu. Then go to `{path to pype}/hosts/aftereffects/api/extension.zxp`.
 Drag extension.zxp and drop it to Anastasyi's Extension Manager. The extension will install itself.
 
diff --git a/website/docs/artist_hosts_photoshop.md b/website/docs/artist_hosts_photoshop.md
index 16539bcf79..b2b5fd58da 100644
--- a/website/docs/artist_hosts_photoshop.md
+++ b/website/docs/artist_hosts_photoshop.md
@@ -14,7 +14,7 @@ sidebar_label: Photoshop
 
 ## Setup
 
-To install the extension, download, install [Anastasyi's Extension Manager](https://install.anastasiy.com/). Open Anastasyi's Extension Manager and select Photoshop in menu. Then go to `{path to pype}/repos/avalon-core/avalon/photoshop/extension.zxp`. Drag extension.zxp and drop it to Anastasyi's Extension Manager. The extension will install itself.
+To install the extension, download and install [Anastasiy's Extension Manager](https://install.anastasiy.com/). Open Anastasiy's Extension Manager and select Photoshop in the menu. Then go to `{path to pype}/hosts/photoshop/api/extension.zxp`. Drag extension.zxp and drop it into Anastasiy's Extension Manager. The extension will install itself.
 
 ## Usage
 
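Referring back to the `tools/ci_tools.py` change above: the release type is now decided purely from PR labels, where a merged PR carrying the "Bump Minor" label bumps the minor version and any other labelled PR counts as a patch. A minimal sketch of that per-PR decision, with a hypothetical helper name and without the GitHub API calls, might look like this.

```python
# Sketch only; label names are taken from the ci_tools.py diff above,
# the helper name and signature are hypothetical.
MINOR_LABELS = {"Bump Minor"}


def release_type_for_pr(pr_label_names):
    """Mirror the per-PR label check in get_release_type_github."""
    if any(label in MINOR_LABELS for label in pr_label_names):
        return "minor"
    return "patch"


assert release_type_for_pr(["Bump Minor", "type: feature"]) == "minor"
assert release_type_for_pr(["type: bug"]) == "patch"
```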
diff --git a/website/src/components/BadgesSection/badges.js b/website/src/components/BadgesSection/badges.js
index 4bc85df2ef..5b179d066d 100644
--- a/website/src/components/BadgesSection/badges.js
+++ b/website/src/components/BadgesSection/badges.js
@@ -1,58 +1,63 @@
 export default {
   upper: [
-    {
-      title: "License",
-      src:
-        "https://img.shields.io/github/license/pypeclub/pype?labelColor=303846",
-      href: "https://github.com/pypeclub/pype",
-    },
-    {
-      title: "Release",
-      src:
-        "https://img.shields.io/github/v/release/pypeclub/pype?labelColor=303846",
-      href: "https://github.com/pypeclub/pype",
-    },
-    {
-      title: "Requirements State",
-      src:
-        "https://img.shields.io/requires/github/pypeclub/pype?labelColor=303846",
-      href:
-        "https://requires.io/github/pypeclub/pype/requirements/?branch=main",
-    },
     {
       title: "VFX Platform",
       src:
         "https://img.shields.io/badge/vfx%20platform-2021-lightgrey?labelColor=303846",
       href: "https://vfxplatform.com",
     },
+    {
+      title: "License",
+      src:
+        "https://img.shields.io/github/license/pypeclub/openpype?labelColor=303846",
+      href: "https://github.com/pypeclub/openpype",
+    },
+    {
+      title: "Release",
+      src:
+        "https://img.shields.io/github/v/release/pypeclub/openpype?labelColor=303846",
+      href: "https://github.com/pypeclub/openpype",
+    },
     {
       title: "GitHub last commit",
       src:
-        "https://img.shields.io/github/last-commit/pypeclub/pype/develop?labelColor=303846",
-      href: "https://github.com/pypeclub/pype",
+        "https://img.shields.io/github/last-commit/pypeclub/openpype/develop?labelColor=303846",
+      href: "https://github.com/pypeclub/openpype",
     },
     {
       title: "GitHub commit activity",
       src:
-        "https://img.shields.io/github/commit-activity/y/pypeclub/pype?labelColor=303846",
-      href: "https://github.com/pypeclub/pype",
+        "https://img.shields.io/github/commit-activity/y/pypeclub/openpype?labelColor=303846",
+      href: "https://github.com/pypeclub/openpype",
     },
     {
       title: "Repository Size",
       src:
-        "https://img.shields.io/github/repo-size/pypeclub/pype?labelColor=303846",
-      href: "https://github.com/pypeclub/pype",
+        "https://img.shields.io/github/repo-size/pypeclub/openpype?labelColor=303846",
+      href: "https://github.com/pypeclub/openpype",
+    },
+    {
+      title: "Contributors",
+      src:
+        "https://img.shields.io/github/contributors/pypeclub/openpype?labelColor=303846",
+      href: "https://github.com/pypeclub/openpype",
+    },
+    {
+      title: "Stars",
+      src:
+        "https://img.shields.io/github/stars/pypeclub?labelColor=303846",
+      href: "https://github.com/pypeclub/openpype",
+    },
     {
       title: "Forks",
       src:
-        "https://img.shields.io/github/forks/pypeclub/pype?style=social&labelColor=303846",
-      href: "https://github.com/pypeclub/pype",
+        "https://img.shields.io/github/forks/pypeclub/openpype?labelColor=303846",
+      href: "https://github.com/pypeclub/openpype",
     },
     {
       title: "Discord",
       src:
-        "https://img.shields.io/discord/517362899170230292?label=discord&logo=discord&logoColor=white&labelColor=303846",
+        "https://img.shields.io/discord/517362899170230292?label=discord&logo=discord&logoColor=white&labelColor=303846",
       href: "https://discord.gg/sFNPWXG",
     },
   ],
diff --git a/website/src/pages/index.js b/website/src/pages/index.js
index 29b81e973f..791b309bbc 100644
--- a/website/src/pages/index.js
+++ b/website/src/pages/index.js
@@ -129,6 +129,21 @@ const studios = [
     title: "Moonrock Animation Studio",
     image: "/img/moonrock_logo.png",
     infoLink: "https://www.moonrock.eu/",
+  },
+  {
+    title: "Lumine Studio",
+    image: "/img/LUMINE_LogoMaster_black_2k.png",
+    infoLink: "https://www.luminestudio.com/",
+  },
+  {
+    title: "Overmind Studios",
+    image: "/img/OMS_logo_black_color.png",
+    infoLink: "https://www.overmind-studios.de/",
+  },
+  {
+    title: "Ember Light",
+    image: "/img/EmberLight_black.png",
+    infoLink: "https://emberlight.se/",
+  }
 ];
@@ -275,107 +290,121 @@ function Home() {
@@ -409,7 +438,7 @@ function Home() {
           {studios && studios.length && (
-            Studios using openPYPE
+            Studios using openPype
             {studios.map((props, idx) => (
diff --git a/website/static/img/EmberLight_black.png b/website/static/img/EmberLight_black.png
new file mode 100644
index 0000000000..e8a9f95d06
Binary files /dev/null and b/website/static/img/EmberLight_black.png differ
diff --git a/website/static/img/Ember_Light.jpg b/website/static/img/Ember_Light.jpg
new file mode 100644
index 0000000000..9081bc7cea
Binary files /dev/null and b/website/static/img/Ember_Light.jpg differ
diff --git a/website/static/img/LUMINE_LogoMaster_black_2k.png b/website/static/img/LUMINE_LogoMaster_black_2k.png
new file mode 100644
index 0000000000..37ef01486d
Binary files /dev/null and b/website/static/img/LUMINE_LogoMaster_black_2k.png differ
diff --git a/website/static/img/OMS_logo_black_color.png b/website/static/img/OMS_logo_black_color.png
new file mode 100644
index 0000000000..9046927e32
Binary files /dev/null and b/website/static/img/OMS_logo_black_color.png differ
diff --git a/website/static/img/app_aquarium.png b/website/static/img/app_aquarium.png
new file mode 100644
index 0000000000..1ff0acb8aa
Binary files /dev/null and b/website/static/img/app_aquarium.png differ
diff --git a/website/static/img/app_flame.png b/website/static/img/app_flame.png
new file mode 100644
index 0000000000..ba9b69e45f
Binary files /dev/null and b/website/static/img/app_flame.png differ
diff --git a/website/static/img/app_kitsu.png b/website/static/img/app_kitsu.png
new file mode 100644
index 0000000000..b4e3d00ebd
Binary files /dev/null and b/website/static/img/app_kitsu.png differ
diff --git a/website/static/img/app_shotgrid.png b/website/static/img/app_shotgrid.png
new file mode 100644
index 0000000000..102e70b049
Binary files /dev/null and b/website/static/img/app_shotgrid.png differ